xref: /openbmc/qemu/hw/intc/arm_gicv3_its.c (revision c694cb4cada0cd6c646f704e868072bbd4f55798)
1 /*
2  * ITS emulation for a GICv3-based system
3  *
4  * Copyright Linaro.org 2021
5  *
6  * Authors:
7  *  Shashi Mallela <shashi.mallela@linaro.org>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10  * option) any later version.  See the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qemu/log.h"
16 #include "hw/qdev-properties.h"
17 #include "hw/intc/arm_gicv3_its_common.h"
18 #include "gicv3_internal.h"
19 #include "qom/object.h"
20 #include "qapi/error.h"
21 
22 typedef struct GICv3ITSClass GICv3ITSClass;
23 /* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
24 DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
25                      ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)
26 
27 struct GICv3ITSClass {
28     GICv3ITSCommonClass parent_class;
29     void (*parent_reset)(DeviceState *dev);
30 };
31 
32 /*
33  * This is an internal enum used to distinguish between an LPI triggered
34  * via the command queue and an LPI triggered via a GITS_TRANSLATER write.
35  */
36 typedef enum ItsCmdType {
37     NONE = 0, /* internal indication for GITS_TRANSLATER write */
38     CLEAR = 1,
39     DISCARD = 2,
40     INTERRUPT = 3,
41 } ItsCmdType;
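/*
 * Note: NONE is not a guest-visible command type; it is how
 * gicv3_its_translation_write() below reuses process_its_cmd() for an
 * MSI write to GITS_TRANSLATER, roughly (illustrative sketch only):
 *
 *     // devid comes from MemTxAttrs.requester_id of the MSI write
 *     process_its_cmd(s, data, attrs.requester_id, NONE);
 *
 * INTERRUPT, CLEAR and DISCARD are used when the same routine is called
 * from process_cmdq() for the corresponding queued commands.
 */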
42 
43 typedef struct {
44     uint32_t iteh;
45     uint64_t itel;
46 } IteEntry;
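/*
 * Each ITE occupies sizeof(uint64_t) + sizeof(uint32_t) == 12 bytes in
 * the guest's Interrupt Translation Table; update_ite() and get_ite()
 * below use that value as the per-EventID stride when addressing the
 * table.
 */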
47 
48 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
49 {
50     uint64_t result = 0;
51 
52     switch (page_sz) {
53     case GITS_PAGE_SIZE_4K:
54     case GITS_PAGE_SIZE_16K:
55         result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
56         break;
57 
58     case GITS_PAGE_SIZE_64K:
59         result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
60         result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
61         break;
62 
63     default:
64         break;
65     }
66     return result;
67 }
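/*
 * Worked example (an illustrative sketch only): with a 64K page size, a
 * GITS_BASER value whose PHYADDRL_64K field is 0x12345 and whose
 * PHYADDRH_64K field is 0x3 decodes to
 *
 *     (0x12345ULL << 16) | (0x3ULL << 48) == 0x0003000123450000
 *
 * For the 4K and 16K page sizes the base is simply the PHYADDR field
 * shifted up to bit 12.
 */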
68 
69 static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
70                     MemTxResult *res)
71 {
72     AddressSpace *as = &s->gicv3->dma_as;
73     uint64_t l2t_addr;
74     uint64_t value;
75     bool valid_l2t;
76     uint32_t l2t_id;
77     uint32_t max_l2_entries;
78 
79     if (s->ct.indirect) {
80         l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
81 
82         value = address_space_ldq_le(as,
83                                      s->ct.base_addr +
84                                      (l2t_id * L1TABLE_ENTRY_SIZE),
85                                      MEMTXATTRS_UNSPECIFIED, res);
86 
87         if (*res == MEMTX_OK) {
88             valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
89 
90             if (valid_l2t) {
91                 max_l2_entries = s->ct.page_sz / s->ct.entry_sz;
92 
93                 l2t_addr = value & ((1ULL << 51) - 1);
94 
95                 *cte =  address_space_ldq_le(as, l2t_addr +
96                                     ((icid % max_l2_entries) * GITS_CTE_SIZE),
97                                     MEMTXATTRS_UNSPECIFIED, res);
98             }
99         }
100     } else {
101         /* Flat level table */
102         *cte =  address_space_ldq_le(as, s->ct.base_addr +
103                                      (icid * GITS_CTE_SIZE),
104                                       MEMTXATTRS_UNSPECIFIED, res);
105     }
106 
107     return (*cte & TABLE_ENTRY_VALID_MASK) != 0;
108 }
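/*
 * Worked example for the indirect (two-level) walk above, assuming
 * L1TABLE_ENTRY_SIZE == 8, an entry size of 8 bytes and a 4K page size
 * (assumed values for illustration only):
 *
 *     icid = 1000
 *     l2t_id         = 1000 / (4096 / 8) = 1   -> L1 entry at base + 8
 *     max_l2_entries = 4096 / 8          = 512
 *     CTE address    = l2t_addr + (1000 % 512) * GITS_CTE_SIZE
 *
 * The L1 entry's valid bit (L2_TABLE_VALID_MASK) gates the second read;
 * get_cte() returns whether the CTE's own valid bit is set.
 */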
109 
110 static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
111                        IteEntry ite)
112 {
113     AddressSpace *as = &s->gicv3->dma_as;
114     uint64_t itt_addr;
115     MemTxResult res = MEMTX_OK;
116 
117     itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> GITS_DTE_ITTADDR_SHIFT;
118     itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
119 
120     address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
121                          sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED,
122                          &res);
123 
124     if (res == MEMTX_OK) {
125         address_space_stl_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
126                              sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh,
127                              MEMTXATTRS_UNSPECIFIED, &res);
128     }
129     if (res != MEMTX_OK) {
130         return false;
131     } else {
132         return true;
133     }
134 }
135 
136 static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
137                     uint16_t *icid, uint32_t *pIntid, MemTxResult *res)
138 {
139     AddressSpace *as = &s->gicv3->dma_as;
140     uint64_t itt_addr;
141     bool status = false;
142     IteEntry ite = {};
143 
144     itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> GITS_DTE_ITTADDR_SHIFT;
145     itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
146 
147     ite.itel = address_space_ldq_le(as, itt_addr +
148                                     (eventid * (sizeof(uint64_t) +
149                                     sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED,
150                                     res);
151 
152     if (*res == MEMTX_OK) {
153         ite.iteh = address_space_ldl_le(as, itt_addr +
154                                         (eventid * (sizeof(uint64_t) +
155                                         sizeof(uint32_t))) + sizeof(uint32_t),
156                                         MEMTXATTRS_UNSPECIFIED, res);
157 
158         if (*res == MEMTX_OK) {
159             if (ite.itel & TABLE_ENTRY_VALID_MASK) {
160                 if ((ite.itel >> ITE_ENTRY_INTTYPE_SHIFT) &
161                     GITS_TYPE_PHYSICAL) {
162                     *pIntid = (ite.itel & ITE_ENTRY_INTID_MASK) >>
163                                ITE_ENTRY_INTID_SHIFT;
164                     *icid = ite.iteh & ITE_ENTRY_ICID_MASK;
165                     status = true;
166                 }
167             }
168         }
169     }
170     return status;
171 }
172 
173 static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
174 {
175     AddressSpace *as = &s->gicv3->dma_as;
176     uint64_t l2t_addr;
177     uint64_t value;
178     bool valid_l2t;
179     uint32_t l2t_id;
180     uint32_t max_l2_entries;
181 
182     if (s->dt.indirect) {
183         l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
184 
185         value = address_space_ldq_le(as,
186                                      s->dt.base_addr +
187                                      (l2t_id * L1TABLE_ENTRY_SIZE),
188                                      MEMTXATTRS_UNSPECIFIED, res);
189 
190         if (*res == MEMTX_OK) {
191             valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
192 
193             if (valid_l2t) {
194                 max_l2_entries = s->dt.page_sz / s->dt.entry_sz;
195 
196                 l2t_addr = value & ((1ULL << 51) - 1);
197 
198                 value =  address_space_ldq_le(as, l2t_addr +
199                                    ((devid % max_l2_entries) * GITS_DTE_SIZE),
200                                    MEMTXATTRS_UNSPECIFIED, res);
201             }
202         }
203     } else {
204         /* Flat level table */
205         value = address_space_ldq_le(as, s->dt.base_addr +
206                                      (devid * GITS_DTE_SIZE),
207                                      MEMTXATTRS_UNSPECIFIED, res);
208     }
209 
210     return value;
211 }
212 
213 /*
214  * This function handles the processing of the following commands based on
215  * the ItsCmdType parameter passed in:
216  * 1. triggering of LPI interrupt translation via the ITS INT command
217  * 2. triggering of LPI interrupt translation via a GITS_TRANSLATER write
218  * 3. handling of the ITS CLEAR command
219  * 4. handling of the ITS DISCARD command
220  */
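/*
 * Worked example of the DTE decode used below (a sketch; the field
 * placement follows update_dte() later in this file): the valid flag is
 * DTE bit 0 and the MAPD Size field sits immediately above it, so for a
 * device mapped with Size = 7 (i.e. 8 EventID bits)
 *
 *     max_eventid = 1UL << (((dte >> 1) & SIZE_MASK) + 1) == 256
 *
 * giving 256 event IDs for that device.
 */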
221 static bool process_its_cmd(GICv3ITSState *s, uint64_t value, uint32_t offset,
222                             ItsCmdType cmd)
223 {
224     AddressSpace *as = &s->gicv3->dma_as;
225     uint32_t devid, eventid;
226     MemTxResult res = MEMTX_OK;
227     bool dte_valid;
228     uint64_t dte = 0;
229     uint32_t max_eventid;
230     uint16_t icid = 0;
231     uint32_t pIntid = 0;
232     bool ite_valid = false;
233     uint64_t cte = 0;
234     bool cte_valid = false;
235     bool result = false;
236 
237     if (cmd == NONE) {
238         devid = offset;
239     } else {
240         devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
241 
242         offset += NUM_BYTES_IN_DW;
243         value = address_space_ldq_le(as, s->cq.base_addr + offset,
244                                      MEMTXATTRS_UNSPECIFIED, &res);
245     }
246 
247     if (res != MEMTX_OK) {
248         return result;
249     }
250 
251     eventid = (value & EVENTID_MASK);
252 
253     dte = get_dte(s, devid, &res);
254 
255     if (res != MEMTX_OK) {
256         return result;
257     }
258     dte_valid = dte & TABLE_ENTRY_VALID_MASK;
259 
260     if (dte_valid) {
261         max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1));
262 
263         ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res);
264 
265         if (res != MEMTX_OK) {
266             return result;
267         }
268 
269         if (ite_valid) {
270             cte_valid = get_cte(s, icid, &cte, &res);
271         }
272 
273         if (res != MEMTX_OK) {
274             return result;
275         }
276     }
277 
278     if ((devid > s->dt.maxids.max_devids) || !dte_valid || !ite_valid ||
279             !cte_valid || (eventid > max_eventid)) {
280         qemu_log_mask(LOG_GUEST_ERROR,
281                       "%s: invalid command attributes "
282                       "devid %d or eventid %d or invalid dte %d or "
283                       "invalid cte %d or invalid ite %d\n",
284                       __func__, devid, eventid, dte_valid, cte_valid,
285                       ite_valid);
286         /*
287          * In this implementation, in case of error
288          * we ignore this command and move on to the next
289          * command in the queue
290          */
291     } else {
292         /*
293          * The current implementation only supports rdbase == procnum,
294          * hence the rdbase physical address is ignored
295          */
296         if (cmd == DISCARD) {
297             IteEntry ite = {};
298             /* remove mapping from interrupt translation table */
299             result = update_ite(s, eventid, dte, ite);
300         }
301     }
302 
303     return result;
304 }
305 
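/*
 * Handler for the MAPTI and MAPI commands (MAPI is indicated by
 * ignore_pInt == true, in which case the physical INTID is taken to be
 * the EventID). The command is consumed as three doublewords: the first
 * (already in 'value') carries the DeviceID, the second the EventID and,
 * for MAPTI, the pINTID, and the third the ICID. On success a new ITE is
 * written to the device's ITT via update_ite().
 */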
306 static bool process_mapti(GICv3ITSState *s, uint64_t value, uint32_t offset,
307                           bool ignore_pInt)
308 {
309     AddressSpace *as = &s->gicv3->dma_as;
310     uint32_t devid, eventid;
311     uint32_t pIntid = 0;
312     uint32_t max_eventid, max_Intid;
313     bool dte_valid;
314     MemTxResult res = MEMTX_OK;
315     uint16_t icid = 0;
316     uint64_t dte = 0;
317     IteEntry ite;
318     uint32_t int_spurious = INTID_SPURIOUS;
319     bool result = false;
320 
321     devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
322     offset += NUM_BYTES_IN_DW;
323     value = address_space_ldq_le(as, s->cq.base_addr + offset,
324                                  MEMTXATTRS_UNSPECIFIED, &res);
325 
326     if (res != MEMTX_OK) {
327         return result;
328     }
329 
330     eventid = (value & EVENTID_MASK);
331 
332     if (!ignore_pInt) {
333         pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT);
334     }
335 
336     offset += NUM_BYTES_IN_DW;
337     value = address_space_ldq_le(as, s->cq.base_addr + offset,
338                                  MEMTXATTRS_UNSPECIFIED, &res);
339 
340     if (res != MEMTX_OK) {
341         return result;
342     }
343 
344     icid = value & ICID_MASK;
345 
346     dte = get_dte(s, devid, &res);
347 
348     if (res != MEMTX_OK) {
349         return result;
350     }
351     dte_valid = dte & TABLE_ENTRY_VALID_MASK;
352 
353     max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1));
354 
355     if (!ignore_pInt) {
356         max_Intid = (1ULL << (GICD_TYPER_IDBITS + 1)) - 1;
357     }
358 
359     if ((devid > s->dt.maxids.max_devids) || (icid > s->ct.maxids.max_collids)
360             || !dte_valid || (eventid > max_eventid) ||
361             (!ignore_pInt && (((pIntid < GICV3_LPI_INTID_START) ||
362             (pIntid > max_Intid)) && (pIntid != INTID_SPURIOUS)))) {
363         qemu_log_mask(LOG_GUEST_ERROR,
364                       "%s: invalid command attributes "
365                       "devid %d or icid %d or eventid %d or pIntid %d or "
366                       "unmapped dte %d\n", __func__, devid, icid, eventid,
367                       pIntid, dte_valid);
368         /*
369          * In this implementation, in case of error
370          * we ignore this command and move on to the next
371          * command in the queue
372          */
373     } else {
374         /* add ite entry to interrupt translation table */
375         ite.itel = (dte_valid & TABLE_ENTRY_VALID_MASK) |
376                     (GITS_TYPE_PHYSICAL << ITE_ENTRY_INTTYPE_SHIFT);
377 
378         if (ignore_pInt) {
379             ite.itel |= (eventid << ITE_ENTRY_INTID_SHIFT);
380         } else {
381             ite.itel |= (pIntid << ITE_ENTRY_INTID_SHIFT);
382         }
383         ite.itel |= (int_spurious << ITE_ENTRY_INTSP_SHIFT);
384         ite.iteh = icid;
385 
386         result = update_ite(s, eventid, dte, ite);
387     }
388 
389     return result;
390 }
391 
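/*
 * Write (or, when valid is false, clear) the collection table entry for
 * 'icid'. In this implementation a CTE is a single 64-bit word with the
 * valid flag in bit 0 and the redistributor's processor number starting
 * at bit 1, i.e. the encoding built below:
 *
 *     cte = (valid & TABLE_ENTRY_VALID_MASK) | (rdbase << 1ULL);
 */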
392 static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
393                        uint64_t rdbase)
394 {
395     AddressSpace *as = &s->gicv3->dma_as;
396     uint64_t value;
397     uint64_t l2t_addr;
398     bool valid_l2t;
399     uint32_t l2t_id;
400     uint32_t max_l2_entries;
401     uint64_t cte = 0;
402     MemTxResult res = MEMTX_OK;
403 
404     if (!s->ct.valid) {
405         return true;
406     }
407 
408     if (valid) {
409         /* add mapping entry to collection table */
410         cte = (valid & TABLE_ENTRY_VALID_MASK) | (rdbase << 1ULL);
411     }
412 
413     /*
414      * The specification defines the format of level 1 entries of a
415      * 2-level table, but the format of level 2 entries and the format
416      * of flat-mapped tables are IMPDEF.
417      */
418     if (s->ct.indirect) {
419         l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
420 
421         value = address_space_ldq_le(as,
422                                      s->ct.base_addr +
423                                      (l2t_id * L1TABLE_ENTRY_SIZE),
424                                      MEMTXATTRS_UNSPECIFIED, &res);
425 
426         if (res != MEMTX_OK) {
427             return false;
428         }
429 
430         valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
431 
432         if (valid_l2t) {
433             max_l2_entries = s->ct.page_sz / s->ct.entry_sz;
434 
435             l2t_addr = value & ((1ULL << 51) - 1);
436 
437             address_space_stq_le(as, l2t_addr +
438                                  ((icid % max_l2_entries) * GITS_CTE_SIZE),
439                                  cte, MEMTXATTRS_UNSPECIFIED, &res);
440         }
441     } else {
442         /* Flat level table */
443         address_space_stq_le(as, s->ct.base_addr + (icid * GITS_CTE_SIZE),
444                              cte, MEMTXATTRS_UNSPECIFIED, &res);
445     }
446     if (res != MEMTX_OK) {
447         return false;
448     } else {
449         return true;
450     }
451 }
452 
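/*
 * Handler for the MAPC command. Only the third doubleword of the command
 * is of interest here: it carries the ICID, the RDbase (interpreted by
 * this implementation as a processor number) and the valid bit, which
 * are handed on to update_cte().
 */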
453 static bool process_mapc(GICv3ITSState *s, uint32_t offset)
454 {
455     AddressSpace *as = &s->gicv3->dma_as;
456     uint16_t icid;
457     uint64_t rdbase;
458     bool valid;
459     MemTxResult res = MEMTX_OK;
460     bool result = false;
461     uint64_t value;
462 
463     offset += NUM_BYTES_IN_DW;
464     offset += NUM_BYTES_IN_DW;
465 
466     value = address_space_ldq_le(as, s->cq.base_addr + offset,
467                                  MEMTXATTRS_UNSPECIFIED, &res);
468 
469     if (res != MEMTX_OK) {
470         return result;
471     }
472 
473     icid = value & ICID_MASK;
474 
475     rdbase = (value & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
476     rdbase &= RDBASE_PROCNUM_MASK;
477 
478     valid = (value & CMD_FIELD_VALID_MASK);
479 
480     if ((icid > s->ct.maxids.max_collids) || (rdbase > s->gicv3->num_cpu)) {
481         qemu_log_mask(LOG_GUEST_ERROR,
482                       "ITS MAPC: invalid collection table attributes "
483                       "icid %d rdbase %" PRIu64 "\n",  icid, rdbase);
484         /*
485          * In this implementation, in case of error
486          * we ignore this command and move on to the next
487          * command in the queue
488          */
489     } else {
490         result = update_cte(s, icid, valid, rdbase);
491     }
492 
493     return result;
494 }
495 
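/*
 * Write (or clear) the device table entry for 'devid'. The DTE format
 * used by this implementation packs the valid flag into bit 0, the MAPD
 * Size field just above it and the ITT address at GITS_DTE_ITTADDR_SHIFT,
 * matching the decode done in process_its_cmd() and get_ite().
 */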
496 static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
497                        uint8_t size, uint64_t itt_addr)
498 {
499     AddressSpace *as = &s->gicv3->dma_as;
500     uint64_t value;
501     uint64_t l2t_addr;
502     bool valid_l2t;
503     uint32_t l2t_id;
504     uint32_t max_l2_entries;
505     uint64_t dte = 0;
506     MemTxResult res = MEMTX_OK;
507 
508     if (s->dt.valid) {
509         if (valid) {
510             /* add mapping entry to device table */
511             dte = (valid & TABLE_ENTRY_VALID_MASK) |
512                   ((size & SIZE_MASK) << 1U) |
513                   (itt_addr << GITS_DTE_ITTADDR_SHIFT);
514         }
515     } else {
516         return true;
517     }
518 
519     /*
520      * The specification defines the format of level 1 entries of a
521      * 2-level table, but the format of level 2 entries and the format
522      * of flat-mapped tables are IMPDEF.
523      */
524     if (s->dt.indirect) {
525         l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
526 
527         value = address_space_ldq_le(as,
528                                      s->dt.base_addr +
529                                      (l2t_id * L1TABLE_ENTRY_SIZE),
530                                      MEMTXATTRS_UNSPECIFIED, &res);
531 
532         if (res != MEMTX_OK) {
533             return false;
534         }
535 
536         valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
537 
538         if (valid_l2t) {
539             max_l2_entries = s->dt.page_sz / s->dt.entry_sz;
540 
541             l2t_addr = value & ((1ULL << 51) - 1);
542 
543             address_space_stq_le(as, l2t_addr +
544                                  ((devid % max_l2_entries) * GITS_DTE_SIZE),
545                                  dte, MEMTXATTRS_UNSPECIFIED, &res);
546         }
547     } else {
548         /* Flat level table */
549         address_space_stq_le(as, s->dt.base_addr + (devid * GITS_DTE_SIZE),
550                              dte, MEMTXATTRS_UNSPECIFIED, &res);
551     }
552     if (res != MEMTX_OK) {
553         return false;
554     } else {
555         return true;
556     }
557 }
558 
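/*
 * Handler for the MAPD command: doubleword 0 (already in 'value') carries
 * the DeviceID, doubleword 1 the Size field (the number of EventID bits
 * minus one) and doubleword 2 the ITT address and valid bit. The decoded
 * fields are written to the device table via update_dte().
 */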
559 static bool process_mapd(GICv3ITSState *s, uint64_t value, uint32_t offset)
560 {
561     AddressSpace *as = &s->gicv3->dma_as;
562     uint32_t devid;
563     uint8_t size;
564     uint64_t itt_addr;
565     bool valid;
566     MemTxResult res = MEMTX_OK;
567     bool result = false;
568 
569     devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
570 
571     offset += NUM_BYTES_IN_DW;
572     value = address_space_ldq_le(as, s->cq.base_addr + offset,
573                                  MEMTXATTRS_UNSPECIFIED, &res);
574 
575     if (res != MEMTX_OK) {
576         return result;
577     }
578 
579     size = (value & SIZE_MASK);
580 
581     offset += NUM_BYTES_IN_DW;
582     value = address_space_ldq_le(as, s->cq.base_addr + offset,
583                                  MEMTXATTRS_UNSPECIFIED, &res);
584 
585     if (res != MEMTX_OK) {
586         return result;
587     }
588 
589     itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT;
590 
591     valid = (value & CMD_FIELD_VALID_MASK);
592 
593     if ((devid > s->dt.maxids.max_devids) ||
594         (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
595         qemu_log_mask(LOG_GUEST_ERROR,
596                       "ITS MAPD: invalid device table attributes "
597                       "devid %d or size %d\n", devid, size);
598         /*
599          * In this implementation, in case of error
600          * we ignore this command and move on to the next
601          * command in the queue
602          */
603     } else {
604         result = update_dte(s, devid, valid, size, itt_addr);
605     }
606 
607     return result;
608 }
609 
610 /*
611  * The current implementation blocks until all
612  * commands in the queue have been processed
613  */
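/*
 * CWRITER and CREADR hold entry offsets into the command queue, so each
 * step below advances by one GITS_CMDQ_ENTRY_SIZE (32-byte) command and
 * wraps modulo cq.max_entries. For example (a sketch), a single 4K page
 * of queue holds 4096 / 32 == 128 entries, so a read offset of 127 wraps
 * back to 0 after the command at byte offset 127 * 32 has been handled.
 */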
614 static void process_cmdq(GICv3ITSState *s)
615 {
616     uint32_t wr_offset = 0;
617     uint32_t rd_offset = 0;
618     uint32_t cq_offset = 0;
619     uint64_t data;
620     AddressSpace *as = &s->gicv3->dma_as;
621     MemTxResult res = MEMTX_OK;
622     bool result = true;
623     uint8_t cmd;
624 
625     if (!(s->ctlr & ITS_CTLR_ENABLED)) {
626         return;
627     }
628 
629     wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
630 
631     if (wr_offset > s->cq.max_entries) {
632         qemu_log_mask(LOG_GUEST_ERROR,
633                       "%s: invalid write offset "
634                       "%d\n", __func__, wr_offset);
635         return;
636     }
637 
638     rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
639 
640     if (rd_offset > s->cq.max_entries) {
641         qemu_log_mask(LOG_GUEST_ERROR,
642                       "%s: invalid read offset "
643                       "%d\n", __func__, rd_offset);
644         return;
645     }
646 
647     while (wr_offset != rd_offset) {
648         cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
649         data = address_space_ldq_le(as, s->cq.base_addr + cq_offset,
650                                     MEMTXATTRS_UNSPECIFIED, &res);
651         if (res != MEMTX_OK) {
652             result = false;
653         }
654         cmd = (data & CMD_MASK);
655 
656         switch (cmd) {
657         case GITS_CMD_INT:
658             result = process_its_cmd(s, data, cq_offset, INTERRUPT);
659             break;
660         case GITS_CMD_CLEAR:
661             result = process_its_cmd(s, data, cq_offset, CLEAR);
662             break;
663         case GITS_CMD_SYNC:
664             /*
665              * The current implementation processes each command
666              * synchronously as it is issued, so the internal state is
667              * already consistent by the time a SYNC command is executed;
668              * no further processing is required for SYNC.
669              */
670             break;
671         case GITS_CMD_MAPD:
672             result = process_mapd(s, data, cq_offset);
673             break;
674         case GITS_CMD_MAPC:
675             result = process_mapc(s, cq_offset);
676             break;
677         case GITS_CMD_MAPTI:
678             result = process_mapti(s, data, cq_offset, false);
679             break;
680         case GITS_CMD_MAPI:
681             result = process_mapti(s, data, cq_offset, true);
682             break;
683         case GITS_CMD_DISCARD:
684             result = process_its_cmd(s, data, cq_offset, DISCARD);
685             break;
686         case GITS_CMD_INV:
687         case GITS_CMD_INVALL:
688             break;
689         default:
690             break;
691         }
692         if (result) {
693             rd_offset++;
694             rd_offset %= s->cq.max_entries;
695             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
696         } else {
697             /*
698              * In this implementation, in case of a DMA read/write error
699              * we stall the command processing
700              */
701             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
702             qemu_log_mask(LOG_GUEST_ERROR,
703                           "%s: cmd %x processing failed\n", __func__, cmd);
704             break;
705         }
706     }
707 }
708 
709 /*
710  * This function extracts the ITS Device and Collection table specific
711  * parameters (such as base_addr and size) from the GITS_BASER registers.
712  * It is called on ITS enable and also during post_load migration.
713  */
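/*
 * Sizing sketch (illustrative numbers, assuming 8-byte level-1 entries
 * and 8-byte table entries): a flat table spanning 2 x 64K pages holds
 * (2 * 65536) / 8 == 16384 entries, while an indirect table whose
 * 2 x 64K pages hold level-1 entries addresses
 * ((2 * 65536) / 8) * (65536 / 8) == 16384 * 8192 entries, since each
 * level-1 entry points at a further page of entries.
 */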
714 static void extract_table_params(GICv3ITSState *s)
715 {
716     uint16_t num_pages = 0;
717     uint8_t  page_sz_type;
718     uint8_t type;
719     uint32_t page_sz = 0;
720     uint64_t value;
721 
722     for (int i = 0; i < 8; i++) {
723         value = s->baser[i];
724 
725         if (!value) {
726             continue;
727         }
728 
729         page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);
730 
731         switch (page_sz_type) {
732         case 0:
733             page_sz = GITS_PAGE_SIZE_4K;
734             break;
735 
736         case 1:
737             page_sz = GITS_PAGE_SIZE_16K;
738             break;
739 
740         case 2:
741         case 3:
742             page_sz = GITS_PAGE_SIZE_64K;
743             break;
744 
745         default:
746             g_assert_not_reached();
747         }
748 
749         num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;
750 
751         type = FIELD_EX64(value, GITS_BASER, TYPE);
752 
753         switch (type) {
754 
755         case GITS_BASER_TYPE_DEVICE:
756             memset(&s->dt, 0, sizeof(s->dt));
757             s->dt.valid = FIELD_EX64(value, GITS_BASER, VALID);
758 
759             if (!s->dt.valid) {
760                 return;
761             }
762 
763             s->dt.page_sz = page_sz;
764             s->dt.indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
765             s->dt.entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE);
766 
767             if (!s->dt.indirect) {
768                 s->dt.max_entries = (num_pages * page_sz) / s->dt.entry_sz;
769             } else {
770                 s->dt.max_entries = (((num_pages * page_sz) /
771                                      L1TABLE_ENTRY_SIZE) *
772                                      (page_sz / s->dt.entry_sz));
773             }
774 
775             s->dt.maxids.max_devids = (1UL << (FIELD_EX64(s->typer, GITS_TYPER,
776                                        DEVBITS) + 1));
777 
778             s->dt.base_addr = baser_base_addr(value, page_sz);
779 
780             break;
781 
782         case GITS_BASER_TYPE_COLLECTION:
783             memset(&s->ct, 0, sizeof(s->ct));
784             s->ct.valid = FIELD_EX64(value, GITS_BASER, VALID);
785 
786             /*
787              * GITS_TYPER.HCC is 0 for this implementation
788              * hence writes are discarded if ct.valid is 0
789              */
790             if (!s->ct.valid) {
791                 return;
792             }
793 
794             s->ct.page_sz = page_sz;
795             s->ct.indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
796             s->ct.entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE);
797 
798             if (!s->ct.indirect) {
799                 s->ct.max_entries = (num_pages * page_sz) / s->ct.entry_sz;
800             } else {
801                 s->ct.max_entries = (((num_pages * page_sz) /
802                                      L1TABLE_ENTRY_SIZE) *
803                                      (page_sz / s->ct.entry_sz));
804             }
805 
806             if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
807                 s->ct.maxids.max_collids = (1UL << (FIELD_EX64(s->typer,
808                                             GITS_TYPER, CIDBITS) + 1));
809             } else {
810                 /* 16-bit CollectionId supported when CIL == 0 */
811                 s->ct.maxids.max_collids = (1UL << 16);
812             }
813 
814             s->ct.base_addr = baser_base_addr(value, page_sz);
815 
816             break;
817 
818         default:
819             break;
820         }
821     }
822 }
823 
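/*
 * Extract the command queue parameters from GITS_CBASER. The queue is
 * sized in units of 4K pages regardless of the GITS_BASER page sizes, so
 * for example a SIZE field of 0 gives one page of
 * 4096 / GITS_CMDQ_ENTRY_SIZE == 128 commands (with the architectural
 * 32-byte command size).
 */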
824 static void extract_cmdq_params(GICv3ITSState *s)
825 {
826     uint16_t num_pages = 0;
827     uint64_t value = s->cbaser;
828 
829     num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
830 
831     memset(&s->cq, 0, sizeof(s->cq));
832     s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);
833 
834     if (s->cq.valid) {
835         s->cq.max_entries = (num_pages * GITS_PAGE_SIZE_4K) /
836                              GITS_CMDQ_ENTRY_SIZE;
837         s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
838         s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
839     }
840 }
841 
842 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
843                                                uint64_t data, unsigned size,
844                                                MemTxAttrs attrs)
845 {
846     GICv3ITSState *s = (GICv3ITSState *)opaque;
847     bool result = true;
848     uint32_t devid = 0;
849 
850     switch (offset) {
851     case GITS_TRANSLATER:
852         if (s->ctlr & ITS_CTLR_ENABLED) {
853             devid = attrs.requester_id;
854             result = process_its_cmd(s, data, devid, NONE);
855         }
856         break;
857     default:
858         break;
859     }
860 
861     if (result) {
862         return MEMTX_OK;
863     } else {
864         return MEMTX_ERROR;
865     }
866 }
867 
868 static bool its_writel(GICv3ITSState *s, hwaddr offset,
869                               uint64_t value, MemTxAttrs attrs)
870 {
871     bool result = true;
872     int index;
873 
874     switch (offset) {
875     case GITS_CTLR:
876         s->ctlr |= (value & ~(s->ctlr));
877 
878         if (s->ctlr & ITS_CTLR_ENABLED) {
879             extract_table_params(s);
880             extract_cmdq_params(s);
881             s->creadr = 0;
882             process_cmdq(s);
883         }
884         break;
885     case GITS_CBASER:
886         /*
887          * IMPDEF choice: the GITS_CBASER register becomes RO if the ITS
888          *                is already enabled
889          */
890         if (!(s->ctlr & ITS_CTLR_ENABLED)) {
891             s->cbaser = deposit64(s->cbaser, 0, 32, value);
892             s->creadr = 0;
893             s->cwriter = s->creadr;
894         }
895         break;
896     case GITS_CBASER + 4:
897         /*
898          * IMPDEF choice: the GITS_CBASER register becomes RO if the ITS
899          *                is already enabled
900          */
901         if (!(s->ctlr & ITS_CTLR_ENABLED)) {
902             s->cbaser = deposit64(s->cbaser, 32, 32, value);
903             s->creadr = 0;
904             s->cwriter = s->creadr;
905         }
906         break;
907     case GITS_CWRITER:
908         s->cwriter = deposit64(s->cwriter, 0, 32,
909                                (value & ~R_GITS_CWRITER_RETRY_MASK));
910         if (s->cwriter != s->creadr) {
911             process_cmdq(s);
912         }
913         break;
914     case GITS_CWRITER + 4:
915         s->cwriter = deposit64(s->cwriter, 32, 32, value);
916         break;
917     case GITS_CREADR:
918         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
919             s->creadr = deposit64(s->creadr, 0, 32,
920                                   (value & ~R_GITS_CREADR_STALLED_MASK));
921         } else {
922             /* RO register, ignore the write */
923             qemu_log_mask(LOG_GUEST_ERROR,
924                           "%s: invalid guest write to RO register at offset "
925                           TARGET_FMT_plx "\n", __func__, offset);
926         }
927         break;
928     case GITS_CREADR + 4:
929         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
930             s->creadr = deposit64(s->creadr, 32, 32, value);
931         } else {
932             /* RO register, ignore the write */
933             qemu_log_mask(LOG_GUEST_ERROR,
934                           "%s: invalid guest write to RO register at offset "
935                           TARGET_FMT_plx "\n", __func__, offset);
936         }
937         break;
938     case GITS_BASER ... GITS_BASER + 0x3f:
939         /*
940          * IMPDEF choice: the GITS_BASER<n> registers become RO if the ITS
941          *                is already enabled
942          */
943         if (!(s->ctlr & ITS_CTLR_ENABLED)) {
944             index = (offset - GITS_BASER) / 8;
945 
946             if (offset & 7) {
947                 value <<= 32;
948                 value &= ~GITS_BASER_RO_MASK;
949                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
950                 s->baser[index] |= value;
951             } else {
952                 value &= ~GITS_BASER_RO_MASK;
953                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
954                 s->baser[index] |= value;
955             }
956         }
957         break;
958     case GITS_IIDR:
959     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
960         /* RO registers, ignore the write */
961         qemu_log_mask(LOG_GUEST_ERROR,
962                       "%s: invalid guest write to RO register at offset "
963                       TARGET_FMT_plx "\n", __func__, offset);
964         break;
965     default:
966         result = false;
967         break;
968     }
969     return result;
970 }
971 
972 static bool its_readl(GICv3ITSState *s, hwaddr offset,
973                              uint64_t *data, MemTxAttrs attrs)
974 {
975     bool result = true;
976     int index;
977 
978     switch (offset) {
979     case GITS_CTLR:
980         *data = s->ctlr;
981         break;
982     case GITS_IIDR:
983         *data = gicv3_iidr();
984         break;
985     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
986         /* ID registers */
987         *data = gicv3_idreg(offset - GITS_IDREGS);
988         break;
989     case GITS_TYPER:
990         *data = extract64(s->typer, 0, 32);
991         break;
992     case GITS_TYPER + 4:
993         *data = extract64(s->typer, 32, 32);
994         break;
995     case GITS_CBASER:
996         *data = extract64(s->cbaser, 0, 32);
997         break;
998     case GITS_CBASER + 4:
999         *data = extract64(s->cbaser, 32, 32);
1000         break;
1001     case GITS_CREADR:
1002         *data = extract64(s->creadr, 0, 32);
1003         break;
1004     case GITS_CREADR + 4:
1005         *data = extract64(s->creadr, 32, 32);
1006         break;
1007     case GITS_CWRITER:
1008         *data = extract64(s->cwriter, 0, 32);
1009         break;
1010     case GITS_CWRITER + 4:
1011         *data = extract64(s->cwriter, 32, 32);
1012         break;
1013     case GITS_BASER ... GITS_BASER + 0x3f:
1014         index = (offset - GITS_BASER) / 8;
1015         if (offset & 7) {
1016             *data = extract64(s->baser[index], 32, 32);
1017         } else {
1018             *data = extract64(s->baser[index], 0, 32);
1019         }
1020         break;
1021     default:
1022         result = false;
1023         break;
1024     }
1025     return result;
1026 }
1027 
1028 static bool its_writell(GICv3ITSState *s, hwaddr offset,
1029                                uint64_t value, MemTxAttrs attrs)
1030 {
1031     bool result = true;
1032     int index;
1033 
1034     switch (offset) {
1035     case GITS_BASER ... GITS_BASER + 0x3f:
1036         /*
1037          * IMPDEF choice: the GITS_BASER<n> registers become RO if the ITS
1038          *                is already enabled
1039          */
1040         if (!(s->ctlr & ITS_CTLR_ENABLED)) {
1041             index = (offset - GITS_BASER) / 8;
1042             s->baser[index] &= GITS_BASER_RO_MASK;
1043             s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
1044         }
1045         break;
1046     case GITS_CBASER:
1047         /*
1048          * IMPDEF choice: the GITS_CBASER register becomes RO if the ITS
1049          *                is already enabled
1050          */
1051         if (!(s->ctlr & ITS_CTLR_ENABLED)) {
1052             s->cbaser = value;
1053             s->creadr = 0;
1054             s->cwriter = s->creadr;
1055         }
1056         break;
1057     case GITS_CWRITER:
1058         s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
1059         if (s->cwriter != s->creadr) {
1060             process_cmdq(s);
1061         }
1062         break;
1063     case GITS_CREADR:
1064         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1065             s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
1066         } else {
1067             /* RO register, ignore the write */
1068             qemu_log_mask(LOG_GUEST_ERROR,
1069                           "%s: invalid guest write to RO register at offset "
1070                           TARGET_FMT_plx "\n", __func__, offset);
1071         }
1072         break;
1073     case GITS_TYPER:
1074         /* RO registers, ignore the write */
1075         qemu_log_mask(LOG_GUEST_ERROR,
1076                       "%s: invalid guest write to RO register at offset "
1077                       TARGET_FMT_plx "\n", __func__, offset);
1078         break;
1079     default:
1080         result = false;
1081         break;
1082     }
1083     return result;
1084 }
1085 
1086 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1087                               uint64_t *data, MemTxAttrs attrs)
1088 {
1089     bool result = true;
1090     int index;
1091 
1092     switch (offset) {
1093     case GITS_TYPER:
1094         *data = s->typer;
1095         break;
1096     case GITS_BASER ... GITS_BASER + 0x3f:
1097         index = (offset - GITS_BASER) / 8;
1098         *data = s->baser[index];
1099         break;
1100     case GITS_CBASER:
1101         *data = s->cbaser;
1102         break;
1103     case GITS_CREADR:
1104         *data = s->creadr;
1105         break;
1106     case GITS_CWRITER:
1107         *data = s->cwriter;
1108         break;
1109     default:
1110         result = false;
1111         break;
1112     }
1113     return result;
1114 }
1115 
1116 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1117                                   unsigned size, MemTxAttrs attrs)
1118 {
1119     GICv3ITSState *s = (GICv3ITSState *)opaque;
1120     bool result;
1121 
1122     switch (size) {
1123     case 4:
1124         result = its_readl(s, offset, data, attrs);
1125         break;
1126     case 8:
1127         result = its_readll(s, offset, data, attrs);
1128         break;
1129     default:
1130         result = false;
1131         break;
1132     }
1133 
1134     if (!result) {
1135         qemu_log_mask(LOG_GUEST_ERROR,
1136                       "%s: invalid guest read at offset " TARGET_FMT_plx
1137                       " size %u\n", __func__, offset, size);
1138         /*
1139          * The spec requires that reserved registers are RAZ/WI;
1140          * so use false returns from leaf functions as a way to
1141          * trigger the guest-error logging but don't return it to
1142          * the caller, or we'll cause a spurious guest data abort.
1143          */
1144         *data = 0;
1145     }
1146     return MEMTX_OK;
1147 }
1148 
1149 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1150                                    unsigned size, MemTxAttrs attrs)
1151 {
1152     GICv3ITSState *s = (GICv3ITSState *)opaque;
1153     bool result;
1154 
1155     switch (size) {
1156     case 4:
1157         result = its_writel(s, offset, data, attrs);
1158         break;
1159     case 8:
1160         result = its_writell(s, offset, data, attrs);
1161         break;
1162     default:
1163         result = false;
1164         break;
1165     }
1166 
1167     if (!result) {
1168         qemu_log_mask(LOG_GUEST_ERROR,
1169                       "%s: invalid guest write at offset " TARGET_FMT_plx
1170                       " size %u\n", __func__, offset, size);
1171         /*
1172          * The spec requires that reserved registers are RAZ/WI;
1173          * so use false returns from leaf functions as a way to
1174          * trigger the guest-error logging but don't return it to
1175          * the caller, or we'll cause a spurious guest data abort.
1176          */
1177     }
1178     return MEMTX_OK;
1179 }
1180 
1181 static const MemoryRegionOps gicv3_its_control_ops = {
1182     .read_with_attrs = gicv3_its_read,
1183     .write_with_attrs = gicv3_its_write,
1184     .valid.min_access_size = 4,
1185     .valid.max_access_size = 8,
1186     .impl.min_access_size = 4,
1187     .impl.max_access_size = 8,
1188     .endianness = DEVICE_NATIVE_ENDIAN,
1189 };
1190 
1191 static const MemoryRegionOps gicv3_its_translation_ops = {
1192     .write_with_attrs = gicv3_its_translation_write,
1193     .valid.min_access_size = 2,
1194     .valid.max_access_size = 4,
1195     .impl.min_access_size = 2,
1196     .impl.max_access_size = 4,
1197     .endianness = DEVICE_NATIVE_ENDIAN,
1198 };
1199 
1200 static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
1201 {
1202     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1203     int i;
1204 
1205     for (i = 0; i < s->gicv3->num_cpu; i++) {
1206         if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
1207             error_setg(errp, "Physical LPI not supported by CPU %d", i);
1208             return;
1209         }
1210     }
1211 
1212     gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
1213 
1214     address_space_init(&s->gicv3->dma_as, s->gicv3->dma,
1215                        "gicv3-its-sysmem");
1216 
1217     /* set the ITS default features supported */
1218     s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL,
1219                           GITS_TYPE_PHYSICAL);
1220     s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
1221                           ITS_ITT_ENTRY_SIZE - 1);
1222     s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
1223     s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
1224     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
1225     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
1226 }
1227 
1228 static void gicv3_its_reset(DeviceState *dev)
1229 {
1230     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1231     GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);
1232 
1233     c->parent_reset(dev);
1234 
1235     /* Quiescent bit reset to 1 */
1236     s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);
1237 
1238     /*
1239      * setting GITS_BASER0.Type = 0b001 (Device)
1240      *         GITS_BASER1.Type = 0b100 (Collection Table)
1241      *         GITS_BASER<n>.Type, where n = 2 to 7, are 0b00 (Unimplemented)
1242      *         GITS_BASER<0,1>.Page_Size = 64KB
1243      * and default translation table entry size to 16 bytes
1244      */
1245     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
1246                              GITS_BASER_TYPE_DEVICE);
1247     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
1248                              GITS_BASER_PAGESIZE_64K);
1249     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
1250                              GITS_DTE_SIZE - 1);
1251 
1252     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
1253                              GITS_BASER_TYPE_COLLECTION);
1254     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
1255                              GITS_BASER_PAGESIZE_64K);
1256     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
1257                              GITS_CTE_SIZE - 1);
1258 }
1259 
1260 static void gicv3_its_post_load(GICv3ITSState *s)
1261 {
1262     if (s->ctlr & ITS_CTLR_ENABLED) {
1263         extract_table_params(s);
1264         extract_cmdq_params(s);
1265     }
1266 }
1267 
1268 static Property gicv3_its_props[] = {
1269     DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
1270                      GICv3State *),
1271     DEFINE_PROP_END_OF_LIST(),
1272 };
1273 
1274 static void gicv3_its_class_init(ObjectClass *klass, void *data)
1275 {
1276     DeviceClass *dc = DEVICE_CLASS(klass);
1277     GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
1278     GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);
1279 
1280     dc->realize = gicv3_arm_its_realize;
1281     device_class_set_props(dc, gicv3_its_props);
1282     device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
1283     icc->post_load = gicv3_its_post_load;
1284 }
1285 
1286 static const TypeInfo gicv3_its_info = {
1287     .name = TYPE_ARM_GICV3_ITS,
1288     .parent = TYPE_ARM_GICV3_ITS_COMMON,
1289     .instance_size = sizeof(GICv3ITSState),
1290     .class_init = gicv3_its_class_init,
1291     .class_size = sizeof(GICv3ITSClass),
1292 };
1293 
1294 static void gicv3_its_register_types(void)
1295 {
1296     type_register_static(&gicv3_its_info);
1297 }
1298 
1299 type_init(gicv3_its_register_types)
1300