xref: /openbmc/qemu/hw/intc/arm_gicv3_its.c (revision b13148d91805143aba3e0b4441e760b9bea03b8c)
1 /*
2  * ITS emulation for a GICv3-based system
3  *
4  * Copyright Linaro.org 2021
5  *
6  * Authors:
7  *  Shashi Mallela <shashi.mallela@linaro.org>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10  * option) any later version.  See the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qemu/log.h"
16 #include "hw/qdev-properties.h"
17 #include "hw/intc/arm_gicv3_its_common.h"
18 #include "gicv3_internal.h"
19 #include "qom/object.h"
20 #include "qapi/error.h"
21 
22 typedef struct GICv3ITSClass GICv3ITSClass;
23 /* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
24 DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
25                      ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)
26 
27 struct GICv3ITSClass {
28     GICv3ITSCommonClass parent_class;
29     void (*parent_reset)(DeviceState *dev);
30 };
31 
32 /*
33  * This is an internal enum used to distinguish between an LPI triggered
34  * via the command queue and an LPI triggered via a GITS_TRANSLATER write.
35  */
36 typedef enum ItsCmdType {
37     NONE = 0, /* internal indication for GITS_TRANSLATER write */
38     CLEAR = 1,
39     DISCARD = 2,
40     INTERRUPT = 3,
41 } ItsCmdType;
42 
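/*
 * In-guest-memory Interrupt Translation Entry. Each ITE is allocated
 * sizeof(uint64_t) + sizeof(uint32_t) == 12 bytes in the ITT; the exact
 * in-memory layout is IMPDEF and only needs to stay consistent between
 * update_ite() and get_ite() below.
 */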
43 typedef struct {
44     uint32_t iteh;
45     uint64_t itel;
46 } IteEntry;
47 
48 /*
49  * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
50  * if a command parameter is not correct. These include both "stall
51  * processing of the command queue" and "ignore this command, and
52  * keep processing the queue". In our implementation we choose that
53  * memory transaction errors reading the command packet provoke a
54  * stall, but errors in parameters cause us to ignore the command
55  * and continue processing.
56  * The process_* functions which handle individual ITS commands all
57  * return an ItsCmdResult which tells process_cmdq() whether it should
58  * stall or keep going.
59  */
60 typedef enum ItsCmdResult {
61     CMD_STALL = 0,
62     CMD_CONTINUE = 1,
63 } ItsCmdResult;
64 
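/*
 * Return the physical base address field from a GITS_BASER<n> value,
 * taking the programmed page size into account: for 64KB pages the
 * address is split across the PHYADDRL_64K and PHYADDRH_64K fields.
 */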
65 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
66 {
67     uint64_t result = 0;
68 
69     switch (page_sz) {
70     case GITS_PAGE_SIZE_4K:
71     case GITS_PAGE_SIZE_16K:
72         result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
73         break;
74 
75     case GITS_PAGE_SIZE_64K:
76         result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
77         result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
78         break;
79 
80     default:
81         break;
82     }
83     return result;
84 }
85 
86 static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
87                                  uint32_t idx, MemTxResult *res)
88 {
89     /*
90      * Given a TableDesc describing one of the ITS in-guest-memory
91      * tables and an index into it, return the guest address
92      * corresponding to that table entry.
93      * If there was a memory error reading the L1 table of an
94      * indirect table, *res is set accordingly, and we return -1.
95      * If the L1 table entry is marked not valid, we return -1 with
96      * *res set to MEMTX_OK.
97      *
98      * The specification defines the format of level 1 entries of a
99      * 2-level table, but the format of level 2 entries and the format
100      * of flat-mapped tables is IMPDEF.
101      */
102     AddressSpace *as = &s->gicv3->dma_as;
103     uint32_t l2idx;
104     uint64_t l2;
105     uint32_t num_l2_entries;
106 
107     *res = MEMTX_OK;
108 
109     if (!td->indirect) {
110         /* Single level table */
111         return td->base_addr + idx * td->entry_sz;
112     }
113 
114     /* Two level table */
115     l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);
116 
117     l2 = address_space_ldq_le(as,
118                               td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
119                               MEMTXATTRS_UNSPECIFIED, res);
120     if (*res != MEMTX_OK) {
121         return -1;
122     }
123     if (!(l2 & L2_TABLE_VALID_MASK)) {
124         return -1;
125     }
126 
127     num_l2_entries = td->page_sz / td->entry_sz;
128     return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
129 }
130 
131 static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
132                     MemTxResult *res)
133 {
134     AddressSpace *as = &s->gicv3->dma_as;
135     uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, res);
136 
137     if (entry_addr == -1) {
138         return false; /* not valid */
139     }
140 
141     *cte = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, res);
142     return FIELD_EX64(*cte, CTE, VALID);
143 }
144 
145 static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
146                        IteEntry ite)
147 {
148     AddressSpace *as = &s->gicv3->dma_as;
149     uint64_t itt_addr;
150     MemTxResult res = MEMTX_OK;
151 
152     itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
153     itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
154 
155     address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
156                          sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED,
157                          &res);
158 
159     if (res == MEMTX_OK) {
160         address_space_stl_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
161                              sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh,
162                              MEMTXATTRS_UNSPECIFIED, &res);
163     }
164     if (res != MEMTX_OK) {
165         return false;
166     } else {
167         return true;
168     }
169 }
170 
171 static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
172                     uint16_t *icid, uint32_t *pIntid, MemTxResult *res)
173 {
174     AddressSpace *as = &s->gicv3->dma_as;
175     uint64_t itt_addr;
176     bool status = false;
177     IteEntry ite = {};
178 
179     itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
180     itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
181 
182     ite.itel = address_space_ldq_le(as, itt_addr +
183                                     (eventid * (sizeof(uint64_t) +
184                                     sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED,
185                                     res);
186 
187     if (*res == MEMTX_OK) {
188         ite.iteh = address_space_ldl_le(as, itt_addr +
189                                         (eventid * (sizeof(uint64_t) +
190                                         sizeof(uint32_t))) + sizeof(uint32_t),
191                                         MEMTXATTRS_UNSPECIFIED, res);
192 
193         if (*res == MEMTX_OK) {
194             if (FIELD_EX64(ite.itel, ITE_L, VALID)) {
195                 int inttype = FIELD_EX64(ite.itel, ITE_L, INTTYPE);
196                 if (inttype == ITE_INTTYPE_PHYSICAL) {
197                     *pIntid = FIELD_EX64(ite.itel, ITE_L, INTID);
198                     *icid = FIELD_EX32(ite.iteh, ITE_H, ICID);
199                     status = true;
200                 }
201             }
202         }
203     }
204     return status;
205 }
206 
207 static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
208 {
209     AddressSpace *as = &s->gicv3->dma_as;
210     uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, res);
211 
212     if (entry_addr == -1) {
213         return 0; /* a DTE entry with the Valid bit clear */
214     }
215     return address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, res);
216 }
217 
218 /*
219  * This function handles the processing of the following commands, based on
220  * the ItsCmdType parameter passed in:
221  * 1. triggering of LPI interrupt translation via the ITS INT command
222  * 2. triggering of LPI interrupt translation via a GITS_TRANSLATER write
223  * 3. handling of the ITS CLEAR command
224  * 4. handling of the ITS DISCARD command
225  */
226 static ItsCmdResult process_its_cmd(GICv3ITSState *s, uint64_t value,
227                                     uint32_t offset, ItsCmdType cmd)
228 {
229     AddressSpace *as = &s->gicv3->dma_as;
230     uint32_t devid, eventid;
231     MemTxResult res = MEMTX_OK;
232     bool dte_valid;
233     uint64_t dte = 0;
234     uint64_t num_eventids;
235     uint16_t icid = 0;
236     uint32_t pIntid = 0;
237     bool ite_valid = false;
238     uint64_t cte = 0;
239     bool cte_valid = false;
240     uint64_t rdbase;
241 
242     if (cmd == NONE) {
243         devid = offset;
244     } else {
245         devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
246 
247         offset += NUM_BYTES_IN_DW;
248         value = address_space_ldq_le(as, s->cq.base_addr + offset,
249                                      MEMTXATTRS_UNSPECIFIED, &res);
250     }
251 
252     if (res != MEMTX_OK) {
253         return CMD_STALL;
254     }
255 
256     eventid = (value & EVENTID_MASK);
257 
258     if (devid >= s->dt.num_ids) {
259         qemu_log_mask(LOG_GUEST_ERROR,
260                       "%s: invalid command attributes: devid %d>=%d\n",
261                       __func__, devid, s->dt.num_ids);
262         return CMD_CONTINUE;
263     }
264 
265     dte = get_dte(s, devid, &res);
266 
267     if (res != MEMTX_OK) {
268         return CMD_STALL;
269     }
270     dte_valid = FIELD_EX64(dte, DTE, VALID);
271 
272     if (!dte_valid) {
273         qemu_log_mask(LOG_GUEST_ERROR,
274                       "%s: invalid command attributes: "
275                       "invalid dte: %"PRIx64" for %d\n",
276                       __func__, dte, devid);
277         return CMD_CONTINUE;
278     }
279 
280     num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1);
281 
282     if (eventid >= num_eventids) {
283         qemu_log_mask(LOG_GUEST_ERROR,
284                       "%s: invalid command attributes: eventid %d >= %"
285                       PRId64 "\n",
286                       __func__, eventid, num_eventids);
287         return CMD_CONTINUE;
288     }
289 
290     ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res);
291     if (res != MEMTX_OK) {
292         return CMD_STALL;
293     }
294 
295     if (!ite_valid) {
296         qemu_log_mask(LOG_GUEST_ERROR,
297                       "%s: invalid command attributes: invalid ITE\n",
298                       __func__);
299         return CMD_CONTINUE;
300     }
301 
302     cte_valid = get_cte(s, icid, &cte, &res);
303     if (res != MEMTX_OK) {
304         return CMD_STALL;
305     }
306     if (!cte_valid) {
307         qemu_log_mask(LOG_GUEST_ERROR,
308                       "%s: invalid command attributes: "
309                       "invalid cte: %"PRIx64"\n",
310                       __func__, cte);
311         return CMD_CONTINUE;
312     }
313 
314     /*
315      * The current implementation only supports RDBASE holding a processor
316      * number, so the physical-address form of RDBASE is ignored.
317      */
318     rdbase = FIELD_EX64(cte, CTE, RDBASE);
319 
320     if (rdbase >= s->gicv3->num_cpu) {
321         return CMD_CONTINUE;
322     }
323 
324     if ((cmd == CLEAR) || (cmd == DISCARD)) {
325         gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0);
326     } else {
327         gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1);
328     }
329 
330     if (cmd == DISCARD) {
331         IteEntry ite = {};
332         /* remove mapping from interrupt translation table */
333         return update_ite(s, eventid, dte, ite) ? CMD_CONTINUE : CMD_STALL;
334     }
335     return CMD_CONTINUE;
336 }
337 
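/*
 * MAPTI and MAPI command handling: DW0 of the command holds the DeviceID,
 * DW1 the EventID and (for MAPTI) the physical INTID, and DW2 the ICID.
 * For MAPI (ignore_pInt == true) the EventID is reused as the INTID.
 */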
338 static ItsCmdResult process_mapti(GICv3ITSState *s, uint64_t value,
339                                   uint32_t offset, bool ignore_pInt)
340 {
341     AddressSpace *as = &s->gicv3->dma_as;
342     uint32_t devid, eventid;
343     uint32_t pIntid = 0;
344     uint64_t num_eventids;
345     uint32_t num_intids;
346     bool dte_valid;
347     MemTxResult res = MEMTX_OK;
348     uint16_t icid = 0;
349     uint64_t dte = 0;
350     IteEntry ite = {};
351 
352     devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
353     offset += NUM_BYTES_IN_DW;
354     value = address_space_ldq_le(as, s->cq.base_addr + offset,
355                                  MEMTXATTRS_UNSPECIFIED, &res);
356 
357     if (res != MEMTX_OK) {
358         return CMD_STALL;
359     }
360 
361     eventid = (value & EVENTID_MASK);
362 
363     if (ignore_pInt) {
364         pIntid = eventid;
365     } else {
366         pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT);
367     }
368 
369     offset += NUM_BYTES_IN_DW;
370     value = address_space_ldq_le(as, s->cq.base_addr + offset,
371                                  MEMTXATTRS_UNSPECIFIED, &res);
372 
373     if (res != MEMTX_OK) {
374         return CMD_STALL;
375     }
376 
377     icid = value & ICID_MASK;
378 
379     if (devid >= s->dt.num_ids) {
380         qemu_log_mask(LOG_GUEST_ERROR,
381                       "%s: invalid command attributes: devid %d>=%d\n",
382                       __func__, devid, s->dt.num_ids);
383         return CMD_CONTINUE;
384     }
385 
386     dte = get_dte(s, devid, &res);
387 
388     if (res != MEMTX_OK) {
389         return CMD_STALL;
390     }
391     dte_valid = FIELD_EX64(dte, DTE, VALID);
392     num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1);
393     num_intids = 1ULL << (GICD_TYPER_IDBITS + 1);
394 
395     if ((icid >= s->ct.num_ids)
396             || !dte_valid || (eventid >= num_eventids) ||
397             (((pIntid < GICV3_LPI_INTID_START) || (pIntid >= num_intids)) &&
398              (pIntid != INTID_SPURIOUS))) {
399         qemu_log_mask(LOG_GUEST_ERROR,
400                       "%s: invalid command attributes "
401                       "icid %d or eventid %d or pIntid %d or "
402                       "unmapped dte %d\n", __func__, icid, eventid,
403                       pIntid, dte_valid);
404         /*
405          * in this implementation, in case of error
406          * we ignore this command and move on to the next
407          * command in the queue
408          */
409         return CMD_CONTINUE;
410     }
411 
412     /* add ite entry to interrupt translation table */
413     ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, dte_valid);
414     ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
415     ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, pIntid);
416     ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS);
417     ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, icid);
418 
419     return update_ite(s, eventid, dte, ite) ? CMD_CONTINUE : CMD_STALL;
420 }
421 
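/*
 * Update the Collection Table entry for @icid. Returns false only on a
 * memory access error (the caller then stalls the command queue); if the
 * Collection Table is not valid or the L2 entry is missing, the write is
 * simply discarded and true is returned.
 */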
422 static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
423                        uint64_t rdbase)
424 {
425     AddressSpace *as = &s->gicv3->dma_as;
426     uint64_t entry_addr;
427     uint64_t cte = 0;
428     MemTxResult res = MEMTX_OK;
429 
430     if (!s->ct.valid) {
431         return true;
432     }
433 
434     if (valid) {
435         /* add mapping entry to collection table */
436         cte = FIELD_DP64(cte, CTE, VALID, 1);
437         cte = FIELD_DP64(cte, CTE, RDBASE, rdbase);
438     }
439 
440     entry_addr = table_entry_addr(s, &s->ct, icid, &res);
441     if (res != MEMTX_OK) {
442         /* memory access error: stall */
443         return false;
444     }
445     if (entry_addr == -1) {
446         /* No L2 table for this index: discard write and continue */
447         return true;
448     }
449 
450     address_space_stq_le(as, entry_addr, cte, MEMTXATTRS_UNSPECIFIED, &res);
451     return res == MEMTX_OK;
452 }
453 
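/*
 * MAPC: all of the fields we care about (ICID, RDBASE and V) live in the
 * third doubleword of the command, so skip straight to it.
 */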
454 static ItsCmdResult process_mapc(GICv3ITSState *s, uint32_t offset)
455 {
456     AddressSpace *as = &s->gicv3->dma_as;
457     uint16_t icid;
458     uint64_t rdbase;
459     bool valid;
460     MemTxResult res = MEMTX_OK;
461     uint64_t value;
462 
463     offset += NUM_BYTES_IN_DW;
464     offset += NUM_BYTES_IN_DW;
465 
466     value = address_space_ldq_le(as, s->cq.base_addr + offset,
467                                  MEMTXATTRS_UNSPECIFIED, &res);
468 
469     if (res != MEMTX_OK) {
470         return CMD_STALL;
471     }
472 
473     icid = value & ICID_MASK;
474 
475     rdbase = (value & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
476     rdbase &= RDBASE_PROCNUM_MASK;
477 
478     valid = (value & CMD_FIELD_VALID_MASK);
479 
480     if ((icid >= s->ct.num_ids) || (rdbase >= s->gicv3->num_cpu)) {
481         qemu_log_mask(LOG_GUEST_ERROR,
482                       "ITS MAPC: invalid collection table attributes "
483                       "icid %d rdbase %" PRIu64 "\n",  icid, rdbase);
484         /*
485          * in this implementation, in case of error
486          * we ignore this command and move on to the next
487          * command in the queue
488          */
489         return CMD_CONTINUE;
490     }
491 
492     return update_cte(s, icid, valid, rdbase) ? CMD_CONTINUE : CMD_STALL;
493 }
494 
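/*
 * Update the Device Table entry for @devid; same return convention as
 * update_cte(): false means a memory access error (stall), true otherwise.
 */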
495 static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
496                        uint8_t size, uint64_t itt_addr)
497 {
498     AddressSpace *as = &s->gicv3->dma_as;
499     uint64_t entry_addr;
500     uint64_t dte = 0;
501     MemTxResult res = MEMTX_OK;
502 
503     if (s->dt.valid) {
504         if (valid) {
505             /* add mapping entry to device table */
506             dte = FIELD_DP64(dte, DTE, VALID, 1);
507             dte = FIELD_DP64(dte, DTE, SIZE, size);
508             dte = FIELD_DP64(dte, DTE, ITTADDR, itt_addr);
509         }
510     } else {
511         return true;
512     }
513 
514     entry_addr = table_entry_addr(s, &s->dt, devid, &res);
515     if (res != MEMTX_OK) {
516         /* memory access error: stall */
517         return false;
518     }
519     if (entry_addr == -1) {
520         /* No L2 table for this index: discard write and continue */
521         return true;
522     }
523     address_space_stq_le(as, entry_addr, dte, MEMTXATTRS_UNSPECIFIED, &res);
524     return res == MEMTX_OK;
525 }
526 
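/*
 * MAPD: DW0 of the command holds the DeviceID, DW1 the ITT size field
 * (number of EventID bits minus one), and DW2 the ITT address and the
 * Valid bit.
 */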
527 static ItsCmdResult process_mapd(GICv3ITSState *s, uint64_t value,
528                                  uint32_t offset)
529 {
530     AddressSpace *as = &s->gicv3->dma_as;
531     uint32_t devid;
532     uint8_t size;
533     uint64_t itt_addr;
534     bool valid;
535     MemTxResult res = MEMTX_OK;
536 
537     devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
538 
539     offset += NUM_BYTES_IN_DW;
540     value = address_space_ldq_le(as, s->cq.base_addr + offset,
541                                  MEMTXATTRS_UNSPECIFIED, &res);
542 
543     if (res != MEMTX_OK) {
544         return CMD_STALL;
545     }
546 
547     size = (value & SIZE_MASK);
548 
549     offset += NUM_BYTES_IN_DW;
550     value = address_space_ldq_le(as, s->cq.base_addr + offset,
551                                  MEMTXATTRS_UNSPECIFIED, &res);
552 
553     if (res != MEMTX_OK) {
554         return CMD_STALL;
555     }
556 
557     itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT;
558 
559     valid = (value & CMD_FIELD_VALID_MASK);
560 
561     if ((devid >= s->dt.num_ids) ||
562         (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
563         qemu_log_mask(LOG_GUEST_ERROR,
564                       "ITS MAPD: invalid device table attributes "
565                       "devid %d or size %d\n", devid, size);
566         /*
567          * in this implementation, in case of error
568          * we ignore this command and move on to the next
569          * command in the queue
570          */
571         return CMD_CONTINUE;
572     }
573 
574     return update_dte(s, devid, valid, size, itt_addr) ? CMD_CONTINUE : CMD_STALL;
575 }
576 
577 /*
578  * The current implementation processes the command queue synchronously,
579  * blocking until all queued commands have been processed.
580  */
581 static void process_cmdq(GICv3ITSState *s)
582 {
583     uint32_t wr_offset = 0;
584     uint32_t rd_offset = 0;
585     uint32_t cq_offset = 0;
586     uint64_t data;
587     AddressSpace *as = &s->gicv3->dma_as;
588     MemTxResult res = MEMTX_OK;
589     uint8_t cmd;
590     int i;
591 
592     if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
593         return;
594     }
595 
596     wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
597 
598     if (wr_offset >= s->cq.num_entries) {
599         qemu_log_mask(LOG_GUEST_ERROR,
600                       "%s: invalid write offset "
601                       "%d\n", __func__, wr_offset);
602         return;
603     }
604 
605     rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
606 
607     if (rd_offset >= s->cq.num_entries) {
608         qemu_log_mask(LOG_GUEST_ERROR,
609                       "%s: invalid read offset "
610                       "%d\n", __func__, rd_offset);
611         return;
612     }
613 
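    /*
     * Process commands from CREADR up to (but not including) CWRITER,
     * wrapping modulo the queue size. A memory error or a stalled
     * command leaves GITS_CREADR.Stalled set and stops processing.
     */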
614     while (wr_offset != rd_offset) {
615         ItsCmdResult result = CMD_CONTINUE;
616 
617         cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
618         data = address_space_ldq_le(as, s->cq.base_addr + cq_offset,
619                                     MEMTXATTRS_UNSPECIFIED, &res);
620         if (res != MEMTX_OK) {
621             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
622             qemu_log_mask(LOG_GUEST_ERROR,
623                           "%s: could not read command at 0x%" PRIx64 "\n",
624                           __func__, s->cq.base_addr + cq_offset);
625             break;
626         }
627 
628         cmd = (data & CMD_MASK);
629 
630         switch (cmd) {
631         case GITS_CMD_INT:
632             result = process_its_cmd(s, data, cq_offset, INTERRUPT);
633             break;
634         case GITS_CMD_CLEAR:
635             result = process_its_cmd(s, data, cq_offset, CLEAR);
636             break;
637         case GITS_CMD_SYNC:
638             /*
639              * The current implementation makes a blocking synchronous call
640              * for every command issued earlier, so the internal state is
641              * already consistent by the time a SYNC command is executed.
642              * No further processing is therefore required for SYNC.
643              */
644             break;
645         case GITS_CMD_MAPD:
646             result = process_mapd(s, data, cq_offset);
647             break;
648         case GITS_CMD_MAPC:
649             result = process_mapc(s, cq_offset);
650             break;
651         case GITS_CMD_MAPTI:
652             result = process_mapti(s, data, cq_offset, false);
653             break;
654         case GITS_CMD_MAPI:
655             result = process_mapti(s, data, cq_offset, true);
656             break;
657         case GITS_CMD_DISCARD:
658             result = process_its_cmd(s, data, cq_offset, DISCARD);
659             break;
660         case GITS_CMD_INV:
661         case GITS_CMD_INVALL:
662             /*
663              * The current implementation doesn't cache any ITS tables,
664              * only the calculated LPI priority information. We therefore
665              * just need to trigger an LPI priority recalculation to stay
666              * in sync with LPI configuration table or pending table changes.
667              */
668             for (i = 0; i < s->gicv3->num_cpu; i++) {
669                 gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
670             }
671             break;
672         default:
673             break;
674         }
675         if (result == CMD_CONTINUE) {
676             rd_offset++;
677             rd_offset %= s->cq.num_entries;
678             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
679         } else {
680             /* CMD_STALL */
681             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
682             qemu_log_mask(LOG_GUEST_ERROR,
683                           "%s: 0x%x cmd processing failed, stalling\n",
684                           __func__, cmd);
685             break;
686         }
687     }
688 }
689 
690 /*
691  * This function extracts the ITS Device and Collection table specific
692  * parameters (such as base address and size) from the GITS_BASER<n>
693  * registers. It is called on ITS enable and during post_load migration.
694  */
695 static void extract_table_params(GICv3ITSState *s)
696 {
697     uint16_t num_pages = 0;
698     uint8_t  page_sz_type;
699     uint8_t type;
700     uint32_t page_sz = 0;
701     uint64_t value;
702 
703     for (int i = 0; i < 8; i++) {
704         TableDesc *td;
705         int idbits;
706 
707         value = s->baser[i];
708 
709         if (!value) {
710             continue;
711         }
712 
713         page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);
714 
715         switch (page_sz_type) {
716         case 0:
717             page_sz = GITS_PAGE_SIZE_4K;
718             break;
719 
720         case 1:
721             page_sz = GITS_PAGE_SIZE_16K;
722             break;
723 
724         case 2:
725         case 3:
726             page_sz = GITS_PAGE_SIZE_64K;
727             break;
728 
729         default:
730             g_assert_not_reached();
731         }
732 
733         num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;
734 
735         type = FIELD_EX64(value, GITS_BASER, TYPE);
736 
737         switch (type) {
738         case GITS_BASER_TYPE_DEVICE:
739             td = &s->dt;
740             idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
741             break;
742         case GITS_BASER_TYPE_COLLECTION:
743             td = &s->ct;
744             if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
745                 idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
746             } else {
747                 /* 16-bit CollectionId supported when CIL == 0 */
748                 idbits = 16;
749             }
750             break;
751         default:
752             /*
753              * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
754              * ensures we will only see type values corresponding to
755              * the values set up in gicv3_its_reset().
756              */
757             g_assert_not_reached();
758         }
759 
760         memset(td, 0, sizeof(*td));
761         td->valid = FIELD_EX64(value, GITS_BASER, VALID);
762         /*
763          * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
764          * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
765          * do not have a special case where the GITS_BASER<n>.Valid bit is 0
766          * for the register corresponding to the Collection table but we
767          * still have to process interrupts using non-memory-backed
768          * Collection table entries.)
769          */
770         if (!td->valid) {
771             continue;
772         }
773         td->page_sz = page_sz;
774         td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
775         td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
776         td->base_addr = baser_base_addr(value, page_sz);
777         if (!td->indirect) {
778             td->num_entries = (num_pages * page_sz) / td->entry_sz;
779         } else {
780             td->num_entries = (((num_pages * page_sz) /
781                                   L1TABLE_ENTRY_SIZE) *
782                                  (page_sz / td->entry_sz));
783         }
784         td->num_ids = 1ULL << idbits;
785     }
786 }
787 
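/*
 * Extract the command queue parameters from GITS_CBASER: the queue is
 * always allocated in 4KB pages, and the Size field encodes the number
 * of pages minus one.
 */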
788 static void extract_cmdq_params(GICv3ITSState *s)
789 {
790     uint16_t num_pages = 0;
791     uint64_t value = s->cbaser;
792 
793     num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
794 
795     memset(&s->cq, 0 , sizeof(s->cq));
796     s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);
797 
798     if (s->cq.valid) {
799         s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
800                              GITS_CMDQ_ENTRY_SIZE;
801         s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
802         s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
803     }
804 }
805 
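/*
 * Write handler for the GITS_TRANSLATER frame. Note that process_its_cmd()
 * returns an ItsCmdResult: CMD_STALL is 0, so a failed translation ends up
 * as 'result == false' and is reported back to the guest as MEMTX_ERROR.
 */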
806 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
807                                                uint64_t data, unsigned size,
808                                                MemTxAttrs attrs)
809 {
810     GICv3ITSState *s = (GICv3ITSState *)opaque;
811     bool result = true;
812     uint32_t devid = 0;
813 
814     switch (offset) {
815     case GITS_TRANSLATER:
816         if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
817             devid = attrs.requester_id;
818             result = process_its_cmd(s, data, devid, NONE);
819         }
820         break;
821     default:
822         break;
823     }
824 
825     if (result) {
826         return MEMTX_OK;
827     } else {
828         return MEMTX_ERROR;
829     }
830 }
831 
832 static bool its_writel(GICv3ITSState *s, hwaddr offset,
833                               uint64_t value, MemTxAttrs attrs)
834 {
835     bool result = true;
836     int index;
837 
838     switch (offset) {
839     case GITS_CTLR:
840         if (value & R_GITS_CTLR_ENABLED_MASK) {
841             s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
842             extract_table_params(s);
843             extract_cmdq_params(s);
844             s->creadr = 0;
845             process_cmdq(s);
846         } else {
847             s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
848         }
849         break;
850     case GITS_CBASER:
851         /*
852          * IMPDEF choice: the GITS_CBASER register becomes RO if the ITS is
853          *                 already enabled
854          */
855         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
856             s->cbaser = deposit64(s->cbaser, 0, 32, value);
857             s->creadr = 0;
858             s->cwriter = s->creadr;
859         }
860         break;
861     case GITS_CBASER + 4:
862         /*
863          * IMPDEF choice: the GITS_CBASER register becomes RO if the ITS is
864          *                 already enabled
865          */
866         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
867             s->cbaser = deposit64(s->cbaser, 32, 32, value);
868             s->creadr = 0;
869             s->cwriter = s->creadr;
870         }
871         break;
872     case GITS_CWRITER:
873         s->cwriter = deposit64(s->cwriter, 0, 32,
874                                (value & ~R_GITS_CWRITER_RETRY_MASK));
875         if (s->cwriter != s->creadr) {
876             process_cmdq(s);
877         }
878         break;
879     case GITS_CWRITER + 4:
880         s->cwriter = deposit64(s->cwriter, 32, 32, value);
881         break;
882     case GITS_CREADR:
883         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
884             s->creadr = deposit64(s->creadr, 0, 32,
885                                   (value & ~R_GITS_CREADR_STALLED_MASK));
886         } else {
887             /* RO register, ignore the write */
888             qemu_log_mask(LOG_GUEST_ERROR,
889                           "%s: invalid guest write to RO register at offset "
890                           TARGET_FMT_plx "\n", __func__, offset);
891         }
892         break;
893     case GITS_CREADR + 4:
894         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
895             s->creadr = deposit64(s->creadr, 32, 32, value);
896         } else {
897             /* RO register, ignore the write */
898             qemu_log_mask(LOG_GUEST_ERROR,
899                           "%s: invalid guest write to RO register at offset "
900                           TARGET_FMT_plx "\n", __func__, offset);
901         }
902         break;
903     case GITS_BASER ... GITS_BASER + 0x3f:
904         /*
905          * IMPDEF choice: the GITS_BASERn register becomes RO if the ITS is
906          *                 already enabled
907          */
908         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
909             index = (offset - GITS_BASER) / 8;
910 
911             if (offset & 7) {
912                 value <<= 32;
913                 value &= ~GITS_BASER_RO_MASK;
914                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
915                 s->baser[index] |= value;
916             } else {
917                 value &= ~GITS_BASER_RO_MASK;
918                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
919                 s->baser[index] |= value;
920             }
921         }
922         break;
923     case GITS_IIDR:
924     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
925         /* RO registers, ignore the write */
926         qemu_log_mask(LOG_GUEST_ERROR,
927                       "%s: invalid guest write to RO register at offset "
928                       TARGET_FMT_plx "\n", __func__, offset);
929         break;
930     default:
931         result = false;
932         break;
933     }
934     return result;
935 }
936 
937 static bool its_readl(GICv3ITSState *s, hwaddr offset,
938                              uint64_t *data, MemTxAttrs attrs)
939 {
940     bool result = true;
941     int index;
942 
943     switch (offset) {
944     case GITS_CTLR:
945         *data = s->ctlr;
946         break;
947     case GITS_IIDR:
948         *data = gicv3_iidr();
949         break;
950     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
951         /* ID registers */
952         *data = gicv3_idreg(offset - GITS_IDREGS);
953         break;
954     case GITS_TYPER:
955         *data = extract64(s->typer, 0, 32);
956         break;
957     case GITS_TYPER + 4:
958         *data = extract64(s->typer, 32, 32);
959         break;
960     case GITS_CBASER:
961         *data = extract64(s->cbaser, 0, 32);
962         break;
963     case GITS_CBASER + 4:
964         *data = extract64(s->cbaser, 32, 32);
965         break;
966     case GITS_CREADR:
967         *data = extract64(s->creadr, 0, 32);
968         break;
969     case GITS_CREADR + 4:
970         *data = extract64(s->creadr, 32, 32);
971         break;
972     case GITS_CWRITER:
973         *data = extract64(s->cwriter, 0, 32);
974         break;
975     case GITS_CWRITER + 4:
976         *data = extract64(s->cwriter, 32, 32);
977         break;
978     case GITS_BASER ... GITS_BASER + 0x3f:
979         index = (offset - GITS_BASER) / 8;
980         if (offset & 7) {
981             *data = extract64(s->baser[index], 32, 32);
982         } else {
983             *data = extract64(s->baser[index], 0, 32);
984         }
985         break;
986     default:
987         result = false;
988         break;
989     }
990     return result;
991 }
992 
993 static bool its_writell(GICv3ITSState *s, hwaddr offset,
994                                uint64_t value, MemTxAttrs attrs)
995 {
996     bool result = true;
997     int index;
998 
999     switch (offset) {
1000     case GITS_BASER ... GITS_BASER + 0x3f:
1001         /*
1002          * IMPDEF choice: the GITS_BASERn register becomes RO if the ITS is
1003          *                 already enabled
1004          */
1005         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1006             index = (offset - GITS_BASER) / 8;
1007             s->baser[index] &= GITS_BASER_RO_MASK;
1008             s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
1009         }
1010         break;
1011     case GITS_CBASER:
1012         /*
1013          * IMPDEF choice: the GITS_CBASER register becomes RO if the ITS is
1014          *                 already enabled
1015          */
1016         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1017             s->cbaser = value;
1018             s->creadr = 0;
1019             s->cwriter = s->creadr;
1020         }
1021         break;
1022     case GITS_CWRITER:
1023         s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
1024         if (s->cwriter != s->creadr) {
1025             process_cmdq(s);
1026         }
1027         break;
1028     case GITS_CREADR:
1029         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1030             s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
1031         } else {
1032             /* RO register, ignore the write */
1033             qemu_log_mask(LOG_GUEST_ERROR,
1034                           "%s: invalid guest write to RO register at offset "
1035                           TARGET_FMT_plx "\n", __func__, offset);
1036         }
1037         break;
1038     case GITS_TYPER:
1039         /* RO registers, ignore the write */
1040         qemu_log_mask(LOG_GUEST_ERROR,
1041                       "%s: invalid guest write to RO register at offset "
1042                       TARGET_FMT_plx "\n", __func__, offset);
1043         break;
1044     default:
1045         result = false;
1046         break;
1047     }
1048     return result;
1049 }
1050 
1051 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1052                               uint64_t *data, MemTxAttrs attrs)
1053 {
1054     bool result = true;
1055     int index;
1056 
1057     switch (offset) {
1058     case GITS_TYPER:
1059         *data = s->typer;
1060         break;
1061     case GITS_BASER ... GITS_BASER + 0x3f:
1062         index = (offset - GITS_BASER) / 8;
1063         *data = s->baser[index];
1064         break;
1065     case GITS_CBASER:
1066         *data = s->cbaser;
1067         break;
1068     case GITS_CREADR:
1069         *data = s->creadr;
1070         break;
1071     case GITS_CWRITER:
1072         *data = s->cwriter;
1073         break;
1074     default:
1075         result = false;
1076         break;
1077     }
1078     return result;
1079 }
1080 
1081 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1082                                   unsigned size, MemTxAttrs attrs)
1083 {
1084     GICv3ITSState *s = (GICv3ITSState *)opaque;
1085     bool result;
1086 
1087     switch (size) {
1088     case 4:
1089         result = its_readl(s, offset, data, attrs);
1090         break;
1091     case 8:
1092         result = its_readll(s, offset, data, attrs);
1093         break;
1094     default:
1095         result = false;
1096         break;
1097     }
1098 
1099     if (!result) {
1100         qemu_log_mask(LOG_GUEST_ERROR,
1101                       "%s: invalid guest read at offset " TARGET_FMT_plx
1102                       " size %u\n", __func__, offset, size);
1103         /*
1104          * The spec requires that reserved registers are RAZ/WI;
1105          * so use false returns from leaf functions as a way to
1106          * trigger the guest-error logging but don't return it to
1107          * the caller, or we'll cause a spurious guest data abort.
1108          */
1109         *data = 0;
1110     }
1111     return MEMTX_OK;
1112 }
1113 
1114 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1115                                    unsigned size, MemTxAttrs attrs)
1116 {
1117     GICv3ITSState *s = (GICv3ITSState *)opaque;
1118     bool result;
1119 
1120     switch (size) {
1121     case 4:
1122         result = its_writel(s, offset, data, attrs);
1123         break;
1124     case 8:
1125         result = its_writell(s, offset, data, attrs);
1126         break;
1127     default:
1128         result = false;
1129         break;
1130     }
1131 
1132     if (!result) {
1133         qemu_log_mask(LOG_GUEST_ERROR,
1134                       "%s: invalid guest write at offset " TARGET_FMT_plx
1135                       " size %u\n", __func__, offset, size);
1136         /*
1137          * The spec requires that reserved registers are RAZ/WI;
1138          * so use false returns from leaf functions as a way to
1139          * trigger the guest-error logging but don't return it to
1140          * the caller, or we'll cause a spurious guest data abort.
1141          */
1142     }
1143     return MEMTX_OK;
1144 }
1145 
1146 static const MemoryRegionOps gicv3_its_control_ops = {
1147     .read_with_attrs = gicv3_its_read,
1148     .write_with_attrs = gicv3_its_write,
1149     .valid.min_access_size = 4,
1150     .valid.max_access_size = 8,
1151     .impl.min_access_size = 4,
1152     .impl.max_access_size = 8,
1153     .endianness = DEVICE_NATIVE_ENDIAN,
1154 };
1155 
1156 static const MemoryRegionOps gicv3_its_translation_ops = {
1157     .write_with_attrs = gicv3_its_translation_write,
1158     .valid.min_access_size = 2,
1159     .valid.max_access_size = 4,
1160     .impl.min_access_size = 2,
1161     .impl.max_access_size = 4,
1162     .endianness = DEVICE_NATIVE_ENDIAN,
1163 };
1164 
1165 static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
1166 {
1167     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1168     int i;
1169 
1170     for (i = 0; i < s->gicv3->num_cpu; i++) {
1171         if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
1172             error_setg(errp, "Physical LPI not supported by CPU %d", i);
1173             return;
1174         }
1175     }
1176 
1177     gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
1178 
1179     address_space_init(&s->gicv3->dma_as, s->gicv3->dma,
1180                        "gicv3-its-sysmem");
1181 
1182     /* set the ITS default features supported */
1183     s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
1184     s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
1185                           ITS_ITT_ENTRY_SIZE - 1);
1186     s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
1187     s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
1188     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
1189     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
1190 }
1191 
1192 static void gicv3_its_reset(DeviceState *dev)
1193 {
1194     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1195     GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);
1196 
1197     c->parent_reset(dev);
1198 
1199     /* Quiescent bit reset to 1 */
1200     s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);
1201 
1202     /*
1203      * setting GITS_BASER0.Type = 0b001 (Device)
1204      *         GITS_BASER1.Type = 0b100 (Collection Table)
1205      *         GITS_BASER<n>.Type, where n = 2 to 7, are 0b000 (Unimplemented)
1206      *         GITS_BASER<0,1>.Page_Size = 64KB
1207      * and default translation table entry size to 16 bytes
1208      */
1209     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
1210                              GITS_BASER_TYPE_DEVICE);
1211     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
1212                              GITS_BASER_PAGESIZE_64K);
1213     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
1214                              GITS_DTE_SIZE - 1);
1215 
1216     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
1217                              GITS_BASER_TYPE_COLLECTION);
1218     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
1219                              GITS_BASER_PAGESIZE_64K);
1220     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
1221                              GITS_CTE_SIZE - 1);
1222 }
1223 
1224 static void gicv3_its_post_load(GICv3ITSState *s)
1225 {
1226     if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1227         extract_table_params(s);
1228         extract_cmdq_params(s);
1229     }
1230 }
1231 
1232 static Property gicv3_its_props[] = {
1233     DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
1234                      GICv3State *),
1235     DEFINE_PROP_END_OF_LIST(),
1236 };
1237 
1238 static void gicv3_its_class_init(ObjectClass *klass, void *data)
1239 {
1240     DeviceClass *dc = DEVICE_CLASS(klass);
1241     GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
1242     GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);
1243 
1244     dc->realize = gicv3_arm_its_realize;
1245     device_class_set_props(dc, gicv3_its_props);
1246     device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
1247     icc->post_load = gicv3_its_post_load;
1248 }
1249 
1250 static const TypeInfo gicv3_its_info = {
1251     .name = TYPE_ARM_GICV3_ITS,
1252     .parent = TYPE_ARM_GICV3_ITS_COMMON,
1253     .instance_size = sizeof(GICv3ITSState),
1254     .class_init = gicv3_its_class_init,
1255     .class_size = sizeof(GICv3ITSClass),
1256 };
1257 
1258 static void gicv3_its_register_types(void)
1259 {
1260     type_register_static(&gicv3_its_info);
1261 }
1262 
1263 type_init(gicv3_its_register_types)
1264