xref: /openbmc/qemu/hw/intc/arm_gicv3_its.c (revision 2f93d8b0)
1 /*
2  * ITS emulation for a GICv3-based system
3  *
4  * Copyright Linaro.org 2021
5  *
6  * Authors:
7  *  Shashi Mallela <shashi.mallela@linaro.org>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10  * option) any later version.  See the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qemu/log.h"
16 #include "hw/qdev-properties.h"
17 #include "hw/intc/arm_gicv3_its_common.h"
18 #include "gicv3_internal.h"
19 #include "qom/object.h"
20 #include "qapi/error.h"
21 
22 typedef struct GICv3ITSClass GICv3ITSClass;
23 /* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
24 DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
25                      ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)
26 
27 struct GICv3ITSClass {
28     GICv3ITSCommonClass parent_class;
29     void (*parent_reset)(DeviceState *dev);
30 };
31 
32 /*
33  * This is an internal enum used to distinguish between an LPI triggered
34  * via the command queue and an LPI triggered via a GITS_TRANSLATER write.
35  */
36 typedef enum ItsCmdType {
37     NONE = 0, /* internal indication for GITS_TRANSLATER write */
38     CLEAR = 1,
39     DISCARD = 2,
40     INTERRUPT = 3,
41 } ItsCmdType;
42 
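/*
 * In-memory representation of an Interrupt Translation Entry (ITE) as
 * used by this implementation (the exact memory layout of an ITE is an
 * implementation choice): itel carries the VALID, INTTYPE, INTID and
 * DOORBELL fields, iteh carries the ICID. update_ite() and get_ite()
 * store and load these two words at a fixed per-event offset within
 * the device's interrupt translation table.
 */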
43 typedef struct {
44     uint32_t iteh;
45     uint64_t itel;
46 } IteEntry;
47 
48 /*
49  * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
50  * if a command parameter is not correct. These include both "stall
51  * processing of the command queue" and "ignore this command, and
52  * keep processing the queue". In our implementation we choose that
53  * memory transaction errors reading the command packet provoke a
54  * stall, but errors in parameters cause us to ignore the command
55  * and continue processing.
56  * The process_* functions which handle individual ITS commands all
57  * return an ItsCmdResult which tells process_cmdq() whether it should
58  * stall or keep going.
59  */
60 typedef enum ItsCmdResult {
61     CMD_STALL = 0,
62     CMD_CONTINUE = 1,
63 } ItsCmdResult;
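
/*
 * A minimal sketch of how the handlers below apply this convention
 * (illustrative only; 'addr' stands for whatever guest address is
 * being read):
 *
 *     value = address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, &res);
 *     if (res != MEMTX_OK) {
 *         return CMD_STALL;      <- memory error fetching command data
 *     }
 *     if (devid >= s->dt.num_ids) {
 *         return CMD_CONTINUE;   <- bad parameter: drop it, keep going
 *     }
 */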
64 
65 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
66 {
67     uint64_t result = 0;
68 
69     switch (page_sz) {
70     case GITS_PAGE_SIZE_4K:
71     case GITS_PAGE_SIZE_16K:
72         result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
73         break;
74 
75     case GITS_PAGE_SIZE_64K:
76         result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
77         result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
78         break;
79 
80     default:
81         break;
82     }
83     return result;
84 }
85 
86 static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
87                                  uint32_t idx, MemTxResult *res)
88 {
89     /*
90      * Given a TableDesc describing one of the ITS in-guest-memory
91      * tables and an index into it, return the guest address
92      * corresponding to that table entry.
93      * If there was a memory error reading the L1 table of an
94      * indirect table, *res is set accordingly, and we return -1.
95      * If the L1 table entry is marked not valid, we return -1 with
96      * *res set to MEMTX_OK.
97      *
98      * The specification defines the format of level 1 entries of a
99      * 2-level table, but the format of level 2 entries and the format
100      * of flat-mapped tables is IMPDEF.
101      */
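    /*
     * Worked example (illustrative numbers only, assuming 4KB pages,
     * 8-byte table entries and an 8-byte L1TABLE_ENTRY_SIZE): each L1
     * entry then covers 4096 / 8 = 512 table entries, so for idx 1000
     * we get l2idx = 1000 / 512 = 1, read the L1 entry at
     * base_addr + 8, and return the L2 page base plus
     * (1000 % 512) * 8 = 3904 bytes.
     */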
102     AddressSpace *as = &s->gicv3->dma_as;
103     uint32_t l2idx;
104     uint64_t l2;
105     uint32_t num_l2_entries;
106 
107     *res = MEMTX_OK;
108 
109     if (!td->indirect) {
110         /* Single level table */
111         return td->base_addr + idx * td->entry_sz;
112     }
113 
114     /* Two level table */
115     l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);
116 
117     l2 = address_space_ldq_le(as,
118                               td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
119                               MEMTXATTRS_UNSPECIFIED, res);
120     if (*res != MEMTX_OK) {
121         return -1;
122     }
123     if (!(l2 & L2_TABLE_VALID_MASK)) {
124         return -1;
125     }
126 
127     num_l2_entries = td->page_sz / td->entry_sz;
128     return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
129 }
130 
131 static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
132                     MemTxResult *res)
133 {
134     AddressSpace *as = &s->gicv3->dma_as;
135     uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, res);
136 
137     if (entry_addr == -1) {
138         return false; /* not valid */
139     }
140 
141     *cte = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, res);
142     return FIELD_EX64(*cte, CTE, VALID);
143 }
144 
145 static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
146                        IteEntry ite)
147 {
148     AddressSpace *as = &s->gicv3->dma_as;
149     uint64_t itt_addr;
150     MemTxResult res = MEMTX_OK;
151 
152     itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
153     itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
154 
155     address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
156                          sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED,
157                          &res);
158 
159     if (res == MEMTX_OK) {
160         address_space_stl_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
161                              sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh,
162                              MEMTXATTRS_UNSPECIFIED, &res);
163     }
164     if (res != MEMTX_OK) {
165         return false;
166     } else {
167         return true;
168     }
169 }
170 
171 static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
172                     uint16_t *icid, uint32_t *pIntid, MemTxResult *res)
173 {
174     AddressSpace *as = &s->gicv3->dma_as;
175     uint64_t itt_addr;
176     bool status = false;
177     IteEntry ite = {};
178 
179     itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
180     itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
181 
182     ite.itel = address_space_ldq_le(as, itt_addr +
183                                     (eventid * (sizeof(uint64_t) +
184                                     sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED,
185                                     res);
186 
187     if (*res == MEMTX_OK) {
188         ite.iteh = address_space_ldl_le(as, itt_addr +
189                                         (eventid * (sizeof(uint64_t) +
190                                         sizeof(uint32_t))) + sizeof(uint32_t),
191                                         MEMTXATTRS_UNSPECIFIED, res);
192 
193         if (*res == MEMTX_OK) {
194             if (FIELD_EX64(ite.itel, ITE_L, VALID)) {
195                 int inttype = FIELD_EX64(ite.itel, ITE_L, INTTYPE);
196                 if (inttype == ITE_INTTYPE_PHYSICAL) {
197                     *pIntid = FIELD_EX64(ite.itel, ITE_L, INTID);
198                     *icid = FIELD_EX32(ite.iteh, ITE_H, ICID);
199                     status = true;
200                 }
201             }
202         }
203     }
204     return status;
205 }
206 
207 static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
208 {
209     AddressSpace *as = &s->gicv3->dma_as;
210     uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, res);
211 
212     if (entry_addr == -1) {
213         return 0; /* a DTE entry with the Valid bit clear */
214     }
215     return address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, res);
216 }
217 
218 /*
219  * This function handles the processing of the following commands, based on
220  * the ItsCmdType parameter passed in:
221  * 1. triggering of an LPI interrupt translation via the ITS INT command
222  * 2. triggering of an LPI interrupt translation via a GITS_TRANSLATER write
223  * 3. handling of the ITS CLEAR command
224  * 4. handling of the ITS DISCARD command
225  */
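/*
 * For INT, CLEAR and DISCARD this handler consumes two command
 * doublewords: the DeviceID comes from the first (already passed in as
 * 'value') and the EventID from the second, which is re-read from the
 * command queue at 'offset' + NUM_BYTES_IN_DW. For a GITS_TRANSLATER
 * write (cmd == NONE) the DeviceID is the bus requester ID passed in
 * via 'offset', and 'value' holds the written EventID.
 */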
226 static ItsCmdResult process_its_cmd(GICv3ITSState *s, uint64_t value,
227                                     uint32_t offset, ItsCmdType cmd)
228 {
229     AddressSpace *as = &s->gicv3->dma_as;
230     uint32_t devid, eventid;
231     MemTxResult res = MEMTX_OK;
232     bool dte_valid;
233     uint64_t dte = 0;
234     uint64_t num_eventids;
235     uint16_t icid = 0;
236     uint32_t pIntid = 0;
237     bool ite_valid = false;
238     uint64_t cte = 0;
239     bool cte_valid = false;
240     uint64_t rdbase;
241 
242     if (cmd == NONE) {
243         devid = offset;
244     } else {
245         devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
246 
247         offset += NUM_BYTES_IN_DW;
248         value = address_space_ldq_le(as, s->cq.base_addr + offset,
249                                      MEMTXATTRS_UNSPECIFIED, &res);
250     }
251 
252     if (res != MEMTX_OK) {
253         return CMD_STALL;
254     }
255 
256     eventid = (value & EVENTID_MASK);
257 
258     if (devid >= s->dt.num_ids) {
259         qemu_log_mask(LOG_GUEST_ERROR,
260                       "%s: invalid command attributes: devid %d >= %d\n",
261                       __func__, devid, s->dt.num_ids);
262         return CMD_CONTINUE;
263     }
264 
265     dte = get_dte(s, devid, &res);
266 
267     if (res != MEMTX_OK) {
268         return CMD_STALL;
269     }
270     dte_valid = FIELD_EX64(dte, DTE, VALID);
271 
272     if (!dte_valid) {
273         qemu_log_mask(LOG_GUEST_ERROR,
274                       "%s: invalid command attributes: "
275                       "invalid dte: %"PRIx64" for %d\n",
276                       __func__, dte, devid);
277         return CMD_CONTINUE;
278     }
279 
280     num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1);
281 
282     if (eventid >= num_eventids) {
283         qemu_log_mask(LOG_GUEST_ERROR,
284                       "%s: invalid command attributes: eventid %d >= %"
285                       PRId64 "\n",
286                       __func__, eventid, num_eventids);
287         return CMD_CONTINUE;
288     }
289 
290     ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res);
291     if (res != MEMTX_OK) {
292         return CMD_STALL;
293     }
294 
295     if (!ite_valid) {
296         qemu_log_mask(LOG_GUEST_ERROR,
297                       "%s: invalid command attributes: invalid ITE\n",
298                       __func__);
299         return CMD_CONTINUE;
300     }
301 
302     if (icid >= s->ct.num_ids) {
303         qemu_log_mask(LOG_GUEST_ERROR,
304                       "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
305                       __func__, icid);
306         return CMD_CONTINUE;
307     }
308 
309     cte_valid = get_cte(s, icid, &cte, &res);
310     if (res != MEMTX_OK) {
311         return CMD_STALL;
312     }
313     if (!cte_valid) {
314         qemu_log_mask(LOG_GUEST_ERROR,
315                       "%s: invalid command attributes: "
316                       "invalid cte: %"PRIx64"\n",
317                       __func__, cte);
318         return CMD_CONTINUE;
319     }
320 
321     /*
322      * The current implementation only supports rdbase == procnum;
323      * hence the RDbase physical-address form is ignored.
324      */
325     rdbase = FIELD_EX64(cte, CTE, RDBASE);
326 
327     if (rdbase >= s->gicv3->num_cpu) {
328         return CMD_CONTINUE;
329     }
330 
331     if ((cmd == CLEAR) || (cmd == DISCARD)) {
332         gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0);
333     } else {
334         gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1);
335     }
336 
337     if (cmd == DISCARD) {
338         IteEntry ite = {};
339         /* remove mapping from interrupt translation table */
340         return update_ite(s, eventid, dte, ite) ? CMD_CONTINUE : CMD_STALL;
341     }
342     return CMD_CONTINUE;
343 }
344 
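/*
 * Handles the MAPTI and MAPI commands (MAPI is MAPTI with ignore_pInt
 * set, in which case the physical INTID is taken to be the EventID).
 * The DeviceID comes from the first command doubleword (passed in as
 * 'value'), the EventID and optional pINTID from the second, and the
 * ICID from the third; on success the mapping is written out as an ITE
 * in the device's interrupt translation table.
 */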
345 static ItsCmdResult process_mapti(GICv3ITSState *s, uint64_t value,
346                                   uint32_t offset, bool ignore_pInt)
347 {
348     AddressSpace *as = &s->gicv3->dma_as;
349     uint32_t devid, eventid;
350     uint32_t pIntid = 0;
351     uint64_t num_eventids;
352     uint32_t num_intids;
353     bool dte_valid;
354     MemTxResult res = MEMTX_OK;
355     uint16_t icid = 0;
356     uint64_t dte = 0;
357     IteEntry ite = {};
358 
359     devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
360     offset += NUM_BYTES_IN_DW;
361     value = address_space_ldq_le(as, s->cq.base_addr + offset,
362                                  MEMTXATTRS_UNSPECIFIED, &res);
363 
364     if (res != MEMTX_OK) {
365         return CMD_STALL;
366     }
367 
368     eventid = (value & EVENTID_MASK);
369 
370     if (ignore_pInt) {
371         pIntid = eventid;
372     } else {
373         pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT);
374     }
375 
376     offset += NUM_BYTES_IN_DW;
377     value = address_space_ldq_le(as, s->cq.base_addr + offset,
378                                  MEMTXATTRS_UNSPECIFIED, &res);
379 
380     if (res != MEMTX_OK) {
381         return CMD_STALL;
382     }
383 
384     icid = value & ICID_MASK;
385 
386     if (devid >= s->dt.num_ids) {
387         qemu_log_mask(LOG_GUEST_ERROR,
388                       "%s: invalid command attributes: devid %d >= %d\n",
389                       __func__, devid, s->dt.num_ids);
390         return CMD_CONTINUE;
391     }
392 
393     dte = get_dte(s, devid, &res);
394 
395     if (res != MEMTX_OK) {
396         return CMD_STALL;
397     }
398     dte_valid = FIELD_EX64(dte, DTE, VALID);
399     num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1);
400     num_intids = 1ULL << (GICD_TYPER_IDBITS + 1);
401 
402     if ((icid >= s->ct.num_ids)
403             || !dte_valid || (eventid >= num_eventids) ||
404             (((pIntid < GICV3_LPI_INTID_START) || (pIntid >= num_intids)) &&
405              (pIntid != INTID_SPURIOUS))) {
406         qemu_log_mask(LOG_GUEST_ERROR,
407                       "%s: invalid command attributes "
408                       "icid %d or eventid %d or pIntid %d or "
409                       "unmapped dte %d\n", __func__, icid, eventid,
410                       pIntid, dte_valid);
411         /*
412          * In this implementation, in case of error
413          * we ignore this command and move on to the next
414          * command in the queue.
415          */
416         return CMD_CONTINUE;
417     }
418 
419     /* add ite entry to interrupt translation table */
420     ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, dte_valid);
421     ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
422     ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, pIntid);
423     ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS);
424     ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, icid);
425 
426     return update_ite(s, eventid, dte, ite) ? CMD_CONTINUE : CMD_STALL;
427 }
428 
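/*
 * Write (or, if 'valid' is false, clear) the collection table entry
 * for 'icid'. Only a memory access error returns false, which makes
 * the caller stall command processing; a collection table whose
 * GITS_BASER Valid bit is clear, or an unmapped L2 page, silently
 * drops the write so the command is simply ignored.
 */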
429 static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
430                        uint64_t rdbase)
431 {
432     AddressSpace *as = &s->gicv3->dma_as;
433     uint64_t entry_addr;
434     uint64_t cte = 0;
435     MemTxResult res = MEMTX_OK;
436 
437     if (!s->ct.valid) {
438         return true;
439     }
440 
441     if (valid) {
442         /* add mapping entry to collection table */
443         cte = FIELD_DP64(cte, CTE, VALID, 1);
444         cte = FIELD_DP64(cte, CTE, RDBASE, rdbase);
445     }
446 
447     entry_addr = table_entry_addr(s, &s->ct, icid, &res);
448     if (res != MEMTX_OK) {
449         /* memory access error: stall */
450         return false;
451     }
452     if (entry_addr == -1) {
453         /* No L2 table for this index: discard write and continue */
454         return true;
455     }
456 
457     address_space_stq_le(as, entry_addr, cte, MEMTXATTRS_UNSPECIFIED, &res);
458     return res == MEMTX_OK;
459 }
460 
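/*
 * Handles the MAPC command: the ICID, the RDbase (interpreted as a
 * processor number in this implementation) and the Valid bit are all
 * taken from the third command doubleword, and the mapping is written
 * to the collection table via update_cte().
 */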
461 static ItsCmdResult process_mapc(GICv3ITSState *s, uint32_t offset)
462 {
463     AddressSpace *as = &s->gicv3->dma_as;
464     uint16_t icid;
465     uint64_t rdbase;
466     bool valid;
467     MemTxResult res = MEMTX_OK;
468     uint64_t value;
469 
470     offset += NUM_BYTES_IN_DW;
471     offset += NUM_BYTES_IN_DW;
472 
473     value = address_space_ldq_le(as, s->cq.base_addr + offset,
474                                  MEMTXATTRS_UNSPECIFIED, &res);
475 
476     if (res != MEMTX_OK) {
477         return CMD_STALL;
478     }
479 
480     icid = value & ICID_MASK;
481 
482     rdbase = (value & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
483     rdbase &= RDBASE_PROCNUM_MASK;
484 
485     valid = (value & CMD_FIELD_VALID_MASK);
486 
487     if ((icid >= s->ct.num_ids) || (rdbase >= s->gicv3->num_cpu)) {
488         qemu_log_mask(LOG_GUEST_ERROR,
489                       "ITS MAPC: invalid collection table attributes "
490                       "icid %d rdbase %" PRIu64 "\n",  icid, rdbase);
491         /*
492          * In this implementation, in case of error
493          * we ignore this command and move on to the next
494          * command in the queue.
495          */
496         return CMD_CONTINUE;
497     }
498 
499     return update_cte(s, icid, valid, rdbase) ? CMD_CONTINUE : CMD_STALL;
500 }
501 
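/*
 * Write (or clear) the device table entry for 'devid', recording the
 * ITT size and address. As with update_cte(), only a memory access
 * error returns false and stalls the command queue; a device table
 * whose GITS_BASER Valid bit is clear, or an unmapped L2 page,
 * silently drops the write.
 */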
502 static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
503                        uint8_t size, uint64_t itt_addr)
504 {
505     AddressSpace *as = &s->gicv3->dma_as;
506     uint64_t entry_addr;
507     uint64_t dte = 0;
508     MemTxResult res = MEMTX_OK;
509 
510     if (s->dt.valid) {
511         if (valid) {
512             /* add mapping entry to device table */
513             dte = FIELD_DP64(dte, DTE, VALID, 1);
514             dte = FIELD_DP64(dte, DTE, SIZE, size);
515             dte = FIELD_DP64(dte, DTE, ITTADDR, itt_addr);
516         }
517     } else {
518         return true;
519     }
520 
521     entry_addr = table_entry_addr(s, &s->dt, devid, &res);
522     if (res != MEMTX_OK) {
523         /* memory access error: stall */
524         return false;
525     }
526     if (entry_addr == -1) {
527         /* No L2 table for this index: discard write and continue */
528         return true;
529     }
530     address_space_stq_le(as, entry_addr, dte, MEMTXATTRS_UNSPECIFIED, &res);
531     return res == MEMTX_OK;
532 }
533 
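/*
 * Handles the MAPD command: the DeviceID comes from the first command
 * doubleword (passed in as 'value'), the ITT size field (number of
 * EventID bits minus one) from the second, and the ITT address plus
 * the Valid bit from the third; the result is written to the device
 * table via update_dte().
 */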
534 static ItsCmdResult process_mapd(GICv3ITSState *s, uint64_t value,
535                                  uint32_t offset)
536 {
537     AddressSpace *as = &s->gicv3->dma_as;
538     uint32_t devid;
539     uint8_t size;
540     uint64_t itt_addr;
541     bool valid;
542     MemTxResult res = MEMTX_OK;
543 
544     devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
545 
546     offset += NUM_BYTES_IN_DW;
547     value = address_space_ldq_le(as, s->cq.base_addr + offset,
548                                  MEMTXATTRS_UNSPECIFIED, &res);
549 
550     if (res != MEMTX_OK) {
551         return CMD_STALL;
552     }
553 
554     size = (value & SIZE_MASK);
555 
556     offset += NUM_BYTES_IN_DW;
557     value = address_space_ldq_le(as, s->cq.base_addr + offset,
558                                  MEMTXATTRS_UNSPECIFIED, &res);
559 
560     if (res != MEMTX_OK) {
561         return CMD_STALL;
562     }
563 
564     itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT;
565 
566     valid = (value & CMD_FIELD_VALID_MASK);
567 
568     if ((devid >= s->dt.num_ids) ||
569         (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
570         qemu_log_mask(LOG_GUEST_ERROR,
571                       "ITS MAPD: invalid device table attributes "
572                       "devid %d or size %d\n", devid, size);
573         /*
574          * In this implementation, in case of error
575          * we ignore this command and move on to the next
576          * command in the queue.
577          */
578         return CMD_CONTINUE;
579     }
580 
581     return update_dte(s, devid, valid, size, itt_addr) ? CMD_CONTINUE : CMD_STALL;
582 }
583 
584 /*
585  * The current implementation blocks until all
586  * commands are processed.
587  */
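/*
 * The queue is walked one GITS_CMDQ_ENTRY_SIZE-byte slot at a time,
 * from the GITS_CREADR offset up to the GITS_CWRITER offset. Only the
 * first doubleword of a command is fetched here; handlers that need
 * more re-read further doublewords relative to the slot offset passed
 * to them. On CMD_CONTINUE the read pointer advances (wrapping at
 * cq.num_entries); on CMD_STALL the GITS_CREADR.Stalled bit is set and
 * processing stops.
 */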
588 static void process_cmdq(GICv3ITSState *s)
589 {
590     uint32_t wr_offset = 0;
591     uint32_t rd_offset = 0;
592     uint32_t cq_offset = 0;
593     uint64_t data;
594     AddressSpace *as = &s->gicv3->dma_as;
595     MemTxResult res = MEMTX_OK;
596     uint8_t cmd;
597     int i;
598 
599     if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
600         return;
601     }
602 
603     wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
604 
605     if (wr_offset >= s->cq.num_entries) {
606         qemu_log_mask(LOG_GUEST_ERROR,
607                       "%s: invalid write offset "
608                       "%d\n", __func__, wr_offset);
609         return;
610     }
611 
612     rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
613 
614     if (rd_offset >= s->cq.num_entries) {
615         qemu_log_mask(LOG_GUEST_ERROR,
616                       "%s: invalid read offset "
617                       "%d\n", __func__, rd_offset);
618         return;
619     }
620 
621     while (wr_offset != rd_offset) {
622         ItsCmdResult result = CMD_CONTINUE;
623 
624         cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
625         data = address_space_ldq_le(as, s->cq.base_addr + cq_offset,
626                                     MEMTXATTRS_UNSPECIFIED, &res);
627         if (res != MEMTX_OK) {
628             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
629             qemu_log_mask(LOG_GUEST_ERROR,
630                           "%s: could not read command at 0x%" PRIx64 "\n",
631                           __func__, s->cq.base_addr + cq_offset);
632             break;
633         }
634 
635         cmd = (data & CMD_MASK);
636 
637         switch (cmd) {
638         case GITS_CMD_INT:
639             result = process_its_cmd(s, data, cq_offset, INTERRUPT);
640             break;
641         case GITS_CMD_CLEAR:
642             result = process_its_cmd(s, data, cq_offset, CLEAR);
643             break;
644         case GITS_CMD_SYNC:
645             /*
646              * The current implementation handles each command synchronously
647              * as it is read from the queue, so the internal state is already
648              * consistent by the time a SYNC command is executed. No further
649              * processing is therefore required for SYNC.
650              */
651             break;
652         case GITS_CMD_MAPD:
653             result = process_mapd(s, data, cq_offset);
654             break;
655         case GITS_CMD_MAPC:
656             result = process_mapc(s, cq_offset);
657             break;
658         case GITS_CMD_MAPTI:
659             result = process_mapti(s, data, cq_offset, false);
660             break;
661         case GITS_CMD_MAPI:
662             result = process_mapti(s, data, cq_offset, true);
663             break;
664         case GITS_CMD_DISCARD:
665             result = process_its_cmd(s, data, cq_offset, DISCARD);
666             break;
667         case GITS_CMD_INV:
668         case GITS_CMD_INVALL:
669             /*
670              * The current implementation doesn't cache any ITS tables other
671              * than the calculated LPI priority information. We only need to
672              * trigger an LPI priority recalculation to stay in sync with
673              * LPI configuration table or pending table changes.
674              */
675             for (i = 0; i < s->gicv3->num_cpu; i++) {
676                 gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
677             }
678             break;
679         default:
680             break;
681         }
682         if (result == CMD_CONTINUE) {
683             rd_offset++;
684             rd_offset %= s->cq.num_entries;
685             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
686         } else {
687             /* CMD_STALL */
688             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
689             qemu_log_mask(LOG_GUEST_ERROR,
690                           "%s: 0x%x cmd processing failed, stalling\n",
691                           __func__, cmd);
692             break;
693         }
694     }
695 }
696 
697 /*
698  * This function extracts the ITS Device and Collection table specific
699  * parameters (such as base_addr, size, etc.) from the GITS_BASER<n> registers.
700  * It is called when the ITS is enabled and also during post_load migration.
701  */
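/*
 * For example (illustrative numbers only, assuming 8-byte table
 * entries and 8-byte L1 entries): a flat table of two 64KB pages holds
 * (2 * 65536) / 8 = 16384 entries, while the same GITS_BASER value
 * with the Indirect bit set describes 16384 L1 entries, each pointing
 * at a 64KB L2 page of 8192 entries. num_ids is derived from
 * GITS_TYPER.Devbits (Device table) or GITS_TYPER.CIDbits (Collection
 * table) and is what the command handlers bounds-check DeviceIDs and
 * ICIDs against.
 */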
702 static void extract_table_params(GICv3ITSState *s)
703 {
704     uint16_t num_pages = 0;
705     uint8_t  page_sz_type;
706     uint8_t type;
707     uint32_t page_sz = 0;
708     uint64_t value;
709 
710     for (int i = 0; i < 8; i++) {
711         TableDesc *td;
712         int idbits;
713 
714         value = s->baser[i];
715 
716         if (!value) {
717             continue;
718         }
719 
720         page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);
721 
722         switch (page_sz_type) {
723         case 0:
724             page_sz = GITS_PAGE_SIZE_4K;
725             break;
726 
727         case 1:
728             page_sz = GITS_PAGE_SIZE_16K;
729             break;
730 
731         case 2:
732         case 3:
733             page_sz = GITS_PAGE_SIZE_64K;
734             break;
735 
736         default:
737             g_assert_not_reached();
738         }
739 
740         num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;
741 
742         type = FIELD_EX64(value, GITS_BASER, TYPE);
743 
744         switch (type) {
745         case GITS_BASER_TYPE_DEVICE:
746             td = &s->dt;
747             idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
748             break;
749         case GITS_BASER_TYPE_COLLECTION:
750             td = &s->ct;
751             if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
752                 idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
753             } else {
754                 /* 16-bit CollectionId supported when CIL == 0 */
755                 idbits = 16;
756             }
757             break;
758         default:
759             /*
760              * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
761              * ensures we will only see type values corresponding to
762              * the values set up in gicv3_its_reset().
763              */
764             g_assert_not_reached();
765         }
766 
767         memset(td, 0, sizeof(*td));
768         td->valid = FIELD_EX64(value, GITS_BASER, VALID);
769         /*
770          * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
771          * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
772          * do not have a special case where the GITS_BASER<n>.Valid bit is 0
773          * for the register corresponding to the Collection table but we
774          * still have to process interrupts using non-memory-backed
775          * Collection table entries.)
776          */
777         if (!td->valid) {
778             continue;
779         }
780         td->page_sz = page_sz;
781         td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
782         td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
783         td->base_addr = baser_base_addr(value, page_sz);
784         if (!td->indirect) {
785             td->num_entries = (num_pages * page_sz) / td->entry_sz;
786         } else {
787             td->num_entries = (((num_pages * page_sz) /
788                                   L1TABLE_ENTRY_SIZE) *
789                                  (page_sz / td->entry_sz));
790         }
791         td->num_ids = 1ULL << idbits;
792     }
793 }
794 
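/*
 * The command queue itself is sized in 4KB pages (GITS_CBASER.Size + 1
 * of them), each holding a whole number of GITS_CMDQ_ENTRY_SIZE-byte
 * command slots.
 */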
795 static void extract_cmdq_params(GICv3ITSState *s)
796 {
797     uint16_t num_pages = 0;
798     uint64_t value = s->cbaser;
799 
800     num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
801 
802     memset(&s->cq, 0, sizeof(s->cq));
803     s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);
804 
805     if (s->cq.valid) {
806         s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
807                              GITS_CMDQ_ENTRY_SIZE;
808         s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
809         s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
810     }
811 }
812 
813 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
814                                                uint64_t data, unsigned size,
815                                                MemTxAttrs attrs)
816 {
817     GICv3ITSState *s = (GICv3ITSState *)opaque;
818     bool result = true;
819     uint32_t devid = 0;
820 
821     switch (offset) {
822     case GITS_TRANSLATER:
823         if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
824             devid = attrs.requester_id;
825             result = process_its_cmd(s, data, devid, NONE);
826         }
827         break;
828     default:
829         break;
830     }
831 
832     if (result) {
833         return MEMTX_OK;
834     } else {
835         return MEMTX_ERROR;
836     }
837 }
838 
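/*
 * The its_readl/its_readll/its_writel/its_writell helpers below handle
 * 32-bit and 64-bit accesses to the ITS control frame. Returning false
 * from any of them means "no such register for this access size": the
 * top-level gicv3_its_read()/gicv3_its_write() then log a guest error
 * and complete the access as RAZ/WI rather than faulting.
 */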
839 static bool its_writel(GICv3ITSState *s, hwaddr offset,
840                               uint64_t value, MemTxAttrs attrs)
841 {
842     bool result = true;
843     int index;
844 
845     switch (offset) {
846     case GITS_CTLR:
847         if (value & R_GITS_CTLR_ENABLED_MASK) {
848             s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
849             extract_table_params(s);
850             extract_cmdq_params(s);
851             s->creadr = 0;
852             process_cmdq(s);
853         } else {
854             s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
855         }
856         break;
857     case GITS_CBASER:
858         /*
859          * IMPDEF choice: the GITS_CBASER register becomes RO if the ITS
860          *                is already enabled
861          */
862         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
863             s->cbaser = deposit64(s->cbaser, 0, 32, value);
864             s->creadr = 0;
865             s->cwriter = s->creadr;
866         }
867         break;
868     case GITS_CBASER + 4:
869         /*
870          * IMPDEF choice: the GITS_CBASER register becomes RO if the ITS
871          *                is already enabled
872          */
873         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
874             s->cbaser = deposit64(s->cbaser, 32, 32, value);
875             s->creadr = 0;
876             s->cwriter = s->creadr;
877         }
878         break;
879     case GITS_CWRITER:
880         s->cwriter = deposit64(s->cwriter, 0, 32,
881                                (value & ~R_GITS_CWRITER_RETRY_MASK));
882         if (s->cwriter != s->creadr) {
883             process_cmdq(s);
884         }
885         break;
886     case GITS_CWRITER + 4:
887         s->cwriter = deposit64(s->cwriter, 32, 32, value);
888         break;
889     case GITS_CREADR:
890         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
891             s->creadr = deposit64(s->creadr, 0, 32,
892                                   (value & ~R_GITS_CREADR_STALLED_MASK));
893         } else {
894             /* RO register, ignore the write */
895             qemu_log_mask(LOG_GUEST_ERROR,
896                           "%s: invalid guest write to RO register at offset "
897                           TARGET_FMT_plx "\n", __func__, offset);
898         }
899         break;
900     case GITS_CREADR + 4:
901         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
902             s->creadr = deposit64(s->creadr, 32, 32, value);
903         } else {
904             /* RO register, ignore the write */
905             qemu_log_mask(LOG_GUEST_ERROR,
906                           "%s: invalid guest write to RO register at offset "
907                           TARGET_FMT_plx "\n", __func__, offset);
908         }
909         break;
910     case GITS_BASER ... GITS_BASER + 0x3f:
911         /*
912          * IMPDEF choice: the GITS_BASER<n> registers become RO if the ITS
913          *                is already enabled
914          */
915         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
916             index = (offset - GITS_BASER) / 8;
917 
918             if (offset & 7) {
919                 value <<= 32;
920                 value &= ~GITS_BASER_RO_MASK;
921                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
922                 s->baser[index] |= value;
923             } else {
924                 value &= ~GITS_BASER_RO_MASK;
925                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
926                 s->baser[index] |= value;
927             }
928         }
929         break;
930     case GITS_IIDR:
931     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
932         /* RO registers, ignore the write */
933         qemu_log_mask(LOG_GUEST_ERROR,
934                       "%s: invalid guest write to RO register at offset "
935                       TARGET_FMT_plx "\n", __func__, offset);
936         break;
937     default:
938         result = false;
939         break;
940     }
941     return result;
942 }
943 
944 static bool its_readl(GICv3ITSState *s, hwaddr offset,
945                              uint64_t *data, MemTxAttrs attrs)
946 {
947     bool result = true;
948     int index;
949 
950     switch (offset) {
951     case GITS_CTLR:
952         *data = s->ctlr;
953         break;
954     case GITS_IIDR:
955         *data = gicv3_iidr();
956         break;
957     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
958         /* ID registers */
959         *data = gicv3_idreg(offset - GITS_IDREGS);
960         break;
961     case GITS_TYPER:
962         *data = extract64(s->typer, 0, 32);
963         break;
964     case GITS_TYPER + 4:
965         *data = extract64(s->typer, 32, 32);
966         break;
967     case GITS_CBASER:
968         *data = extract64(s->cbaser, 0, 32);
969         break;
970     case GITS_CBASER + 4:
971         *data = extract64(s->cbaser, 32, 32);
972         break;
973     case GITS_CREADR:
974         *data = extract64(s->creadr, 0, 32);
975         break;
976     case GITS_CREADR + 4:
977         *data = extract64(s->creadr, 32, 32);
978         break;
979     case GITS_CWRITER:
980         *data = extract64(s->cwriter, 0, 32);
981         break;
982     case GITS_CWRITER + 4:
983         *data = extract64(s->cwriter, 32, 32);
984         break;
985     case GITS_BASER ... GITS_BASER + 0x3f:
986         index = (offset - GITS_BASER) / 8;
987         if (offset & 7) {
988             *data = extract64(s->baser[index], 32, 32);
989         } else {
990             *data = extract64(s->baser[index], 0, 32);
991         }
992         break;
993     default:
994         result = false;
995         break;
996     }
997     return result;
998 }
999 
1000 static bool its_writell(GICv3ITSState *s, hwaddr offset,
1001                                uint64_t value, MemTxAttrs attrs)
1002 {
1003     bool result = true;
1004     int index;
1005 
1006     switch (offset) {
1007     case GITS_BASER ... GITS_BASER + 0x3f:
1008         /*
1009          * IMPDEF choice: the GITS_BASER<n> registers become RO if the ITS
1010          *                is already enabled
1011          */
1012         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1013             index = (offset - GITS_BASER) / 8;
1014             s->baser[index] &= GITS_BASER_RO_MASK;
1015             s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
1016         }
1017         break;
1018     case GITS_CBASER:
1019         /*
1020          * IMPDEF choice: the GITS_CBASER register becomes RO if the ITS
1021          *                is already enabled
1022          */
1023         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1024             s->cbaser = value;
1025             s->creadr = 0;
1026             s->cwriter = s->creadr;
1027         }
1028         break;
1029     case GITS_CWRITER:
1030         s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
1031         if (s->cwriter != s->creadr) {
1032             process_cmdq(s);
1033         }
1034         break;
1035     case GITS_CREADR:
1036         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1037             s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
1038         } else {
1039             /* RO register, ignore the write */
1040             qemu_log_mask(LOG_GUEST_ERROR,
1041                           "%s: invalid guest write to RO register at offset "
1042                           TARGET_FMT_plx "\n", __func__, offset);
1043         }
1044         break;
1045     case GITS_TYPER:
1046         /* RO registers, ignore the write */
1047         qemu_log_mask(LOG_GUEST_ERROR,
1048                       "%s: invalid guest write to RO register at offset "
1049                       TARGET_FMT_plx "\n", __func__, offset);
1050         break;
1051     default:
1052         result = false;
1053         break;
1054     }
1055     return result;
1056 }
1057 
1058 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1059                               uint64_t *data, MemTxAttrs attrs)
1060 {
1061     bool result = true;
1062     int index;
1063 
1064     switch (offset) {
1065     case GITS_TYPER:
1066         *data = s->typer;
1067         break;
1068     case GITS_BASER ... GITS_BASER + 0x3f:
1069         index = (offset - GITS_BASER) / 8;
1070         *data = s->baser[index];
1071         break;
1072     case GITS_CBASER:
1073         *data = s->cbaser;
1074         break;
1075     case GITS_CREADR:
1076         *data = s->creadr;
1077         break;
1078     case GITS_CWRITER:
1079         *data = s->cwriter;
1080         break;
1081     default:
1082         result = false;
1083         break;
1084     }
1085     return result;
1086 }
1087 
1088 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1089                                   unsigned size, MemTxAttrs attrs)
1090 {
1091     GICv3ITSState *s = (GICv3ITSState *)opaque;
1092     bool result;
1093 
1094     switch (size) {
1095     case 4:
1096         result = its_readl(s, offset, data, attrs);
1097         break;
1098     case 8:
1099         result = its_readll(s, offset, data, attrs);
1100         break;
1101     default:
1102         result = false;
1103         break;
1104     }
1105 
1106     if (!result) {
1107         qemu_log_mask(LOG_GUEST_ERROR,
1108                       "%s: invalid guest read at offset " TARGET_FMT_plx
1109                       " size %u\n", __func__, offset, size);
1110         /*
1111          * The spec requires that reserved registers are RAZ/WI;
1112          * so use false returns from leaf functions as a way to
1113          * trigger the guest-error logging but don't return it to
1114          * the caller, or we'll cause a spurious guest data abort.
1115          */
1116         *data = 0;
1117     }
1118     return MEMTX_OK;
1119 }
1120 
1121 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1122                                    unsigned size, MemTxAttrs attrs)
1123 {
1124     GICv3ITSState *s = (GICv3ITSState *)opaque;
1125     bool result;
1126 
1127     switch (size) {
1128     case 4:
1129         result = its_writel(s, offset, data, attrs);
1130         break;
1131     case 8:
1132         result = its_writell(s, offset, data, attrs);
1133         break;
1134     default:
1135         result = false;
1136         break;
1137     }
1138 
1139     if (!result) {
1140         qemu_log_mask(LOG_GUEST_ERROR,
1141                       "%s: invalid guest write at offset " TARGET_FMT_plx
1142                       " size %u\n", __func__, offset, size);
1143         /*
1144          * The spec requires that reserved registers are RAZ/WI;
1145          * so use false returns from leaf functions as a way to
1146          * trigger the guest-error logging but don't return it to
1147          * the caller, or we'll cause a spurious guest data abort.
1148          */
1149     }
1150     return MEMTX_OK;
1151 }
1152 
1153 static const MemoryRegionOps gicv3_its_control_ops = {
1154     .read_with_attrs = gicv3_its_read,
1155     .write_with_attrs = gicv3_its_write,
1156     .valid.min_access_size = 4,
1157     .valid.max_access_size = 8,
1158     .impl.min_access_size = 4,
1159     .impl.max_access_size = 8,
1160     .endianness = DEVICE_NATIVE_ENDIAN,
1161 };
1162 
1163 static const MemoryRegionOps gicv3_its_translation_ops = {
1164     .write_with_attrs = gicv3_its_translation_write,
1165     .valid.min_access_size = 2,
1166     .valid.max_access_size = 4,
1167     .impl.min_access_size = 2,
1168     .impl.max_access_size = 4,
1169     .endianness = DEVICE_NATIVE_ENDIAN,
1170 };
1171 
1172 static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
1173 {
1174     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1175     int i;
1176 
1177     for (i = 0; i < s->gicv3->num_cpu; i++) {
1178         if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
1179             error_setg(errp, "Physical LPI not supported by CPU %d", i);
1180             return;
1181         }
1182     }
1183 
1184     gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
1185 
1186     address_space_init(&s->gicv3->dma_as, s->gicv3->dma,
1187                        "gicv3-its-sysmem");
1188 
1189     /* set the ITS default features supported */
1190     s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
1191     s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
1192                           ITS_ITT_ENTRY_SIZE - 1);
1193     s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
1194     s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
1195     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
1196     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
1197 }
1198 
1199 static void gicv3_its_reset(DeviceState *dev)
1200 {
1201     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1202     GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);
1203 
1204     c->parent_reset(dev);
1205 
1206     /* Quiescent bit reset to 1 */
1207     s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);
1208 
1209     /*
1210      * setting GITS_BASER0.Type = 0b001 (Device)
1211      *         GITS_BASER1.Type = 0b100 (Collection Table)
1212      *         GITS_BASER<n>.Type, where n = 2 to 7, to 0b000 (Unimplemented)
1213      *         GITS_BASER<0,1>.Page_Size = 64KB
1214      * and default translation table entry size to 16 bytes
1215      */
1216     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
1217                              GITS_BASER_TYPE_DEVICE);
1218     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
1219                              GITS_BASER_PAGESIZE_64K);
1220     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
1221                              GITS_DTE_SIZE - 1);
1222 
1223     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
1224                              GITS_BASER_TYPE_COLLECTION);
1225     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
1226                              GITS_BASER_PAGESIZE_64K);
1227     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
1228                              GITS_CTE_SIZE - 1);
1229 }
1230 
1231 static void gicv3_its_post_load(GICv3ITSState *s)
1232 {
1233     if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1234         extract_table_params(s);
1235         extract_cmdq_params(s);
1236     }
1237 }
1238 
1239 static Property gicv3_its_props[] = {
1240     DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
1241                      GICv3State *),
1242     DEFINE_PROP_END_OF_LIST(),
1243 };
1244 
1245 static void gicv3_its_class_init(ObjectClass *klass, void *data)
1246 {
1247     DeviceClass *dc = DEVICE_CLASS(klass);
1248     GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
1249     GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);
1250 
1251     dc->realize = gicv3_arm_its_realize;
1252     device_class_set_props(dc, gicv3_its_props);
1253     device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
1254     icc->post_load = gicv3_its_post_load;
1255 }
1256 
1257 static const TypeInfo gicv3_its_info = {
1258     .name = TYPE_ARM_GICV3_ITS,
1259     .parent = TYPE_ARM_GICV3_ITS_COMMON,
1260     .instance_size = sizeof(GICv3ITSState),
1261     .class_init = gicv3_its_class_init,
1262     .class_size = sizeof(GICv3ITSClass),
1263 };
1264 
1265 static void gicv3_its_register_types(void)
1266 {
1267     type_register_static(&gicv3_its_info);
1268 }
1269 
1270 type_init(gicv3_its_register_types)
1271