xref: /openbmc/qemu/hw/intc/arm_gicv3_its.c (revision f0175135e74bc979573f170e83abfc536aed03de)
1 /*
2  * ITS emulation for a GICv3-based system
3  *
4  * Copyright Linaro.org 2021
5  *
6  * Authors:
7  *  Shashi Mallela <shashi.mallela@linaro.org>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10  * option) any later version.  See the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qemu/log.h"
16 #include "trace.h"
17 #include "hw/qdev-properties.h"
18 #include "hw/intc/arm_gicv3_its_common.h"
19 #include "gicv3_internal.h"
20 #include "qom/object.h"
21 #include "qapi/error.h"
22 
23 typedef struct GICv3ITSClass GICv3ITSClass;
24 /* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
25 DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
26                      ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)
27 
28 struct GICv3ITSClass {
29     GICv3ITSCommonClass parent_class;
30     void (*parent_reset)(DeviceState *dev);
31 };
32 
33 /*
34  * This is an internal enum used to distinguish between an LPI triggered
35  * via the command queue and an LPI triggered via a GITS_TRANSLATER write.
36  */
37 typedef enum ItsCmdType {
38     NONE = 0, /* internal indication for GITS_TRANSLATER write */
39     CLEAR = 1,
40     DISCARD = 2,
41     INTERRUPT = 3,
42 } ItsCmdType;
43 
44 typedef struct DTEntry {
45     bool valid;
46     unsigned size;
47     uint64_t ittaddr;
48 } DTEntry;
49 
50 typedef struct CTEntry {
51     bool valid;
52     uint32_t rdbase;
53 } CTEntry;
54 
55 typedef struct ITEntry {
56     bool valid;
57     int inttype;
58     uint32_t intid;
59     uint32_t doorbell;
60     uint32_t icid;
61     uint32_t vpeid;
62 } ITEntry;
63 
64 typedef struct VTEntry {
65     bool valid;
66     unsigned vptsize;
67     uint32_t rdbase;
68     uint64_t vptaddr;
69 } VTEntry;
70 
71 /*
72  * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
73  * if a command parameter is not correct. These include both "stall
74  * processing of the command queue" and "ignore this command, and
75  * keep processing the queue". In our implementation we choose that
76  * memory transaction errors reading the command packet provoke a
77  * stall, but errors in parameters cause us to ignore the command
78  * and continue processing.
79  * The process_* functions which handle individual ITS commands all
80  * return an ItsCmdResult which tells process_cmdq() whether it should
81  * stall, keep going because of an error, or keep going because the
82  * command was a success.
83  */
84 typedef enum ItsCmdResult {
85     CMD_STALL = 0,
86     CMD_CONTINUE = 1,
87     CMD_CONTINUE_OK = 2,
88 } ItsCmdResult;
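
/*
 * Illustrative note (not part of the original source): process_cmdq()
 * below reacts to these values by advancing GITS_CREADR past the command
 * for CMD_CONTINUE and CMD_CONTINUE_OK, and by setting GITS_CREADR.Stalled
 * and stopping queue processing for CMD_STALL. The _OK variant only
 * matters to callers which need to distinguish "command succeeded" from
 * "command ignored because of a parameter error".
 */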
89 
90 /* True if the ITS supports the GICv4 virtual LPI feature */
91 static bool its_feature_virtual(GICv3ITSState *s)
92 {
93     return s->typer & R_GITS_TYPER_VIRTUAL_MASK;
94 }
95 
96 static inline bool intid_in_lpi_range(uint32_t id)
97 {
98     return id >= GICV3_LPI_INTID_START &&
99         id < (1 << (GICD_TYPER_IDBITS + 1));
100 }
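
/*
 * Worked example (illustrative, assuming the values defined in
 * gicv3_internal.h and arm_gicv3_common.h, where GICV3_LPI_INTID_START
 * is 8192 and GICD_TYPER_IDBITS is 15): the valid LPI INTIDs are
 * 8192 <= id < (1 << 16), i.e. the range [8192, 65535].
 */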
101 
102 static inline bool valid_doorbell(uint32_t id)
103 {
104     /* Doorbell fields may be an LPI, or 1023 to mean "no doorbell" */
105     return id == INTID_SPURIOUS || intid_in_lpi_range(id);
106 }
107 
108 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
109 {
110     uint64_t result = 0;
111 
112     switch (page_sz) {
113     case GITS_PAGE_SIZE_4K:
114     case GITS_PAGE_SIZE_16K:
115         result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
116         break;
117 
118     case GITS_PAGE_SIZE_64K:
119         result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
120         result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
121         break;
122 
123     default:
124         break;
125     }
126     return result;
127 }
128 
129 static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
130                                  uint32_t idx, MemTxResult *res)
131 {
132     /*
133      * Given a TableDesc describing one of the ITS in-guest-memory
134      * tables and an index into it, return the guest address
135      * corresponding to that table entry.
136      * If there was a memory error reading the L1 table of an
137      * indirect table, *res is set accordingly, and we return -1.
138      * If the L1 table entry is marked not valid, we return -1 with
139      * *res set to MEMTX_OK.
140      *
141      * The specification defines the format of level 1 entries of a
142      * 2-level table, but the format of level 2 entries and the format
143      * of flat-mapped tables is IMPDEF.
144      */
145     AddressSpace *as = &s->gicv3->dma_as;
146     uint32_t l2idx;
147     uint64_t l2;
148     uint32_t num_l2_entries;
149 
150     *res = MEMTX_OK;
151 
152     if (!td->indirect) {
153         /* Single level table */
154         return td->base_addr + idx * td->entry_sz;
155     }
156 
157     /* Two level table */
158     l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);
159 
160     l2 = address_space_ldq_le(as,
161                               td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
162                               MEMTXATTRS_UNSPECIFIED, res);
163     if (*res != MEMTX_OK) {
164         return -1;
165     }
166     if (!(l2 & L2_TABLE_VALID_MASK)) {
167         return -1;
168     }
169 
170     num_l2_entries = td->page_sz / td->entry_sz;
171     return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
172 }
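
/*
 * Worked example of the two-level case above (illustrative only; the
 * numbers are chosen for this comment, and L1TABLE_ENTRY_SIZE is assumed
 * to be 8 as in gicv3_internal.h): with td->page_sz = 0x10000 (64K),
 * td->entry_sz = 8 and idx = 20000, we get
 *     l2idx = 20000 / (0x10000 / L1TABLE_ENTRY_SIZE) = 20000 / 8192 = 2
 * so the L1 entry is read from td->base_addr + 2 * L1TABLE_ENTRY_SIZE.
 * If that entry is valid, num_l2_entries = 0x10000 / 8 = 8192 and the
 * returned address is the L2 page base plus (20000 % 8192) * 8 = 0x7100.
 */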
173 
174 /*
175  * Read the Collection Table entry at index @icid. On success (including
176  * successfully determining that there is no valid CTE for this index),
177  * we return MEMTX_OK and populate the CTEntry struct @cte accordingly.
178  * If there is an error reading memory then we return the error code.
179  */
180 static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte)
181 {
182     AddressSpace *as = &s->gicv3->dma_as;
183     MemTxResult res = MEMTX_OK;
184     uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, &res);
185     uint64_t cteval;
186 
187     if (entry_addr == -1) {
188         /* No L2 table entry, i.e. no valid CTE, or a memory error */
189         cte->valid = false;
190         goto out;
191     }
192 
193     cteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
194     if (res != MEMTX_OK) {
195         goto out;
196     }
197     cte->valid = FIELD_EX64(cteval, CTE, VALID);
198     cte->rdbase = FIELD_EX64(cteval, CTE, RDBASE);
199 out:
200     if (res != MEMTX_OK) {
201         trace_gicv3_its_cte_read_fault(icid);
202     } else {
203         trace_gicv3_its_cte_read(icid, cte->valid, cte->rdbase);
204     }
205     return res;
206 }
207 
208 /*
209  * Update the Interrupt Table entry at index @eventid in the table specified
210  * by the DTE @dte. Returns true on success, false if there was a memory
211  * access error.
212  */
213 static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
214                        const ITEntry *ite)
215 {
216     AddressSpace *as = &s->gicv3->dma_as;
217     MemTxResult res = MEMTX_OK;
218     hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
219     uint64_t itel = 0;
220     uint32_t iteh = 0;
221 
222     trace_gicv3_its_ite_write(dte->ittaddr, eventid, ite->valid,
223                               ite->inttype, ite->intid, ite->icid,
224                               ite->vpeid, ite->doorbell);
225 
226     if (ite->valid) {
227         itel = FIELD_DP64(itel, ITE_L, VALID, 1);
228         itel = FIELD_DP64(itel, ITE_L, INTTYPE, ite->inttype);
229         itel = FIELD_DP64(itel, ITE_L, INTID, ite->intid);
230         itel = FIELD_DP64(itel, ITE_L, ICID, ite->icid);
231         itel = FIELD_DP64(itel, ITE_L, VPEID, ite->vpeid);
232         iteh = FIELD_DP32(iteh, ITE_H, DOORBELL, ite->doorbell);
233     }
234 
235     address_space_stq_le(as, iteaddr, itel, MEMTXATTRS_UNSPECIFIED, &res);
236     if (res != MEMTX_OK) {
237         return false;
238     }
239     address_space_stl_le(as, iteaddr + 8, iteh, MEMTXATTRS_UNSPECIFIED, &res);
240     return res == MEMTX_OK;
241 }
242 
243 /*
244  * Read the Interrupt Table entry at index @eventid from the table specified
245  * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry
246  * struct @ite accordingly. If there is an error reading memory then we return
247  * the error code.
248  */
249 static MemTxResult get_ite(GICv3ITSState *s, uint32_t eventid,
250                            const DTEntry *dte, ITEntry *ite)
251 {
252     AddressSpace *as = &s->gicv3->dma_as;
253     MemTxResult res = MEMTX_OK;
254     uint64_t itel;
255     uint32_t iteh;
256     hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
257 
258     itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, &res);
259     if (res != MEMTX_OK) {
260         trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
261         return res;
262     }
263 
264     iteh = address_space_ldl_le(as, iteaddr + 8, MEMTXATTRS_UNSPECIFIED, &res);
265     if (res != MEMTX_OK) {
266         trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
267         return res;
268     }
269 
270     ite->valid = FIELD_EX64(itel, ITE_L, VALID);
271     ite->inttype = FIELD_EX64(itel, ITE_L, INTTYPE);
272     ite->intid = FIELD_EX64(itel, ITE_L, INTID);
273     ite->icid = FIELD_EX64(itel, ITE_L, ICID);
274     ite->vpeid = FIELD_EX64(itel, ITE_L, VPEID);
275     ite->doorbell = FIELD_EX64(iteh, ITE_H, DOORBELL);
276     trace_gicv3_its_ite_read(dte->ittaddr, eventid, ite->valid,
277                              ite->inttype, ite->intid, ite->icid,
278                              ite->vpeid, ite->doorbell);
279     return MEMTX_OK;
280 }
281 
282 /*
283  * Read the Device Table entry at index @devid. On success (including
284  * successfully determining that there is no valid DTE for this index),
285  * we return MEMTX_OK and populate the DTEntry struct accordingly.
286  * If there is an error reading memory then we return the error code.
287  */
288 static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte)
289 {
290     MemTxResult res = MEMTX_OK;
291     AddressSpace *as = &s->gicv3->dma_as;
292     uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res);
293     uint64_t dteval;
294 
295     if (entry_addr == -1) {
296         /* No L2 table entry, i.e. no valid DTE, or a memory error */
297         dte->valid = false;
298         goto out;
299     }
300     dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
301     if (res != MEMTX_OK) {
302         goto out;
303     }
304     dte->valid = FIELD_EX64(dteval, DTE, VALID);
305     dte->size = FIELD_EX64(dteval, DTE, SIZE);
306     /* DTE word field stores bits [51:8] of the ITT address */
307     dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT;
308 out:
309     if (res != MEMTX_OK) {
310         trace_gicv3_its_dte_read_fault(devid);
311     } else {
312         trace_gicv3_its_dte_read(devid, dte->valid, dte->size, dte->ittaddr);
313     }
314     return res;
315 }
316 
317 /*
318  * Given a (DeviceID, EventID), look up the corresponding ITE, including
319  * checking for the various invalid-value cases. If we find a valid ITE,
320  * fill in @ite and @dte and return CMD_CONTINUE_OK. Otherwise return
321  * CMD_STALL or CMD_CONTINUE as appropriate (and the contents of @ite
322  * should not be relied on).
323  *
324  * The string @who is purely for the LOG_GUEST_ERROR messages,
325  * and should indicate the name of the calling function or similar.
326  */
327 static ItsCmdResult lookup_ite(GICv3ITSState *s, const char *who,
328                                uint32_t devid, uint32_t eventid, ITEntry *ite,
329                                DTEntry *dte)
330 {
331     uint64_t num_eventids;
332 
333     if (devid >= s->dt.num_entries) {
334         qemu_log_mask(LOG_GUEST_ERROR,
335                       "%s: invalid command attributes: devid %d >= %d\n",
336                       who, devid, s->dt.num_entries);
337         return CMD_CONTINUE;
338     }
339 
340     if (get_dte(s, devid, dte) != MEMTX_OK) {
341         return CMD_STALL;
342     }
343     if (!dte->valid) {
344         qemu_log_mask(LOG_GUEST_ERROR,
345                       "%s: invalid command attributes: "
346                       "invalid dte for %d\n", who, devid);
347         return CMD_CONTINUE;
348     }
349 
350     num_eventids = 1ULL << (dte->size + 1);
351     if (eventid >= num_eventids) {
352         qemu_log_mask(LOG_GUEST_ERROR,
353                       "%s: invalid command attributes: eventid %d >= %"
354                       PRId64 "\n", who, eventid, num_eventids);
355         return CMD_CONTINUE;
356     }
357 
358     if (get_ite(s, eventid, dte, ite) != MEMTX_OK) {
359         return CMD_STALL;
360     }
361 
362     if (!ite->valid) {
363         qemu_log_mask(LOG_GUEST_ERROR,
364                       "%s: invalid command attributes: invalid ITE\n", who);
365         return CMD_CONTINUE;
366     }
367 
368     return CMD_CONTINUE_OK;
369 }
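
/*
 * Summary of the translation path (illustrative, not part of the original
 * source): starting from a (DeviceID, EventID) pair,
 *
 *   Device Table[DeviceID]  -> DTE  (ITT base address and size)
 *   ITT[EventID]            -> ITE  (INTID plus ICID or vPEID/doorbell)
 *   Collection Table[ICID]  -> CTE  (target redistributor, rdbase)
 *
 * lookup_ite() only performs the first two steps; the CTE lookup is done
 * by its callers (e.g. do_process_its_cmd() and process_movi()).
 */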
370 
371 /*
372  * This function handles the processing of the following commands, based
373  * on the ItsCmdType parameter passed:
374  * 1. triggering of LPI interrupt translation via the ITS INT command
375  * 2. triggering of LPI interrupt translation via a GITS_TRANSLATER write
376  * 3. handling of the ITS CLEAR command
377  * 4. handling of the ITS DISCARD command
378  */
379 static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
380                                        uint32_t eventid, ItsCmdType cmd)
381 {
382     DTEntry dte;
383     CTEntry cte;
384     ITEntry ite;
385     ItsCmdResult cmdres;
386 
387     cmdres = lookup_ite(s, __func__, devid, eventid, &ite, &dte);
388     if (cmdres != CMD_CONTINUE_OK) {
389         return cmdres;
390     }
391 
392     if (ite.inttype != ITE_INTTYPE_PHYSICAL) {
393         qemu_log_mask(LOG_GUEST_ERROR,
394                       "%s: invalid command attributes: invalid ITE\n",
395                       __func__);
396         return CMD_CONTINUE;
397     }
398 
399     if (ite.icid >= s->ct.num_entries) {
400         qemu_log_mask(LOG_GUEST_ERROR,
401                       "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
402                       __func__, ite.icid);
403         return CMD_CONTINUE;
404     }
405 
406     if (get_cte(s, ite.icid, &cte) != MEMTX_OK) {
407         return CMD_STALL;
408     }
409     if (!cte.valid) {
410         qemu_log_mask(LOG_GUEST_ERROR,
411                       "%s: invalid command attributes: invalid CTE\n",
412                       __func__);
413         return CMD_CONTINUE;
414     }
415 
416     /*
417      * The current implementation only supports rdbase == procnum;
418      * the rdbase physical-address form is therefore ignored.
419      */
420     if (cte.rdbase >= s->gicv3->num_cpu) {
421         return CMD_CONTINUE;
422     }
423 
424     if ((cmd == CLEAR) || (cmd == DISCARD)) {
425         gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 0);
426     } else {
427         gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 1);
428     }
429 
430     if (cmd == DISCARD) {
431         ITEntry ite = {};
432         /* remove mapping from interrupt translation table */
433         ite.valid = false;
434         return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
435     }
436     return CMD_CONTINUE_OK;
437 }
438 
439 static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt,
440                                     ItsCmdType cmd)
441 {
442     uint32_t devid, eventid;
443 
444     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
445     eventid = cmdpkt[1] & EVENTID_MASK;
446     switch (cmd) {
447     case INTERRUPT:
448         trace_gicv3_its_cmd_int(devid, eventid);
449         break;
450     case CLEAR:
451         trace_gicv3_its_cmd_clear(devid, eventid);
452         break;
453     case DISCARD:
454         trace_gicv3_its_cmd_discard(devid, eventid);
455         break;
456     default:
457         g_assert_not_reached();
458     }
459     return do_process_its_cmd(s, devid, eventid, cmd);
460 }
461 
462 static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
463                                   bool ignore_pInt)
464 {
465     uint32_t devid, eventid;
466     uint32_t pIntid = 0;
467     uint64_t num_eventids;
468     uint16_t icid = 0;
469     DTEntry dte;
470     ITEntry ite;
471 
472     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
473     eventid = cmdpkt[1] & EVENTID_MASK;
474     icid = cmdpkt[2] & ICID_MASK;
475 
476     if (ignore_pInt) {
477         pIntid = eventid;
478         trace_gicv3_its_cmd_mapi(devid, eventid, icid);
479     } else {
480         pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT;
481         trace_gicv3_its_cmd_mapti(devid, eventid, icid, pIntid);
482     }
483 
484     if (devid >= s->dt.num_entries) {
485         qemu_log_mask(LOG_GUEST_ERROR,
486                       "%s: invalid command attributes: devid %d >= %d\n",
487                       __func__, devid, s->dt.num_entries);
488         return CMD_CONTINUE;
489     }
490 
491     if (get_dte(s, devid, &dte) != MEMTX_OK) {
492         return CMD_STALL;
493     }
494     num_eventids = 1ULL << (dte.size + 1);
495 
496     if (icid >= s->ct.num_entries) {
497         qemu_log_mask(LOG_GUEST_ERROR,
498                       "%s: invalid ICID 0x%x >= 0x%x\n",
499                       __func__, icid, s->ct.num_entries);
500         return CMD_CONTINUE;
501     }
502 
503     if (!dte.valid) {
504         qemu_log_mask(LOG_GUEST_ERROR,
505                       "%s: no valid DTE for devid 0x%x\n", __func__, devid);
506         return CMD_CONTINUE;
507     }
508 
509     if (eventid >= num_eventids) {
510         qemu_log_mask(LOG_GUEST_ERROR,
511                       "%s: invalid event ID 0x%x >= 0x%" PRIx64 "\n",
512                       __func__, eventid, num_eventids);
513         return CMD_CONTINUE;
514     }
515 
516     if (!intid_in_lpi_range(pIntid)) {
517         qemu_log_mask(LOG_GUEST_ERROR,
518                       "%s: invalid interrupt ID 0x%x\n", __func__, pIntid);
519         return CMD_CONTINUE;
520     }
521 
522     /* add ite entry to interrupt translation table */
523     ite.valid = true;
524     ite.inttype = ITE_INTTYPE_PHYSICAL;
525     ite.intid = pIntid;
526     ite.icid = icid;
527     ite.doorbell = INTID_SPURIOUS;
528     ite.vpeid = 0;
529     return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
530 }
531 
532 static ItsCmdResult process_vmapti(GICv3ITSState *s, const uint64_t *cmdpkt,
533                                    bool ignore_vintid)
534 {
535     uint32_t devid, eventid, vintid, doorbell, vpeid;
536     uint64_t num_eventids;
537     DTEntry dte;
538     ITEntry ite;
539 
540     if (!its_feature_virtual(s)) {
541         return CMD_CONTINUE;
542     }
543 
544     devid = FIELD_EX64(cmdpkt[0], VMAPTI_0, DEVICEID);
545     eventid = FIELD_EX64(cmdpkt[1], VMAPTI_1, EVENTID);
546     vpeid = FIELD_EX64(cmdpkt[1], VMAPTI_1, VPEID);
547     doorbell = FIELD_EX64(cmdpkt[2], VMAPTI_2, DOORBELL);
548     if (ignore_vintid) {
549         vintid = eventid;
550         trace_gicv3_its_cmd_vmapi(devid, eventid, vpeid, doorbell);
551     } else {
552         vintid = FIELD_EX64(cmdpkt[2], VMAPTI_2, VINTID);
553         trace_gicv3_its_cmd_vmapti(devid, eventid, vpeid, vintid, doorbell);
554     }
555 
556     if (devid >= s->dt.num_entries) {
557         qemu_log_mask(LOG_GUEST_ERROR,
558                       "%s: invalid DeviceID 0x%x (must be less than 0x%x)\n",
559                       __func__, devid, s->dt.num_entries);
560         return CMD_CONTINUE;
561     }
562 
563     if (get_dte(s, devid, &dte) != MEMTX_OK) {
564         return CMD_STALL;
565     }
566 
567     if (!dte.valid) {
568         qemu_log_mask(LOG_GUEST_ERROR,
569                       "%s: no entry in device table for DeviceID 0x%x\n",
570                       __func__, devid);
571         return CMD_CONTINUE;
572     }
573 
574     num_eventids = 1ULL << (dte.size + 1);
575 
576     if (eventid >= num_eventids) {
577         qemu_log_mask(LOG_GUEST_ERROR,
578                       "%s: EventID 0x%x too large for DeviceID 0x%x "
579                       "(must be less than 0x%" PRIx64 ")\n",
580                       __func__, eventid, devid, num_eventids);
581         return CMD_CONTINUE;
582     }
583     if (!intid_in_lpi_range(vintid)) {
584         qemu_log_mask(LOG_GUEST_ERROR,
585                       "%s: VIntID 0x%x not a valid LPI\n",
586                       __func__, vintid);
587         return CMD_CONTINUE;
588     }
589     if (!valid_doorbell(doorbell)) {
590         qemu_log_mask(LOG_GUEST_ERROR,
591                       "%s: Doorbell %d not 1023 and not a valid LPI\n",
592                       __func__, doorbell);
593         return CMD_CONTINUE;
594     }
595     if (vpeid >= s->vpet.num_entries) {
596         qemu_log_mask(LOG_GUEST_ERROR,
597                       "%s: VPEID 0x%x out of range (must be less than 0x%x)\n",
598                       __func__, vpeid, s->vpet.num_entries);
599         return CMD_CONTINUE;
600     }
601     /* add ite entry to interrupt translation table */
602     ite.valid = true;
603     ite.inttype = ITE_INTTYPE_VIRTUAL;
604     ite.intid = vintid;
605     ite.icid = 0;
606     ite.doorbell = doorbell;
607     ite.vpeid = vpeid;
608     return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
609 }
610 
611 /*
612  * Update the Collection Table entry for @icid to @cte. Returns true
613  * on success, false if there was a memory access error.
614  */
615 static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte)
616 {
617     AddressSpace *as = &s->gicv3->dma_as;
618     uint64_t entry_addr;
619     uint64_t cteval = 0;
620     MemTxResult res = MEMTX_OK;
621 
622     trace_gicv3_its_cte_write(icid, cte->valid, cte->rdbase);
623 
624     if (cte->valid) {
625         /* add mapping entry to collection table */
626         cteval = FIELD_DP64(cteval, CTE, VALID, 1);
627         cteval = FIELD_DP64(cteval, CTE, RDBASE, cte->rdbase);
628     }
629 
630     entry_addr = table_entry_addr(s, &s->ct, icid, &res);
631     if (res != MEMTX_OK) {
632         /* memory access error: stall */
633         return false;
634     }
635     if (entry_addr == -1) {
636         /* No L2 table for this index: discard write and continue */
637         return true;
638     }
639 
640     address_space_stq_le(as, entry_addr, cteval, MEMTXATTRS_UNSPECIFIED, &res);
641     return res == MEMTX_OK;
642 }
643 
644 static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
645 {
646     uint16_t icid;
647     CTEntry cte;
648 
649     icid = cmdpkt[2] & ICID_MASK;
650     cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
651     if (cte.valid) {
652         cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
653         cte.rdbase &= RDBASE_PROCNUM_MASK;
654     } else {
655         cte.rdbase = 0;
656     }
657     trace_gicv3_its_cmd_mapc(icid, cte.rdbase, cte.valid);
658 
659     if (icid >= s->ct.num_entries) {
660         qemu_log_mask(LOG_GUEST_ERROR, "ITS MAPC: invalid ICID 0x%x\n", icid);
661         return CMD_CONTINUE;
662     }
663     if (cte.valid && cte.rdbase >= s->gicv3->num_cpu) {
664         qemu_log_mask(LOG_GUEST_ERROR,
665                       "ITS MAPC: invalid RDBASE %u\n", cte.rdbase);
666         return CMD_CONTINUE;
667     }
668 
669     return update_cte(s, icid, &cte) ? CMD_CONTINUE_OK : CMD_STALL;
670 }
671 
672 /*
673  * Update the Device Table entry for @devid to @dte. Returns true
674  * on success, false if there was a memory access error.
675  */
676 static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte)
677 {
678     AddressSpace *as = &s->gicv3->dma_as;
679     uint64_t entry_addr;
680     uint64_t dteval = 0;
681     MemTxResult res = MEMTX_OK;
682 
683     trace_gicv3_its_dte_write(devid, dte->valid, dte->size, dte->ittaddr);
684 
685     if (dte->valid) {
686         /* add mapping entry to device table */
687         dteval = FIELD_DP64(dteval, DTE, VALID, 1);
688         dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size);
689         dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr);
690     }
691 
692     entry_addr = table_entry_addr(s, &s->dt, devid, &res);
693     if (res != MEMTX_OK) {
694         /* memory access error: stall */
695         return false;
696     }
697     if (entry_addr == -1) {
698         /* No L2 table for this index: discard write and continue */
699         return true;
700     }
701     address_space_stq_le(as, entry_addr, dteval, MEMTXATTRS_UNSPECIFIED, &res);
702     return res == MEMTX_OK;
703 }
704 
705 static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
706 {
707     uint32_t devid;
708     DTEntry dte;
709 
710     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
711     dte.size = cmdpkt[1] & SIZE_MASK;
712     dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
713     dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
714 
715     trace_gicv3_its_cmd_mapd(devid, dte.size, dte.ittaddr, dte.valid);
716 
717     if (devid >= s->dt.num_entries) {
718         qemu_log_mask(LOG_GUEST_ERROR,
719                       "ITS MAPD: invalid device ID field 0x%x >= 0x%x\n",
720                       devid, s->dt.num_entries);
721         return CMD_CONTINUE;
722     }
723 
724     if (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
725         qemu_log_mask(LOG_GUEST_ERROR,
726                       "ITS MAPD: invalid size %d\n", dte.size);
727         return CMD_CONTINUE;
728     }
729 
730     return update_dte(s, devid, &dte) ? CMD_CONTINUE_OK : CMD_STALL;
731 }
732 
733 static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
734 {
735     uint64_t rd1, rd2;
736 
737     rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1);
738     rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2);
739 
740     trace_gicv3_its_cmd_movall(rd1, rd2);
741 
742     if (rd1 >= s->gicv3->num_cpu) {
743         qemu_log_mask(LOG_GUEST_ERROR,
744                       "%s: RDBASE1 %" PRId64
745                       " out of range (must be less than %d)\n",
746                       __func__, rd1, s->gicv3->num_cpu);
747         return CMD_CONTINUE;
748     }
749     if (rd2 >= s->gicv3->num_cpu) {
750         qemu_log_mask(LOG_GUEST_ERROR,
751                       "%s: RDBASE2 %" PRId64
752                       " out of range (must be less than %d)\n",
753                       __func__, rd2, s->gicv3->num_cpu);
754         return CMD_CONTINUE;
755     }
756 
757     if (rd1 == rd2) {
758         /* Move to same target must succeed as a no-op */
759         return CMD_CONTINUE_OK;
760     }
761 
762     /* Move all pending LPIs from redistributor 1 to redistributor 2 */
763     gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]);
764 
765     return CMD_CONTINUE_OK;
766 }
767 
768 static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
769 {
770     uint32_t devid, eventid;
771     uint16_t new_icid;
772     DTEntry dte;
773     CTEntry old_cte, new_cte;
774     ITEntry old_ite;
775     ItsCmdResult cmdres;
776 
777     devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
778     eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
779     new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID);
780 
781     trace_gicv3_its_cmd_movi(devid, eventid, new_icid);
782 
783     cmdres = lookup_ite(s, __func__, devid, eventid, &old_ite, &dte);
784     if (cmdres != CMD_CONTINUE_OK) {
785         return cmdres;
786     }
787 
788     if (old_ite.inttype != ITE_INTTYPE_PHYSICAL) {
789         qemu_log_mask(LOG_GUEST_ERROR,
790                       "%s: invalid command attributes: invalid ITE\n",
791                       __func__);
792         return CMD_CONTINUE;
793     }
794 
795     if (old_ite.icid >= s->ct.num_entries) {
796         qemu_log_mask(LOG_GUEST_ERROR,
797                       "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
798                       __func__, old_ite.icid);
799         return CMD_CONTINUE;
800     }
801 
802     if (new_icid >= s->ct.num_entries) {
803         qemu_log_mask(LOG_GUEST_ERROR,
804                       "%s: invalid command attributes: ICID 0x%x\n",
805                       __func__, new_icid);
806         return CMD_CONTINUE;
807     }
808 
809     if (get_cte(s, old_ite.icid, &old_cte) != MEMTX_OK) {
810         return CMD_STALL;
811     }
812     if (!old_cte.valid) {
813         qemu_log_mask(LOG_GUEST_ERROR,
814                       "%s: invalid command attributes: "
815                       "invalid CTE for old ICID 0x%x\n",
816                       __func__, old_ite.icid);
817         return CMD_CONTINUE;
818     }
819 
820     if (get_cte(s, new_icid, &new_cte) != MEMTX_OK) {
821         return CMD_STALL;
822     }
823     if (!new_cte.valid) {
824         qemu_log_mask(LOG_GUEST_ERROR,
825                       "%s: invalid command attributes: "
826                       "invalid CTE for new ICID 0x%x\n",
827                       __func__, new_icid);
828         return CMD_CONTINUE;
829     }
830 
831     if (old_cte.rdbase >= s->gicv3->num_cpu) {
832         qemu_log_mask(LOG_GUEST_ERROR,
833                       "%s: CTE has invalid rdbase 0x%x\n",
834                       __func__, old_cte.rdbase);
835         return CMD_CONTINUE;
836     }
837 
838     if (new_cte.rdbase >= s->gicv3->num_cpu) {
839         qemu_log_mask(LOG_GUEST_ERROR,
840                       "%s: CTE has invalid rdbase 0x%x\n",
841                       __func__, new_cte.rdbase);
842         return CMD_CONTINUE;
843     }
844 
845     if (old_cte.rdbase != new_cte.rdbase) {
846         /* Move the LPI from the old redistributor to the new one */
847         gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase],
848                              &s->gicv3->cpu[new_cte.rdbase],
849                              old_ite.intid);
850     }
851 
852     /* Update the ICID field in the interrupt translation table entry */
853     old_ite.icid = new_icid;
854     return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE_OK : CMD_STALL;
855 }
856 
857 /*
858  * Update the vPE Table entry at index @vpeid with the entry @vte.
859  * Returns true on success, false if there was a memory access error.
860  */
861 static bool update_vte(GICv3ITSState *s, uint32_t vpeid, const VTEntry *vte)
862 {
863     AddressSpace *as = &s->gicv3->dma_as;
864     uint64_t entry_addr;
865     uint64_t vteval = 0;
866     MemTxResult res = MEMTX_OK;
867 
868     trace_gicv3_its_vte_write(vpeid, vte->valid, vte->vptsize, vte->vptaddr,
869                               vte->rdbase);
870 
871     if (vte->valid) {
872         vteval = FIELD_DP64(vteval, VTE, VALID, 1);
873         vteval = FIELD_DP64(vteval, VTE, VPTSIZE, vte->vptsize);
874         vteval = FIELD_DP64(vteval, VTE, VPTADDR, vte->vptaddr);
875         vteval = FIELD_DP64(vteval, VTE, RDBASE, vte->rdbase);
876     }
877 
878     entry_addr = table_entry_addr(s, &s->vpet, vpeid, &res);
879     if (res != MEMTX_OK) {
880         return false;
881     }
882     if (entry_addr == -1) {
883         /* No L2 table for this index: discard write and continue */
884         return true;
885     }
886     address_space_stq_le(as, entry_addr, vteval, MEMTXATTRS_UNSPECIFIED, &res);
887     return res == MEMTX_OK;
888 }
889 
890 static ItsCmdResult process_vmapp(GICv3ITSState *s, const uint64_t *cmdpkt)
891 {
892     VTEntry vte;
893     uint32_t vpeid;
894 
895     if (!its_feature_virtual(s)) {
896         return CMD_CONTINUE;
897     }
898 
899     vpeid = FIELD_EX64(cmdpkt[1], VMAPP_1, VPEID);
900     vte.rdbase = FIELD_EX64(cmdpkt[2], VMAPP_2, RDBASE);
901     vte.valid = FIELD_EX64(cmdpkt[2], VMAPP_2, V);
902     vte.vptsize = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTSIZE);
903     vte.vptaddr = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTADDR);
904 
905     trace_gicv3_its_cmd_vmapp(vpeid, vte.rdbase, vte.valid,
906                               vte.vptaddr, vte.vptsize);
907 
908     /*
909      * For GICv4.0 the VPT_size field is only 5 bits, whereas we
910      * define our field macros to include the full GICv4.1 8 bits.
911      * The range check on VPT_size will catch the cases where
912      * the guest set the RES0-in-GICv4.0 bits [7:6].
913      */
914     if (vte.vptsize > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
915         qemu_log_mask(LOG_GUEST_ERROR,
916                       "%s: invalid VPT_size 0x%x\n", __func__, vte.vptsize);
917         return CMD_CONTINUE;
918     }
919 
920     if (vte.valid && vte.rdbase >= s->gicv3->num_cpu) {
921         qemu_log_mask(LOG_GUEST_ERROR,
922                       "%s: invalid rdbase 0x%x\n", __func__, vte.rdbase);
923         return CMD_CONTINUE;
924     }
925 
926     if (vpeid >= s->vpet.num_entries) {
927         qemu_log_mask(LOG_GUEST_ERROR,
928                       "%s: VPEID 0x%x out of range (must be less than 0x%x)\n",
929                       __func__, vpeid, s->vpet.num_entries);
930         return CMD_CONTINUE;
931     }
932 
933     return update_vte(s, vpeid, &vte) ? CMD_CONTINUE_OK : CMD_STALL;
934 }
935 
936 /*
937  * The current implementation processes the command queue synchronously,
938  * blocking until all pending commands have been consumed.
939  */
940 static void process_cmdq(GICv3ITSState *s)
941 {
942     uint32_t wr_offset = 0;
943     uint32_t rd_offset = 0;
944     uint32_t cq_offset = 0;
945     AddressSpace *as = &s->gicv3->dma_as;
946     uint8_t cmd;
947     int i;
948 
949     if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
950         return;
951     }
952 
953     wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
954 
955     if (wr_offset >= s->cq.num_entries) {
956         qemu_log_mask(LOG_GUEST_ERROR,
957                       "%s: invalid write offset "
958                       "%d\n", __func__, wr_offset);
959         return;
960     }
961 
962     rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
963 
964     if (rd_offset >= s->cq.num_entries) {
965         qemu_log_mask(LOG_GUEST_ERROR,
966                       "%s: invalid read offset "
967                       "%d\n", __func__, rd_offset);
968         return;
969     }
970 
971     while (wr_offset != rd_offset) {
972         ItsCmdResult result = CMD_CONTINUE_OK;
973         void *hostmem;
974         hwaddr buflen;
975         uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];
976 
977         cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
978 
979         buflen = GITS_CMDQ_ENTRY_SIZE;
980         hostmem = address_space_map(as, s->cq.base_addr + cq_offset,
981                                     &buflen, false, MEMTXATTRS_UNSPECIFIED);
982         if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) {
983             if (hostmem) {
984                 address_space_unmap(as, hostmem, buflen, false, 0);
985             }
986             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
987             qemu_log_mask(LOG_GUEST_ERROR,
988                           "%s: could not read command at 0x%" PRIx64 "\n",
989                           __func__, s->cq.base_addr + cq_offset);
990             break;
991         }
992         for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) {
993             cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t));
994         }
995         address_space_unmap(as, hostmem, buflen, false, 0);
996 
997         cmd = cmdpkt[0] & CMD_MASK;
998 
999         trace_gicv3_its_process_command(rd_offset, cmd);
1000 
1001         switch (cmd) {
1002         case GITS_CMD_INT:
1003             result = process_its_cmd(s, cmdpkt, INTERRUPT);
1004             break;
1005         case GITS_CMD_CLEAR:
1006             result = process_its_cmd(s, cmdpkt, CLEAR);
1007             break;
1008         case GITS_CMD_SYNC:
1009             /*
1010              * The current implementation processes each command synchronously
1011              * and in order, so the internal state is already consistent by
1012              * the time a SYNC command is executed. No further processing is
1013              * therefore required for SYNC.
1014              */
1015             trace_gicv3_its_cmd_sync();
1016             break;
1017         case GITS_CMD_MAPD:
1018             result = process_mapd(s, cmdpkt);
1019             break;
1020         case GITS_CMD_MAPC:
1021             result = process_mapc(s, cmdpkt);
1022             break;
1023         case GITS_CMD_MAPTI:
1024             result = process_mapti(s, cmdpkt, false);
1025             break;
1026         case GITS_CMD_MAPI:
1027             result = process_mapti(s, cmdpkt, true);
1028             break;
1029         case GITS_CMD_DISCARD:
1030             result = process_its_cmd(s, cmdpkt, DISCARD);
1031             break;
1032         case GITS_CMD_INV:
1033         case GITS_CMD_INVALL:
1034             /*
1035              * The current implementation doesn't cache any ITS tables, only
1036              * the calculated LPI priority information, so we just need to
1037              * trigger an LPI priority recalculation to stay in sync with LPI
1038              * configuration table or pending table changes.
1039              */
1040             trace_gicv3_its_cmd_inv();
1041             for (i = 0; i < s->gicv3->num_cpu; i++) {
1042                 gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
1043             }
1044             break;
1045         case GITS_CMD_MOVI:
1046             result = process_movi(s, cmdpkt);
1047             break;
1048         case GITS_CMD_MOVALL:
1049             result = process_movall(s, cmdpkt);
1050             break;
1051         case GITS_CMD_VMAPTI:
1052             result = process_vmapti(s, cmdpkt, false);
1053             break;
1054         case GITS_CMD_VMAPI:
1055             result = process_vmapti(s, cmdpkt, true);
1056             break;
1057         case GITS_CMD_VMAPP:
1058             result = process_vmapp(s, cmdpkt);
1059             break;
1060         default:
1061             trace_gicv3_its_cmd_unknown(cmd);
1062             break;
1063         }
1064         if (result != CMD_STALL) {
1065             /* CMD_CONTINUE or CMD_CONTINUE_OK */
1066             rd_offset++;
1067             rd_offset %= s->cq.num_entries;
1068             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
1069         } else {
1070             /* CMD_STALL */
1071             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
1072             qemu_log_mask(LOG_GUEST_ERROR,
1073                           "%s: 0x%x cmd processing failed, stalling\n",
1074                           __func__, cmd);
1075             break;
1076         }
1077     }
1078 }
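
/*
 * Rough sketch of how a guest driver feeds this queue (illustrative only;
 * everything except GITS_CWRITER/GITS_CREADR and the 32-byte command size
 * is a hypothetical name):
 *
 *   memcpy(cmdq_base + wr * 32, &pkt, 32);     // build the command packet
 *   wr = (wr + 1) % num_slots;
 *   writel(its_base + GITS_CWRITER, wr * 32);  // triggers process_cmdq()
 *   while ((readl(its_base + GITS_CREADR) & ~0x1f) != wr * 32) {
 *       ;                                      // wait (or check Stalled)
 *   }
 */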
1079 
1080 /*
1081  * This function extracts the parameters (base address, size, etc.) of the
1082  * ITS Device, Collection and vPE tables from the GITS_BASER<n> registers.
1083  * It is called when the ITS is enabled and also during post_load migration.
1084  */
1085 static void extract_table_params(GICv3ITSState *s)
1086 {
1087     uint16_t num_pages = 0;
1088     uint8_t  page_sz_type;
1089     uint8_t type;
1090     uint32_t page_sz = 0;
1091     uint64_t value;
1092 
1093     for (int i = 0; i < 8; i++) {
1094         TableDesc *td;
1095         int idbits;
1096 
1097         value = s->baser[i];
1098 
1099         if (!value) {
1100             continue;
1101         }
1102 
1103         page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);
1104 
1105         switch (page_sz_type) {
1106         case 0:
1107             page_sz = GITS_PAGE_SIZE_4K;
1108             break;
1109 
1110         case 1:
1111             page_sz = GITS_PAGE_SIZE_16K;
1112             break;
1113 
1114         case 2:
1115         case 3:
1116             page_sz = GITS_PAGE_SIZE_64K;
1117             break;
1118 
1119         default:
1120             g_assert_not_reached();
1121         }
1122 
1123         num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;
1124 
1125         type = FIELD_EX64(value, GITS_BASER, TYPE);
1126 
1127         switch (type) {
1128         case GITS_BASER_TYPE_DEVICE:
1129             td = &s->dt;
1130             idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
1131             break;
1132         case GITS_BASER_TYPE_COLLECTION:
1133             td = &s->ct;
1134             if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
1135                 idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
1136             } else {
1137                 /* 16-bit CollectionId supported when CIL == 0 */
1138                 idbits = 16;
1139             }
1140             break;
1141         case GITS_BASER_TYPE_VPE:
1142             td = &s->vpet;
1143             /*
1144              * For QEMU vPEIDs are always 16 bits. (GICv4.1 allows an
1145              * implementation to implement fewer bits and report this
1146              * via GICD_TYPER2.)
1147              */
1148             idbits = 16;
1149             break;
1150         default:
1151             /*
1152              * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
1153              * ensures we will only see type values corresponding to
1154              * the values set up in gicv3_its_reset().
1155              */
1156             g_assert_not_reached();
1157         }
1158 
1159         memset(td, 0, sizeof(*td));
1160         /*
1161          * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
1162          * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
1163          * do not have a special case where the GITS_BASER<n>.Valid bit is 0
1164          * for the register corresponding to the Collection table but we
1165          * still have to process interrupts using non-memory-backed
1166          * Collection table entries.)
1167          * The specification makes it UNPREDICTABLE to enable the ITS without
1168          * marking each BASER<n> as valid. We choose to handle these as if
1169          * the table was zero-sized, so commands using the table will fail
1170          * and interrupts requested via GITS_TRANSLATER writes will be ignored.
1171          * This happens automatically by leaving the num_entries field at
1172          * zero, which will be caught by the bounds checks we have before
1173          * every table lookup anyway.
1174          */
1175         if (!FIELD_EX64(value, GITS_BASER, VALID)) {
1176             continue;
1177         }
1178         td->page_sz = page_sz;
1179         td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
1180         td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
1181         td->base_addr = baser_base_addr(value, page_sz);
1182         if (!td->indirect) {
1183             td->num_entries = (num_pages * page_sz) / td->entry_sz;
1184         } else {
1185             td->num_entries = (((num_pages * page_sz) /
1186                                   L1TABLE_ENTRY_SIZE) *
1187                                  (page_sz / td->entry_sz));
1188         }
1189         td->num_entries = MIN(td->num_entries, 1ULL << idbits);
1190     }
1191 }
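
/*
 * Worked example (illustrative): a GITS_BASER<n> programmed with
 * Page_Size = 64K, Size = 1 (i.e. two pages) and an 8-byte entry size
 * gives, for a flat table,
 *     num_entries = (2 * 0x10000) / 8 = 16384
 * and, for an indirect table,
 *     num_entries = ((2 * 0x10000) / L1TABLE_ENTRY_SIZE) * (0x10000 / 8)
 *                 = 16384 * 8192
 * before the MIN() against 1 << idbits clamps it to the number of IDs
 * this ITS actually supports.
 */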
1192 
1193 static void extract_cmdq_params(GICv3ITSState *s)
1194 {
1195     uint16_t num_pages = 0;
1196     uint64_t value = s->cbaser;
1197 
1198     num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
1199 
1200     memset(&s->cq, 0 , sizeof(s->cq));
1201 
1202     if (FIELD_EX64(value, GITS_CBASER, VALID)) {
1203         s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
1204                              GITS_CMDQ_ENTRY_SIZE;
1205         s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
1206         s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
1207     }
1208 }
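
/*
 * Worked example (illustrative): GITS_CBASER.Size = 0 means one 4K page,
 * and each command packet is GITS_CMDQ_ENTRY_SIZE bytes (32 for the ITS),
 * so the queue holds 4096 / 32 = 128 commands; Size = 15 gives sixteen
 * pages and 16 * 128 = 2048 commands.
 */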
1209 
1210 static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
1211                                               uint64_t *data, unsigned size,
1212                                               MemTxAttrs attrs)
1213 {
1214     /*
1215      * GITS_TRANSLATER is write-only, and all other addresses
1216      * in the interrupt translation space frame are RES0.
1217      */
1218     *data = 0;
1219     return MEMTX_OK;
1220 }
1221 
1222 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
1223                                                uint64_t data, unsigned size,
1224                                                MemTxAttrs attrs)
1225 {
1226     GICv3ITSState *s = (GICv3ITSState *)opaque;
1227     bool result = true;
1228 
1229     trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);
1230 
1231     switch (offset) {
1232     case GITS_TRANSLATER:
1233         if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1234             result = do_process_its_cmd(s, attrs.requester_id, data, NONE);
1235         }
1236         break;
1237     default:
1238         break;
1239     }
1240 
1241     if (result) {
1242         return MEMTX_OK;
1243     } else {
1244         return MEMTX_ERROR;
1245     }
1246 }
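
/*
 * Note (illustrative, not part of the original source): this is the path
 * taken by an MSI. A device's write of its EventID to GITS_TRANSLATER
 * arrives here with the DeviceID carried in attrs.requester_id, so the
 * (DeviceID, EventID) pair is translated just as for an INT command.
 * Only a CMD_STALL result (a memory error reading the ITS tables) makes
 * the write complete with MEMTX_ERROR; parameter errors simply cause the
 * interrupt to be dropped.
 */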
1247 
1248 static bool its_writel(GICv3ITSState *s, hwaddr offset,
1249                               uint64_t value, MemTxAttrs attrs)
1250 {
1251     bool result = true;
1252     int index;
1253 
1254     switch (offset) {
1255     case GITS_CTLR:
1256         if (value & R_GITS_CTLR_ENABLED_MASK) {
1257             s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
1258             extract_table_params(s);
1259             extract_cmdq_params(s);
1260             process_cmdq(s);
1261         } else {
1262             s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
1263         }
1264         break;
1265     case GITS_CBASER:
1266         /*
1267          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1268          *                 already enabled
1269          */
1270         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1271             s->cbaser = deposit64(s->cbaser, 0, 32, value);
1272             s->creadr = 0;
1273         }
1274         break;
1275     case GITS_CBASER + 4:
1276         /*
1277          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1278          *                 already enabled
1279          */
1280         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1281             s->cbaser = deposit64(s->cbaser, 32, 32, value);
1282             s->creadr = 0;
1283         }
1284         break;
1285     case GITS_CWRITER:
1286         s->cwriter = deposit64(s->cwriter, 0, 32,
1287                                (value & ~R_GITS_CWRITER_RETRY_MASK));
1288         if (s->cwriter != s->creadr) {
1289             process_cmdq(s);
1290         }
1291         break;
1292     case GITS_CWRITER + 4:
1293         s->cwriter = deposit64(s->cwriter, 32, 32, value);
1294         break;
1295     case GITS_CREADR:
1296         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1297             s->creadr = deposit64(s->creadr, 0, 32,
1298                                   (value & ~R_GITS_CREADR_STALLED_MASK));
1299         } else {
1300             /* RO register, ignore the write */
1301             qemu_log_mask(LOG_GUEST_ERROR,
1302                           "%s: invalid guest write to RO register at offset "
1303                           TARGET_FMT_plx "\n", __func__, offset);
1304         }
1305         break;
1306     case GITS_CREADR + 4:
1307         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1308             s->creadr = deposit64(s->creadr, 32, 32, value);
1309         } else {
1310             /* RO register, ignore the write */
1311             qemu_log_mask(LOG_GUEST_ERROR,
1312                           "%s: invalid guest write to RO register at offset "
1313                           TARGET_FMT_plx "\n", __func__, offset);
1314         }
1315         break;
1316     case GITS_BASER ... GITS_BASER + 0x3f:
1317         /*
1318          * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
1319          *                 already enabled
1320          */
1321         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1322             index = (offset - GITS_BASER) / 8;
1323 
1324             if (s->baser[index] == 0) {
1325                 /* Unimplemented GITS_BASERn: RAZ/WI */
1326                 break;
1327             }
1328             if (offset & 7) {
1329                 value <<= 32;
1330                 value &= ~GITS_BASER_RO_MASK;
1331                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
1332                 s->baser[index] |= value;
1333             } else {
1334                 value &= ~GITS_BASER_RO_MASK;
1335                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
1336                 s->baser[index] |= value;
1337             }
1338         }
1339         break;
1340     case GITS_IIDR:
1341     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1342         /* RO registers, ignore the write */
1343         qemu_log_mask(LOG_GUEST_ERROR,
1344                       "%s: invalid guest write to RO register at offset "
1345                       TARGET_FMT_plx "\n", __func__, offset);
1346         break;
1347     default:
1348         result = false;
1349         break;
1350     }
1351     return result;
1352 }
1353 
1354 static bool its_readl(GICv3ITSState *s, hwaddr offset,
1355                              uint64_t *data, MemTxAttrs attrs)
1356 {
1357     bool result = true;
1358     int index;
1359 
1360     switch (offset) {
1361     case GITS_CTLR:
1362         *data = s->ctlr;
1363         break;
1364     case GITS_IIDR:
1365         *data = gicv3_iidr();
1366         break;
1367     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1368         /* ID registers */
1369         *data = gicv3_idreg(offset - GITS_IDREGS, GICV3_PIDR0_ITS);
1370         break;
1371     case GITS_TYPER:
1372         *data = extract64(s->typer, 0, 32);
1373         break;
1374     case GITS_TYPER + 4:
1375         *data = extract64(s->typer, 32, 32);
1376         break;
1377     case GITS_CBASER:
1378         *data = extract64(s->cbaser, 0, 32);
1379         break;
1380     case GITS_CBASER + 4:
1381         *data = extract64(s->cbaser, 32, 32);
1382         break;
1383     case GITS_CREADR:
1384         *data = extract64(s->creadr, 0, 32);
1385         break;
1386     case GITS_CREADR + 4:
1387         *data = extract64(s->creadr, 32, 32);
1388         break;
1389     case GITS_CWRITER:
1390         *data = extract64(s->cwriter, 0, 32);
1391         break;
1392     case GITS_CWRITER + 4:
1393         *data = extract64(s->cwriter, 32, 32);
1394         break;
1395     case GITS_BASER ... GITS_BASER + 0x3f:
1396         index = (offset - GITS_BASER) / 8;
1397         if (offset & 7) {
1398             *data = extract64(s->baser[index], 32, 32);
1399         } else {
1400             *data = extract64(s->baser[index], 0, 32);
1401         }
1402         break;
1403     default:
1404         result = false;
1405         break;
1406     }
1407     return result;
1408 }
1409 
1410 static bool its_writell(GICv3ITSState *s, hwaddr offset,
1411                                uint64_t value, MemTxAttrs attrs)
1412 {
1413     bool result = true;
1414     int index;
1415 
1416     switch (offset) {
1417     case GITS_BASER ... GITS_BASER + 0x3f:
1418         /*
1419          * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
1420          *                 already enabled
1421          */
1422         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1423             index = (offset - GITS_BASER) / 8;
1424             if (s->baser[index] == 0) {
1425                 /* Unimplemented GITS_BASERn: RAZ/WI */
1426                 break;
1427             }
1428             s->baser[index] &= GITS_BASER_RO_MASK;
1429             s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
1430         }
1431         break;
1432     case GITS_CBASER:
1433         /*
1434          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1435          *                 already enabled
1436          */
1437         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1438             s->cbaser = value;
1439             s->creadr = 0;
1440         }
1441         break;
1442     case GITS_CWRITER:
1443         s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
1444         if (s->cwriter != s->creadr) {
1445             process_cmdq(s);
1446         }
1447         break;
1448     case GITS_CREADR:
1449         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1450             s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
1451         } else {
1452             /* RO register, ignore the write */
1453             qemu_log_mask(LOG_GUEST_ERROR,
1454                           "%s: invalid guest write to RO register at offset "
1455                           TARGET_FMT_plx "\n", __func__, offset);
1456         }
1457         break;
1458     case GITS_TYPER:
1459         /* RO registers, ignore the write */
1460         qemu_log_mask(LOG_GUEST_ERROR,
1461                       "%s: invalid guest write to RO register at offset "
1462                       TARGET_FMT_plx "\n", __func__, offset);
1463         break;
1464     default:
1465         result = false;
1466         break;
1467     }
1468     return result;
1469 }
1470 
1471 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1472                               uint64_t *data, MemTxAttrs attrs)
1473 {
1474     bool result = true;
1475     int index;
1476 
1477     switch (offset) {
1478     case GITS_TYPER:
1479         *data = s->typer;
1480         break;
1481     case GITS_BASER ... GITS_BASER + 0x3f:
1482         index = (offset - GITS_BASER) / 8;
1483         *data = s->baser[index];
1484         break;
1485     case GITS_CBASER:
1486         *data = s->cbaser;
1487         break;
1488     case GITS_CREADR:
1489         *data = s->creadr;
1490         break;
1491     case GITS_CWRITER:
1492         *data = s->cwriter;
1493         break;
1494     default:
1495         result = false;
1496         break;
1497     }
1498     return result;
1499 }
1500 
1501 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1502                                   unsigned size, MemTxAttrs attrs)
1503 {
1504     GICv3ITSState *s = (GICv3ITSState *)opaque;
1505     bool result;
1506 
1507     switch (size) {
1508     case 4:
1509         result = its_readl(s, offset, data, attrs);
1510         break;
1511     case 8:
1512         result = its_readll(s, offset, data, attrs);
1513         break;
1514     default:
1515         result = false;
1516         break;
1517     }
1518 
1519     if (!result) {
1520         qemu_log_mask(LOG_GUEST_ERROR,
1521                       "%s: invalid guest read at offset " TARGET_FMT_plx
1522                       " size %u\n", __func__, offset, size);
1523         trace_gicv3_its_badread(offset, size);
1524         /*
1525          * The spec requires that reserved registers are RAZ/WI;
1526          * so use false returns from leaf functions as a way to
1527          * trigger the guest-error logging but don't return it to
1528          * the caller, or we'll cause a spurious guest data abort.
1529          */
1530         *data = 0;
1531     } else {
1532         trace_gicv3_its_read(offset, *data, size);
1533     }
1534     return MEMTX_OK;
1535 }
1536 
1537 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1538                                    unsigned size, MemTxAttrs attrs)
1539 {
1540     GICv3ITSState *s = (GICv3ITSState *)opaque;
1541     bool result;
1542 
1543     switch (size) {
1544     case 4:
1545         result = its_writel(s, offset, data, attrs);
1546         break;
1547     case 8:
1548         result = its_writell(s, offset, data, attrs);
1549         break;
1550     default:
1551         result = false;
1552         break;
1553     }
1554 
1555     if (!result) {
1556         qemu_log_mask(LOG_GUEST_ERROR,
1557                       "%s: invalid guest write at offset " TARGET_FMT_plx
1558                       " size %u\n", __func__, offset, size);
1559         trace_gicv3_its_badwrite(offset, data, size);
1560         /*
1561          * The spec requires that reserved registers are RAZ/WI;
1562          * so use false returns from leaf functions as a way to
1563          * trigger the guest-error logging but don't return it to
1564          * the caller, or we'll cause a spurious guest data abort.
1565          */
1566     } else {
1567         trace_gicv3_its_write(offset, data, size);
1568     }
1569     return MEMTX_OK;
1570 }
1571 
1572 static const MemoryRegionOps gicv3_its_control_ops = {
1573     .read_with_attrs = gicv3_its_read,
1574     .write_with_attrs = gicv3_its_write,
1575     .valid.min_access_size = 4,
1576     .valid.max_access_size = 8,
1577     .impl.min_access_size = 4,
1578     .impl.max_access_size = 8,
1579     .endianness = DEVICE_NATIVE_ENDIAN,
1580 };
1581 
1582 static const MemoryRegionOps gicv3_its_translation_ops = {
1583     .read_with_attrs = gicv3_its_translation_read,
1584     .write_with_attrs = gicv3_its_translation_write,
1585     .valid.min_access_size = 2,
1586     .valid.max_access_size = 4,
1587     .impl.min_access_size = 2,
1588     .impl.max_access_size = 4,
1589     .endianness = DEVICE_NATIVE_ENDIAN,
1590 };
1591 
1592 static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
1593 {
1594     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1595     int i;
1596 
1597     for (i = 0; i < s->gicv3->num_cpu; i++) {
1598         if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
1599             error_setg(errp, "Physical LPI not supported by CPU %d", i);
1600             return;
1601         }
1602     }
1603 
1604     gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
1605 
1606     /* set the ITS default features supported */
1607     s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
1608     s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
1609                           ITS_ITT_ENTRY_SIZE - 1);
1610     s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
1611     s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
1612     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
1613     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
1614 }
1615 
1616 static void gicv3_its_reset(DeviceState *dev)
1617 {
1618     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1619     GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);
1620 
1621     c->parent_reset(dev);
1622 
1623     /* Quiescent bit reset to 1 */
1624     s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);
1625 
1626     /*
1627      * setting GITS_BASER0.Type = 0b001 (Device)
1628      *         GITS_BASER1.Type = 0b100 (Collection Table)
1629      *         GITS_BASER2.Type = 0b010 (vPE) for GICv4 and later
1630      *         GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented)
1631      *         GITS_BASER<0,1>.Page_Size = 64KB
1632      * and default translation table entry size to 16 bytes
1633      */
1634     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
1635                              GITS_BASER_TYPE_DEVICE);
1636     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
1637                              GITS_BASER_PAGESIZE_64K);
1638     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
1639                              GITS_DTE_SIZE - 1);
1640 
1641     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
1642                              GITS_BASER_TYPE_COLLECTION);
1643     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
1644                              GITS_BASER_PAGESIZE_64K);
1645     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
1646                              GITS_CTE_SIZE - 1);
1647 
1648     if (its_feature_virtual(s)) {
1649         s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, TYPE,
1650                                  GITS_BASER_TYPE_VPE);
1651         s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, PAGESIZE,
1652                                  GITS_BASER_PAGESIZE_64K);
1653         s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, ENTRYSIZE,
1654                                  GITS_VPE_SIZE - 1);
1655     }
1656 }
1657 
1658 static void gicv3_its_post_load(GICv3ITSState *s)
1659 {
1660     if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1661         extract_table_params(s);
1662         extract_cmdq_params(s);
1663     }
1664 }
1665 
1666 static Property gicv3_its_props[] = {
1667     DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
1668                      GICv3State *),
1669     DEFINE_PROP_END_OF_LIST(),
1670 };
1671 
1672 static void gicv3_its_class_init(ObjectClass *klass, void *data)
1673 {
1674     DeviceClass *dc = DEVICE_CLASS(klass);
1675     GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
1676     GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);
1677 
1678     dc->realize = gicv3_arm_its_realize;
1679     device_class_set_props(dc, gicv3_its_props);
1680     device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
1681     icc->post_load = gicv3_its_post_load;
1682 }
1683 
1684 static const TypeInfo gicv3_its_info = {
1685     .name = TYPE_ARM_GICV3_ITS,
1686     .parent = TYPE_ARM_GICV3_ITS_COMMON,
1687     .instance_size = sizeof(GICv3ITSState),
1688     .class_init = gicv3_its_class_init,
1689     .class_size = sizeof(GICv3ITSClass),
1690 };
1691 
1692 static void gicv3_its_register_types(void)
1693 {
1694     type_register_static(&gicv3_its_info);
1695 }
1696 
1697 type_init(gicv3_its_register_types)
1698