xref: /openbmc/qemu/hw/intc/arm_gicv3_its.c (revision c411db7bf75d0a2ecd7249533c74babf2af51afe)
1 /*
2  * ITS emulation for a GICv3-based system
3  *
4  * Copyright Linaro.org 2021
5  *
6  * Authors:
7  *  Shashi Mallela <shashi.mallela@linaro.org>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10  * option) any later version.  See the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qemu/log.h"
16 #include "trace.h"
17 #include "hw/qdev-properties.h"
18 #include "hw/intc/arm_gicv3_its_common.h"
19 #include "gicv3_internal.h"
20 #include "qom/object.h"
21 #include "qapi/error.h"
22 
23 typedef struct GICv3ITSClass GICv3ITSClass;
24 /* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
25 DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
26                      ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)
27 
/* Class struct for the emulated ITS device */
struct GICv3ITSClass {
    GICv3ITSCommonClass parent_class;
    /*
     * Saved parent-class reset handler, presumably chained from this
     * class's own reset method (set up elsewhere in this file).
     */
    void (*parent_reset)(DeviceState *dev);
};
32 
33 /*
34  * This is an internal enum used to distinguish between LPI triggered
35  * via command queue and LPI triggered via gits_translater write.
36  */
typedef enum ItsCmdType {
    NONE = 0, /* internal indication for GITS_TRANSLATER write */
    CLEAR = 1,     /* ITS CLEAR command: retire the pending LPI */
    DISCARD = 2,   /* ITS DISCARD command: retire LPI and remove its ITE */
    INTERRUPT = 3, /* ITS INT command: set the LPI pending */
} ItsCmdType;
43 
/*
 * In-memory cached forms of the guest in-memory table entries.
 * They are filled in by the get_*() readers below and written back
 * out by the corresponding update_*() writers.
 */

/* Device Table entry */
typedef struct DTEntry {
    bool valid;
    unsigned size;    /* the ITT covers 1 << (size + 1) event IDs */
    uint64_t ittaddr; /* guest physical address of the ITT */
} DTEntry;

/* Collection Table entry */
typedef struct CTEntry {
    bool valid;
    uint32_t rdbase;  /* target redistributor, as a CPU index */
} CTEntry;

/* Interrupt Translation Table entry */
typedef struct ITEntry {
    bool valid;
    int inttype;      /* ITE_INTTYPE_PHYSICAL or ITE_INTTYPE_VIRTUAL */
    uint32_t intid;
    uint32_t doorbell; /* doorbell LPI, or INTID_SPURIOUS for "none" */
    uint32_t icid;
    uint32_t vpeid;
} ITEntry;

/* vPE Table entry */
typedef struct VTEntry {
    bool valid;
    unsigned vptsize;
    uint32_t rdbase;  /* target redistributor, as a CPU index */
    uint64_t vptaddr;
} VTEntry;
70 
71 /*
72  * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
73  * if a command parameter is not correct. These include both "stall
74  * processing of the command queue" and "ignore this command, and
75  * keep processing the queue". In our implementation we choose that
76  * memory transaction errors reading the command packet provoke a
77  * stall, but errors in parameters cause us to ignore the command
78  * and continue processing.
79  * The process_* functions which handle individual ITS commands all
80  * return an ItsCmdResult which tells process_cmdq() whether it should
81  * stall, keep going because of an error, or keep going because the
82  * command was a success.
83  */
typedef enum ItsCmdResult {
    CMD_STALL = 0,       /* memory error: stop processing the command queue */
    CMD_CONTINUE = 1,    /* bad parameter: ignore command, keep processing */
    CMD_CONTINUE_OK = 2, /* command succeeded: keep processing */
} ItsCmdResult;
89 
90 /* True if the ITS supports the GICv4 virtual LPI feature */
91 static bool its_feature_virtual(GICv3ITSState *s)
92 {
93     return s->typer & R_GITS_TYPER_VIRTUAL_MASK;
94 }
95 
96 static inline bool intid_in_lpi_range(uint32_t id)
97 {
98     return id >= GICV3_LPI_INTID_START &&
99         id < (1 << (GICD_TYPER_IDBITS + 1));
100 }
101 
102 static inline bool valid_doorbell(uint32_t id)
103 {
104     /* Doorbell fields may be an LPI, or 1023 to mean "no doorbell" */
105     return id == INTID_SPURIOUS || intid_in_lpi_range(id);
106 }
107 
108 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
109 {
110     uint64_t result = 0;
111 
112     switch (page_sz) {
113     case GITS_PAGE_SIZE_4K:
114     case GITS_PAGE_SIZE_16K:
115         result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
116         break;
117 
118     case GITS_PAGE_SIZE_64K:
119         result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
120         result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
121         break;
122 
123     default:
124         break;
125     }
126     return result;
127 }
128 
129 static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
130                                  uint32_t idx, MemTxResult *res)
131 {
132     /*
133      * Given a TableDesc describing one of the ITS in-guest-memory
134      * tables and an index into it, return the guest address
135      * corresponding to that table entry.
136      * If there was a memory error reading the L1 table of an
137      * indirect table, *res is set accordingly, and we return -1.
138      * If the L1 table entry is marked not valid, we return -1 with
139      * *res set to MEMTX_OK.
140      *
141      * The specification defines the format of level 1 entries of a
142      * 2-level table, but the format of level 2 entries and the format
143      * of flat-mapped tables is IMPDEF.
144      */
145     AddressSpace *as = &s->gicv3->dma_as;
146     uint32_t l2idx;
147     uint64_t l2;
148     uint32_t num_l2_entries;
149 
150     *res = MEMTX_OK;
151 
152     if (!td->indirect) {
153         /* Single level table */
154         return td->base_addr + idx * td->entry_sz;
155     }
156 
157     /* Two level table */
158     l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);
159 
160     l2 = address_space_ldq_le(as,
161                               td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
162                               MEMTXATTRS_UNSPECIFIED, res);
163     if (*res != MEMTX_OK) {
164         return -1;
165     }
166     if (!(l2 & L2_TABLE_VALID_MASK)) {
167         return -1;
168     }
169 
170     num_l2_entries = td->page_sz / td->entry_sz;
171     return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
172 }
173 
174 /*
175  * Read the Collection Table entry at index @icid. On success (including
176  * successfully determining that there is no valid CTE for this index),
177  * we return MEMTX_OK and populate the CTEntry struct @cte accordingly.
178  * If there is an error reading memory then we return the error code.
179  */
180 static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte)
181 {
182     AddressSpace *as = &s->gicv3->dma_as;
183     MemTxResult res = MEMTX_OK;
184     uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, &res);
185     uint64_t cteval;
186 
187     if (entry_addr == -1) {
188         /* No L2 table entry, i.e. no valid CTE, or a memory error */
189         cte->valid = false;
190         goto out;
191     }
192 
193     cteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
194     if (res != MEMTX_OK) {
195         goto out;
196     }
197     cte->valid = FIELD_EX64(cteval, CTE, VALID);
198     cte->rdbase = FIELD_EX64(cteval, CTE, RDBASE);
199 out:
200     if (res != MEMTX_OK) {
201         trace_gicv3_its_cte_read_fault(icid);
202     } else {
203         trace_gicv3_its_cte_read(icid, cte->valid, cte->rdbase);
204     }
205     return res;
206 }
207 
208 /*
209  * Update the Interrupt Table entry at index @evinted in the table specified
210  * by the dte @dte. Returns true on success, false if there was a memory
211  * access error.
212  */
213 static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
214                        const ITEntry *ite)
215 {
216     AddressSpace *as = &s->gicv3->dma_as;
217     MemTxResult res = MEMTX_OK;
218     hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
219     uint64_t itel = 0;
220     uint32_t iteh = 0;
221 
222     trace_gicv3_its_ite_write(dte->ittaddr, eventid, ite->valid,
223                               ite->inttype, ite->intid, ite->icid,
224                               ite->vpeid, ite->doorbell);
225 
226     if (ite->valid) {
227         itel = FIELD_DP64(itel, ITE_L, VALID, 1);
228         itel = FIELD_DP64(itel, ITE_L, INTTYPE, ite->inttype);
229         itel = FIELD_DP64(itel, ITE_L, INTID, ite->intid);
230         itel = FIELD_DP64(itel, ITE_L, ICID, ite->icid);
231         itel = FIELD_DP64(itel, ITE_L, VPEID, ite->vpeid);
232         iteh = FIELD_DP32(iteh, ITE_H, DOORBELL, ite->doorbell);
233     }
234 
235     address_space_stq_le(as, iteaddr, itel, MEMTXATTRS_UNSPECIFIED, &res);
236     if (res != MEMTX_OK) {
237         return false;
238     }
239     address_space_stl_le(as, iteaddr + 8, iteh, MEMTXATTRS_UNSPECIFIED, &res);
240     return res == MEMTX_OK;
241 }
242 
243 /*
244  * Read the Interrupt Table entry at index @eventid from the table specified
245  * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry
246  * struct @ite accordingly. If there is an error reading memory then we return
247  * the error code.
248  */
249 static MemTxResult get_ite(GICv3ITSState *s, uint32_t eventid,
250                            const DTEntry *dte, ITEntry *ite)
251 {
252     AddressSpace *as = &s->gicv3->dma_as;
253     MemTxResult res = MEMTX_OK;
254     uint64_t itel;
255     uint32_t iteh;
256     hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
257 
258     itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, &res);
259     if (res != MEMTX_OK) {
260         trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
261         return res;
262     }
263 
264     iteh = address_space_ldl_le(as, iteaddr + 8, MEMTXATTRS_UNSPECIFIED, &res);
265     if (res != MEMTX_OK) {
266         trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
267         return res;
268     }
269 
270     ite->valid = FIELD_EX64(itel, ITE_L, VALID);
271     ite->inttype = FIELD_EX64(itel, ITE_L, INTTYPE);
272     ite->intid = FIELD_EX64(itel, ITE_L, INTID);
273     ite->icid = FIELD_EX64(itel, ITE_L, ICID);
274     ite->vpeid = FIELD_EX64(itel, ITE_L, VPEID);
275     ite->doorbell = FIELD_EX64(iteh, ITE_H, DOORBELL);
276     trace_gicv3_its_ite_read(dte->ittaddr, eventid, ite->valid,
277                              ite->inttype, ite->intid, ite->icid,
278                              ite->vpeid, ite->doorbell);
279     return MEMTX_OK;
280 }
281 
282 /*
283  * Read the Device Table entry at index @devid. On success (including
284  * successfully determining that there is no valid DTE for this index),
285  * we return MEMTX_OK and populate the DTEntry struct accordingly.
286  * If there is an error reading memory then we return the error code.
287  */
288 static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte)
289 {
290     MemTxResult res = MEMTX_OK;
291     AddressSpace *as = &s->gicv3->dma_as;
292     uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res);
293     uint64_t dteval;
294 
295     if (entry_addr == -1) {
296         /* No L2 table entry, i.e. no valid DTE, or a memory error */
297         dte->valid = false;
298         goto out;
299     }
300     dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
301     if (res != MEMTX_OK) {
302         goto out;
303     }
304     dte->valid = FIELD_EX64(dteval, DTE, VALID);
305     dte->size = FIELD_EX64(dteval, DTE, SIZE);
306     /* DTE word field stores bits [51:8] of the ITT address */
307     dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT;
308 out:
309     if (res != MEMTX_OK) {
310         trace_gicv3_its_dte_read_fault(devid);
311     } else {
312         trace_gicv3_its_dte_read(devid, dte->valid, dte->size, dte->ittaddr);
313     }
314     return res;
315 }
316 
317 /*
318  * Given a (DeviceID, EventID), look up the corresponding ITE, including
319  * checking for the various invalid-value cases. If we find a valid ITE,
320  * fill in @ite and @dte and return CMD_CONTINUE_OK. Otherwise return
321  * CMD_STALL or CMD_CONTINUE as appropriate (and the contents of @ite
322  * should not be relied on).
323  *
324  * The string @who is purely for the LOG_GUEST_ERROR messages,
325  * and should indicate the name of the calling function or similar.
326  */
327 static ItsCmdResult lookup_ite(GICv3ITSState *s, const char *who,
328                                uint32_t devid, uint32_t eventid, ITEntry *ite,
329                                DTEntry *dte)
330 {
331     uint64_t num_eventids;
332 
333     if (devid >= s->dt.num_entries) {
334         qemu_log_mask(LOG_GUEST_ERROR,
335                       "%s: invalid command attributes: devid %d>=%d",
336                       who, devid, s->dt.num_entries);
337         return CMD_CONTINUE;
338     }
339 
340     if (get_dte(s, devid, dte) != MEMTX_OK) {
341         return CMD_STALL;
342     }
343     if (!dte->valid) {
344         qemu_log_mask(LOG_GUEST_ERROR,
345                       "%s: invalid command attributes: "
346                       "invalid dte for %d\n", who, devid);
347         return CMD_CONTINUE;
348     }
349 
350     num_eventids = 1ULL << (dte->size + 1);
351     if (eventid >= num_eventids) {
352         qemu_log_mask(LOG_GUEST_ERROR,
353                       "%s: invalid command attributes: eventid %d >= %"
354                       PRId64 "\n", who, eventid, num_eventids);
355         return CMD_CONTINUE;
356     }
357 
358     if (get_ite(s, eventid, dte, ite) != MEMTX_OK) {
359         return CMD_STALL;
360     }
361 
362     if (!ite->valid) {
363         qemu_log_mask(LOG_GUEST_ERROR,
364                       "%s: invalid command attributes: invalid ITE\n", who);
365         return CMD_CONTINUE;
366     }
367 
368     return CMD_CONTINUE_OK;
369 }
370 
371 /*
372  * Given an ICID, look up the corresponding CTE, including checking for various
373  * invalid-value cases. If we find a valid CTE, fill in @cte and return
374  * CMD_CONTINUE_OK; otherwise return CMD_STALL or CMD_CONTINUE (and the
375  * contents of @cte should not be relied on).
376  *
377  * The string @who is purely for the LOG_GUEST_ERROR messages,
378  * and should indicate the name of the calling function or similar.
379  */
380 static ItsCmdResult lookup_cte(GICv3ITSState *s, const char *who,
381                                uint32_t icid, CTEntry *cte)
382 {
383     if (icid >= s->ct.num_entries) {
384         qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid ICID 0x%x\n", who, icid);
385         return CMD_CONTINUE;
386     }
387     if (get_cte(s, icid, cte) != MEMTX_OK) {
388         return CMD_STALL;
389     }
390     if (!cte->valid) {
391         qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid CTE\n", who);
392         return CMD_CONTINUE;
393     }
394     if (cte->rdbase >= s->gicv3->num_cpu) {
395         return CMD_CONTINUE;
396     }
397     return CMD_CONTINUE_OK;
398 }
399 
400 
401 /*
402  * This function handles the processing of following commands based on
403  * the ItsCmdType parameter passed:-
404  * 1. triggering of lpi interrupt translation via ITS INT command
405  * 2. triggering of lpi interrupt translation via gits_translater register
406  * 3. handling of ITS CLEAR command
407  * 4. handling of ITS DISCARD command
408  */
409 static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
410                                        uint32_t eventid, ItsCmdType cmd)
411 {
412     DTEntry dte;
413     CTEntry cte;
414     ITEntry ite;
415     ItsCmdResult cmdres;
416 
417     cmdres = lookup_ite(s, __func__, devid, eventid, &ite, &dte);
418     if (cmdres != CMD_CONTINUE_OK) {
419         return cmdres;
420     }
421 
422     if (ite.inttype != ITE_INTTYPE_PHYSICAL) {
423         qemu_log_mask(LOG_GUEST_ERROR,
424                       "%s: invalid command attributes: invalid ITE\n",
425                       __func__);
426         return CMD_CONTINUE;
427     }
428 
429     cmdres = lookup_cte(s, __func__, ite.icid, &cte);
430     if (cmdres != CMD_CONTINUE_OK) {
431         return cmdres;
432     }
433 
434     if ((cmd == CLEAR) || (cmd == DISCARD)) {
435         gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 0);
436     } else {
437         gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 1);
438     }
439 
440     if (cmd == DISCARD) {
441         ITEntry ite = {};
442         /* remove mapping from interrupt translation table */
443         ite.valid = false;
444         return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
445     }
446     return CMD_CONTINUE_OK;
447 }
448 
449 static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt,
450                                     ItsCmdType cmd)
451 {
452     uint32_t devid, eventid;
453 
454     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
455     eventid = cmdpkt[1] & EVENTID_MASK;
456     switch (cmd) {
457     case INTERRUPT:
458         trace_gicv3_its_cmd_int(devid, eventid);
459         break;
460     case CLEAR:
461         trace_gicv3_its_cmd_clear(devid, eventid);
462         break;
463     case DISCARD:
464         trace_gicv3_its_cmd_discard(devid, eventid);
465         break;
466     default:
467         g_assert_not_reached();
468     }
469     return do_process_its_cmd(s, devid, eventid, cmd);
470 }
471 
472 static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
473                                   bool ignore_pInt)
474 {
475     uint32_t devid, eventid;
476     uint32_t pIntid = 0;
477     uint64_t num_eventids;
478     uint16_t icid = 0;
479     DTEntry dte;
480     ITEntry ite;
481 
482     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
483     eventid = cmdpkt[1] & EVENTID_MASK;
484     icid = cmdpkt[2] & ICID_MASK;
485 
486     if (ignore_pInt) {
487         pIntid = eventid;
488         trace_gicv3_its_cmd_mapi(devid, eventid, icid);
489     } else {
490         pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT;
491         trace_gicv3_its_cmd_mapti(devid, eventid, icid, pIntid);
492     }
493 
494     if (devid >= s->dt.num_entries) {
495         qemu_log_mask(LOG_GUEST_ERROR,
496                       "%s: invalid command attributes: devid %d>=%d",
497                       __func__, devid, s->dt.num_entries);
498         return CMD_CONTINUE;
499     }
500 
501     if (get_dte(s, devid, &dte) != MEMTX_OK) {
502         return CMD_STALL;
503     }
504     num_eventids = 1ULL << (dte.size + 1);
505 
506     if (icid >= s->ct.num_entries) {
507         qemu_log_mask(LOG_GUEST_ERROR,
508                       "%s: invalid ICID 0x%x >= 0x%x\n",
509                       __func__, icid, s->ct.num_entries);
510         return CMD_CONTINUE;
511     }
512 
513     if (!dte.valid) {
514         qemu_log_mask(LOG_GUEST_ERROR,
515                       "%s: no valid DTE for devid 0x%x\n", __func__, devid);
516         return CMD_CONTINUE;
517     }
518 
519     if (eventid >= num_eventids) {
520         qemu_log_mask(LOG_GUEST_ERROR,
521                       "%s: invalid event ID 0x%x >= 0x%" PRIx64 "\n",
522                       __func__, eventid, num_eventids);
523         return CMD_CONTINUE;
524     }
525 
526     if (!intid_in_lpi_range(pIntid)) {
527         qemu_log_mask(LOG_GUEST_ERROR,
528                       "%s: invalid interrupt ID 0x%x\n", __func__, pIntid);
529         return CMD_CONTINUE;
530     }
531 
532     /* add ite entry to interrupt translation table */
533     ite.valid = true;
534     ite.inttype = ITE_INTTYPE_PHYSICAL;
535     ite.intid = pIntid;
536     ite.icid = icid;
537     ite.doorbell = INTID_SPURIOUS;
538     ite.vpeid = 0;
539     return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
540 }
541 
542 static ItsCmdResult process_vmapti(GICv3ITSState *s, const uint64_t *cmdpkt,
543                                    bool ignore_vintid)
544 {
545     uint32_t devid, eventid, vintid, doorbell, vpeid;
546     uint32_t num_eventids;
547     DTEntry dte;
548     ITEntry ite;
549 
550     if (!its_feature_virtual(s)) {
551         return CMD_CONTINUE;
552     }
553 
554     devid = FIELD_EX64(cmdpkt[0], VMAPTI_0, DEVICEID);
555     eventid = FIELD_EX64(cmdpkt[1], VMAPTI_1, EVENTID);
556     vpeid = FIELD_EX64(cmdpkt[1], VMAPTI_1, VPEID);
557     doorbell = FIELD_EX64(cmdpkt[2], VMAPTI_2, DOORBELL);
558     if (ignore_vintid) {
559         vintid = eventid;
560         trace_gicv3_its_cmd_vmapi(devid, eventid, vpeid, doorbell);
561     } else {
562         vintid = FIELD_EX64(cmdpkt[2], VMAPTI_2, VINTID);
563         trace_gicv3_its_cmd_vmapti(devid, eventid, vpeid, vintid, doorbell);
564     }
565 
566     if (devid >= s->dt.num_entries) {
567         qemu_log_mask(LOG_GUEST_ERROR,
568                       "%s: invalid DeviceID 0x%x (must be less than 0x%x)\n",
569                       __func__, devid, s->dt.num_entries);
570         return CMD_CONTINUE;
571     }
572 
573     if (get_dte(s, devid, &dte) != MEMTX_OK) {
574         return CMD_STALL;
575     }
576 
577     if (!dte.valid) {
578         qemu_log_mask(LOG_GUEST_ERROR,
579                       "%s: no entry in device table for DeviceID 0x%x\n",
580                       __func__, devid);
581         return CMD_CONTINUE;
582     }
583 
584     num_eventids = 1ULL << (dte.size + 1);
585 
586     if (eventid >= num_eventids) {
587         qemu_log_mask(LOG_GUEST_ERROR,
588                       "%s: EventID 0x%x too large for DeviceID 0x%x "
589                       "(must be less than 0x%x)\n",
590                       __func__, eventid, devid, num_eventids);
591         return CMD_CONTINUE;
592     }
593     if (!intid_in_lpi_range(vintid)) {
594         qemu_log_mask(LOG_GUEST_ERROR,
595                       "%s: VIntID 0x%x not a valid LPI\n",
596                       __func__, vintid);
597         return CMD_CONTINUE;
598     }
599     if (!valid_doorbell(doorbell)) {
600         qemu_log_mask(LOG_GUEST_ERROR,
601                       "%s: Doorbell %d not 1023 and not a valid LPI\n",
602                       __func__, doorbell);
603         return CMD_CONTINUE;
604     }
605     if (vpeid >= s->vpet.num_entries) {
606         qemu_log_mask(LOG_GUEST_ERROR,
607                       "%s: VPEID 0x%x out of range (must be less than 0x%x)\n",
608                       __func__, vpeid, s->vpet.num_entries);
609         return CMD_CONTINUE;
610     }
611     /* add ite entry to interrupt translation table */
612     ite.valid = true;
613     ite.inttype = ITE_INTTYPE_VIRTUAL;
614     ite.intid = vintid;
615     ite.icid = 0;
616     ite.doorbell = doorbell;
617     ite.vpeid = vpeid;
618     return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
619 }
620 
621 /*
622  * Update the Collection Table entry for @icid to @cte. Returns true
623  * on success, false if there was a memory access error.
624  */
625 static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte)
626 {
627     AddressSpace *as = &s->gicv3->dma_as;
628     uint64_t entry_addr;
629     uint64_t cteval = 0;
630     MemTxResult res = MEMTX_OK;
631 
632     trace_gicv3_its_cte_write(icid, cte->valid, cte->rdbase);
633 
634     if (cte->valid) {
635         /* add mapping entry to collection table */
636         cteval = FIELD_DP64(cteval, CTE, VALID, 1);
637         cteval = FIELD_DP64(cteval, CTE, RDBASE, cte->rdbase);
638     }
639 
640     entry_addr = table_entry_addr(s, &s->ct, icid, &res);
641     if (res != MEMTX_OK) {
642         /* memory access error: stall */
643         return false;
644     }
645     if (entry_addr == -1) {
646         /* No L2 table for this index: discard write and continue */
647         return true;
648     }
649 
650     address_space_stq_le(as, entry_addr, cteval, MEMTXATTRS_UNSPECIFIED, &res);
651     return res == MEMTX_OK;
652 }
653 
654 static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
655 {
656     uint16_t icid;
657     CTEntry cte;
658 
659     icid = cmdpkt[2] & ICID_MASK;
660     cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
661     if (cte.valid) {
662         cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
663         cte.rdbase &= RDBASE_PROCNUM_MASK;
664     } else {
665         cte.rdbase = 0;
666     }
667     trace_gicv3_its_cmd_mapc(icid, cte.rdbase, cte.valid);
668 
669     if (icid >= s->ct.num_entries) {
670         qemu_log_mask(LOG_GUEST_ERROR, "ITS MAPC: invalid ICID 0x%x\n", icid);
671         return CMD_CONTINUE;
672     }
673     if (cte.valid && cte.rdbase >= s->gicv3->num_cpu) {
674         qemu_log_mask(LOG_GUEST_ERROR,
675                       "ITS MAPC: invalid RDBASE %u\n", cte.rdbase);
676         return CMD_CONTINUE;
677     }
678 
679     return update_cte(s, icid, &cte) ? CMD_CONTINUE_OK : CMD_STALL;
680 }
681 
682 /*
683  * Update the Device Table entry for @devid to @dte. Returns true
684  * on success, false if there was a memory access error.
685  */
686 static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte)
687 {
688     AddressSpace *as = &s->gicv3->dma_as;
689     uint64_t entry_addr;
690     uint64_t dteval = 0;
691     MemTxResult res = MEMTX_OK;
692 
693     trace_gicv3_its_dte_write(devid, dte->valid, dte->size, dte->ittaddr);
694 
695     if (dte->valid) {
696         /* add mapping entry to device table */
697         dteval = FIELD_DP64(dteval, DTE, VALID, 1);
698         dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size);
699         dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr);
700     }
701 
702     entry_addr = table_entry_addr(s, &s->dt, devid, &res);
703     if (res != MEMTX_OK) {
704         /* memory access error: stall */
705         return false;
706     }
707     if (entry_addr == -1) {
708         /* No L2 table for this index: discard write and continue */
709         return true;
710     }
711     address_space_stq_le(as, entry_addr, dteval, MEMTXATTRS_UNSPECIFIED, &res);
712     return res == MEMTX_OK;
713 }
714 
715 static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
716 {
717     uint32_t devid;
718     DTEntry dte;
719 
720     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
721     dte.size = cmdpkt[1] & SIZE_MASK;
722     dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
723     dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
724 
725     trace_gicv3_its_cmd_mapd(devid, dte.size, dte.ittaddr, dte.valid);
726 
727     if (devid >= s->dt.num_entries) {
728         qemu_log_mask(LOG_GUEST_ERROR,
729                       "ITS MAPD: invalid device ID field 0x%x >= 0x%x\n",
730                       devid, s->dt.num_entries);
731         return CMD_CONTINUE;
732     }
733 
734     if (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
735         qemu_log_mask(LOG_GUEST_ERROR,
736                       "ITS MAPD: invalid size %d\n", dte.size);
737         return CMD_CONTINUE;
738     }
739 
740     return update_dte(s, devid, &dte) ? CMD_CONTINUE_OK : CMD_STALL;
741 }
742 
743 static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
744 {
745     uint64_t rd1, rd2;
746 
747     rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1);
748     rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2);
749 
750     trace_gicv3_its_cmd_movall(rd1, rd2);
751 
752     if (rd1 >= s->gicv3->num_cpu) {
753         qemu_log_mask(LOG_GUEST_ERROR,
754                       "%s: RDBASE1 %" PRId64
755                       " out of range (must be less than %d)\n",
756                       __func__, rd1, s->gicv3->num_cpu);
757         return CMD_CONTINUE;
758     }
759     if (rd2 >= s->gicv3->num_cpu) {
760         qemu_log_mask(LOG_GUEST_ERROR,
761                       "%s: RDBASE2 %" PRId64
762                       " out of range (must be less than %d)\n",
763                       __func__, rd2, s->gicv3->num_cpu);
764         return CMD_CONTINUE;
765     }
766 
767     if (rd1 == rd2) {
768         /* Move to same target must succeed as a no-op */
769         return CMD_CONTINUE_OK;
770     }
771 
772     /* Move all pending LPIs from redistributor 1 to redistributor 2 */
773     gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]);
774 
775     return CMD_CONTINUE_OK;
776 }
777 
/*
 * Handle the MOVI command: move the pending state of the physical LPI
 * identified by (DeviceID, EventID) from the redistributor of its
 * current collection to that of @new_icid, and rewrite the ITE's ICID.
 */
static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid, eventid;
    uint16_t new_icid;
    DTEntry dte;
    CTEntry old_cte, new_cte;
    ITEntry old_ite;
    ItsCmdResult cmdres;

    devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
    eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
    new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID);

    trace_gicv3_its_cmd_movi(devid, eventid, new_icid);

    cmdres = lookup_ite(s, __func__, devid, eventid, &old_ite, &dte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    /* MOVI only applies to physical LPIs */
    if (old_ite.inttype != ITE_INTTYPE_PHYSICAL) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: invalid ITE\n",
                      __func__);
        return CMD_CONTINUE;
    }

    /* Both the current and the destination collection must be valid */
    cmdres = lookup_cte(s, __func__, old_ite.icid, &old_cte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }
    cmdres = lookup_cte(s, __func__, new_icid, &new_cte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    /* No redistributor-level work needed if both map to the same CPU */
    if (old_cte.rdbase != new_cte.rdbase) {
        /* Move the LPI from the old redistributor to the new one */
        gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase],
                             &s->gicv3->cpu[new_cte.rdbase],
                             old_ite.intid);
    }

    /* Update the ICID field in the interrupt translation table entry */
    old_ite.icid = new_icid;
    return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE_OK : CMD_STALL;
}
825 
826 /*
827  * Update the vPE Table entry at index @vpeid with the entry @vte.
828  * Returns true on success, false if there was a memory access error.
829  */
830 static bool update_vte(GICv3ITSState *s, uint32_t vpeid, const VTEntry *vte)
831 {
832     AddressSpace *as = &s->gicv3->dma_as;
833     uint64_t entry_addr;
834     uint64_t vteval = 0;
835     MemTxResult res = MEMTX_OK;
836 
837     trace_gicv3_its_vte_write(vpeid, vte->valid, vte->vptsize, vte->vptaddr,
838                               vte->rdbase);
839 
840     if (vte->valid) {
841         vteval = FIELD_DP64(vteval, VTE, VALID, 1);
842         vteval = FIELD_DP64(vteval, VTE, VPTSIZE, vte->vptsize);
843         vteval = FIELD_DP64(vteval, VTE, VPTADDR, vte->vptaddr);
844         vteval = FIELD_DP64(vteval, VTE, RDBASE, vte->rdbase);
845     }
846 
847     entry_addr = table_entry_addr(s, &s->vpet, vpeid, &res);
848     if (res != MEMTX_OK) {
849         return false;
850     }
851     if (entry_addr == -1) {
852         /* No L2 table for this index: discard write and continue */
853         return true;
854     }
855     address_space_stq_le(as, entry_addr, vteval, MEMTXATTRS_UNSPECIFIED, &res);
856     return res == MEMTX_OK;
857 }
858 
/*
 * Handle the VMAPP command: map a vPE (VPEID) to a redistributor and a
 * virtual pending table. Ignored entirely if this ITS does not
 * implement GICv4 virtual LPIs.
 */
static ItsCmdResult process_vmapp(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    VTEntry vte;
    uint32_t vpeid;

    if (!its_feature_virtual(s)) {
        return CMD_CONTINUE;
    }

    vpeid = FIELD_EX64(cmdpkt[1], VMAPP_1, VPEID);
    vte.rdbase = FIELD_EX64(cmdpkt[2], VMAPP_2, RDBASE);
    vte.valid = FIELD_EX64(cmdpkt[2], VMAPP_2, V);
    vte.vptsize = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTSIZE);
    vte.vptaddr = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTADDR);

    trace_gicv3_its_cmd_vmapp(vpeid, vte.rdbase, vte.valid,
                              vte.vptaddr, vte.vptsize);

    /*
     * For GICv4.0 the VPT_size field is only 5 bits, whereas we
     * define our field macros to include the full GICv4.1 8 bits.
     * The range check on VPT_size will catch the cases where
     * the guest set the RES0-in-GICv4.0 bits [7:6].
     * NOTE(review): the bound used here is GITS_TYPER.IDBITS —
     * confirm that is the intended limit for VPT_size.
     */
    if (vte.vptsize > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid VPT_size 0x%x\n", __func__, vte.vptsize);
        return CMD_CONTINUE;
    }

    /* rdbase only needs to be in range when the entry is being mapped */
    if (vte.valid && vte.rdbase >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid rdbase 0x%x\n", __func__, vte.rdbase);
        return CMD_CONTINUE;
    }

    if (vpeid >= s->vpet.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: VPEID 0x%x out of range (must be less than 0x%x)\n",
                      __func__, vpeid, s->vpet.num_entries);
        return CMD_CONTINUE;
    }

    return update_vte(s, vpeid, &vte) ? CMD_CONTINUE_OK : CMD_STALL;
}
904 
/*
 * Drain the ITS command queue, executing every command between CREADR
 * and CWRITER. The current implementation blocks until all commands
 * are processed, so by the time a SYNC command is reached there is
 * nothing left for it to wait for.
 */
static void process_cmdq(GICv3ITSState *s)
{
    uint32_t wr_offset = 0;
    uint32_t rd_offset = 0;
    uint32_t cq_offset = 0;
    AddressSpace *as = &s->gicv3->dma_as;
    uint8_t cmd;
    int i;

    /* Commands are only processed while the ITS is enabled */
    if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
        return;
    }

    wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);

    if (wr_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid write offset "
                      "%d\n", __func__, wr_offset);
        return;
    }

    rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);

    if (rd_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid read offset "
                      "%d\n", __func__, rd_offset);
        return;
    }

    while (wr_offset != rd_offset) {
        ItsCmdResult result = CMD_CONTINUE_OK;
        void *hostmem;
        hwaddr buflen;
        uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];

        cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);

        /* Map one command-queue entry of guest memory */
        buflen = GITS_CMDQ_ENTRY_SIZE;
        hostmem = address_space_map(as, s->cq.base_addr + cq_offset,
                                    &buflen, false, MEMTXATTRS_UNSPECIFIED);
        if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) {
            /* Partial mappings must still be released before bailing out */
            if (hostmem) {
                address_space_unmap(as, hostmem, buflen, false, 0);
            }
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: could not read command at 0x%" PRIx64 "\n",
                          __func__, s->cq.base_addr + cq_offset);
            break;
        }
        /* Command packets are little-endian 64-bit words */
        for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) {
            cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t));
        }
        address_space_unmap(as, hostmem, buflen, false, 0);

        cmd = cmdpkt[0] & CMD_MASK;

        trace_gicv3_its_process_command(rd_offset, cmd);

        switch (cmd) {
        case GITS_CMD_INT:
            result = process_its_cmd(s, cmdpkt, INTERRUPT);
            break;
        case GITS_CMD_CLEAR:
            result = process_its_cmd(s, cmdpkt, CLEAR);
            break;
        case GITS_CMD_SYNC:
            /*
             * Current implementation makes a blocking synchronous call
             * for every command issued earlier, hence the internal state
             * is already consistent by the time SYNC command is executed.
             * Hence no further processing is required for SYNC command.
             */
            trace_gicv3_its_cmd_sync();
            break;
        case GITS_CMD_MAPD:
            result = process_mapd(s, cmdpkt);
            break;
        case GITS_CMD_MAPC:
            result = process_mapc(s, cmdpkt);
            break;
        case GITS_CMD_MAPTI:
            result = process_mapti(s, cmdpkt, false);
            break;
        case GITS_CMD_MAPI:
            result = process_mapti(s, cmdpkt, true);
            break;
        case GITS_CMD_DISCARD:
            result = process_its_cmd(s, cmdpkt, DISCARD);
            break;
        case GITS_CMD_INV:
        case GITS_CMD_INVALL:
            /*
             * Current implementation doesn't cache any ITS tables,
             * but the calculated lpi priority information. We only
             * need to trigger lpi priority re-calculation to be in
             * sync with LPI config table or pending table changes.
             */
            trace_gicv3_its_cmd_inv();
            for (i = 0; i < s->gicv3->num_cpu; i++) {
                gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
            }
            break;
        case GITS_CMD_MOVI:
            result = process_movi(s, cmdpkt);
            break;
        case GITS_CMD_MOVALL:
            result = process_movall(s, cmdpkt);
            break;
        case GITS_CMD_VMAPTI:
            result = process_vmapti(s, cmdpkt, false);
            break;
        case GITS_CMD_VMAPI:
            result = process_vmapti(s, cmdpkt, true);
            break;
        case GITS_CMD_VMAPP:
            result = process_vmapp(s, cmdpkt);
            break;
        default:
            /* Unknown commands are ignored (logged via tracepoint only) */
            trace_gicv3_its_cmd_unknown(cmd);
            break;
        }
        if (result != CMD_STALL) {
            /* CMD_CONTINUE or CMD_CONTINUE_OK: consume this command */
            rd_offset++;
            rd_offset %= s->cq.num_entries;
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
        } else {
            /* CMD_STALL: leave CREADR at the failed command and stop */
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: 0x%x cmd processing failed, stalling\n",
                          __func__, cmd);
            break;
        }
    }
}
1048 
/*
 * This function extracts the ITS Device, Collection and (for GICv4)
 * vPE table specific parameters (like base_addr, size etc) from the
 * GITS_BASER<n> registers into the cached TableDesc structures.
 * It is called during ITS enable and also during post_load migration.
 */
static void extract_table_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint8_t  page_sz_type;
    uint8_t type;
    uint32_t page_sz = 0;
    uint64_t value;

    for (int i = 0; i < 8; i++) {
        TableDesc *td;
        int idbits;

        value = s->baser[i];

        /* An all-zeroes GITS_BASER<n> is an unimplemented table register */
        if (!value) {
            continue;
        }

        page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);

        switch (page_sz_type) {
        case 0:
            page_sz = GITS_PAGE_SIZE_4K;
            break;

        case 1:
            page_sz = GITS_PAGE_SIZE_16K;
            break;

        case 2:
        case 3:
            /* Page-size encodings 2 and 3 both select 64K pages */
            page_sz = GITS_PAGE_SIZE_64K;
            break;

        default:
            g_assert_not_reached();
        }

        num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;

        type = FIELD_EX64(value, GITS_BASER, TYPE);

        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
            td = &s->dt;
            idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
            break;
        case GITS_BASER_TYPE_COLLECTION:
            td = &s->ct;
            if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
                idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
            } else {
                /* 16-bit CollectionId supported when CIL == 0 */
                idbits = 16;
            }
            break;
        case GITS_BASER_TYPE_VPE:
            td = &s->vpet;
            /*
             * For QEMU vPEIDs are always 16 bits. (GICv4.1 allows an
             * implementation to implement fewer bits and report this
             * via GICD_TYPER2.)
             */
            idbits = 16;
            break;
        default:
            /*
             * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
             * ensures we will only see type values corresponding to
             * the values set up in gicv3_its_reset().
             */
            g_assert_not_reached();
        }

        memset(td, 0, sizeof(*td));
        /*
         * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
         * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
         * do not have a special case where the GITS_BASER<n>.Valid bit is 0
         * for the register corresponding to the Collection table but we
         * still have to process interrupts using non-memory-backed
         * Collection table entries.)
         * The specification makes it UNPREDICTABLE to enable the ITS without
         * marking each BASER<n> as valid. We choose to handle these as if
         * the table was zero-sized, so commands using the table will fail
         * and interrupts requested via GITS_TRANSLATER writes will be ignored.
         * This happens automatically by leaving the num_entries field at
         * zero, which will be caught by the bounds checks we have before
         * every table lookup anyway.
         */
        if (!FIELD_EX64(value, GITS_BASER, VALID)) {
            continue;
        }
        td->page_sz = page_sz;
        td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
        td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
        td->base_addr = baser_base_addr(value, page_sz);
        if (!td->indirect) {
            /* Flat table: entries are packed directly into the pages */
            td->num_entries = (num_pages * page_sz) / td->entry_sz;
        } else {
            /* Two-level table: L1 pages of pointers to L2 entry pages */
            td->num_entries = (((num_pages * page_sz) /
                                  L1TABLE_ENTRY_SIZE) *
                                 (page_sz / td->entry_sz));
        }
        /* Clamp to the number of IDs representable in idbits */
        td->num_entries = MIN(td->num_entries, 1ULL << idbits);
    }
}
1161 
1162 static void extract_cmdq_params(GICv3ITSState *s)
1163 {
1164     uint16_t num_pages = 0;
1165     uint64_t value = s->cbaser;
1166 
1167     num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
1168 
1169     memset(&s->cq, 0 , sizeof(s->cq));
1170 
1171     if (FIELD_EX64(value, GITS_CBASER, VALID)) {
1172         s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
1173                              GITS_CMDQ_ENTRY_SIZE;
1174         s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
1175         s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
1176     }
1177 }
1178 
1179 static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
1180                                               uint64_t *data, unsigned size,
1181                                               MemTxAttrs attrs)
1182 {
1183     /*
1184      * GITS_TRANSLATER is write-only, and all other addresses
1185      * in the interrupt translation space frame are RES0.
1186      */
1187     *data = 0;
1188     return MEMTX_OK;
1189 }
1190 
1191 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
1192                                                uint64_t data, unsigned size,
1193                                                MemTxAttrs attrs)
1194 {
1195     GICv3ITSState *s = (GICv3ITSState *)opaque;
1196     bool result = true;
1197 
1198     trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);
1199 
1200     switch (offset) {
1201     case GITS_TRANSLATER:
1202         if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1203             result = do_process_its_cmd(s, attrs.requester_id, data, NONE);
1204         }
1205         break;
1206     default:
1207         break;
1208     }
1209 
1210     if (result) {
1211         return MEMTX_OK;
1212     } else {
1213         return MEMTX_ERROR;
1214     }
1215 }
1216 
/*
 * Handle a 32-bit write to an ITS control frame register.
 * Returns false for offsets that don't decode; the caller logs the
 * bad access and treats the register as RAZ/WI.
 */
static bool its_writel(GICv3ITSState *s, hwaddr offset,
                              uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        if (value & R_GITS_CTLR_ENABLED_MASK) {
            /* On enable, snapshot table/queue config and drain the queue */
            s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
            extract_table_params(s);
            extract_cmdq_params(s);
            process_cmdq(s);
        } else {
            s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 0, 32, value);
            /* Writing CBASER resets the read pointer */
            s->creadr = 0;
        }
        break;
    case GITS_CBASER + 4:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 32, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        /* The Retry bit is write-ignored */
        s->cwriter = deposit64(s->cwriter, 0, 32,
                               (value & ~R_GITS_CWRITER_RETRY_MASK));
        /* Advancing CWRITER past CREADR makes commands available */
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CWRITER + 4:
        s->cwriter = deposit64(s->cwriter, 32, 32, value);
        break;
    case GITS_CREADR:
        /* CREADR is only writable when GICD_CTLR.DS is set */
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 0, 32,
                                  (value & ~R_GITS_CREADR_STALLED_MASK));
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_CREADR + 4:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 32, 32, value);
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;

            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            /* Update one 32-bit half, preserving the RO fields */
            if (offset & 7) {
                value <<= 32;
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
                s->baser[index] |= value;
            } else {
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
                s->baser[index] |= value;
            }
        }
        break;
    case GITS_IIDR:
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}
1322 
1323 static bool its_readl(GICv3ITSState *s, hwaddr offset,
1324                              uint64_t *data, MemTxAttrs attrs)
1325 {
1326     bool result = true;
1327     int index;
1328 
1329     switch (offset) {
1330     case GITS_CTLR:
1331         *data = s->ctlr;
1332         break;
1333     case GITS_IIDR:
1334         *data = gicv3_iidr();
1335         break;
1336     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1337         /* ID registers */
1338         *data = gicv3_idreg(offset - GITS_IDREGS, GICV3_PIDR0_ITS);
1339         break;
1340     case GITS_TYPER:
1341         *data = extract64(s->typer, 0, 32);
1342         break;
1343     case GITS_TYPER + 4:
1344         *data = extract64(s->typer, 32, 32);
1345         break;
1346     case GITS_CBASER:
1347         *data = extract64(s->cbaser, 0, 32);
1348         break;
1349     case GITS_CBASER + 4:
1350         *data = extract64(s->cbaser, 32, 32);
1351         break;
1352     case GITS_CREADR:
1353         *data = extract64(s->creadr, 0, 32);
1354         break;
1355     case GITS_CREADR + 4:
1356         *data = extract64(s->creadr, 32, 32);
1357         break;
1358     case GITS_CWRITER:
1359         *data = extract64(s->cwriter, 0, 32);
1360         break;
1361     case GITS_CWRITER + 4:
1362         *data = extract64(s->cwriter, 32, 32);
1363         break;
1364     case GITS_BASER ... GITS_BASER + 0x3f:
1365         index = (offset - GITS_BASER) / 8;
1366         if (offset & 7) {
1367             *data = extract64(s->baser[index], 32, 32);
1368         } else {
1369             *data = extract64(s->baser[index], 0, 32);
1370         }
1371         break;
1372     default:
1373         result = false;
1374         break;
1375     }
1376     return result;
1377 }
1378 
/*
 * Handle a 64-bit write to an ITS control frame register.
 * Returns false for offsets that don't decode; the caller logs the
 * bad access and treats the register as RAZ/WI.
 */
static bool its_writell(GICv3ITSState *s, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;
            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            /* Preserve the read-only fields across the write */
            s->baser[index] &= GITS_BASER_RO_MASK;
            s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = value;
            /* Writing CBASER resets the read pointer */
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        /* The Retry bit is write-ignored */
        s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
        /* Advancing CWRITER past CREADR makes commands available */
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CREADR:
        /* CREADR is only writable when GICD_CTLR.DS is set */
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_TYPER:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}
1439 
1440 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1441                               uint64_t *data, MemTxAttrs attrs)
1442 {
1443     bool result = true;
1444     int index;
1445 
1446     switch (offset) {
1447     case GITS_TYPER:
1448         *data = s->typer;
1449         break;
1450     case GITS_BASER ... GITS_BASER + 0x3f:
1451         index = (offset - GITS_BASER) / 8;
1452         *data = s->baser[index];
1453         break;
1454     case GITS_CBASER:
1455         *data = s->cbaser;
1456         break;
1457     case GITS_CREADR:
1458         *data = s->creadr;
1459         break;
1460     case GITS_CWRITER:
1461         *data = s->cwriter;
1462         break;
1463     default:
1464         result = false;
1465         break;
1466     }
1467     return result;
1468 }
1469 
1470 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1471                                   unsigned size, MemTxAttrs attrs)
1472 {
1473     GICv3ITSState *s = (GICv3ITSState *)opaque;
1474     bool result;
1475 
1476     switch (size) {
1477     case 4:
1478         result = its_readl(s, offset, data, attrs);
1479         break;
1480     case 8:
1481         result = its_readll(s, offset, data, attrs);
1482         break;
1483     default:
1484         result = false;
1485         break;
1486     }
1487 
1488     if (!result) {
1489         qemu_log_mask(LOG_GUEST_ERROR,
1490                       "%s: invalid guest read at offset " TARGET_FMT_plx
1491                       " size %u\n", __func__, offset, size);
1492         trace_gicv3_its_badread(offset, size);
1493         /*
1494          * The spec requires that reserved registers are RAZ/WI;
1495          * so use false returns from leaf functions as a way to
1496          * trigger the guest-error logging but don't return it to
1497          * the caller, or we'll cause a spurious guest data abort.
1498          */
1499         *data = 0;
1500     } else {
1501         trace_gicv3_its_read(offset, *data, size);
1502     }
1503     return MEMTX_OK;
1504 }
1505 
1506 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1507                                    unsigned size, MemTxAttrs attrs)
1508 {
1509     GICv3ITSState *s = (GICv3ITSState *)opaque;
1510     bool result;
1511 
1512     switch (size) {
1513     case 4:
1514         result = its_writel(s, offset, data, attrs);
1515         break;
1516     case 8:
1517         result = its_writell(s, offset, data, attrs);
1518         break;
1519     default:
1520         result = false;
1521         break;
1522     }
1523 
1524     if (!result) {
1525         qemu_log_mask(LOG_GUEST_ERROR,
1526                       "%s: invalid guest write at offset " TARGET_FMT_plx
1527                       " size %u\n", __func__, offset, size);
1528         trace_gicv3_its_badwrite(offset, data, size);
1529         /*
1530          * The spec requires that reserved registers are RAZ/WI;
1531          * so use false returns from leaf functions as a way to
1532          * trigger the guest-error logging but don't return it to
1533          * the caller, or we'll cause a spurious guest data abort.
1534          */
1535     } else {
1536         trace_gicv3_its_write(offset, data, size);
1537     }
1538     return MEMTX_OK;
1539 }
1540 
/*
 * MMIO ops for the ITS control register frame; guest accesses must be
 * 4 or 8 bytes wide.
 */
static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1550 
/*
 * MMIO ops for the interrupt translation frame (GITS_TRANSLATER);
 * guest accesses must be 2 or 4 bytes wide.
 */
static const MemoryRegionOps gicv3_its_translation_ops = {
    .read_with_attrs = gicv3_its_translation_read,
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1560 
1561 static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
1562 {
1563     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1564     int i;
1565 
1566     for (i = 0; i < s->gicv3->num_cpu; i++) {
1567         if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
1568             error_setg(errp, "Physical LPI not supported by CPU %d", i);
1569             return;
1570         }
1571     }
1572 
1573     gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
1574 
1575     /* set the ITS default features supported */
1576     s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
1577     s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
1578                           ITS_ITT_ENTRY_SIZE - 1);
1579     s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
1580     s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
1581     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
1582     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
1583 }
1584 
/*
 * Device reset: chain to the common-class reset, then establish the
 * reset values of GITS_CTLR and the GITS_BASER<n> registers.
 */
static void gicv3_its_reset(DeviceState *dev)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);

    c->parent_reset(dev);

    /* Quiescent bit reset to 1 */
    s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);

    /*
     * setting GITS_BASER0.Type = 0b001 (Device)
     *         GITS_BASER1.Type = 0b100 (Collection Table)
     *         GITS_BASER2.Type = 0b010 (vPE) for GICv4 and later
     *         GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented)
     *         GITS_BASER<0,1>.Page_Size = 64KB
     * and default translation table entry size to 16 bytes
     */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_DEVICE);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
                             GITS_DTE_SIZE - 1);

    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_COLLECTION);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
                             GITS_CTE_SIZE - 1);

    /* The vPE table register only exists when virtual LPIs are supported */
    if (its_feature_virtual(s)) {
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, TYPE,
                                 GITS_BASER_TYPE_VPE);
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, PAGESIZE,
                                 GITS_BASER_PAGESIZE_64K);
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, ENTRYSIZE,
                                 GITS_VPE_SIZE - 1);
    }
}
1626 
1627 static void gicv3_its_post_load(GICv3ITSState *s)
1628 {
1629     if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1630         extract_table_params(s);
1631         extract_cmdq_params(s);
1632     }
1633 }
1634 
/* QOM properties: link to the GICv3 this ITS delivers interrupts through */
static Property gicv3_its_props[] = {
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};
1640 
/* Class init: wire up realize, properties, reset chaining and post_load */
static void gicv3_its_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
    GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);

    dc->realize = gicv3_arm_its_realize;
    device_class_set_props(dc, gicv3_its_props);
    /* Save the common-class reset in parent_reset so ours can chain to it */
    device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
    icc->post_load = gicv3_its_post_load;
}
1652 
/* QOM type registration data for the emulated GICv3 ITS */
static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};
1660 
/* Register the ITS type with QOM at module init time */
static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)
1667