xref: /openbmc/qemu/hw/intc/arm_gicv3_its.c (revision 93f4fdcd4d98c0de8e056e08016bce7d71a91100)
1 /*
2  * ITS emulation for a GICv3-based system
3  *
4  * Copyright Linaro.org 2021
5  *
6  * Authors:
7  *  Shashi Mallela <shashi.mallela@linaro.org>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10  * option) any later version.  See the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qemu/log.h"
16 #include "trace.h"
17 #include "hw/qdev-properties.h"
18 #include "hw/intc/arm_gicv3_its_common.h"
19 #include "gicv3_internal.h"
20 #include "qom/object.h"
21 #include "qapi/error.h"
22 
typedef struct GICv3ITSClass GICv3ITSClass;
/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
                     ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)

struct GICv3ITSClass {
    GICv3ITSCommonClass parent_class;
    /*
     * Saved copy of the parent class's reset handler, so our own
     * reset implementation can chain up to it.
     */
    void (*parent_reset)(DeviceState *dev);
};
32 
/*
 * This is an internal enum used to distinguish between LPI triggered
 * via command queue and LPI triggered via gits_translater write.
 */
typedef enum ItsCmdType {
    NONE = 0, /* internal indication for GITS_TRANSLATER write */
    CLEAR = 1,     /* ITS CLEAR command: retire the pending LPI */
    DISCARD = 2,   /* ITS DISCARD command: clear LPI and remove the ITE */
    INTERRUPT = 3, /* ITS INT command: set the LPI pending */
} ItsCmdType;
43 
/* In-memory Device Table Entry, unpacked into host-friendly fields */
typedef struct DTEntry {
    bool valid;
    /* Number of EventID bits minus one: the ITT has 2^(size+1) entries */
    unsigned size;
    /* Guest address of the Interrupt Translation Table for this device */
    uint64_t ittaddr;
} DTEntry;
49 
/* In-memory Collection Table Entry, unpacked into host-friendly fields */
typedef struct CTEntry {
    bool valid;
    /* Target redistributor; used here as a CPU index (see do_process_its_cmd) */
    uint32_t rdbase;
} CTEntry;
54 
/* In-memory Interrupt Translation Entry, unpacked into host-friendly fields */
typedef struct ITEntry {
    bool valid;
    int inttype;       /* ITE_INTTYPE_PHYSICAL or ITE_INTTYPE_VIRTUAL */
    uint32_t intid;    /* physical or virtual LPI INTID */
    uint32_t doorbell; /* doorbell LPI, or INTID_SPURIOUS for "none" */
    uint32_t icid;     /* collection ID (physical interrupts) */
    uint32_t vpeid;    /* vPE ID (virtual interrupts) */
} ITEntry;
63 
/* In-memory vPE Table Entry, unpacked into host-friendly fields */
typedef struct VTEntry {
    bool valid;
    unsigned vptsize;  /* size field of the virtual LPI Pending Table */
    uint32_t rdbase;   /* target redistributor for this vPE */
    uint64_t vptaddr;  /* guest address of the virtual LPI Pending Table */
} VTEntry;
70 
/*
 * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
 * if a command parameter is not correct. These include both "stall
 * processing of the command queue" and "ignore this command, and
 * keep processing the queue". In our implementation we choose that
 * memory transaction errors reading the command packet provoke a
 * stall, but errors in parameters cause us to ignore the command
 * and continue processing.
 * The process_* functions which handle individual ITS commands all
 * return an ItsCmdResult which tells process_cmdq() whether it should
 * stall, keep going because of an error, or keep going because the
 * command was a success.
 */
typedef enum ItsCmdResult {
    CMD_STALL = 0,       /* memory error: stop processing the queue */
    CMD_CONTINUE = 1,    /* parameter error: drop command, keep processing */
    CMD_CONTINUE_OK = 2, /* command completed successfully */
} ItsCmdResult;
89 
90 /* True if the ITS supports the GICv4 virtual LPI feature */
91 static bool its_feature_virtual(GICv3ITSState *s)
92 {
93     return s->typer & R_GITS_TYPER_VIRTUAL_MASK;
94 }
95 
96 static inline bool intid_in_lpi_range(uint32_t id)
97 {
98     return id >= GICV3_LPI_INTID_START &&
99         id < (1 << (GICD_TYPER_IDBITS + 1));
100 }
101 
102 static inline bool valid_doorbell(uint32_t id)
103 {
104     /* Doorbell fields may be an LPI, or 1023 to mean "no doorbell" */
105     return id == INTID_SPURIOUS || intid_in_lpi_range(id);
106 }
107 
108 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
109 {
110     uint64_t result = 0;
111 
112     switch (page_sz) {
113     case GITS_PAGE_SIZE_4K:
114     case GITS_PAGE_SIZE_16K:
115         result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
116         break;
117 
118     case GITS_PAGE_SIZE_64K:
119         result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
120         result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
121         break;
122 
123     default:
124         break;
125     }
126     return result;
127 }
128 
static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
                                 uint32_t idx, MemTxResult *res)
{
    /*
     * Given a TableDesc describing one of the ITS in-guest-memory
     * tables and an index into it, return the guest address
     * corresponding to that table entry.
     * If there was a memory error reading the L1 table of an
     * indirect table, *res is set accordingly, and we return -1.
     * If the L1 table entry is marked not valid, we return -1 with
     * *res set to MEMTX_OK.
     *
     * The specification defines the format of level 1 entries of a
     * 2-level table, but the format of level 2 entries and the format
     * of flat-mapped tables is IMPDEF.
     */
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t l2idx;
    uint64_t l2;
    uint32_t num_l2_entries;

    *res = MEMTX_OK;

    if (!td->indirect) {
        /* Single level table */
        return td->base_addr + idx * td->entry_sz;
    }

    /* Two level table */
    /*
     * NOTE(review): the L1 index divisor uses L1TABLE_ENTRY_SIZE, while
     * the offset within the L2 page below uses td->entry_sz. These only
     * agree when entry_sz == L1TABLE_ENTRY_SIZE; confirm this holds for
     * every table routed through here before adding a differently-sized
     * entry type.
     */
    l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);

    l2 = address_space_ldq_le(as,
                              td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
                              MEMTXATTRS_UNSPECIFIED, res);
    if (*res != MEMTX_OK) {
        return -1;
    }
    if (!(l2 & L2_TABLE_VALID_MASK)) {
        /* L1 entry not valid: no L2 page allocated for this index */
        return -1;
    }

    num_l2_entries = td->page_sz / td->entry_sz;
    /* Mask keeps bits [50:0] of the L1 entry as the L2 page address */
    return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
}
173 
174 /*
175  * Read the Collection Table entry at index @icid. On success (including
176  * successfully determining that there is no valid CTE for this index),
177  * we return MEMTX_OK and populate the CTEntry struct @cte accordingly.
178  * If there is an error reading memory then we return the error code.
179  */
180 static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte)
181 {
182     AddressSpace *as = &s->gicv3->dma_as;
183     MemTxResult res = MEMTX_OK;
184     uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, &res);
185     uint64_t cteval;
186 
187     if (entry_addr == -1) {
188         /* No L2 table entry, i.e. no valid CTE, or a memory error */
189         cte->valid = false;
190         goto out;
191     }
192 
193     cteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
194     if (res != MEMTX_OK) {
195         goto out;
196     }
197     cte->valid = FIELD_EX64(cteval, CTE, VALID);
198     cte->rdbase = FIELD_EX64(cteval, CTE, RDBASE);
199 out:
200     if (res != MEMTX_OK) {
201         trace_gicv3_its_cte_read_fault(icid);
202     } else {
203         trace_gicv3_its_cte_read(icid, cte->valid, cte->rdbase);
204     }
205     return res;
206 }
207 
208 /*
209  * Update the Interrupt Table entry at index @evinted in the table specified
210  * by the dte @dte. Returns true on success, false if there was a memory
211  * access error.
212  */
213 static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
214                        const ITEntry *ite)
215 {
216     AddressSpace *as = &s->gicv3->dma_as;
217     MemTxResult res = MEMTX_OK;
218     hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
219     uint64_t itel = 0;
220     uint32_t iteh = 0;
221 
222     trace_gicv3_its_ite_write(dte->ittaddr, eventid, ite->valid,
223                               ite->inttype, ite->intid, ite->icid,
224                               ite->vpeid, ite->doorbell);
225 
226     if (ite->valid) {
227         itel = FIELD_DP64(itel, ITE_L, VALID, 1);
228         itel = FIELD_DP64(itel, ITE_L, INTTYPE, ite->inttype);
229         itel = FIELD_DP64(itel, ITE_L, INTID, ite->intid);
230         itel = FIELD_DP64(itel, ITE_L, ICID, ite->icid);
231         itel = FIELD_DP64(itel, ITE_L, VPEID, ite->vpeid);
232         iteh = FIELD_DP32(iteh, ITE_H, DOORBELL, ite->doorbell);
233     }
234 
235     address_space_stq_le(as, iteaddr, itel, MEMTXATTRS_UNSPECIFIED, &res);
236     if (res != MEMTX_OK) {
237         return false;
238     }
239     address_space_stl_le(as, iteaddr + 8, iteh, MEMTXATTRS_UNSPECIFIED, &res);
240     return res == MEMTX_OK;
241 }
242 
/*
 * Read the Interrupt Table entry at index @eventid from the table specified
 * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry
 * struct @ite accordingly. If there is an error reading memory then we return
 * the error code.
 */
static MemTxResult get_ite(GICv3ITSState *s, uint32_t eventid,
                           const DTEntry *dte, ITEntry *ite)
{
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    uint64_t itel;
    uint32_t iteh;
    hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;

    /* The ITE is stored as an 8-byte low word followed by a 4-byte high word */
    itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
        return res;
    }

    iteh = address_space_ldl_le(as, iteaddr + 8, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
        return res;
    }

    /* Unpack the in-memory format into the host-friendly ITEntry */
    ite->valid = FIELD_EX64(itel, ITE_L, VALID);
    ite->inttype = FIELD_EX64(itel, ITE_L, INTTYPE);
    ite->intid = FIELD_EX64(itel, ITE_L, INTID);
    ite->icid = FIELD_EX64(itel, ITE_L, ICID);
    ite->vpeid = FIELD_EX64(itel, ITE_L, VPEID);
    ite->doorbell = FIELD_EX64(iteh, ITE_H, DOORBELL);
    trace_gicv3_its_ite_read(dte->ittaddr, eventid, ite->valid,
                             ite->inttype, ite->intid, ite->icid,
                             ite->vpeid, ite->doorbell);
    return MEMTX_OK;
}
281 
282 /*
283  * Read the Device Table entry at index @devid. On success (including
284  * successfully determining that there is no valid DTE for this index),
285  * we return MEMTX_OK and populate the DTEntry struct accordingly.
286  * If there is an error reading memory then we return the error code.
287  */
288 static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte)
289 {
290     MemTxResult res = MEMTX_OK;
291     AddressSpace *as = &s->gicv3->dma_as;
292     uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res);
293     uint64_t dteval;
294 
295     if (entry_addr == -1) {
296         /* No L2 table entry, i.e. no valid DTE, or a memory error */
297         dte->valid = false;
298         goto out;
299     }
300     dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
301     if (res != MEMTX_OK) {
302         goto out;
303     }
304     dte->valid = FIELD_EX64(dteval, DTE, VALID);
305     dte->size = FIELD_EX64(dteval, DTE, SIZE);
306     /* DTE word field stores bits [51:8] of the ITT address */
307     dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT;
308 out:
309     if (res != MEMTX_OK) {
310         trace_gicv3_its_dte_read_fault(devid);
311     } else {
312         trace_gicv3_its_dte_read(devid, dte->valid, dte->size, dte->ittaddr);
313     }
314     return res;
315 }
316 
317 /*
318  * This function handles the processing of following commands based on
319  * the ItsCmdType parameter passed:-
320  * 1. triggering of lpi interrupt translation via ITS INT command
321  * 2. triggering of lpi interrupt translation via gits_translater register
322  * 3. handling of ITS CLEAR command
323  * 4. handling of ITS DISCARD command
324  */
325 static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
326                                        uint32_t eventid, ItsCmdType cmd)
327 {
328     uint64_t num_eventids;
329     DTEntry dte;
330     CTEntry cte;
331     ITEntry ite;
332 
333     if (devid >= s->dt.num_entries) {
334         qemu_log_mask(LOG_GUEST_ERROR,
335                       "%s: invalid command attributes: devid %d>=%d",
336                       __func__, devid, s->dt.num_entries);
337         return CMD_CONTINUE;
338     }
339 
340     if (get_dte(s, devid, &dte) != MEMTX_OK) {
341         return CMD_STALL;
342     }
343     if (!dte.valid) {
344         qemu_log_mask(LOG_GUEST_ERROR,
345                       "%s: invalid command attributes: "
346                       "invalid dte for %d\n", __func__, devid);
347         return CMD_CONTINUE;
348     }
349 
350     num_eventids = 1ULL << (dte.size + 1);
351     if (eventid >= num_eventids) {
352         qemu_log_mask(LOG_GUEST_ERROR,
353                       "%s: invalid command attributes: eventid %d >= %"
354                       PRId64 "\n",
355                       __func__, eventid, num_eventids);
356         return CMD_CONTINUE;
357     }
358 
359     if (get_ite(s, eventid, &dte, &ite) != MEMTX_OK) {
360         return CMD_STALL;
361     }
362 
363     if (!ite.valid || ite.inttype != ITE_INTTYPE_PHYSICAL) {
364         qemu_log_mask(LOG_GUEST_ERROR,
365                       "%s: invalid command attributes: invalid ITE\n",
366                       __func__);
367         return CMD_CONTINUE;
368     }
369 
370     if (ite.icid >= s->ct.num_entries) {
371         qemu_log_mask(LOG_GUEST_ERROR,
372                       "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
373                       __func__, ite.icid);
374         return CMD_CONTINUE;
375     }
376 
377     if (get_cte(s, ite.icid, &cte) != MEMTX_OK) {
378         return CMD_STALL;
379     }
380     if (!cte.valid) {
381         qemu_log_mask(LOG_GUEST_ERROR,
382                       "%s: invalid command attributes: invalid CTE\n",
383                       __func__);
384         return CMD_CONTINUE;
385     }
386 
387     /*
388      * Current implementation only supports rdbase == procnum
389      * Hence rdbase physical address is ignored
390      */
391     if (cte.rdbase >= s->gicv3->num_cpu) {
392         return CMD_CONTINUE;
393     }
394 
395     if ((cmd == CLEAR) || (cmd == DISCARD)) {
396         gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 0);
397     } else {
398         gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 1);
399     }
400 
401     if (cmd == DISCARD) {
402         ITEntry ite = {};
403         /* remove mapping from interrupt translation table */
404         ite.valid = false;
405         return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
406     }
407     return CMD_CONTINUE_OK;
408 }
409 
410 static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt,
411                                     ItsCmdType cmd)
412 {
413     uint32_t devid, eventid;
414 
415     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
416     eventid = cmdpkt[1] & EVENTID_MASK;
417     switch (cmd) {
418     case INTERRUPT:
419         trace_gicv3_its_cmd_int(devid, eventid);
420         break;
421     case CLEAR:
422         trace_gicv3_its_cmd_clear(devid, eventid);
423         break;
424     case DISCARD:
425         trace_gicv3_its_cmd_discard(devid, eventid);
426         break;
427     default:
428         g_assert_not_reached();
429     }
430     return do_process_its_cmd(s, devid, eventid, cmd);
431 }
432 
433 static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
434                                   bool ignore_pInt)
435 {
436     uint32_t devid, eventid;
437     uint32_t pIntid = 0;
438     uint64_t num_eventids;
439     uint16_t icid = 0;
440     DTEntry dte;
441     ITEntry ite;
442 
443     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
444     eventid = cmdpkt[1] & EVENTID_MASK;
445     icid = cmdpkt[2] & ICID_MASK;
446 
447     if (ignore_pInt) {
448         pIntid = eventid;
449         trace_gicv3_its_cmd_mapi(devid, eventid, icid);
450     } else {
451         pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT;
452         trace_gicv3_its_cmd_mapti(devid, eventid, icid, pIntid);
453     }
454 
455     if (devid >= s->dt.num_entries) {
456         qemu_log_mask(LOG_GUEST_ERROR,
457                       "%s: invalid command attributes: devid %d>=%d",
458                       __func__, devid, s->dt.num_entries);
459         return CMD_CONTINUE;
460     }
461 
462     if (get_dte(s, devid, &dte) != MEMTX_OK) {
463         return CMD_STALL;
464     }
465     num_eventids = 1ULL << (dte.size + 1);
466 
467     if (icid >= s->ct.num_entries) {
468         qemu_log_mask(LOG_GUEST_ERROR,
469                       "%s: invalid ICID 0x%x >= 0x%x\n",
470                       __func__, icid, s->ct.num_entries);
471         return CMD_CONTINUE;
472     }
473 
474     if (!dte.valid) {
475         qemu_log_mask(LOG_GUEST_ERROR,
476                       "%s: no valid DTE for devid 0x%x\n", __func__, devid);
477         return CMD_CONTINUE;
478     }
479 
480     if (eventid >= num_eventids) {
481         qemu_log_mask(LOG_GUEST_ERROR,
482                       "%s: invalid event ID 0x%x >= 0x%" PRIx64 "\n",
483                       __func__, eventid, num_eventids);
484         return CMD_CONTINUE;
485     }
486 
487     if (!intid_in_lpi_range(pIntid)) {
488         qemu_log_mask(LOG_GUEST_ERROR,
489                       "%s: invalid interrupt ID 0x%x\n", __func__, pIntid);
490         return CMD_CONTINUE;
491     }
492 
493     /* add ite entry to interrupt translation table */
494     ite.valid = true;
495     ite.inttype = ITE_INTTYPE_PHYSICAL;
496     ite.intid = pIntid;
497     ite.icid = icid;
498     ite.doorbell = INTID_SPURIOUS;
499     ite.vpeid = 0;
500     return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
501 }
502 
503 static ItsCmdResult process_vmapti(GICv3ITSState *s, const uint64_t *cmdpkt,
504                                    bool ignore_vintid)
505 {
506     uint32_t devid, eventid, vintid, doorbell, vpeid;
507     uint32_t num_eventids;
508     DTEntry dte;
509     ITEntry ite;
510 
511     if (!its_feature_virtual(s)) {
512         return CMD_CONTINUE;
513     }
514 
515     devid = FIELD_EX64(cmdpkt[0], VMAPTI_0, DEVICEID);
516     eventid = FIELD_EX64(cmdpkt[1], VMAPTI_1, EVENTID);
517     vpeid = FIELD_EX64(cmdpkt[1], VMAPTI_1, VPEID);
518     doorbell = FIELD_EX64(cmdpkt[2], VMAPTI_2, DOORBELL);
519     if (ignore_vintid) {
520         vintid = eventid;
521         trace_gicv3_its_cmd_vmapi(devid, eventid, vpeid, doorbell);
522     } else {
523         vintid = FIELD_EX64(cmdpkt[2], VMAPTI_2, VINTID);
524         trace_gicv3_its_cmd_vmapti(devid, eventid, vpeid, vintid, doorbell);
525     }
526 
527     if (devid >= s->dt.num_entries) {
528         qemu_log_mask(LOG_GUEST_ERROR,
529                       "%s: invalid DeviceID 0x%x (must be less than 0x%x)\n",
530                       __func__, devid, s->dt.num_entries);
531         return CMD_CONTINUE;
532     }
533 
534     if (get_dte(s, devid, &dte) != MEMTX_OK) {
535         return CMD_STALL;
536     }
537 
538     if (!dte.valid) {
539         qemu_log_mask(LOG_GUEST_ERROR,
540                       "%s: no entry in device table for DeviceID 0x%x\n",
541                       __func__, devid);
542         return CMD_CONTINUE;
543     }
544 
545     num_eventids = 1ULL << (dte.size + 1);
546 
547     if (eventid >= num_eventids) {
548         qemu_log_mask(LOG_GUEST_ERROR,
549                       "%s: EventID 0x%x too large for DeviceID 0x%x "
550                       "(must be less than 0x%x)\n",
551                       __func__, eventid, devid, num_eventids);
552         return CMD_CONTINUE;
553     }
554     if (!intid_in_lpi_range(vintid)) {
555         qemu_log_mask(LOG_GUEST_ERROR,
556                       "%s: VIntID 0x%x not a valid LPI\n",
557                       __func__, vintid);
558         return CMD_CONTINUE;
559     }
560     if (!valid_doorbell(doorbell)) {
561         qemu_log_mask(LOG_GUEST_ERROR,
562                       "%s: Doorbell %d not 1023 and not a valid LPI\n",
563                       __func__, doorbell);
564         return CMD_CONTINUE;
565     }
566     if (vpeid >= s->vpet.num_entries) {
567         qemu_log_mask(LOG_GUEST_ERROR,
568                       "%s: VPEID 0x%x out of range (must be less than 0x%x)\n",
569                       __func__, vpeid, s->vpet.num_entries);
570         return CMD_CONTINUE;
571     }
572     /* add ite entry to interrupt translation table */
573     ite.valid = true;
574     ite.inttype = ITE_INTTYPE_VIRTUAL;
575     ite.intid = vintid;
576     ite.icid = 0;
577     ite.doorbell = doorbell;
578     ite.vpeid = vpeid;
579     return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
580 }
581 
582 /*
583  * Update the Collection Table entry for @icid to @cte. Returns true
584  * on success, false if there was a memory access error.
585  */
586 static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte)
587 {
588     AddressSpace *as = &s->gicv3->dma_as;
589     uint64_t entry_addr;
590     uint64_t cteval = 0;
591     MemTxResult res = MEMTX_OK;
592 
593     trace_gicv3_its_cte_write(icid, cte->valid, cte->rdbase);
594 
595     if (cte->valid) {
596         /* add mapping entry to collection table */
597         cteval = FIELD_DP64(cteval, CTE, VALID, 1);
598         cteval = FIELD_DP64(cteval, CTE, RDBASE, cte->rdbase);
599     }
600 
601     entry_addr = table_entry_addr(s, &s->ct, icid, &res);
602     if (res != MEMTX_OK) {
603         /* memory access error: stall */
604         return false;
605     }
606     if (entry_addr == -1) {
607         /* No L2 table for this index: discard write and continue */
608         return true;
609     }
610 
611     address_space_stq_le(as, entry_addr, cteval, MEMTXATTRS_UNSPECIFIED, &res);
612     return res == MEMTX_OK;
613 }
614 
615 static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
616 {
617     uint16_t icid;
618     CTEntry cte;
619 
620     icid = cmdpkt[2] & ICID_MASK;
621     cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
622     if (cte.valid) {
623         cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
624         cte.rdbase &= RDBASE_PROCNUM_MASK;
625     } else {
626         cte.rdbase = 0;
627     }
628     trace_gicv3_its_cmd_mapc(icid, cte.rdbase, cte.valid);
629 
630     if (icid >= s->ct.num_entries) {
631         qemu_log_mask(LOG_GUEST_ERROR, "ITS MAPC: invalid ICID 0x%x\n", icid);
632         return CMD_CONTINUE;
633     }
634     if (cte.valid && cte.rdbase >= s->gicv3->num_cpu) {
635         qemu_log_mask(LOG_GUEST_ERROR,
636                       "ITS MAPC: invalid RDBASE %u\n", cte.rdbase);
637         return CMD_CONTINUE;
638     }
639 
640     return update_cte(s, icid, &cte) ? CMD_CONTINUE_OK : CMD_STALL;
641 }
642 
643 /*
644  * Update the Device Table entry for @devid to @dte. Returns true
645  * on success, false if there was a memory access error.
646  */
647 static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte)
648 {
649     AddressSpace *as = &s->gicv3->dma_as;
650     uint64_t entry_addr;
651     uint64_t dteval = 0;
652     MemTxResult res = MEMTX_OK;
653 
654     trace_gicv3_its_dte_write(devid, dte->valid, dte->size, dte->ittaddr);
655 
656     if (dte->valid) {
657         /* add mapping entry to device table */
658         dteval = FIELD_DP64(dteval, DTE, VALID, 1);
659         dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size);
660         dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr);
661     }
662 
663     entry_addr = table_entry_addr(s, &s->dt, devid, &res);
664     if (res != MEMTX_OK) {
665         /* memory access error: stall */
666         return false;
667     }
668     if (entry_addr == -1) {
669         /* No L2 table for this index: discard write and continue */
670         return true;
671     }
672     address_space_stq_le(as, entry_addr, dteval, MEMTXATTRS_UNSPECIFIED, &res);
673     return res == MEMTX_OK;
674 }
675 
676 static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
677 {
678     uint32_t devid;
679     DTEntry dte;
680 
681     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
682     dte.size = cmdpkt[1] & SIZE_MASK;
683     dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
684     dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
685 
686     trace_gicv3_its_cmd_mapd(devid, dte.size, dte.ittaddr, dte.valid);
687 
688     if (devid >= s->dt.num_entries) {
689         qemu_log_mask(LOG_GUEST_ERROR,
690                       "ITS MAPD: invalid device ID field 0x%x >= 0x%x\n",
691                       devid, s->dt.num_entries);
692         return CMD_CONTINUE;
693     }
694 
695     if (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
696         qemu_log_mask(LOG_GUEST_ERROR,
697                       "ITS MAPD: invalid size %d\n", dte.size);
698         return CMD_CONTINUE;
699     }
700 
701     return update_dte(s, devid, &dte) ? CMD_CONTINUE_OK : CMD_STALL;
702 }
703 
704 static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
705 {
706     uint64_t rd1, rd2;
707 
708     rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1);
709     rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2);
710 
711     trace_gicv3_its_cmd_movall(rd1, rd2);
712 
713     if (rd1 >= s->gicv3->num_cpu) {
714         qemu_log_mask(LOG_GUEST_ERROR,
715                       "%s: RDBASE1 %" PRId64
716                       " out of range (must be less than %d)\n",
717                       __func__, rd1, s->gicv3->num_cpu);
718         return CMD_CONTINUE;
719     }
720     if (rd2 >= s->gicv3->num_cpu) {
721         qemu_log_mask(LOG_GUEST_ERROR,
722                       "%s: RDBASE2 %" PRId64
723                       " out of range (must be less than %d)\n",
724                       __func__, rd2, s->gicv3->num_cpu);
725         return CMD_CONTINUE;
726     }
727 
728     if (rd1 == rd2) {
729         /* Move to same target must succeed as a no-op */
730         return CMD_CONTINUE_OK;
731     }
732 
733     /* Move all pending LPIs from redistributor 1 to redistributor 2 */
734     gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]);
735 
736     return CMD_CONTINUE_OK;
737 }
738 
739 static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
740 {
741     uint32_t devid, eventid;
742     uint16_t new_icid;
743     uint64_t num_eventids;
744     DTEntry dte;
745     CTEntry old_cte, new_cte;
746     ITEntry old_ite;
747 
748     devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
749     eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
750     new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID);
751 
752     trace_gicv3_its_cmd_movi(devid, eventid, new_icid);
753 
754     if (devid >= s->dt.num_entries) {
755         qemu_log_mask(LOG_GUEST_ERROR,
756                       "%s: invalid command attributes: devid %d>=%d",
757                       __func__, devid, s->dt.num_entries);
758         return CMD_CONTINUE;
759     }
760     if (get_dte(s, devid, &dte) != MEMTX_OK) {
761         return CMD_STALL;
762     }
763 
764     if (!dte.valid) {
765         qemu_log_mask(LOG_GUEST_ERROR,
766                       "%s: invalid command attributes: "
767                       "invalid dte for %d\n", __func__, devid);
768         return CMD_CONTINUE;
769     }
770 
771     num_eventids = 1ULL << (dte.size + 1);
772     if (eventid >= num_eventids) {
773         qemu_log_mask(LOG_GUEST_ERROR,
774                       "%s: invalid command attributes: eventid %d >= %"
775                       PRId64 "\n",
776                       __func__, eventid, num_eventids);
777         return CMD_CONTINUE;
778     }
779 
780     if (get_ite(s, eventid, &dte, &old_ite) != MEMTX_OK) {
781         return CMD_STALL;
782     }
783 
784     if (!old_ite.valid || old_ite.inttype != ITE_INTTYPE_PHYSICAL) {
785         qemu_log_mask(LOG_GUEST_ERROR,
786                       "%s: invalid command attributes: invalid ITE\n",
787                       __func__);
788         return CMD_CONTINUE;
789     }
790 
791     if (old_ite.icid >= s->ct.num_entries) {
792         qemu_log_mask(LOG_GUEST_ERROR,
793                       "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
794                       __func__, old_ite.icid);
795         return CMD_CONTINUE;
796     }
797 
798     if (new_icid >= s->ct.num_entries) {
799         qemu_log_mask(LOG_GUEST_ERROR,
800                       "%s: invalid command attributes: ICID 0x%x\n",
801                       __func__, new_icid);
802         return CMD_CONTINUE;
803     }
804 
805     if (get_cte(s, old_ite.icid, &old_cte) != MEMTX_OK) {
806         return CMD_STALL;
807     }
808     if (!old_cte.valid) {
809         qemu_log_mask(LOG_GUEST_ERROR,
810                       "%s: invalid command attributes: "
811                       "invalid CTE for old ICID 0x%x\n",
812                       __func__, old_ite.icid);
813         return CMD_CONTINUE;
814     }
815 
816     if (get_cte(s, new_icid, &new_cte) != MEMTX_OK) {
817         return CMD_STALL;
818     }
819     if (!new_cte.valid) {
820         qemu_log_mask(LOG_GUEST_ERROR,
821                       "%s: invalid command attributes: "
822                       "invalid CTE for new ICID 0x%x\n",
823                       __func__, new_icid);
824         return CMD_CONTINUE;
825     }
826 
827     if (old_cte.rdbase >= s->gicv3->num_cpu) {
828         qemu_log_mask(LOG_GUEST_ERROR,
829                       "%s: CTE has invalid rdbase 0x%x\n",
830                       __func__, old_cte.rdbase);
831         return CMD_CONTINUE;
832     }
833 
834     if (new_cte.rdbase >= s->gicv3->num_cpu) {
835         qemu_log_mask(LOG_GUEST_ERROR,
836                       "%s: CTE has invalid rdbase 0x%x\n",
837                       __func__, new_cte.rdbase);
838         return CMD_CONTINUE;
839     }
840 
841     if (old_cte.rdbase != new_cte.rdbase) {
842         /* Move the LPI from the old redistributor to the new one */
843         gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase],
844                              &s->gicv3->cpu[new_cte.rdbase],
845                              old_ite.intid);
846     }
847 
848     /* Update the ICID field in the interrupt translation table entry */
849     old_ite.icid = new_icid;
850     return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE_OK : CMD_STALL;
851 }
852 
/*
 * Update the vPE Table entry at index @vpeid with the entry @vte.
 * Returns true on success, false if there was a memory access error.
 */
static bool update_vte(GICv3ITSState *s, uint32_t vpeid, const VTEntry *vte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr;
    uint64_t vteval = 0;
    MemTxResult res = MEMTX_OK;

    trace_gicv3_its_vte_write(vpeid, vte->valid, vte->vptsize, vte->vptaddr,
                              vte->rdbase);

    if (vte->valid) {
        /* Pack the unpacked VTEntry into the in-memory VTE format */
        vteval = FIELD_DP64(vteval, VTE, VALID, 1);
        vteval = FIELD_DP64(vteval, VTE, VPTSIZE, vte->vptsize);
        vteval = FIELD_DP64(vteval, VTE, VPTADDR, vte->vptaddr);
        vteval = FIELD_DP64(vteval, VTE, RDBASE, vte->rdbase);
    }

    entry_addr = table_entry_addr(s, &s->vpet, vpeid, &res);
    if (res != MEMTX_OK) {
        /* memory access error: stall */
        return false;
    }
    if (entry_addr == -1) {
        /* No L2 table for this index: discard write and continue */
        return true;
    }
    address_space_stq_le(as, entry_addr, vteval, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}
885 
/*
 * Handle the VMAPP command: map a vPE ID to a target redistributor
 * and a virtual LPI Pending Table (GICv4 only).
 */
static ItsCmdResult process_vmapp(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    VTEntry vte;
    uint32_t vpeid;

    if (!its_feature_virtual(s)) {
        /* VMAPP is only valid when GICv4 virtual LPIs are supported */
        return CMD_CONTINUE;
    }

    vpeid = FIELD_EX64(cmdpkt[1], VMAPP_1, VPEID);
    vte.rdbase = FIELD_EX64(cmdpkt[2], VMAPP_2, RDBASE);
    vte.valid = FIELD_EX64(cmdpkt[2], VMAPP_2, V);
    vte.vptsize = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTSIZE);
    vte.vptaddr = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTADDR);

    trace_gicv3_its_cmd_vmapp(vpeid, vte.rdbase, vte.valid,
                              vte.vptaddr, vte.vptsize);

    /*
     * For GICv4.0 the VPT_size field is only 5 bits, whereas we
     * define our field macros to include the full GICv4.1 8 bits.
     * The range check on VPT_size will catch the cases where
     * the guest set the RES0-in-GICv4.0 bits [7:6].
     */
    if (vte.vptsize > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid VPT_size 0x%x\n", __func__, vte.vptsize);
        return CMD_CONTINUE;
    }

    /* rdbase is only meaningful (and only range-checked) for a valid entry */
    if (vte.valid && vte.rdbase >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid rdbase 0x%x\n", __func__, vte.rdbase);
        return CMD_CONTINUE;
    }

    if (vpeid >= s->vpet.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: VPEID 0x%x out of range (must be less than 0x%x)\n",
                      __func__, vpeid, s->vpet.num_entries);
        return CMD_CONTINUE;
    }

    return update_vte(s, vpeid, &vte) ? CMD_CONTINUE_OK : CMD_STALL;
}
931 
932 /*
933  * Current implementation blocks until all
934  * commands are processed
935  */
936 static void process_cmdq(GICv3ITSState *s)
937 {
938     uint32_t wr_offset = 0;
939     uint32_t rd_offset = 0;
940     uint32_t cq_offset = 0;
941     AddressSpace *as = &s->gicv3->dma_as;
942     uint8_t cmd;
943     int i;
944 
945     if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
946         return;
947     }
948 
949     wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
950 
951     if (wr_offset >= s->cq.num_entries) {
952         qemu_log_mask(LOG_GUEST_ERROR,
953                       "%s: invalid write offset "
954                       "%d\n", __func__, wr_offset);
955         return;
956     }
957 
958     rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
959 
960     if (rd_offset >= s->cq.num_entries) {
961         qemu_log_mask(LOG_GUEST_ERROR,
962                       "%s: invalid read offset "
963                       "%d\n", __func__, rd_offset);
964         return;
965     }
966 
967     while (wr_offset != rd_offset) {
968         ItsCmdResult result = CMD_CONTINUE_OK;
969         void *hostmem;
970         hwaddr buflen;
971         uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];
972 
973         cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
974 
975         buflen = GITS_CMDQ_ENTRY_SIZE;
976         hostmem = address_space_map(as, s->cq.base_addr + cq_offset,
977                                     &buflen, false, MEMTXATTRS_UNSPECIFIED);
978         if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) {
979             if (hostmem) {
980                 address_space_unmap(as, hostmem, buflen, false, 0);
981             }
982             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
983             qemu_log_mask(LOG_GUEST_ERROR,
984                           "%s: could not read command at 0x%" PRIx64 "\n",
985                           __func__, s->cq.base_addr + cq_offset);
986             break;
987         }
988         for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) {
989             cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t));
990         }
991         address_space_unmap(as, hostmem, buflen, false, 0);
992 
993         cmd = cmdpkt[0] & CMD_MASK;
994 
995         trace_gicv3_its_process_command(rd_offset, cmd);
996 
997         switch (cmd) {
998         case GITS_CMD_INT:
999             result = process_its_cmd(s, cmdpkt, INTERRUPT);
1000             break;
1001         case GITS_CMD_CLEAR:
1002             result = process_its_cmd(s, cmdpkt, CLEAR);
1003             break;
1004         case GITS_CMD_SYNC:
1005             /*
1006              * Current implementation makes a blocking synchronous call
1007              * for every command issued earlier, hence the internal state
1008              * is already consistent by the time SYNC command is executed.
1009              * Hence no further processing is required for SYNC command.
1010              */
1011             trace_gicv3_its_cmd_sync();
1012             break;
1013         case GITS_CMD_MAPD:
1014             result = process_mapd(s, cmdpkt);
1015             break;
1016         case GITS_CMD_MAPC:
1017             result = process_mapc(s, cmdpkt);
1018             break;
1019         case GITS_CMD_MAPTI:
1020             result = process_mapti(s, cmdpkt, false);
1021             break;
1022         case GITS_CMD_MAPI:
1023             result = process_mapti(s, cmdpkt, true);
1024             break;
1025         case GITS_CMD_DISCARD:
1026             result = process_its_cmd(s, cmdpkt, DISCARD);
1027             break;
1028         case GITS_CMD_INV:
1029         case GITS_CMD_INVALL:
1030             /*
1031              * Current implementation doesn't cache any ITS tables,
1032              * but the calculated lpi priority information. We only
1033              * need to trigger lpi priority re-calculation to be in
1034              * sync with LPI config table or pending table changes.
1035              */
1036             trace_gicv3_its_cmd_inv();
1037             for (i = 0; i < s->gicv3->num_cpu; i++) {
1038                 gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
1039             }
1040             break;
1041         case GITS_CMD_MOVI:
1042             result = process_movi(s, cmdpkt);
1043             break;
1044         case GITS_CMD_MOVALL:
1045             result = process_movall(s, cmdpkt);
1046             break;
1047         case GITS_CMD_VMAPTI:
1048             result = process_vmapti(s, cmdpkt, false);
1049             break;
1050         case GITS_CMD_VMAPI:
1051             result = process_vmapti(s, cmdpkt, true);
1052             break;
1053         case GITS_CMD_VMAPP:
1054             result = process_vmapp(s, cmdpkt);
1055             break;
1056         default:
1057             trace_gicv3_its_cmd_unknown(cmd);
1058             break;
1059         }
1060         if (result != CMD_STALL) {
1061             /* CMD_CONTINUE or CMD_CONTINUE_OK */
1062             rd_offset++;
1063             rd_offset %= s->cq.num_entries;
1064             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
1065         } else {
1066             /* CMD_STALL */
1067             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
1068             qemu_log_mask(LOG_GUEST_ERROR,
1069                           "%s: 0x%x cmd processing failed, stalling\n",
1070                           __func__, cmd);
1071             break;
1072         }
1073     }
1074 }
1075 
1076 /*
1077  * This function extracts the ITS Device and Collection table specific
1078  * parameters (like base_addr, size etc) from GITS_BASER register.
1079  * It is called during ITS enable and also during post_load migration
1080  */
1081 static void extract_table_params(GICv3ITSState *s)
1082 {
1083     uint16_t num_pages = 0;
1084     uint8_t  page_sz_type;
1085     uint8_t type;
1086     uint32_t page_sz = 0;
1087     uint64_t value;
1088 
1089     for (int i = 0; i < 8; i++) {
1090         TableDesc *td;
1091         int idbits;
1092 
1093         value = s->baser[i];
1094 
1095         if (!value) {
1096             continue;
1097         }
1098 
1099         page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);
1100 
1101         switch (page_sz_type) {
1102         case 0:
1103             page_sz = GITS_PAGE_SIZE_4K;
1104             break;
1105 
1106         case 1:
1107             page_sz = GITS_PAGE_SIZE_16K;
1108             break;
1109 
1110         case 2:
1111         case 3:
1112             page_sz = GITS_PAGE_SIZE_64K;
1113             break;
1114 
1115         default:
1116             g_assert_not_reached();
1117         }
1118 
1119         num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;
1120 
1121         type = FIELD_EX64(value, GITS_BASER, TYPE);
1122 
1123         switch (type) {
1124         case GITS_BASER_TYPE_DEVICE:
1125             td = &s->dt;
1126             idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
1127             break;
1128         case GITS_BASER_TYPE_COLLECTION:
1129             td = &s->ct;
1130             if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
1131                 idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
1132             } else {
1133                 /* 16-bit CollectionId supported when CIL == 0 */
1134                 idbits = 16;
1135             }
1136             break;
1137         case GITS_BASER_TYPE_VPE:
1138             td = &s->vpet;
1139             /*
1140              * For QEMU vPEIDs are always 16 bits. (GICv4.1 allows an
1141              * implementation to implement fewer bits and report this
1142              * via GICD_TYPER2.)
1143              */
1144             idbits = 16;
1145             break;
1146         default:
1147             /*
1148              * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
1149              * ensures we will only see type values corresponding to
1150              * the values set up in gicv3_its_reset().
1151              */
1152             g_assert_not_reached();
1153         }
1154 
1155         memset(td, 0, sizeof(*td));
1156         /*
1157          * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
1158          * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
1159          * do not have a special case where the GITS_BASER<n>.Valid bit is 0
1160          * for the register corresponding to the Collection table but we
1161          * still have to process interrupts using non-memory-backed
1162          * Collection table entries.)
1163          * The specification makes it UNPREDICTABLE to enable the ITS without
1164          * marking each BASER<n> as valid. We choose to handle these as if
1165          * the table was zero-sized, so commands using the table will fail
1166          * and interrupts requested via GITS_TRANSLATER writes will be ignored.
1167          * This happens automatically by leaving the num_entries field at
1168          * zero, which will be caught by the bounds checks we have before
1169          * every table lookup anyway.
1170          */
1171         if (!FIELD_EX64(value, GITS_BASER, VALID)) {
1172             continue;
1173         }
1174         td->page_sz = page_sz;
1175         td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
1176         td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
1177         td->base_addr = baser_base_addr(value, page_sz);
1178         if (!td->indirect) {
1179             td->num_entries = (num_pages * page_sz) / td->entry_sz;
1180         } else {
1181             td->num_entries = (((num_pages * page_sz) /
1182                                   L1TABLE_ENTRY_SIZE) *
1183                                  (page_sz / td->entry_sz));
1184         }
1185         td->num_entries = MIN(td->num_entries, 1ULL << idbits);
1186     }
1187 }
1188 
1189 static void extract_cmdq_params(GICv3ITSState *s)
1190 {
1191     uint16_t num_pages = 0;
1192     uint64_t value = s->cbaser;
1193 
1194     num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
1195 
1196     memset(&s->cq, 0 , sizeof(s->cq));
1197 
1198     if (FIELD_EX64(value, GITS_CBASER, VALID)) {
1199         s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
1200                              GITS_CMDQ_ENTRY_SIZE;
1201         s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
1202         s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
1203     }
1204 }
1205 
1206 static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
1207                                               uint64_t *data, unsigned size,
1208                                               MemTxAttrs attrs)
1209 {
1210     /*
1211      * GITS_TRANSLATER is write-only, and all other addresses
1212      * in the interrupt translation space frame are RES0.
1213      */
1214     *data = 0;
1215     return MEMTX_OK;
1216 }
1217 
1218 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
1219                                                uint64_t data, unsigned size,
1220                                                MemTxAttrs attrs)
1221 {
1222     GICv3ITSState *s = (GICv3ITSState *)opaque;
1223     bool result = true;
1224 
1225     trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);
1226 
1227     switch (offset) {
1228     case GITS_TRANSLATER:
1229         if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1230             result = do_process_its_cmd(s, attrs.requester_id, data, NONE);
1231         }
1232         break;
1233     default:
1234         break;
1235     }
1236 
1237     if (result) {
1238         return MEMTX_OK;
1239     } else {
1240         return MEMTX_ERROR;
1241     }
1242 }
1243 
/*
 * Handle a 32-bit write to the ITS control frame.
 * Returns true if @offset matched a register (even when the write was
 * ignored because the register is RO in the current state), false if
 * the offset is unknown so the caller can log a guest error.
 */
static bool its_writel(GICv3ITSState *s, hwaddr offset,
                              uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        if (value & R_GITS_CTLR_ENABLED_MASK) {
            /*
             * Enabling the ITS latches the table and command-queue
             * parameters and drains any already-queued commands.
             */
            s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
            extract_table_params(s);
            extract_cmdq_params(s);
            process_cmdq(s);
        } else {
            s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 0, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CBASER + 4:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 32, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = deposit64(s->cwriter, 0, 32,
                               (value & ~R_GITS_CWRITER_RETRY_MASK));
        /* Advancing CWRITER past CREADR hands new commands to the ITS */
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CWRITER + 4:
        s->cwriter = deposit64(s->cwriter, 32, 32, value);
        break;
    case GITS_CREADR:
        /* CREADR is writable only when GICD_CTLR.DS is set */
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 0, 32,
                                  (value & ~R_GITS_CREADR_STALLED_MASK));
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_CREADR + 4:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 32, 32, value);
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;

            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            /* offset & 7 selects the upper word of the 64-bit register */
            if (offset & 7) {
                value <<= 32;
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
                s->baser[index] |= value;
            } else {
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
                s->baser[index] |= value;
            }
        }
        break;
    case GITS_IIDR:
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}
1349 
1350 static bool its_readl(GICv3ITSState *s, hwaddr offset,
1351                              uint64_t *data, MemTxAttrs attrs)
1352 {
1353     bool result = true;
1354     int index;
1355 
1356     switch (offset) {
1357     case GITS_CTLR:
1358         *data = s->ctlr;
1359         break;
1360     case GITS_IIDR:
1361         *data = gicv3_iidr();
1362         break;
1363     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1364         /* ID registers */
1365         *data = gicv3_idreg(offset - GITS_IDREGS, GICV3_PIDR0_ITS);
1366         break;
1367     case GITS_TYPER:
1368         *data = extract64(s->typer, 0, 32);
1369         break;
1370     case GITS_TYPER + 4:
1371         *data = extract64(s->typer, 32, 32);
1372         break;
1373     case GITS_CBASER:
1374         *data = extract64(s->cbaser, 0, 32);
1375         break;
1376     case GITS_CBASER + 4:
1377         *data = extract64(s->cbaser, 32, 32);
1378         break;
1379     case GITS_CREADR:
1380         *data = extract64(s->creadr, 0, 32);
1381         break;
1382     case GITS_CREADR + 4:
1383         *data = extract64(s->creadr, 32, 32);
1384         break;
1385     case GITS_CWRITER:
1386         *data = extract64(s->cwriter, 0, 32);
1387         break;
1388     case GITS_CWRITER + 4:
1389         *data = extract64(s->cwriter, 32, 32);
1390         break;
1391     case GITS_BASER ... GITS_BASER + 0x3f:
1392         index = (offset - GITS_BASER) / 8;
1393         if (offset & 7) {
1394             *data = extract64(s->baser[index], 32, 32);
1395         } else {
1396             *data = extract64(s->baser[index], 0, 32);
1397         }
1398         break;
1399     default:
1400         result = false;
1401         break;
1402     }
1403     return result;
1404 }
1405 
/*
 * Handle a 64-bit write to the ITS control frame.
 * Returns true if @offset matched a register (even when the write was
 * ignored because the register is RO in the current state), false if
 * the offset is unknown.
 */
static bool its_writell(GICv3ITSState *s, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;
            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            /* Only the bits outside GITS_BASER_RO_MASK are writable */
            s->baser[index] &= GITS_BASER_RO_MASK;
            s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = value;
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
        /* Advancing CWRITER past CREADR hands new commands to the ITS */
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CREADR:
        /* CREADR is writable only when GICD_CTLR.DS is set */
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_TYPER:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}
1466 
1467 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1468                               uint64_t *data, MemTxAttrs attrs)
1469 {
1470     bool result = true;
1471     int index;
1472 
1473     switch (offset) {
1474     case GITS_TYPER:
1475         *data = s->typer;
1476         break;
1477     case GITS_BASER ... GITS_BASER + 0x3f:
1478         index = (offset - GITS_BASER) / 8;
1479         *data = s->baser[index];
1480         break;
1481     case GITS_CBASER:
1482         *data = s->cbaser;
1483         break;
1484     case GITS_CREADR:
1485         *data = s->creadr;
1486         break;
1487     case GITS_CWRITER:
1488         *data = s->cwriter;
1489         break;
1490     default:
1491         result = false;
1492         break;
1493     }
1494     return result;
1495 }
1496 
1497 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1498                                   unsigned size, MemTxAttrs attrs)
1499 {
1500     GICv3ITSState *s = (GICv3ITSState *)opaque;
1501     bool result;
1502 
1503     switch (size) {
1504     case 4:
1505         result = its_readl(s, offset, data, attrs);
1506         break;
1507     case 8:
1508         result = its_readll(s, offset, data, attrs);
1509         break;
1510     default:
1511         result = false;
1512         break;
1513     }
1514 
1515     if (!result) {
1516         qemu_log_mask(LOG_GUEST_ERROR,
1517                       "%s: invalid guest read at offset " TARGET_FMT_plx
1518                       " size %u\n", __func__, offset, size);
1519         trace_gicv3_its_badread(offset, size);
1520         /*
1521          * The spec requires that reserved registers are RAZ/WI;
1522          * so use false returns from leaf functions as a way to
1523          * trigger the guest-error logging but don't return it to
1524          * the caller, or we'll cause a spurious guest data abort.
1525          */
1526         *data = 0;
1527     } else {
1528         trace_gicv3_its_read(offset, *data, size);
1529     }
1530     return MEMTX_OK;
1531 }
1532 
1533 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1534                                    unsigned size, MemTxAttrs attrs)
1535 {
1536     GICv3ITSState *s = (GICv3ITSState *)opaque;
1537     bool result;
1538 
1539     switch (size) {
1540     case 4:
1541         result = its_writel(s, offset, data, attrs);
1542         break;
1543     case 8:
1544         result = its_writell(s, offset, data, attrs);
1545         break;
1546     default:
1547         result = false;
1548         break;
1549     }
1550 
1551     if (!result) {
1552         qemu_log_mask(LOG_GUEST_ERROR,
1553                       "%s: invalid guest write at offset " TARGET_FMT_plx
1554                       " size %u\n", __func__, offset, size);
1555         trace_gicv3_its_badwrite(offset, data, size);
1556         /*
1557          * The spec requires that reserved registers are RAZ/WI;
1558          * so use false returns from leaf functions as a way to
1559          * trigger the guest-error logging but don't return it to
1560          * the caller, or we'll cause a spurious guest data abort.
1561          */
1562     } else {
1563         trace_gicv3_its_write(offset, data, size);
1564     }
1565     return MEMTX_OK;
1566 }
1567 
/*
 * MMIO ops for the ITS control frame (the GITS_* registers).
 * Only 32-bit and 64-bit accesses are accepted (.valid); the handlers
 * are implemented at the same widths (.impl).
 */
static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1577 
/*
 * MMIO ops for the translation frame (GITS_TRANSLATER). Reads are
 * RAZ (see gicv3_its_translation_read); 16-bit and 32-bit writes are
 * accepted and handled by gicv3_its_translation_write.
 */
static const MemoryRegionOps gicv3_its_translation_ops = {
    .read_with_attrs = gicv3_its_translation_read,
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1587 
1588 static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
1589 {
1590     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1591     int i;
1592 
1593     for (i = 0; i < s->gicv3->num_cpu; i++) {
1594         if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
1595             error_setg(errp, "Physical LPI not supported by CPU %d", i);
1596             return;
1597         }
1598     }
1599 
1600     gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
1601 
1602     /* set the ITS default features supported */
1603     s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
1604     s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
1605                           ITS_ITT_ENTRY_SIZE - 1);
1606     s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
1607     s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
1608     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
1609     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
1610 }
1611 
/*
 * Device reset: restore the architectural reset values of GITS_CTLR
 * and the GITS_BASER<n> registers on top of the common-class reset.
 */
static void gicv3_its_reset(DeviceState *dev)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);

    /* Chain up to the common-class reset first */
    c->parent_reset(dev);

    /* Quiescent bit reset to 1 */
    s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);

    /*
     * setting GITS_BASER0.Type = 0b001 (Device)
     *         GITS_BASER1.Type = 0b100 (Collection Table)
     *         GITS_BASER2.Type = 0b010 (vPE) for GICv4 and later
     *         GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented)
     *         GITS_BASER<0,1>.Page_Size = 64KB
     * and default translation table entry size to 16 bytes
     */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_DEVICE);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
                             GITS_DTE_SIZE - 1);

    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_COLLECTION);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
                             GITS_CTE_SIZE - 1);

    /* The vPE table register is only present when GICv4 is enabled */
    if (its_feature_virtual(s)) {
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, TYPE,
                                 GITS_BASER_TYPE_VPE);
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, PAGESIZE,
                                 GITS_BASER_PAGESIZE_64K);
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, ENTRYSIZE,
                                 GITS_VPE_SIZE - 1);
    }
}
1653 
1654 static void gicv3_its_post_load(GICv3ITSState *s)
1655 {
1656     if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1657         extract_table_params(s);
1658         extract_cmdq_params(s);
1659     }
1660 }
1661 
static Property gicv3_its_props[] = {
    /* Link property wiring this ITS to its parent GICv3 interrupt controller */
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};
1667 
1668 static void gicv3_its_class_init(ObjectClass *klass, void *data)
1669 {
1670     DeviceClass *dc = DEVICE_CLASS(klass);
1671     GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
1672     GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);
1673 
1674     dc->realize = gicv3_arm_its_realize;
1675     device_class_set_props(dc, gicv3_its_props);
1676     device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
1677     icc->post_load = gicv3_its_post_load;
1678 }
1679 
/* Emulated ITS device type, derived from the common ITS base class */
static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};
1687 
/* Register the ITS QOM type at module-init time */
static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)
1694