xref: /openbmc/qemu/hw/intc/arm_gicv3_its.c (revision 37094b6d)
1 /*
2  * ITS emulation for a GICv3-based system
3  *
4  * Copyright Linaro.org 2021
5  *
6  * Authors:
7  *  Shashi Mallela <shashi.mallela@linaro.org>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10  * option) any later version.  See the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qemu/log.h"
16 #include "trace.h"
17 #include "hw/qdev-properties.h"
18 #include "hw/intc/arm_gicv3_its_common.h"
19 #include "gicv3_internal.h"
20 #include "qom/object.h"
21 #include "qapi/error.h"
22 
23 typedef struct GICv3ITSClass GICv3ITSClass;
24 /* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
25 DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
26                      ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)
27 
28 struct GICv3ITSClass {
29     GICv3ITSCommonClass parent_class;
30     void (*parent_reset)(DeviceState *dev);
31 };
32 
33 /*
34  * This is an internal enum used to distinguish between an LPI triggered
35  * via the command queue and an LPI triggered via a GITS_TRANSLATER write.
36  */
37 typedef enum ItsCmdType {
38     NONE = 0, /* internal indication for GITS_TRANSLATER write */
39     CLEAR = 1,
40     DISCARD = 2,
41     INTERRUPT = 3,
42 } ItsCmdType;
43 
44 typedef struct DTEntry {
45     bool valid;
46     unsigned size;
47     uint64_t ittaddr;
48 } DTEntry;
49 
50 typedef struct CTEntry {
51     bool valid;
52     uint32_t rdbase;
53 } CTEntry;
54 
55 typedef struct ITEntry {
56     bool valid;
57     int inttype;
58     uint32_t intid;
59     uint32_t doorbell;
60     uint32_t icid;
61     uint32_t vpeid;
62 } ITEntry;
63 
64 
65 /*
66  * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
67  * if a command parameter is not correct. These include both "stall
68  * processing of the command queue" and "ignore this command, and
69  * keep processing the queue". In our implementation we choose that
70  * memory transaction errors reading the command packet provoke a
71  * stall, but errors in parameters cause us to ignore the command
72  * and continue processing.
73  * The process_* functions which handle individual ITS commands all
74  * return an ItsCmdResult which tells process_cmdq() whether it should
75  * stall or keep going.
76  */
77 typedef enum ItsCmdResult {
78     CMD_STALL = 0,
79     CMD_CONTINUE = 1,
80 } ItsCmdResult;
81 
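/*
 * Return the physical base address encoded in a GITS_BASER<n> value.
 * For 4KB and 16KB pages the address is held in a single PHYADDR field;
 * for 64KB pages the low part is in PHYADDRL_64K and bits [51:48] are
 * held separately in PHYADDRH_64K, so the two cases decode differently.
 */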
82 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
83 {
84     uint64_t result = 0;
85 
86     switch (page_sz) {
87     case GITS_PAGE_SIZE_4K:
88     case GITS_PAGE_SIZE_16K:
89         result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
90         break;
91 
92     case GITS_PAGE_SIZE_64K:
93         result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
94         result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
95         break;
96 
97     default:
98         break;
99     }
100     return result;
101 }
102 
103 static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
104                                  uint32_t idx, MemTxResult *res)
105 {
106     /*
107      * Given a TableDesc describing one of the ITS in-guest-memory
108      * tables and an index into it, return the guest address
109      * corresponding to that table entry.
110      * If there was a memory error reading the L1 table of an
111      * indirect table, *res is set accordingly, and we return -1.
112      * If the L1 table entry is marked not valid, we return -1 with
113      * *res set to MEMTX_OK.
114      *
115      * The specification defines the format of level 1 entries of a
116      * 2-level table, but the format of level 2 entries and the format
117      * of flat-mapped tables is IMPDEF.
118      */
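    /*
     * Illustrative walk-through of the two-level case, using example
     * values (4KB pages, 8-byte table entries) rather than anything
     * mandated by the architecture: for idx = 1000 we read L1 entry
     * 1000 / 512 = 1; if that entry is valid, the returned address is
     * the L2 page it points to plus (1000 % 512) * 8 = 0xf40.
     */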
119     AddressSpace *as = &s->gicv3->dma_as;
120     uint32_t l2idx;
121     uint64_t l2;
122     uint32_t num_l2_entries;
123 
124     *res = MEMTX_OK;
125 
126     if (!td->indirect) {
127         /* Single level table */
128         return td->base_addr + idx * td->entry_sz;
129     }
130 
131     /* Two level table */
132     l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);
133 
134     l2 = address_space_ldq_le(as,
135                               td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
136                               MEMTXATTRS_UNSPECIFIED, res);
137     if (*res != MEMTX_OK) {
138         return -1;
139     }
140     if (!(l2 & L2_TABLE_VALID_MASK)) {
141         return -1;
142     }
143 
144     num_l2_entries = td->page_sz / td->entry_sz;
145     return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
146 }
147 
148 /*
149  * Read the Collection Table entry at index @icid. On success (including
150  * successfully determining that there is no valid CTE for this index),
151  * we return MEMTX_OK and populate the CTEntry struct @cte accordingly.
152  * If there is an error reading memory then we return the error code.
153  */
154 static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte)
155 {
156     AddressSpace *as = &s->gicv3->dma_as;
157     MemTxResult res = MEMTX_OK;
158     uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, &res);
159     uint64_t cteval;
160 
161     if (entry_addr == -1) {
162         /* No L2 table entry, i.e. no valid CTE, or a memory error */
163         cte->valid = false;
164         goto out;
165     }
166 
167     cteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
168     if (res != MEMTX_OK) {
169         goto out;
170     }
171     cte->valid = FIELD_EX64(cteval, CTE, VALID);
172     cte->rdbase = FIELD_EX64(cteval, CTE, RDBASE);
173 out:
174     if (res != MEMTX_OK) {
175         trace_gicv3_its_cte_read_fault(icid);
176     } else {
177         trace_gicv3_its_cte_read(icid, cte->valid, cte->rdbase);
178     }
179     return res;
180 }
181 
182 /*
183  * Update the Interrupt Table entry at index @eventid in the table specified
184  * by the DTE @dte. Returns true on success, false if there was a memory
185  * access error.
186  */
187 static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
188                        const ITEntry *ite)
189 {
190     AddressSpace *as = &s->gicv3->dma_as;
191     MemTxResult res = MEMTX_OK;
192     hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
193     uint64_t itel = 0;
194     uint32_t iteh = 0;
195 
196     trace_gicv3_its_ite_write(dte->ittaddr, eventid, ite->valid,
197                               ite->inttype, ite->intid, ite->icid,
198                               ite->vpeid, ite->doorbell);
199 
200     if (ite->valid) {
201         itel = FIELD_DP64(itel, ITE_L, VALID, 1);
202         itel = FIELD_DP64(itel, ITE_L, INTTYPE, ite->inttype);
203         itel = FIELD_DP64(itel, ITE_L, INTID, ite->intid);
204         itel = FIELD_DP64(itel, ITE_L, ICID, ite->icid);
205         itel = FIELD_DP64(itel, ITE_L, VPEID, ite->vpeid);
206         iteh = FIELD_DP32(iteh, ITE_H, DOORBELL, ite->doorbell);
207     }
208 
209     address_space_stq_le(as, iteaddr, itel, MEMTXATTRS_UNSPECIFIED, &res);
210     if (res != MEMTX_OK) {
211         return false;
212     }
213     address_space_stl_le(as, iteaddr + 8, iteh, MEMTXATTRS_UNSPECIFIED, &res);
214     return res == MEMTX_OK;
215 }
216 
217 /*
218  * Read the Interrupt Table entry at index @eventid from the table specified
219  * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry
220  * struct @ite accordingly. If there is an error reading memory then we return
221  * the error code.
222  */
223 static MemTxResult get_ite(GICv3ITSState *s, uint32_t eventid,
224                            const DTEntry *dte, ITEntry *ite)
225 {
226     AddressSpace *as = &s->gicv3->dma_as;
227     MemTxResult res = MEMTX_OK;
228     uint64_t itel;
229     uint32_t iteh;
230     hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
231 
232     itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, &res);
233     if (res != MEMTX_OK) {
234         trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
235         return res;
236     }
237 
238     iteh = address_space_ldl_le(as, iteaddr + 8, MEMTXATTRS_UNSPECIFIED, &res);
239     if (res != MEMTX_OK) {
240         trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
241         return res;
242     }
243 
244     ite->valid = FIELD_EX64(itel, ITE_L, VALID);
245     ite->inttype = FIELD_EX64(itel, ITE_L, INTTYPE);
246     ite->intid = FIELD_EX64(itel, ITE_L, INTID);
247     ite->icid = FIELD_EX64(itel, ITE_L, ICID);
248     ite->vpeid = FIELD_EX64(itel, ITE_L, VPEID);
249     ite->doorbell = FIELD_EX64(iteh, ITE_H, DOORBELL);
250     trace_gicv3_its_ite_read(dte->ittaddr, eventid, ite->valid,
251                              ite->inttype, ite->intid, ite->icid,
252                              ite->vpeid, ite->doorbell);
253     return MEMTX_OK;
254 }
255 
256 /*
257  * Read the Device Table entry at index @devid. On success (including
258  * successfully determining that there is no valid DTE for this index),
259  * we return MEMTX_OK and populate the DTEntry struct accordingly.
260  * If there is an error reading memory then we return the error code.
261  */
262 static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte)
263 {
264     MemTxResult res = MEMTX_OK;
265     AddressSpace *as = &s->gicv3->dma_as;
266     uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res);
267     uint64_t dteval;
268 
269     if (entry_addr == -1) {
270         /* No L2 table entry, i.e. no valid DTE, or a memory error */
271         dte->valid = false;
272         goto out;
273     }
274     dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
275     if (res != MEMTX_OK) {
276         goto out;
277     }
278     dte->valid = FIELD_EX64(dteval, DTE, VALID);
279     dte->size = FIELD_EX64(dteval, DTE, SIZE);
280     /* DTE word field stores bits [51:8] of the ITT address */
281     dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT;
282 out:
283     if (res != MEMTX_OK) {
284         trace_gicv3_its_dte_read_fault(devid);
285     } else {
286         trace_gicv3_its_dte_read(devid, dte->valid, dte->size, dte->ittaddr);
287     }
288     return res;
289 }
290 
291 /*
292  * This function handles the processing of the following commands, based
293  * on the ItsCmdType parameter passed:
294  * 1. triggering of LPI interrupt translation via the ITS INT command
295  * 2. triggering of LPI interrupt translation via a GITS_TRANSLATER write
296  * 3. handling of the ITS CLEAR command
297  * 4. handling of the ITS DISCARD command
298  */
299 static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
300                                        uint32_t eventid, ItsCmdType cmd)
301 {
302     uint64_t num_eventids;
303     DTEntry dte;
304     CTEntry cte;
305     ITEntry ite;
306 
307     if (devid >= s->dt.num_entries) {
308         qemu_log_mask(LOG_GUEST_ERROR,
309                       "%s: invalid command attributes: devid %d >= %d\n",
310                       __func__, devid, s->dt.num_entries);
311         return CMD_CONTINUE;
312     }
313 
314     if (get_dte(s, devid, &dte) != MEMTX_OK) {
315         return CMD_STALL;
316     }
317     if (!dte.valid) {
318         qemu_log_mask(LOG_GUEST_ERROR,
319                       "%s: invalid command attributes: "
320                       "invalid dte for %d\n", __func__, devid);
321         return CMD_CONTINUE;
322     }
323 
324     num_eventids = 1ULL << (dte.size + 1);
325     if (eventid >= num_eventids) {
326         qemu_log_mask(LOG_GUEST_ERROR,
327                       "%s: invalid command attributes: eventid %d >= %"
328                       PRId64 "\n",
329                       __func__, eventid, num_eventids);
330         return CMD_CONTINUE;
331     }
332 
333     if (get_ite(s, eventid, &dte, &ite) != MEMTX_OK) {
334         return CMD_STALL;
335     }
336 
337     if (!ite.valid || ite.inttype != ITE_INTTYPE_PHYSICAL) {
338         qemu_log_mask(LOG_GUEST_ERROR,
339                       "%s: invalid command attributes: invalid ITE\n",
340                       __func__);
341         return CMD_CONTINUE;
342     }
343 
344     if (ite.icid >= s->ct.num_entries) {
345         qemu_log_mask(LOG_GUEST_ERROR,
346                       "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
347                       __func__, ite.icid);
348         return CMD_CONTINUE;
349     }
350 
351     if (get_cte(s, ite.icid, &cte) != MEMTX_OK) {
352         return CMD_STALL;
353     }
354     if (!cte.valid) {
355         qemu_log_mask(LOG_GUEST_ERROR,
356                       "%s: invalid command attributes: invalid CTE\n",
357                       __func__);
358         return CMD_CONTINUE;
359     }
360 
361     /*
362      * The current implementation only supports rdbase == procnum;
363      * the rdbase physical-address form is therefore ignored.
364      */
365     if (cte.rdbase >= s->gicv3->num_cpu) {
366         return CMD_CONTINUE;
367     }
368 
369     if ((cmd == CLEAR) || (cmd == DISCARD)) {
370         gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 0);
371     } else {
372         gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 1);
373     }
374 
375     if (cmd == DISCARD) {
376         ITEntry ite = {};
377         /* remove mapping from interrupt translation table */
378         ite.valid = false;
379         return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
380     }
381     return CMD_CONTINUE;
382 }
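
/*
 * Handle the INT, CLEAR and DISCARD commands: decode the DeviceID and
 * EventID fields that are common to their command packets, trace the
 * command, and hand the real work off to do_process_its_cmd().
 */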
383 static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt,
384                                     ItsCmdType cmd)
385 {
386     uint32_t devid, eventid;
387 
388     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
389     eventid = cmdpkt[1] & EVENTID_MASK;
390     switch (cmd) {
391     case INTERRUPT:
392         trace_gicv3_its_cmd_int(devid, eventid);
393         break;
394     case CLEAR:
395         trace_gicv3_its_cmd_clear(devid, eventid);
396         break;
397     case DISCARD:
398         trace_gicv3_its_cmd_discard(devid, eventid);
399         break;
400     default:
401         g_assert_not_reached();
402     }
403     return do_process_its_cmd(s, devid, eventid, cmd);
404 }
405 
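/*
 * Handle the MAPTI and MAPI commands: validate the command parameters
 * and, if they are acceptable, write an Interrupt Table entry mapping
 * (DeviceID, EventID) to the physical interrupt pIntid in collection
 * icid. For MAPI (ignore_pInt == true) the EventID is also used as the
 * physical interrupt ID.
 */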
406 static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
407                                   bool ignore_pInt)
408 {
409     uint32_t devid, eventid;
410     uint32_t pIntid = 0;
411     uint64_t num_eventids;
412     uint32_t num_intids;
413     uint16_t icid = 0;
414     DTEntry dte;
415     ITEntry ite;
416 
417     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
418     eventid = cmdpkt[1] & EVENTID_MASK;
419     icid = cmdpkt[2] & ICID_MASK;
420 
421     if (ignore_pInt) {
422         pIntid = eventid;
423         trace_gicv3_its_cmd_mapi(devid, eventid, icid);
424     } else {
425         pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT;
426         trace_gicv3_its_cmd_mapti(devid, eventid, icid, pIntid);
427     }
428 
429     if (devid >= s->dt.num_entries) {
430         qemu_log_mask(LOG_GUEST_ERROR,
431                       "%s: invalid command attributes: devid %d >= %d\n",
432                       __func__, devid, s->dt.num_entries);
433         return CMD_CONTINUE;
434     }
435 
436     if (get_dte(s, devid, &dte) != MEMTX_OK) {
437         return CMD_STALL;
438     }
439     num_eventids = 1ULL << (dte.size + 1);
440     num_intids = 1ULL << (GICD_TYPER_IDBITS + 1);
441 
442     if (icid >= s->ct.num_entries) {
443         qemu_log_mask(LOG_GUEST_ERROR,
444                       "%s: invalid ICID 0x%x >= 0x%x\n",
445                       __func__, icid, s->ct.num_entries);
446         return CMD_CONTINUE;
447     }
448 
449     if (!dte.valid) {
450         qemu_log_mask(LOG_GUEST_ERROR,
451                       "%s: no valid DTE for devid 0x%x\n", __func__, devid);
452         return CMD_CONTINUE;
453     }
454 
455     if (eventid >= num_eventids) {
456         qemu_log_mask(LOG_GUEST_ERROR,
457                       "%s: invalid event ID 0x%x >= 0x%" PRIx64 "\n",
458                       __func__, eventid, num_eventids);
459         return CMD_CONTINUE;
460     }
461 
462     if (pIntid < GICV3_LPI_INTID_START || pIntid >= num_intids) {
463         qemu_log_mask(LOG_GUEST_ERROR,
464                       "%s: invalid interrupt ID 0x%x\n", __func__, pIntid);
465         return CMD_CONTINUE;
466     }
467 
468     /* add ite entry to interrupt translation table */
469     ite.valid = true;
470     ite.inttype = ITE_INTTYPE_PHYSICAL;
471     ite.intid = pIntid;
472     ite.icid = icid;
473     ite.doorbell = INTID_SPURIOUS;
474     ite.vpeid = 0;
475     return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
476 }
477 
478 /*
479  * Update the Collection Table entry for @icid to @cte. Returns true
480  * on success, false if there was a memory access error.
481  */
482 static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte)
483 {
484     AddressSpace *as = &s->gicv3->dma_as;
485     uint64_t entry_addr;
486     uint64_t cteval = 0;
487     MemTxResult res = MEMTX_OK;
488 
489     trace_gicv3_its_cte_write(icid, cte->valid, cte->rdbase);
490 
491     if (cte->valid) {
492         /* add mapping entry to collection table */
493         cteval = FIELD_DP64(cteval, CTE, VALID, 1);
494         cteval = FIELD_DP64(cteval, CTE, RDBASE, cte->rdbase);
495     }
496 
497     entry_addr = table_entry_addr(s, &s->ct, icid, &res);
498     if (res != MEMTX_OK) {
499         /* memory access error: stall */
500         return false;
501     }
502     if (entry_addr == -1) {
503         /* No L2 table for this index: discard write and continue */
504         return true;
505     }
506 
507     address_space_stq_le(as, entry_addr, cteval, MEMTXATTRS_UNSPECIFIED, &res);
508     return res == MEMTX_OK;
509 }
510 
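/*
 * Handle the MAPC command: map (or, if the V bit is clear, unmap) the
 * collection icid to the redistributor identified by RDBASE, after
 * range-checking both fields against this implementation's collection
 * table size and CPU count.
 */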
511 static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
512 {
513     uint16_t icid;
514     CTEntry cte;
515 
516     icid = cmdpkt[2] & ICID_MASK;
517     cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
518     if (cte.valid) {
519         cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
520         cte.rdbase &= RDBASE_PROCNUM_MASK;
521     } else {
522         cte.rdbase = 0;
523     }
524     trace_gicv3_its_cmd_mapc(icid, cte.rdbase, cte.valid);
525 
526     if (icid >= s->ct.num_entries) {
527         qemu_log_mask(LOG_GUEST_ERROR, "ITS MAPC: invalid ICID 0x%x\n", icid);
528         return CMD_CONTINUE;
529     }
530     if (cte.valid && cte.rdbase >= s->gicv3->num_cpu) {
531         qemu_log_mask(LOG_GUEST_ERROR,
532                       "ITS MAPC: invalid RDBASE %u\n", cte.rdbase);
533         return CMD_CONTINUE;
534     }
535 
536     return update_cte(s, icid, &cte) ? CMD_CONTINUE : CMD_STALL;
537 }
538 
539 /*
540  * Update the Device Table entry for @devid to @dte. Returns true
541  * on success, false if there was a memory access error.
542  */
543 static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte)
544 {
545     AddressSpace *as = &s->gicv3->dma_as;
546     uint64_t entry_addr;
547     uint64_t dteval = 0;
548     MemTxResult res = MEMTX_OK;
549 
550     trace_gicv3_its_dte_write(devid, dte->valid, dte->size, dte->ittaddr);
551 
552     if (dte->valid) {
553         /* add mapping entry to device table */
554         dteval = FIELD_DP64(dteval, DTE, VALID, 1);
555         dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size);
556         dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr);
557     }
558 
559     entry_addr = table_entry_addr(s, &s->dt, devid, &res);
560     if (res != MEMTX_OK) {
561         /* memory access error: stall */
562         return false;
563     }
564     if (entry_addr == -1) {
565         /* No L2 table for this index: discard write and continue */
566         return true;
567     }
568     address_space_stq_le(as, entry_addr, dteval, MEMTXATTRS_UNSPECIFIED, &res);
569     return res == MEMTX_OK;
570 }
571 
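/*
 * Handle the MAPD command: validate the DeviceID and the ITT size field
 * and then write the Device Table entry recording the ITT address and
 * size for this device.
 */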
572 static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
573 {
574     uint32_t devid;
575     DTEntry dte;
576 
577     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
578     dte.size = cmdpkt[1] & SIZE_MASK;
579     dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
580     dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
581 
582     trace_gicv3_its_cmd_mapd(devid, dte.size, dte.ittaddr, dte.valid);
583 
584     if (devid >= s->dt.num_entries) {
585         qemu_log_mask(LOG_GUEST_ERROR,
586                       "ITS MAPD: invalid device ID field 0x%x >= 0x%x\n",
587                       devid, s->dt.num_entries);
588         return CMD_CONTINUE;
589     }
590 
591     if (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
592         qemu_log_mask(LOG_GUEST_ERROR,
593                       "ITS MAPD: invalid size %d\n", dte.size);
594         return CMD_CONTINUE;
595     }
596 
597     return update_dte(s, devid, &dte) ? CMD_CONTINUE : CMD_STALL;
598 }
599 
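/*
 * Handle the MOVALL command: move all pending LPIs from the
 * redistributor named by RDBASE1 to the one named by RDBASE2. Because
 * this implementation uses processor numbers as RDBASE values, both
 * fields are range-checked against the number of CPUs.
 */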
600 static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
601 {
602     uint64_t rd1, rd2;
603 
604     rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1);
605     rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2);
606 
607     trace_gicv3_its_cmd_movall(rd1, rd2);
608 
609     if (rd1 >= s->gicv3->num_cpu) {
610         qemu_log_mask(LOG_GUEST_ERROR,
611                       "%s: RDBASE1 %" PRId64
612                       " out of range (must be less than %d)\n",
613                       __func__, rd1, s->gicv3->num_cpu);
614         return CMD_CONTINUE;
615     }
616     if (rd2 >= s->gicv3->num_cpu) {
617         qemu_log_mask(LOG_GUEST_ERROR,
618                       "%s: RDBASE2 %" PRId64
619                       " out of range (must be less than %d)\n",
620                       __func__, rd2, s->gicv3->num_cpu);
621         return CMD_CONTINUE;
622     }
623 
624     if (rd1 == rd2) {
625         /* Move to same target must succeed as a no-op */
626         return CMD_CONTINUE;
627     }
628 
629     /* Move all pending LPIs from redistributor 1 to redistributor 2 */
630     gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]);
631 
632     return CMD_CONTINUE;
633 }
634 
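/*
 * Handle the MOVI command: retarget the LPI identified by
 * (DeviceID, EventID) from its current collection to the new ICID,
 * moving any pending state from the old redistributor to the new one
 * and rewriting the ICID field of the interrupt translation entry.
 */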
635 static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
636 {
637     uint32_t devid, eventid;
638     uint16_t new_icid;
639     uint64_t num_eventids;
640     DTEntry dte;
641     CTEntry old_cte, new_cte;
642     ITEntry old_ite;
643 
644     devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
645     eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
646     new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID);
647 
648     trace_gicv3_its_cmd_movi(devid, eventid, new_icid);
649 
650     if (devid >= s->dt.num_entries) {
651         qemu_log_mask(LOG_GUEST_ERROR,
652                       "%s: invalid command attributes: devid %d >= %d\n",
653                       __func__, devid, s->dt.num_entries);
654         return CMD_CONTINUE;
655     }
656     if (get_dte(s, devid, &dte) != MEMTX_OK) {
657         return CMD_STALL;
658     }
659 
660     if (!dte.valid) {
661         qemu_log_mask(LOG_GUEST_ERROR,
662                       "%s: invalid command attributes: "
663                       "invalid dte for %d\n", __func__, devid);
664         return CMD_CONTINUE;
665     }
666 
667     num_eventids = 1ULL << (dte.size + 1);
668     if (eventid >= num_eventids) {
669         qemu_log_mask(LOG_GUEST_ERROR,
670                       "%s: invalid command attributes: eventid %d >= %"
671                       PRId64 "\n",
672                       __func__, eventid, num_eventids);
673         return CMD_CONTINUE;
674     }
675 
676     if (get_ite(s, eventid, &dte, &old_ite) != MEMTX_OK) {
677         return CMD_STALL;
678     }
679 
680     if (!old_ite.valid || old_ite.inttype != ITE_INTTYPE_PHYSICAL) {
681         qemu_log_mask(LOG_GUEST_ERROR,
682                       "%s: invalid command attributes: invalid ITE\n",
683                       __func__);
684         return CMD_CONTINUE;
685     }
686 
687     if (old_ite.icid >= s->ct.num_entries) {
688         qemu_log_mask(LOG_GUEST_ERROR,
689                       "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
690                       __func__, old_ite.icid);
691         return CMD_CONTINUE;
692     }
693 
694     if (new_icid >= s->ct.num_entries) {
695         qemu_log_mask(LOG_GUEST_ERROR,
696                       "%s: invalid command attributes: ICID 0x%x\n",
697                       __func__, new_icid);
698         return CMD_CONTINUE;
699     }
700 
701     if (get_cte(s, old_ite.icid, &old_cte) != MEMTX_OK) {
702         return CMD_STALL;
703     }
704     if (!old_cte.valid) {
705         qemu_log_mask(LOG_GUEST_ERROR,
706                       "%s: invalid command attributes: "
707                       "invalid CTE for old ICID 0x%x\n",
708                       __func__, old_ite.icid);
709         return CMD_CONTINUE;
710     }
711 
712     if (get_cte(s, new_icid, &new_cte) != MEMTX_OK) {
713         return CMD_STALL;
714     }
715     if (!new_cte.valid) {
716         qemu_log_mask(LOG_GUEST_ERROR,
717                       "%s: invalid command attributes: "
718                       "invalid CTE for new ICID 0x%x\n",
719                       __func__, new_icid);
720         return CMD_CONTINUE;
721     }
722 
723     if (old_cte.rdbase >= s->gicv3->num_cpu) {
724         qemu_log_mask(LOG_GUEST_ERROR,
725                       "%s: CTE has invalid rdbase 0x%x\n",
726                       __func__, old_cte.rdbase);
727         return CMD_CONTINUE;
728     }
729 
730     if (new_cte.rdbase >= s->gicv3->num_cpu) {
731         qemu_log_mask(LOG_GUEST_ERROR,
732                       "%s: CTE has invalid rdbase 0x%x\n",
733                       __func__, new_cte.rdbase);
734         return CMD_CONTINUE;
735     }
736 
737     if (old_cte.rdbase != new_cte.rdbase) {
738         /* Move the LPI from the old redistributor to the new one */
739         gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase],
740                              &s->gicv3->cpu[new_cte.rdbase],
741                              old_ite.intid);
742     }
743 
744     /* Update the ICID field in the interrupt translation table entry */
745     old_ite.icid = new_icid;
746     return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE : CMD_STALL;
747 }
748 
749 /*
750  * The current implementation blocks until all
751  * commands in the queue have been processed.
752  */
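/*
 * The command queue is a circular buffer in guest memory described by
 * GITS_CBASER. We read commands starting at GITS_CREADR and stop when
 * we reach GITS_CWRITER, dispatching each command and advancing CREADR
 * as we go; if a command cannot be read, or fails in a way we treat as
 * a stall, we set the CREADR STALLED bit and stop.
 */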
753 static void process_cmdq(GICv3ITSState *s)
754 {
755     uint32_t wr_offset = 0;
756     uint32_t rd_offset = 0;
757     uint32_t cq_offset = 0;
758     AddressSpace *as = &s->gicv3->dma_as;
759     uint8_t cmd;
760     int i;
761 
762     if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
763         return;
764     }
765 
766     wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
767 
768     if (wr_offset >= s->cq.num_entries) {
769         qemu_log_mask(LOG_GUEST_ERROR,
770                       "%s: invalid write offset "
771                       "%d\n", __func__, wr_offset);
772         return;
773     }
774 
775     rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
776 
777     if (rd_offset >= s->cq.num_entries) {
778         qemu_log_mask(LOG_GUEST_ERROR,
779                       "%s: invalid read offset "
780                       "%d\n", __func__, rd_offset);
781         return;
782     }
783 
784     while (wr_offset != rd_offset) {
785         ItsCmdResult result = CMD_CONTINUE;
786         void *hostmem;
787         hwaddr buflen;
788         uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];
789 
790         cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
791 
792         buflen = GITS_CMDQ_ENTRY_SIZE;
793         hostmem = address_space_map(as, s->cq.base_addr + cq_offset,
794                                     &buflen, false, MEMTXATTRS_UNSPECIFIED);
795         if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) {
796             if (hostmem) {
797                 address_space_unmap(as, hostmem, buflen, false, 0);
798             }
799             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
800             qemu_log_mask(LOG_GUEST_ERROR,
801                           "%s: could not read command at 0x%" PRIx64 "\n",
802                           __func__, s->cq.base_addr + cq_offset);
803             break;
804         }
805         for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) {
806             cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t));
807         }
808         address_space_unmap(as, hostmem, buflen, false, 0);
809 
810         cmd = cmdpkt[0] & CMD_MASK;
811 
812         trace_gicv3_its_process_command(rd_offset, cmd);
813 
814         switch (cmd) {
815         case GITS_CMD_INT:
816             result = process_its_cmd(s, cmdpkt, INTERRUPT);
817             break;
818         case GITS_CMD_CLEAR:
819             result = process_its_cmd(s, cmdpkt, CLEAR);
820             break;
821         case GITS_CMD_SYNC:
822             /*
823              * The current implementation processes each command synchronously
824              * as it is issued, so the internal state is already consistent
825              * by the time a SYNC command is executed; no further processing
826              * is required for SYNC.
827              */
828             trace_gicv3_its_cmd_sync();
829             break;
830         case GITS_CMD_MAPD:
831             result = process_mapd(s, cmdpkt);
832             break;
833         case GITS_CMD_MAPC:
834             result = process_mapc(s, cmdpkt);
835             break;
836         case GITS_CMD_MAPTI:
837             result = process_mapti(s, cmdpkt, false);
838             break;
839         case GITS_CMD_MAPI:
840             result = process_mapti(s, cmdpkt, true);
841             break;
842         case GITS_CMD_DISCARD:
843             result = process_its_cmd(s, cmdpkt, DISCARD);
844             break;
845         case GITS_CMD_INV:
846         case GITS_CMD_INVALL:
847             /*
848              * The current implementation doesn't cache any ITS tables,
849              * only the calculated LPI priority information. We therefore
850              * only need to trigger an LPI priority recalculation to stay
851              * in sync with LPI config table or pending table changes.
852              */
853             trace_gicv3_its_cmd_inv();
854             for (i = 0; i < s->gicv3->num_cpu; i++) {
855                 gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
856             }
857             break;
858         case GITS_CMD_MOVI:
859             result = process_movi(s, cmdpkt);
860             break;
861         case GITS_CMD_MOVALL:
862             result = process_movall(s, cmdpkt);
863             break;
864         default:
865             trace_gicv3_its_cmd_unknown(cmd);
866             break;
867         }
868         if (result == CMD_CONTINUE) {
869             rd_offset++;
870             rd_offset %= s->cq.num_entries;
871             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
872         } else {
873             /* CMD_STALL */
874             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
875             qemu_log_mask(LOG_GUEST_ERROR,
876                           "%s: 0x%x cmd processing failed, stalling\n",
877                           __func__, cmd);
878             break;
879         }
880     }
881 }
882 
883 /*
884  * This function extracts the ITS Device and Collection table specific
885  * parameters (such as base address and size) from the GITS_BASER<n> registers.
886  * It is called when the ITS is enabled and also during post_load migration.
887  */
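/*
 * Worked example with illustrative values (not mandated by the spec):
 * a flat (non-indirect) Device table whose GITS_BASER<n>.Size field is 1
 * (i.e. two 64KB pages) with an 8-byte entry size gives
 * num_entries = (2 * 0x10000) / 8 = 16384, which is then capped so the
 * table never holds more than one entry per representable DeviceID.
 */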
888 static void extract_table_params(GICv3ITSState *s)
889 {
890     uint16_t num_pages = 0;
891     uint8_t  page_sz_type;
892     uint8_t type;
893     uint32_t page_sz = 0;
894     uint64_t value;
895 
896     for (int i = 0; i < 8; i++) {
897         TableDesc *td;
898         int idbits;
899 
900         value = s->baser[i];
901 
902         if (!value) {
903             continue;
904         }
905 
906         page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);
907 
908         switch (page_sz_type) {
909         case 0:
910             page_sz = GITS_PAGE_SIZE_4K;
911             break;
912 
913         case 1:
914             page_sz = GITS_PAGE_SIZE_16K;
915             break;
916 
917         case 2:
918         case 3:
919             page_sz = GITS_PAGE_SIZE_64K;
920             break;
921 
922         default:
923             g_assert_not_reached();
924         }
925 
926         num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;
927 
928         type = FIELD_EX64(value, GITS_BASER, TYPE);
929 
930         switch (type) {
931         case GITS_BASER_TYPE_DEVICE:
932             td = &s->dt;
933             idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
934             break;
935         case GITS_BASER_TYPE_COLLECTION:
936             td = &s->ct;
937             if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
938                 idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
939             } else {
940                 /* 16-bit CollectionId supported when CIL == 0 */
941                 idbits = 16;
942             }
943             break;
944         default:
945             /*
946              * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
947              * ensures we will only see type values corresponding to
948              * the values set up in gicv3_its_reset().
949              */
950             g_assert_not_reached();
951         }
952 
953         memset(td, 0, sizeof(*td));
954         /*
955          * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
956          * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
957          * do not have a special case where the GITS_BASER<n>.Valid bit is 0
958          * for the register corresponding to the Collection table but we
959          * still have to process interrupts using non-memory-backed
960          * Collection table entries.)
961          * The specification makes it UNPREDICTABLE to enable the ITS without
962          * marking each BASER<n> as valid. We choose to handle these as if
963          * the table was zero-sized, so commands using the table will fail
964          * and interrupts requested via GITS_TRANSLATER writes will be ignored.
965          * This happens automatically by leaving the num_entries field at
966          * zero, which will be caught by the bounds checks we have before
967          * every table lookup anyway.
968          */
969         if (!FIELD_EX64(value, GITS_BASER, VALID)) {
970             continue;
971         }
972         td->page_sz = page_sz;
973         td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
974         td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
975         td->base_addr = baser_base_addr(value, page_sz);
976         if (!td->indirect) {
977             td->num_entries = (num_pages * page_sz) / td->entry_sz;
978         } else {
979             td->num_entries = (((num_pages * page_sz) /
980                                   L1TABLE_ENTRY_SIZE) *
981                                  (page_sz / td->entry_sz));
982         }
983         td->num_entries = MIN(td->num_entries, 1ULL << idbits);
984     }
985 }
986 
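/*
 * Extract the command queue parameters (base address and number of
 * entries) from GITS_CBASER, in the same way that extract_table_params()
 * does for the GITS_BASER<n> tables.
 */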
987 static void extract_cmdq_params(GICv3ITSState *s)
988 {
989     uint16_t num_pages = 0;
990     uint64_t value = s->cbaser;
991 
992     num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
993 
994     memset(&s->cq, 0, sizeof(s->cq));
995 
996     if (FIELD_EX64(value, GITS_CBASER, VALID)) {
997         s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
998                              GITS_CMDQ_ENTRY_SIZE;
999         s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
1000         s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
1001     }
1002 }
1003 
1004 static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
1005                                               uint64_t *data, unsigned size,
1006                                               MemTxAttrs attrs)
1007 {
1008     /*
1009      * GITS_TRANSLATER is write-only, and all other addresses
1010      * in the interrupt translation space frame are RES0.
1011      */
1012     *data = 0;
1013     return MEMTX_OK;
1014 }
1015 
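/*
 * Handle a write to the GITS_TRANSLATER register in the translation
 * space frame: the requester ID from the transaction attributes supplies
 * the DeviceID and the data written supplies the EventID, which together
 * identify the LPI to set pending (provided the ITS is enabled).
 */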
1016 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
1017                                                uint64_t data, unsigned size,
1018                                                MemTxAttrs attrs)
1019 {
1020     GICv3ITSState *s = (GICv3ITSState *)opaque;
1021     bool result = true;
1022 
1023     trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);
1024 
1025     switch (offset) {
1026     case GITS_TRANSLATER:
1027         if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1028             result = do_process_its_cmd(s, attrs.requester_id, data, NONE);
1029         }
1030         break;
1031     default:
1032         break;
1033     }
1034 
1035     if (result) {
1036         return MEMTX_OK;
1037     } else {
1038         return MEMTX_ERROR;
1039     }
1040 }
1041 
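/*
 * 32-bit writes to the ITS control register frame. Returning false for
 * an offset that does not decode to a register lets the caller log a
 * guest error while still completing the access as RAZ/WI.
 */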
1042 static bool its_writel(GICv3ITSState *s, hwaddr offset,
1043                               uint64_t value, MemTxAttrs attrs)
1044 {
1045     bool result = true;
1046     int index;
1047 
1048     switch (offset) {
1049     case GITS_CTLR:
1050         if (value & R_GITS_CTLR_ENABLED_MASK) {
1051             s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
1052             extract_table_params(s);
1053             extract_cmdq_params(s);
1054             process_cmdq(s);
1055         } else {
1056             s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
1057         }
1058         break;
1059     case GITS_CBASER:
1060         /*
1061          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1062          *                 already enabled
1063          */
1064         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1065             s->cbaser = deposit64(s->cbaser, 0, 32, value);
1066             s->creadr = 0;
1067         }
1068         break;
1069     case GITS_CBASER + 4:
1070         /*
1071          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1072          *                 already enabled
1073          */
1074         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1075             s->cbaser = deposit64(s->cbaser, 32, 32, value);
1076             s->creadr = 0;
1077         }
1078         break;
1079     case GITS_CWRITER:
1080         s->cwriter = deposit64(s->cwriter, 0, 32,
1081                                (value & ~R_GITS_CWRITER_RETRY_MASK));
1082         if (s->cwriter != s->creadr) {
1083             process_cmdq(s);
1084         }
1085         break;
1086     case GITS_CWRITER + 4:
1087         s->cwriter = deposit64(s->cwriter, 32, 32, value);
1088         break;
1089     case GITS_CREADR:
1090         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1091             s->creadr = deposit64(s->creadr, 0, 32,
1092                                   (value & ~R_GITS_CREADR_STALLED_MASK));
1093         } else {
1094             /* RO register, ignore the write */
1095             qemu_log_mask(LOG_GUEST_ERROR,
1096                           "%s: invalid guest write to RO register at offset "
1097                           TARGET_FMT_plx "\n", __func__, offset);
1098         }
1099         break;
1100     case GITS_CREADR + 4:
1101         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1102             s->creadr = deposit64(s->creadr, 32, 32, value);
1103         } else {
1104             /* RO register, ignore the write */
1105             qemu_log_mask(LOG_GUEST_ERROR,
1106                           "%s: invalid guest write to RO register at offset "
1107                           TARGET_FMT_plx "\n", __func__, offset);
1108         }
1109         break;
1110     case GITS_BASER ... GITS_BASER + 0x3f:
1111         /*
1112          * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
1113          *                 already enabled
1114          */
1115         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1116             index = (offset - GITS_BASER) / 8;
1117 
1118             if (s->baser[index] == 0) {
1119                 /* Unimplemented GITS_BASERn: RAZ/WI */
1120                 break;
1121             }
1122             if (offset & 7) {
1123                 value <<= 32;
1124                 value &= ~GITS_BASER_RO_MASK;
1125                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
1126                 s->baser[index] |= value;
1127             } else {
1128                 value &= ~GITS_BASER_RO_MASK;
1129                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
1130                 s->baser[index] |= value;
1131             }
1132         }
1133         break;
1134     case GITS_IIDR:
1135     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1136         /* RO registers, ignore the write */
1137         qemu_log_mask(LOG_GUEST_ERROR,
1138                       "%s: invalid guest write to RO register at offset "
1139                       TARGET_FMT_plx "\n", __func__, offset);
1140         break;
1141     default:
1142         result = false;
1143         break;
1144     }
1145     return result;
1146 }
1147 
1148 static bool its_readl(GICv3ITSState *s, hwaddr offset,
1149                              uint64_t *data, MemTxAttrs attrs)
1150 {
1151     bool result = true;
1152     int index;
1153 
1154     switch (offset) {
1155     case GITS_CTLR:
1156         *data = s->ctlr;
1157         break;
1158     case GITS_IIDR:
1159         *data = gicv3_iidr();
1160         break;
1161     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1162         /* ID registers */
1163         *data = gicv3_idreg(offset - GITS_IDREGS);
1164         break;
1165     case GITS_TYPER:
1166         *data = extract64(s->typer, 0, 32);
1167         break;
1168     case GITS_TYPER + 4:
1169         *data = extract64(s->typer, 32, 32);
1170         break;
1171     case GITS_CBASER:
1172         *data = extract64(s->cbaser, 0, 32);
1173         break;
1174     case GITS_CBASER + 4:
1175         *data = extract64(s->cbaser, 32, 32);
1176         break;
1177     case GITS_CREADR:
1178         *data = extract64(s->creadr, 0, 32);
1179         break;
1180     case GITS_CREADR + 4:
1181         *data = extract64(s->creadr, 32, 32);
1182         break;
1183     case GITS_CWRITER:
1184         *data = extract64(s->cwriter, 0, 32);
1185         break;
1186     case GITS_CWRITER + 4:
1187         *data = extract64(s->cwriter, 32, 32);
1188         break;
1189     case GITS_BASER ... GITS_BASER + 0x3f:
1190         index = (offset - GITS_BASER) / 8;
1191         if (offset & 7) {
1192             *data = extract64(s->baser[index], 32, 32);
1193         } else {
1194             *data = extract64(s->baser[index], 0, 32);
1195         }
1196         break;
1197     default:
1198         result = false;
1199         break;
1200     }
1201     return result;
1202 }
1203 
1204 static bool its_writell(GICv3ITSState *s, hwaddr offset,
1205                                uint64_t value, MemTxAttrs attrs)
1206 {
1207     bool result = true;
1208     int index;
1209 
1210     switch (offset) {
1211     case GITS_BASER ... GITS_BASER + 0x3f:
1212         /*
1213          * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
1214          *                 already enabled
1215          */
1216         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1217             index = (offset - GITS_BASER) / 8;
1218             if (s->baser[index] == 0) {
1219                 /* Unimplemented GITS_BASERn: RAZ/WI */
1220                 break;
1221             }
1222             s->baser[index] &= GITS_BASER_RO_MASK;
1223             s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
1224         }
1225         break;
1226     case GITS_CBASER:
1227         /*
1228          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1229          *                 already enabled
1230          */
1231         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1232             s->cbaser = value;
1233             s->creadr = 0;
1234         }
1235         break;
1236     case GITS_CWRITER:
1237         s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
1238         if (s->cwriter != s->creadr) {
1239             process_cmdq(s);
1240         }
1241         break;
1242     case GITS_CREADR:
1243         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1244             s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
1245         } else {
1246             /* RO register, ignore the write */
1247             qemu_log_mask(LOG_GUEST_ERROR,
1248                           "%s: invalid guest write to RO register at offset "
1249                           TARGET_FMT_plx "\n", __func__, offset);
1250         }
1251         break;
1252     case GITS_TYPER:
1253         /* RO registers, ignore the write */
1254         qemu_log_mask(LOG_GUEST_ERROR,
1255                       "%s: invalid guest write to RO register at offset "
1256                       TARGET_FMT_plx "\n", __func__, offset);
1257         break;
1258     default:
1259         result = false;
1260         break;
1261     }
1262     return result;
1263 }
1264 
1265 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1266                               uint64_t *data, MemTxAttrs attrs)
1267 {
1268     bool result = true;
1269     int index;
1270 
1271     switch (offset) {
1272     case GITS_TYPER:
1273         *data = s->typer;
1274         break;
1275     case GITS_BASER ... GITS_BASER + 0x3f:
1276         index = (offset - GITS_BASER) / 8;
1277         *data = s->baser[index];
1278         break;
1279     case GITS_CBASER:
1280         *data = s->cbaser;
1281         break;
1282     case GITS_CREADR:
1283         *data = s->creadr;
1284         break;
1285     case GITS_CWRITER:
1286         *data = s->cwriter;
1287         break;
1288     default:
1289         result = false;
1290         break;
1291     }
1292     return result;
1293 }
1294 
1295 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1296                                   unsigned size, MemTxAttrs attrs)
1297 {
1298     GICv3ITSState *s = (GICv3ITSState *)opaque;
1299     bool result;
1300 
1301     switch (size) {
1302     case 4:
1303         result = its_readl(s, offset, data, attrs);
1304         break;
1305     case 8:
1306         result = its_readll(s, offset, data, attrs);
1307         break;
1308     default:
1309         result = false;
1310         break;
1311     }
1312 
1313     if (!result) {
1314         qemu_log_mask(LOG_GUEST_ERROR,
1315                       "%s: invalid guest read at offset " TARGET_FMT_plx
1316                       " size %u\n", __func__, offset, size);
1317         trace_gicv3_its_badread(offset, size);
1318         /*
1319          * The spec requires that reserved registers are RAZ/WI;
1320          * so use false returns from leaf functions as a way to
1321          * trigger the guest-error logging but don't return it to
1322          * the caller, or we'll cause a spurious guest data abort.
1323          */
1324         *data = 0;
1325     } else {
1326         trace_gicv3_its_read(offset, *data, size);
1327     }
1328     return MEMTX_OK;
1329 }
1330 
1331 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1332                                    unsigned size, MemTxAttrs attrs)
1333 {
1334     GICv3ITSState *s = (GICv3ITSState *)opaque;
1335     bool result;
1336 
1337     switch (size) {
1338     case 4:
1339         result = its_writel(s, offset, data, attrs);
1340         break;
1341     case 8:
1342         result = its_writell(s, offset, data, attrs);
1343         break;
1344     default:
1345         result = false;
1346         break;
1347     }
1348 
1349     if (!result) {
1350         qemu_log_mask(LOG_GUEST_ERROR,
1351                       "%s: invalid guest write at offset " TARGET_FMT_plx
1352                       " size %u\n", __func__, offset, size);
1353         trace_gicv3_its_badwrite(offset, data, size);
1354         /*
1355          * The spec requires that reserved registers are RAZ/WI;
1356          * so use false returns from leaf functions as a way to
1357          * trigger the guest-error logging but don't return it to
1358          * the caller, or we'll cause a spurious guest data abort.
1359          */
1360     } else {
1361         trace_gicv3_its_write(offset, data, size);
1362     }
1363     return MEMTX_OK;
1364 }
1365 
1366 static const MemoryRegionOps gicv3_its_control_ops = {
1367     .read_with_attrs = gicv3_its_read,
1368     .write_with_attrs = gicv3_its_write,
1369     .valid.min_access_size = 4,
1370     .valid.max_access_size = 8,
1371     .impl.min_access_size = 4,
1372     .impl.max_access_size = 8,
1373     .endianness = DEVICE_NATIVE_ENDIAN,
1374 };
1375 
1376 static const MemoryRegionOps gicv3_its_translation_ops = {
1377     .read_with_attrs = gicv3_its_translation_read,
1378     .write_with_attrs = gicv3_its_translation_write,
1379     .valid.min_access_size = 2,
1380     .valid.max_access_size = 4,
1381     .impl.min_access_size = 2,
1382     .impl.max_access_size = 4,
1383     .endianness = DEVICE_NATIVE_ENDIAN,
1384 };
1385 
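/*
 * Realize the ITS device: check that every redistributor advertises
 * physical LPI support (GICR_TYPER.PLPIS), register the control and
 * translation MMIO regions, and fill in the read-only GITS_TYPER
 * feature fields.
 */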
1386 static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
1387 {
1388     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1389     int i;
1390 
1391     for (i = 0; i < s->gicv3->num_cpu; i++) {
1392         if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
1393             error_setg(errp, "Physical LPI not supported by CPU %d", i);
1394             return;
1395         }
1396     }
1397 
1398     gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
1399 
1400     /* set the ITS default features supported */
1401     s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
1402     s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
1403                           ITS_ITT_ENTRY_SIZE - 1);
1404     s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
1405     s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
1406     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
1407     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
1408 }
1409 
1410 static void gicv3_its_reset(DeviceState *dev)
1411 {
1412     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1413     GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);
1414 
1415     c->parent_reset(dev);
1416 
1417     /* Quiescent bit reset to 1 */
1418     s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);
1419 
1420     /*
1421      * setting GITS_BASER0.Type = 0b001 (Device)
1422      *         GITS_BASER1.Type = 0b100 (Collection Table)
1423      *         GITS_BASER<n>.Type, where n = 2 to 7, is 0b000 (Unimplemented)
1424      *         GITS_BASER<0,1>.Page_Size = 64KB
1425      * and default translation table entry size to 16 bytes
1426      */
1427     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
1428                              GITS_BASER_TYPE_DEVICE);
1429     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
1430                              GITS_BASER_PAGESIZE_64K);
1431     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
1432                              GITS_DTE_SIZE - 1);
1433 
1434     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
1435                              GITS_BASER_TYPE_COLLECTION);
1436     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
1437                              GITS_BASER_PAGESIZE_64K);
1438     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
1439                              GITS_CTE_SIZE - 1);
1440 }
1441 
1442 static void gicv3_its_post_load(GICv3ITSState *s)
1443 {
1444     if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1445         extract_table_params(s);
1446         extract_cmdq_params(s);
1447     }
1448 }
1449 
1450 static Property gicv3_its_props[] = {
1451     DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
1452                      GICv3State *),
1453     DEFINE_PROP_END_OF_LIST(),
1454 };
1455 
1456 static void gicv3_its_class_init(ObjectClass *klass, void *data)
1457 {
1458     DeviceClass *dc = DEVICE_CLASS(klass);
1459     GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
1460     GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);
1461 
1462     dc->realize = gicv3_arm_its_realize;
1463     device_class_set_props(dc, gicv3_its_props);
1464     device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
1465     icc->post_load = gicv3_its_post_load;
1466 }
1467 
1468 static const TypeInfo gicv3_its_info = {
1469     .name = TYPE_ARM_GICV3_ITS,
1470     .parent = TYPE_ARM_GICV3_ITS_COMMON,
1471     .instance_size = sizeof(GICv3ITSState),
1472     .class_init = gicv3_its_class_init,
1473     .class_size = sizeof(GICv3ITSClass),
1474 };
1475 
1476 static void gicv3_its_register_types(void)
1477 {
1478     type_register_static(&gicv3_its_info);
1479 }
1480 
1481 type_init(gicv3_its_register_types)
1482