1 /*
2  * ITS emulation for a GICv3-based system
3  *
4  * Copyright Linaro.org 2021
5  *
6  * Authors:
7  *  Shashi Mallela <shashi.mallela@linaro.org>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10  * option) any later version.  See the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qemu/log.h"
16 #include "trace.h"
17 #include "hw/qdev-properties.h"
18 #include "hw/intc/arm_gicv3_its_common.h"
19 #include "gicv3_internal.h"
20 #include "qom/object.h"
21 #include "qapi/error.h"
22 
23 typedef struct GICv3ITSClass GICv3ITSClass;
24 /* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
25 DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
26                      ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)
27 
28 struct GICv3ITSClass {
29     GICv3ITSCommonClass parent_class;
30     void (*parent_reset)(DeviceState *dev);
31 };
32 
33 /*
34  * This is an internal enum used to distinguish between an LPI triggered
35  * via the command queue and an LPI triggered via a GITS_TRANSLATER write.
36  */
37 typedef enum ItsCmdType {
38     NONE = 0, /* internal indication for GITS_TRANSLATER write */
39     CLEAR = 1,
40     DISCARD = 2,
41     INTERRUPT = 3,
42 } ItsCmdType;
43 
44 typedef struct DTEntry {
45     bool valid;
46     unsigned size;
47     uint64_t ittaddr;
48 } DTEntry;
49 
50 typedef struct CTEntry {
51     bool valid;
52     uint32_t rdbase;
53 } CTEntry;
54 
55 typedef struct ITEntry {
56     bool valid;
57     int inttype;
58     uint32_t intid;
59     uint32_t doorbell;
60     uint32_t icid;
61     uint32_t vpeid;
62 } ITEntry;
63 
64 
65 /*
66  * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
67  * if a command parameter is not correct. These include both "stall
68  * processing of the command queue" and "ignore this command, and
69  * keep processing the queue". In our implementation we choose that
70  * memory transaction errors reading the command packet provoke a
71  * stall, but errors in parameters cause us to ignore the command
72  * and continue processing.
73  * The process_* functions which handle individual ITS commands all
74  * return an ItsCmdResult which tells process_cmdq() whether it should
75  * stall or keep going.
76  */
77 typedef enum ItsCmdResult {
78     CMD_STALL = 0,
79     CMD_CONTINUE = 1,
80 } ItsCmdResult;
81 
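/*
 * Return the physical base address encoded in a GITS_BASER<n> value for
 * the given page size: a single PHYADDR field for 4K and 16K pages, or
 * the PHYADDRL_64K/PHYADDRH_64K split encoding for 64K pages.
 */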
82 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
83 {
84     uint64_t result = 0;
85 
86     switch (page_sz) {
87     case GITS_PAGE_SIZE_4K:
88     case GITS_PAGE_SIZE_16K:
89         result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
90         break;
91 
92     case GITS_PAGE_SIZE_64K:
93         result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
94         result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
95         break;
96 
97     default:
98         break;
99     }
100     return result;
101 }
102 
103 static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
104                                  uint32_t idx, MemTxResult *res)
105 {
106     /*
107      * Given a TableDesc describing one of the ITS in-guest-memory
108      * tables and an index into it, return the guest address
109      * corresponding to that table entry.
110      * If there was a memory error reading the L1 table of an
111      * indirect table, *res is set accordingly, and we return -1.
112      * If the L1 table entry is marked not valid, we return -1 with
113      * *res set to MEMTX_OK.
114      *
115      * The specification defines the format of level 1 entries of a
116      * 2-level table, but the format of level 2 entries and the format
117      * of flat-mapped tables is IMPDEF.
118      */
119     AddressSpace *as = &s->gicv3->dma_as;
120     uint32_t l2idx;
121     uint64_t l2;
122     uint32_t num_l2_entries;
123 
124     *res = MEMTX_OK;
125 
126     if (!td->indirect) {
127         /* Single level table */
128         return td->base_addr + idx * td->entry_sz;
129     }
130 
131     /* Two level table */
132     l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);
133 
134     l2 = address_space_ldq_le(as,
135                               td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
136                               MEMTXATTRS_UNSPECIFIED, res);
137     if (*res != MEMTX_OK) {
138         return -1;
139     }
140     if (!(l2 & L2_TABLE_VALID_MASK)) {
141         return -1;
142     }
143 
144     num_l2_entries = td->page_sz / td->entry_sz;
145     return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
146 }
147 
148 /*
149  * Read the Collection Table entry at index @icid. On success (including
150  * successfully determining that there is no valid CTE for this index),
151  * we return MEMTX_OK and populate the CTEntry struct @cte accordingly.
152  * If there is an error reading memory then we return the error code.
153  */
154 static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte)
155 {
156     AddressSpace *as = &s->gicv3->dma_as;
157     MemTxResult res = MEMTX_OK;
158     uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, &res);
159     uint64_t cteval;
160 
161     if (entry_addr == -1) {
162         /* No L2 table entry, i.e. no valid CTE, or a memory error */
163         cte->valid = false;
164         return res;
165     }
166 
167     cteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
168     if (res != MEMTX_OK) {
169         return res;
170     }
171     cte->valid = FIELD_EX64(cteval, CTE, VALID);
172     cte->rdbase = FIELD_EX64(cteval, CTE, RDBASE);
173     return MEMTX_OK;
174 }
175 
176 /*
177  * Update the Interrupt Table entry at index @eventid in the table specified
178  * by the DTE @dte. Returns true on success, false if there was a memory
179  * access error.
180  */
181 static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
182                        const ITEntry *ite)
183 {
184     AddressSpace *as = &s->gicv3->dma_as;
185     MemTxResult res = MEMTX_OK;
186     hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
187     uint64_t itel = 0;
188     uint32_t iteh = 0;
189 
190     if (ite->valid) {
191         itel = FIELD_DP64(itel, ITE_L, VALID, 1);
192         itel = FIELD_DP64(itel, ITE_L, INTTYPE, ite->inttype);
193         itel = FIELD_DP64(itel, ITE_L, INTID, ite->intid);
194         itel = FIELD_DP64(itel, ITE_L, ICID, ite->icid);
195         itel = FIELD_DP64(itel, ITE_L, VPEID, ite->vpeid);
196         iteh = FIELD_DP32(iteh, ITE_H, DOORBELL, ite->doorbell);
197     }
198 
199     address_space_stq_le(as, iteaddr, itel, MEMTXATTRS_UNSPECIFIED, &res);
200     if (res != MEMTX_OK) {
201         return false;
202     }
203     address_space_stl_le(as, iteaddr + 8, iteh, MEMTXATTRS_UNSPECIFIED, &res);
204     return res == MEMTX_OK;
205 }
206 
207 /*
208  * Read the Interrupt Table entry at index @eventid from the table specified
209  * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry
210  * struct @ite accordingly. If there is an error reading memory then we return
211  * the error code.
212  */
213 static MemTxResult get_ite(GICv3ITSState *s, uint32_t eventid,
214                            const DTEntry *dte, ITEntry *ite)
215 {
216     AddressSpace *as = &s->gicv3->dma_as;
217     MemTxResult res = MEMTX_OK;
218     uint64_t itel;
219     uint32_t iteh;
220     hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
221 
222     itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, &res);
223     if (res != MEMTX_OK) {
224         return res;
225     }
226 
227     iteh = address_space_ldl_le(as, iteaddr + 8, MEMTXATTRS_UNSPECIFIED, &res);
228     if (res != MEMTX_OK) {
229         return res;
230     }
231 
232     ite->valid = FIELD_EX64(itel, ITE_L, VALID);
233     ite->inttype = FIELD_EX64(itel, ITE_L, INTTYPE);
234     ite->intid = FIELD_EX64(itel, ITE_L, INTID);
235     ite->icid = FIELD_EX64(itel, ITE_L, ICID);
236     ite->vpeid = FIELD_EX64(itel, ITE_L, VPEID);
237     ite->doorbell = FIELD_EX64(iteh, ITE_H, DOORBELL);
238     return MEMTX_OK;
239 }
240 
241 /*
242  * Read the Device Table entry at index @devid. On success (including
243  * successfully determining that there is no valid DTE for this index),
244  * we return MEMTX_OK and populate the DTEntry struct accordingly.
245  * If there is an error reading memory then we return the error code.
246  */
247 static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte)
248 {
249     MemTxResult res = MEMTX_OK;
250     AddressSpace *as = &s->gicv3->dma_as;
251     uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res);
252     uint64_t dteval;
253 
254     if (entry_addr == -1) {
255         /* No L2 table entry, i.e. no valid DTE, or a memory error */
256         dte->valid = false;
257         return res;
258     }
259     dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
260     if (res != MEMTX_OK) {
261         return res;
262     }
263     dte->valid = FIELD_EX64(dteval, DTE, VALID);
264     dte->size = FIELD_EX64(dteval, DTE, SIZE);
265     /* DTE word field stores bits [51:8] of the ITT address */
266     dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT;
267     return MEMTX_OK;
268 }
269 
270 /*
271  * This function handles the processing of the following commands, based on
272  * the ItsCmdType parameter passed in:
273  * 1. triggering of LPI interrupt translation via the ITS INT command
274  * 2. triggering of LPI interrupt translation via a GITS_TRANSLATER register write
275  * 3. handling of ITS CLEAR command
276  * 4. handling of ITS DISCARD command
277  */
278 static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
279                                        uint32_t eventid, ItsCmdType cmd)
280 {
281     uint64_t num_eventids;
282     DTEntry dte;
283     CTEntry cte;
284     ITEntry ite;
285 
286     if (devid >= s->dt.num_entries) {
287         qemu_log_mask(LOG_GUEST_ERROR,
288                       "%s: invalid command attributes: devid %d >= %d\n",
289                       __func__, devid, s->dt.num_entries);
290         return CMD_CONTINUE;
291     }
292 
293     if (get_dte(s, devid, &dte) != MEMTX_OK) {
294         return CMD_STALL;
295     }
296     if (!dte.valid) {
297         qemu_log_mask(LOG_GUEST_ERROR,
298                       "%s: invalid command attributes: "
299                       "invalid dte for %d\n", __func__, devid);
300         return CMD_CONTINUE;
301     }
302 
303     num_eventids = 1ULL << (dte.size + 1);
304     if (eventid >= num_eventids) {
305         qemu_log_mask(LOG_GUEST_ERROR,
306                       "%s: invalid command attributes: eventid %d >= %"
307                       PRIu64 "\n",
308                       __func__, eventid, num_eventids);
309         return CMD_CONTINUE;
310     }
311 
312     if (get_ite(s, eventid, &dte, &ite) != MEMTX_OK) {
313         return CMD_STALL;
314     }
315 
316     if (!ite.valid || ite.inttype != ITE_INTTYPE_PHYSICAL) {
317         qemu_log_mask(LOG_GUEST_ERROR,
318                       "%s: invalid command attributes: invalid ITE\n",
319                       __func__);
320         return CMD_CONTINUE;
321     }
322 
323     if (ite.icid >= s->ct.num_entries) {
324         qemu_log_mask(LOG_GUEST_ERROR,
325                       "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
326                       __func__, ite.icid);
327         return CMD_CONTINUE;
328     }
329 
330     if (get_cte(s, ite.icid, &cte) != MEMTX_OK) {
331         return CMD_STALL;
332     }
333     if (!cte.valid) {
334         qemu_log_mask(LOG_GUEST_ERROR,
335                       "%s: invalid command attributes: invalid CTE\n",
336                       __func__);
337         return CMD_CONTINUE;
338     }
339 
340     /*
341      * Current implementation only supports rdbase == procnum
342      * Hence rdbase physical address is ignored
343      */
344     if (cte.rdbase >= s->gicv3->num_cpu) {
345         return CMD_CONTINUE;
346     }
347 
348     if ((cmd == CLEAR) || (cmd == DISCARD)) {
349         gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 0);
350     } else {
351         gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 1);
352     }
353 
354     if (cmd == DISCARD) {
355         ITEntry ite = {};
356         /* remove mapping from interrupt translation table */
357         ite.valid = false;
358         return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
359     }
360     return CMD_CONTINUE;
361 }
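
/*
 * Handle a single INT, CLEAR or DISCARD command packet: extract the
 * DeviceID and EventID fields and hand them to do_process_its_cmd().
 */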
362 static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt,
363                                     ItsCmdType cmd)
364 {
365     uint32_t devid, eventid;
366 
367     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
368     eventid = cmdpkt[1] & EVENTID_MASK;
369     return do_process_its_cmd(s, devid, eventid, cmd);
370 }
371 
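/*
 * Handle the MAPTI and MAPI commands, which map an (EventID, DeviceID)
 * pair to a physical interrupt ID and a collection. For MAPI
 * (@ignore_pInt is true) the EventID is also used as the pINTID.
 */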
372 static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
373                                   bool ignore_pInt)
374 {
375     uint32_t devid, eventid;
376     uint32_t pIntid = 0;
377     uint64_t num_eventids;
378     uint32_t num_intids;
379     uint16_t icid = 0;
380     DTEntry dte;
381     ITEntry ite;
382 
383     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
384     eventid = cmdpkt[1] & EVENTID_MASK;
385 
386     if (ignore_pInt) {
387         pIntid = eventid;
388     } else {
389         pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT;
390     }
391 
392     icid = cmdpkt[2] & ICID_MASK;
393 
394     if (devid >= s->dt.num_entries) {
395         qemu_log_mask(LOG_GUEST_ERROR,
396                       "%s: invalid command attributes: devid %d >= %d\n",
397                       __func__, devid, s->dt.num_entries);
398         return CMD_CONTINUE;
399     }
400 
401     if (get_dte(s, devid, &dte) != MEMTX_OK) {
402         return CMD_STALL;
403     }
404     num_eventids = 1ULL << (dte.size + 1);
405     num_intids = 1ULL << (GICD_TYPER_IDBITS + 1);
406 
407     if ((icid >= s->ct.num_entries)
408             || !dte.valid || (eventid >= num_eventids) ||
409             (((pIntid < GICV3_LPI_INTID_START) || (pIntid >= num_intids)) &&
410              (pIntid != INTID_SPURIOUS))) {
411         qemu_log_mask(LOG_GUEST_ERROR,
412                       "%s: invalid command attributes "
413                       "icid %d or eventid %d or pIntid %d or "
414                       "unmapped dte %d\n", __func__, icid, eventid,
415                       pIntid, dte.valid);
416         /*
417          * in this implementation, in case of error
418          * we ignore this command and move onto the next
419          * command in the queue
420          */
421         return CMD_CONTINUE;
422     }
423 
424     /* add ite entry to interrupt translation table */
425     ite.valid = true;
426     ite.inttype = ITE_INTTYPE_PHYSICAL;
427     ite.intid = pIntid;
428     ite.icid = icid;
429     ite.doorbell = INTID_SPURIOUS;
430     ite.vpeid = 0;
431     return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
432 }
433 
434 /*
435  * Update the Collection Table entry for @icid to @cte. Returns true
436  * on success, false if there was a memory access error.
437  */
438 static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte)
439 {
440     AddressSpace *as = &s->gicv3->dma_as;
441     uint64_t entry_addr;
442     uint64_t cteval = 0;
443     MemTxResult res = MEMTX_OK;
444 
445     if (cte->valid) {
446         /* add mapping entry to collection table */
447         cteval = FIELD_DP64(cteval, CTE, VALID, 1);
448         cteval = FIELD_DP64(cteval, CTE, RDBASE, cte->rdbase);
449     }
450 
451     entry_addr = table_entry_addr(s, &s->ct, icid, &res);
452     if (res != MEMTX_OK) {
453         /* memory access error: stall */
454         return false;
455     }
456     if (entry_addr == -1) {
457         /* No L2 table for this index: discard write and continue */
458         return true;
459     }
460 
461     address_space_stq_le(as, entry_addr, cteval, MEMTXATTRS_UNSPECIFIED, &res);
462     return res == MEMTX_OK;
463 }
464 
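/*
 * Handle the MAPC command, which maps a CollectionID to a target
 * redistributor (identified here by its processor number).
 */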
465 static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
466 {
467     uint16_t icid;
468     CTEntry cte;
469 
470     icid = cmdpkt[2] & ICID_MASK;
471 
472     cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
473     cte.rdbase &= RDBASE_PROCNUM_MASK;
474 
475     cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
476 
477     if ((icid >= s->ct.num_entries) || (cte.rdbase >= s->gicv3->num_cpu)) {
478         qemu_log_mask(LOG_GUEST_ERROR,
479                       "ITS MAPC: invalid collection table attributes "
480                       "icid %d rdbase %u\n",  icid, cte.rdbase);
481         /*
482          * in this implementation, in case of error
483          * we ignore this command and move onto the next
484          * command in the queue
485          */
486         return CMD_CONTINUE;
487     }
488 
489     return update_cte(s, icid, &cte) ? CMD_CONTINUE : CMD_STALL;
490 }
491 
492 /*
493  * Update the Device Table entry for @devid to @dte. Returns true
494  * on success, false if there was a memory access error.
495  */
496 static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte)
497 {
498     AddressSpace *as = &s->gicv3->dma_as;
499     uint64_t entry_addr;
500     uint64_t dteval = 0;
501     MemTxResult res = MEMTX_OK;
502 
503     if (dte->valid) {
504         /* add mapping entry to device table */
505         dteval = FIELD_DP64(dteval, DTE, VALID, 1);
506         dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size);
507         dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr);
508     }
509 
510     entry_addr = table_entry_addr(s, &s->dt, devid, &res);
511     if (res != MEMTX_OK) {
512         /* memory access error: stall */
513         return false;
514     }
515     if (entry_addr == -1) {
516         /* No L2 table for this index: discard write and continue */
517         return true;
518     }
519     address_space_stq_le(as, entry_addr, dteval, MEMTXATTRS_UNSPECIFIED, &res);
520     return res == MEMTX_OK;
521 }
522 
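/*
 * Handle the MAPD command, which maps a DeviceID to an Interrupt
 * Translation Table address and its supported EventID size.
 */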
523 static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
524 {
525     uint32_t devid;
526     DTEntry dte;
527 
528     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
529     dte.size = cmdpkt[1] & SIZE_MASK;
530     dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
531     dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
532 
533     if ((devid >= s->dt.num_entries) ||
534         (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
535         qemu_log_mask(LOG_GUEST_ERROR,
536                       "ITS MAPD: invalid device table attributes "
537                       "devid %d or size %d\n", devid, dte.size);
538         /*
539          * in this implementation, in case of error
540          * we ignore this command and move onto the next
541          * command in the queue
542          */
543         return CMD_CONTINUE;
544     }
545 
546     return update_dte(s, devid, &dte) ? CMD_CONTINUE : CMD_STALL;
547 }
548 
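/*
 * Handle the MOVALL command, which moves all pending LPIs from one
 * redistributor to another.
 */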
549 static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
550 {
551     uint64_t rd1, rd2;
552 
553     rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1);
554     rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2);
555 
556     if (rd1 >= s->gicv3->num_cpu) {
557         qemu_log_mask(LOG_GUEST_ERROR,
558                       "%s: RDBASE1 %" PRIu64
559                       " out of range (must be less than %d)\n",
560                       __func__, rd1, s->gicv3->num_cpu);
561         return CMD_CONTINUE;
562     }
563     if (rd2 >= s->gicv3->num_cpu) {
564         qemu_log_mask(LOG_GUEST_ERROR,
565                       "%s: RDBASE2 %" PRIu64
566                       " out of range (must be less than %d)\n",
567                       __func__, rd2, s->gicv3->num_cpu);
568         return CMD_CONTINUE;
569     }
570 
571     if (rd1 == rd2) {
572         /* Move to same target must succeed as a no-op */
573         return CMD_CONTINUE;
574     }
575 
576     /* Move all pending LPIs from redistributor 1 to redistributor 2 */
577     gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]);
578 
579     return CMD_CONTINUE;
580 }
581 
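/*
 * Handle the MOVI command, which retargets a mapped event to a new
 * collection: any pending LPI is moved to the new collection's
 * redistributor and the ITE is rewritten with the new ICID.
 */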
582 static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
583 {
584     uint32_t devid, eventid;
585     uint16_t new_icid;
586     uint64_t num_eventids;
587     DTEntry dte;
588     CTEntry old_cte, new_cte;
589     ITEntry old_ite;
590 
591     devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
592     eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
593     new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID);
594 
595     if (devid >= s->dt.num_entries) {
596         qemu_log_mask(LOG_GUEST_ERROR,
597                       "%s: invalid command attributes: devid %d >= %d\n",
598                       __func__, devid, s->dt.num_entries);
599         return CMD_CONTINUE;
600     }
601     if (get_dte(s, devid, &dte) != MEMTX_OK) {
602         return CMD_STALL;
603     }
604 
605     if (!dte.valid) {
606         qemu_log_mask(LOG_GUEST_ERROR,
607                       "%s: invalid command attributes: "
608                       "invalid dte for %d\n", __func__, devid);
609         return CMD_CONTINUE;
610     }
611 
612     num_eventids = 1ULL << (dte.size + 1);
613     if (eventid >= num_eventids) {
614         qemu_log_mask(LOG_GUEST_ERROR,
615                       "%s: invalid command attributes: eventid %d >= %"
616                       PRIu64 "\n",
617                       __func__, eventid, num_eventids);
618         return CMD_CONTINUE;
619     }
620 
621     if (get_ite(s, eventid, &dte, &old_ite) != MEMTX_OK) {
622         return CMD_STALL;
623     }
624 
625     if (!old_ite.valid || old_ite.inttype != ITE_INTTYPE_PHYSICAL) {
626         qemu_log_mask(LOG_GUEST_ERROR,
627                       "%s: invalid command attributes: invalid ITE\n",
628                       __func__);
629         return CMD_CONTINUE;
630     }
631 
632     if (old_ite.icid >= s->ct.num_entries) {
633         qemu_log_mask(LOG_GUEST_ERROR,
634                       "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
635                       __func__, old_ite.icid);
636         return CMD_CONTINUE;
637     }
638 
639     if (new_icid >= s->ct.num_entries) {
640         qemu_log_mask(LOG_GUEST_ERROR,
641                       "%s: invalid command attributes: ICID 0x%x\n",
642                       __func__, new_icid);
643         return CMD_CONTINUE;
644     }
645 
646     if (get_cte(s, old_ite.icid, &old_cte) != MEMTX_OK) {
647         return CMD_STALL;
648     }
649     if (!old_cte.valid) {
650         qemu_log_mask(LOG_GUEST_ERROR,
651                       "%s: invalid command attributes: "
652                       "invalid CTE for old ICID 0x%x\n",
653                       __func__, old_ite.icid);
654         return CMD_CONTINUE;
655     }
656 
657     if (get_cte(s, new_icid, &new_cte) != MEMTX_OK) {
658         return CMD_STALL;
659     }
660     if (!new_cte.valid) {
661         qemu_log_mask(LOG_GUEST_ERROR,
662                       "%s: invalid command attributes: "
663                       "invalid CTE for new ICID 0x%x\n",
664                       __func__, new_icid);
665         return CMD_CONTINUE;
666     }
667 
668     if (old_cte.rdbase >= s->gicv3->num_cpu) {
669         qemu_log_mask(LOG_GUEST_ERROR,
670                       "%s: CTE has invalid rdbase 0x%x\n",
671                       __func__, old_cte.rdbase);
672         return CMD_CONTINUE;
673     }
674 
675     if (new_cte.rdbase >= s->gicv3->num_cpu) {
676         qemu_log_mask(LOG_GUEST_ERROR,
677                       "%s: CTE has invalid rdbase 0x%x\n",
678                       __func__, new_cte.rdbase);
679         return CMD_CONTINUE;
680     }
681 
682     if (old_cte.rdbase != new_cte.rdbase) {
683         /* Move the LPI from the old redistributor to the new one */
684         gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase],
685                              &s->gicv3->cpu[new_cte.rdbase],
686                              old_ite.intid);
687     }
688 
689     /* Update the ICID field in the interrupt translation table entry */
690     old_ite.icid = new_icid;
691     return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE : CMD_STALL;
692 }
693 
694 /*
695  * Current implementation blocks until all
696  * commands are processed
697  */
698 static void process_cmdq(GICv3ITSState *s)
699 {
700     uint32_t wr_offset = 0;
701     uint32_t rd_offset = 0;
702     uint32_t cq_offset = 0;
703     AddressSpace *as = &s->gicv3->dma_as;
704     uint8_t cmd;
705     int i;
706 
707     if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
708         return;
709     }
710 
711     wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
712 
713     if (wr_offset >= s->cq.num_entries) {
714         qemu_log_mask(LOG_GUEST_ERROR,
715                       "%s: invalid write offset "
716                       "%d\n", __func__, wr_offset);
717         return;
718     }
719 
720     rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
721 
722     if (rd_offset >= s->cq.num_entries) {
723         qemu_log_mask(LOG_GUEST_ERROR,
724                       "%s: invalid read offset "
725                       "%d\n", __func__, rd_offset);
726         return;
727     }
728 
729     while (wr_offset != rd_offset) {
730         ItsCmdResult result = CMD_CONTINUE;
731         void *hostmem;
732         hwaddr buflen;
733         uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];
734 
735         cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
736 
737         buflen = GITS_CMDQ_ENTRY_SIZE;
738         hostmem = address_space_map(as, s->cq.base_addr + cq_offset,
739                                     &buflen, false, MEMTXATTRS_UNSPECIFIED);
740         if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) {
741             if (hostmem) {
742                 address_space_unmap(as, hostmem, buflen, false, 0);
743             }
744             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
745             qemu_log_mask(LOG_GUEST_ERROR,
746                           "%s: could not read command at 0x%" PRIx64 "\n",
747                           __func__, s->cq.base_addr + cq_offset);
748             break;
749         }
750         for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) {
751             cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t));
752         }
753         address_space_unmap(as, hostmem, buflen, false, 0);
754 
755         cmd = cmdpkt[0] & CMD_MASK;
756 
757         trace_gicv3_its_process_command(rd_offset, cmd);
758 
759         switch (cmd) {
760         case GITS_CMD_INT:
761             result = process_its_cmd(s, cmdpkt, INTERRUPT);
762             break;
763         case GITS_CMD_CLEAR:
764             result = process_its_cmd(s, cmdpkt, CLEAR);
765             break;
766         case GITS_CMD_SYNC:
767             /*
768              * The current implementation processes each command
769              * synchronously as it is issued, so the internal state is
770              * already consistent by the time a SYNC command is executed.
771              * No further processing is therefore required for SYNC.
772              */
773             break;
774         case GITS_CMD_MAPD:
775             result = process_mapd(s, cmdpkt);
776             break;
777         case GITS_CMD_MAPC:
778             result = process_mapc(s, cmdpkt);
779             break;
780         case GITS_CMD_MAPTI:
781             result = process_mapti(s, cmdpkt, false);
782             break;
783         case GITS_CMD_MAPI:
784             result = process_mapti(s, cmdpkt, true);
785             break;
786         case GITS_CMD_DISCARD:
787             result = process_its_cmd(s, cmdpkt, DISCARD);
788             break;
789         case GITS_CMD_INV:
790         case GITS_CMD_INVALL:
791             /*
792              * The current implementation doesn't cache any ITS tables
793              * other than the calculated LPI priority information, so we
794              * only need to trigger an LPI priority re-calculation to stay
795              * in sync with LPI configuration table or pending table changes.
796              */
797             for (i = 0; i < s->gicv3->num_cpu; i++) {
798                 gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
799             }
800             break;
801         case GITS_CMD_MOVI:
802             result = process_movi(s, cmdpkt);
803             break;
804         case GITS_CMD_MOVALL:
805             result = process_movall(s, cmdpkt);
806             break;
807         default:
808             break;
809         }
810         if (result == CMD_CONTINUE) {
811             rd_offset++;
812             rd_offset %= s->cq.num_entries;
813             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
814         } else {
815             /* CMD_STALL */
816             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
817             qemu_log_mask(LOG_GUEST_ERROR,
818                           "%s: 0x%x cmd processing failed, stalling\n",
819                           __func__, cmd);
820             break;
821         }
822     }
823 }
824 
825 /*
826  * This function extracts the ITS Device and Collection table specific
827  * parameters (such as base_addr, size etc.) from the GITS_BASER registers.
828  * It is called when the ITS is enabled and also during post_load migration.
829  */
830 static void extract_table_params(GICv3ITSState *s)
831 {
832     uint16_t num_pages = 0;
833     uint8_t  page_sz_type;
834     uint8_t type;
835     uint32_t page_sz = 0;
836     uint64_t value;
837 
838     for (int i = 0; i < 8; i++) {
839         TableDesc *td;
840         int idbits;
841 
842         value = s->baser[i];
843 
844         if (!value) {
845             continue;
846         }
847 
848         page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);
849 
850         switch (page_sz_type) {
851         case 0:
852             page_sz = GITS_PAGE_SIZE_4K;
853             break;
854 
855         case 1:
856             page_sz = GITS_PAGE_SIZE_16K;
857             break;
858 
859         case 2:
860         case 3:
861             page_sz = GITS_PAGE_SIZE_64K;
862             break;
863 
864         default:
865             g_assert_not_reached();
866         }
867 
868         num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;
869 
870         type = FIELD_EX64(value, GITS_BASER, TYPE);
871 
872         switch (type) {
873         case GITS_BASER_TYPE_DEVICE:
874             td = &s->dt;
875             idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
876             break;
877         case GITS_BASER_TYPE_COLLECTION:
878             td = &s->ct;
879             if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
880                 idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
881             } else {
882                 /* 16-bit CollectionId supported when CIL == 0 */
883                 idbits = 16;
884             }
885             break;
886         default:
887             /*
888              * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
889              * ensures we will only see type values corresponding to
890              * the values set up in gicv3_its_reset().
891              */
892             g_assert_not_reached();
893         }
894 
895         memset(td, 0, sizeof(*td));
896         /*
897          * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
898          * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
899          * do not have a special case where the GITS_BASER<n>.Valid bit is 0
900          * for the register corresponding to the Collection table but we
901          * still have to process interrupts using non-memory-backed
902          * Collection table entries.)
903          * The specification makes it UNPREDICTABLE to enable the ITS without
904          * marking each BASER<n> as valid. We choose to handle these as if
905          * the table was zero-sized, so commands using the table will fail
906          * and interrupts requested via GITS_TRANSLATER writes will be ignored.
907          * This happens automatically by leaving the num_entries field at
908          * zero, which will be caught by the bounds checks we have before
909          * every table lookup anyway.
910          */
911         if (!FIELD_EX64(value, GITS_BASER, VALID)) {
912             continue;
913         }
914         td->page_sz = page_sz;
915         td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
916         td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
917         td->base_addr = baser_base_addr(value, page_sz);
918         if (!td->indirect) {
919             td->num_entries = (num_pages * page_sz) / td->entry_sz;
920         } else {
921             td->num_entries = (((num_pages * page_sz) /
922                                   L1TABLE_ENTRY_SIZE) *
923                                  (page_sz / td->entry_sz));
924         }
925         td->num_entries = MIN(td->num_entries, 1ULL << idbits);
926     }
927 }
928 
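/*
 * Extract the command queue size and base address from the GITS_CBASER
 * register; an invalid CBASER leaves the queue descriptor zeroed.
 */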
929 static void extract_cmdq_params(GICv3ITSState *s)
930 {
931     uint16_t num_pages = 0;
932     uint64_t value = s->cbaser;
933 
934     num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
935 
936     memset(&s->cq, 0 , sizeof(s->cq));
937 
938     if (FIELD_EX64(value, GITS_CBASER, VALID)) {
939         s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
940                              GITS_CMDQ_ENTRY_SIZE;
941         s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
942         s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
943     }
944 }
945 
946 static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
947                                               uint64_t *data, unsigned size,
948                                               MemTxAttrs attrs)
949 {
950     /*
951      * GITS_TRANSLATER is write-only, and all other addresses
952      * in the interrupt translation space frame are RES0.
953      */
954     *data = 0;
955     return MEMTX_OK;
956 }
957 
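/*
 * Handle a write to the GITS_TRANSLATER register in the translation
 * space frame: the written data is the EventID and the requester ID
 * from the transaction attributes is the DeviceID.
 */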
958 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
959                                                uint64_t data, unsigned size,
960                                                MemTxAttrs attrs)
961 {
962     GICv3ITSState *s = (GICv3ITSState *)opaque;
963     bool result = true;
964 
965     trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);
966 
967     switch (offset) {
968     case GITS_TRANSLATER:
969         if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
970             result = do_process_its_cmd(s, attrs.requester_id, data, NONE);
971         }
972         break;
973     default:
974         break;
975     }
976 
977     if (result) {
978         return MEMTX_OK;
979     } else {
980         return MEMTX_ERROR;
981     }
982 }
983 
984 static bool its_writel(GICv3ITSState *s, hwaddr offset,
985                               uint64_t value, MemTxAttrs attrs)
986 {
987     bool result = true;
988     int index;
989 
990     switch (offset) {
991     case GITS_CTLR:
992         if (value & R_GITS_CTLR_ENABLED_MASK) {
993             s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
994             extract_table_params(s);
995             extract_cmdq_params(s);
996             process_cmdq(s);
997         } else {
998             s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
999         }
1000         break;
1001     case GITS_CBASER:
1002         /*
1003          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1004          *                 already enabled
1005          */
1006         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1007             s->cbaser = deposit64(s->cbaser, 0, 32, value);
1008             s->creadr = 0;
1009         }
1010         break;
1011     case GITS_CBASER + 4:
1012         /*
1013          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1014          *                 already enabled
1015          */
1016         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1017             s->cbaser = deposit64(s->cbaser, 32, 32, value);
1018             s->creadr = 0;
1019         }
1020         break;
1021     case GITS_CWRITER:
1022         s->cwriter = deposit64(s->cwriter, 0, 32,
1023                                (value & ~R_GITS_CWRITER_RETRY_MASK));
1024         if (s->cwriter != s->creadr) {
1025             process_cmdq(s);
1026         }
1027         break;
1028     case GITS_CWRITER + 4:
1029         s->cwriter = deposit64(s->cwriter, 32, 32, value);
1030         break;
1031     case GITS_CREADR:
1032         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1033             s->creadr = deposit64(s->creadr, 0, 32,
1034                                   (value & ~R_GITS_CREADR_STALLED_MASK));
1035         } else {
1036             /* RO register, ignore the write */
1037             qemu_log_mask(LOG_GUEST_ERROR,
1038                           "%s: invalid guest write to RO register at offset "
1039                           TARGET_FMT_plx "\n", __func__, offset);
1040         }
1041         break;
1042     case GITS_CREADR + 4:
1043         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1044             s->creadr = deposit64(s->creadr, 32, 32, value);
1045         } else {
1046             /* RO register, ignore the write */
1047             qemu_log_mask(LOG_GUEST_ERROR,
1048                           "%s: invalid guest write to RO register at offset "
1049                           TARGET_FMT_plx "\n", __func__, offset);
1050         }
1051         break;
1052     case GITS_BASER ... GITS_BASER + 0x3f:
1053         /*
1054          * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
1055          *                 already enabled
1056          */
1057         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1058             index = (offset - GITS_BASER) / 8;
1059 
1060             if (s->baser[index] == 0) {
1061                 /* Unimplemented GITS_BASERn: RAZ/WI */
1062                 break;
1063             }
1064             if (offset & 7) {
1065                 value <<= 32;
1066                 value &= ~GITS_BASER_RO_MASK;
1067                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
1068                 s->baser[index] |= value;
1069             } else {
1070                 value &= ~GITS_BASER_RO_MASK;
1071                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
1072                 s->baser[index] |= value;
1073             }
1074         }
1075         break;
1076     case GITS_IIDR:
1077     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1078         /* RO registers, ignore the write */
1079         qemu_log_mask(LOG_GUEST_ERROR,
1080                       "%s: invalid guest write to RO register at offset "
1081                       TARGET_FMT_plx "\n", __func__, offset);
1082         break;
1083     default:
1084         result = false;
1085         break;
1086     }
1087     return result;
1088 }
1089 
1090 static bool its_readl(GICv3ITSState *s, hwaddr offset,
1091                              uint64_t *data, MemTxAttrs attrs)
1092 {
1093     bool result = true;
1094     int index;
1095 
1096     switch (offset) {
1097     case GITS_CTLR:
1098         *data = s->ctlr;
1099         break;
1100     case GITS_IIDR:
1101         *data = gicv3_iidr();
1102         break;
1103     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1104         /* ID registers */
1105         *data = gicv3_idreg(offset - GITS_IDREGS);
1106         break;
1107     case GITS_TYPER:
1108         *data = extract64(s->typer, 0, 32);
1109         break;
1110     case GITS_TYPER + 4:
1111         *data = extract64(s->typer, 32, 32);
1112         break;
1113     case GITS_CBASER:
1114         *data = extract64(s->cbaser, 0, 32);
1115         break;
1116     case GITS_CBASER + 4:
1117         *data = extract64(s->cbaser, 32, 32);
1118         break;
1119     case GITS_CREADR:
1120         *data = extract64(s->creadr, 0, 32);
1121         break;
1122     case GITS_CREADR + 4:
1123         *data = extract64(s->creadr, 32, 32);
1124         break;
1125     case GITS_CWRITER:
1126         *data = extract64(s->cwriter, 0, 32);
1127         break;
1128     case GITS_CWRITER + 4:
1129         *data = extract64(s->cwriter, 32, 32);
1130         break;
1131     case GITS_BASER ... GITS_BASER + 0x3f:
1132         index = (offset - GITS_BASER) / 8;
1133         if (offset & 7) {
1134             *data = extract64(s->baser[index], 32, 32);
1135         } else {
1136             *data = extract64(s->baser[index], 0, 32);
1137         }
1138         break;
1139     default:
1140         result = false;
1141         break;
1142     }
1143     return result;
1144 }
1145 
1146 static bool its_writell(GICv3ITSState *s, hwaddr offset,
1147                                uint64_t value, MemTxAttrs attrs)
1148 {
1149     bool result = true;
1150     int index;
1151 
1152     switch (offset) {
1153     case GITS_BASER ... GITS_BASER + 0x3f:
1154         /*
1155          * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
1156          *                 already enabled
1157          */
1158         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1159             index = (offset - GITS_BASER) / 8;
1160             if (s->baser[index] == 0) {
1161                 /* Unimplemented GITS_BASERn: RAZ/WI */
1162                 break;
1163             }
1164             s->baser[index] &= GITS_BASER_RO_MASK;
1165             s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
1166         }
1167         break;
1168     case GITS_CBASER:
1169         /*
1170          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1171          *                 already enabled
1172          */
1173         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1174             s->cbaser = value;
1175             s->creadr = 0;
1176         }
1177         break;
1178     case GITS_CWRITER:
1179         s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
1180         if (s->cwriter != s->creadr) {
1181             process_cmdq(s);
1182         }
1183         break;
1184     case GITS_CREADR:
1185         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1186             s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
1187         } else {
1188             /* RO register, ignore the write */
1189             qemu_log_mask(LOG_GUEST_ERROR,
1190                           "%s: invalid guest write to RO register at offset "
1191                           TARGET_FMT_plx "\n", __func__, offset);
1192         }
1193         break;
1194     case GITS_TYPER:
1195         /* RO registers, ignore the write */
1196         qemu_log_mask(LOG_GUEST_ERROR,
1197                       "%s: invalid guest write to RO register at offset "
1198                       TARGET_FMT_plx "\n", __func__, offset);
1199         break;
1200     default:
1201         result = false;
1202         break;
1203     }
1204     return result;
1205 }
1206 
1207 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1208                               uint64_t *data, MemTxAttrs attrs)
1209 {
1210     bool result = true;
1211     int index;
1212 
1213     switch (offset) {
1214     case GITS_TYPER:
1215         *data = s->typer;
1216         break;
1217     case GITS_BASER ... GITS_BASER + 0x3f:
1218         index = (offset - GITS_BASER) / 8;
1219         *data = s->baser[index];
1220         break;
1221     case GITS_CBASER:
1222         *data = s->cbaser;
1223         break;
1224     case GITS_CREADR:
1225         *data = s->creadr;
1226         break;
1227     case GITS_CWRITER:
1228         *data = s->cwriter;
1229         break;
1230     default:
1231         result = false;
1232         break;
1233     }
1234     return result;
1235 }
1236 
1237 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1238                                   unsigned size, MemTxAttrs attrs)
1239 {
1240     GICv3ITSState *s = (GICv3ITSState *)opaque;
1241     bool result;
1242 
1243     switch (size) {
1244     case 4:
1245         result = its_readl(s, offset, data, attrs);
1246         break;
1247     case 8:
1248         result = its_readll(s, offset, data, attrs);
1249         break;
1250     default:
1251         result = false;
1252         break;
1253     }
1254 
1255     if (!result) {
1256         qemu_log_mask(LOG_GUEST_ERROR,
1257                       "%s: invalid guest read at offset " TARGET_FMT_plx
1258                       "size %u\n", __func__, offset, size);
1259         trace_gicv3_its_badread(offset, size);
1260         /*
1261          * The spec requires that reserved registers are RAZ/WI;
1262          * so use false returns from leaf functions as a way to
1263          * trigger the guest-error logging but don't return it to
1264          * the caller, or we'll cause a spurious guest data abort.
1265          */
1266         *data = 0;
1267     } else {
1268         trace_gicv3_its_read(offset, *data, size);
1269     }
1270     return MEMTX_OK;
1271 }
1272 
1273 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1274                                    unsigned size, MemTxAttrs attrs)
1275 {
1276     GICv3ITSState *s = (GICv3ITSState *)opaque;
1277     bool result;
1278 
1279     switch (size) {
1280     case 4:
1281         result = its_writel(s, offset, data, attrs);
1282         break;
1283     case 8:
1284         result = its_writell(s, offset, data, attrs);
1285         break;
1286     default:
1287         result = false;
1288         break;
1289     }
1290 
1291     if (!result) {
1292         qemu_log_mask(LOG_GUEST_ERROR,
1293                       "%s: invalid guest write at offset " TARGET_FMT_plx
1294                       "size %u\n", __func__, offset, size);
1295         trace_gicv3_its_badwrite(offset, data, size);
1296         /*
1297          * The spec requires that reserved registers are RAZ/WI;
1298          * so use false returns from leaf functions as a way to
1299          * trigger the guest-error logging but don't return it to
1300          * the caller, or we'll cause a spurious guest data abort.
1301          */
1302     } else {
1303         trace_gicv3_its_write(offset, data, size);
1304     }
1305     return MEMTX_OK;
1306 }
1307 
1308 static const MemoryRegionOps gicv3_its_control_ops = {
1309     .read_with_attrs = gicv3_its_read,
1310     .write_with_attrs = gicv3_its_write,
1311     .valid.min_access_size = 4,
1312     .valid.max_access_size = 8,
1313     .impl.min_access_size = 4,
1314     .impl.max_access_size = 8,
1315     .endianness = DEVICE_NATIVE_ENDIAN,
1316 };
1317 
1318 static const MemoryRegionOps gicv3_its_translation_ops = {
1319     .read_with_attrs = gicv3_its_translation_read,
1320     .write_with_attrs = gicv3_its_translation_write,
1321     .valid.min_access_size = 2,
1322     .valid.max_access_size = 4,
1323     .impl.min_access_size = 2,
1324     .impl.max_access_size = 4,
1325     .endianness = DEVICE_NATIVE_ENDIAN,
1326 };
1327 
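/*
 * Realize the ITS device: check that every redistributor supports
 * physical LPIs, map the MMIO regions and set up the fixed fields
 * of GITS_TYPER.
 */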
1328 static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
1329 {
1330     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1331     int i;
1332 
1333     for (i = 0; i < s->gicv3->num_cpu; i++) {
1334         if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
1335             error_setg(errp, "Physical LPI not supported by CPU %d", i);
1336             return;
1337         }
1338     }
1339 
1340     gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
1341 
1342     /* set the ITS default features supported */
1343     s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
1344     s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
1345                           ITS_ITT_ENTRY_SIZE - 1);
1346     s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
1347     s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
1348     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
1349     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
1350 }
1351 
1352 static void gicv3_its_reset(DeviceState *dev)
1353 {
1354     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1355     GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);
1356 
1357     c->parent_reset(dev);
1358 
1359     /* Quiescent bit reset to 1 */
1360     s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);
1361 
1362     /*
1363      * setting GITS_BASER0.Type = 0b001 (Device)
1364      *         GITS_BASER1.Type = 0b100 (Collection Table)
1365      *         GITS_BASER<n>.Type, where n = 2 to 7, is 0b00 (Unimplemented)
1366      *         GITS_BASER<0,1>.Page_Size = 64KB
1367      * and default translation table entry size to 16 bytes
1368      */
1369     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
1370                              GITS_BASER_TYPE_DEVICE);
1371     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
1372                              GITS_BASER_PAGESIZE_64K);
1373     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
1374                              GITS_DTE_SIZE - 1);
1375 
1376     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
1377                              GITS_BASER_TYPE_COLLECTION);
1378     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
1379                              GITS_BASER_PAGESIZE_64K);
1380     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
1381                              GITS_CTE_SIZE - 1);
1382 }
1383 
1384 static void gicv3_its_post_load(GICv3ITSState *s)
1385 {
1386     if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1387         extract_table_params(s);
1388         extract_cmdq_params(s);
1389     }
1390 }
1391 
1392 static Property gicv3_its_props[] = {
1393     DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
1394                      GICv3State *),
1395     DEFINE_PROP_END_OF_LIST(),
1396 };
1397 
1398 static void gicv3_its_class_init(ObjectClass *klass, void *data)
1399 {
1400     DeviceClass *dc = DEVICE_CLASS(klass);
1401     GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
1402     GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);
1403 
1404     dc->realize = gicv3_arm_its_realize;
1405     device_class_set_props(dc, gicv3_its_props);
1406     device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
1407     icc->post_load = gicv3_its_post_load;
1408 }
1409 
1410 static const TypeInfo gicv3_its_info = {
1411     .name = TYPE_ARM_GICV3_ITS,
1412     .parent = TYPE_ARM_GICV3_ITS_COMMON,
1413     .instance_size = sizeof(GICv3ITSState),
1414     .class_init = gicv3_its_class_init,
1415     .class_size = sizeof(GICv3ITSClass),
1416 };
1417 
1418 static void gicv3_its_register_types(void)
1419 {
1420     type_register_static(&gicv3_its_info);
1421 }
1422 
1423 type_init(gicv3_its_register_types)
1424