xref: /openbmc/qemu/hw/intc/arm_gicv3_its.c (revision 9de53de60cb8638e9c2e02b25ec4445791672aeb)
1 /*
2  * ITS emulation for a GICv3-based system
3  *
4  * Copyright Linaro.org 2021
5  *
6  * Authors:
7  *  Shashi Mallela <shashi.mallela@linaro.org>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10  * option) any later version.  See the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qemu/log.h"
16 #include "trace.h"
17 #include "hw/qdev-properties.h"
18 #include "hw/intc/arm_gicv3_its_common.h"
19 #include "gicv3_internal.h"
20 #include "qom/object.h"
21 #include "qapi/error.h"
22 
23 typedef struct GICv3ITSClass GICv3ITSClass;
24 /* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
25 DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
26                      ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)
27 
28 struct GICv3ITSClass {
29     GICv3ITSCommonClass parent_class;
30     void (*parent_reset)(DeviceState *dev);
31 };
32 
33 /*
34  * This is an internal enum used to distinguish between LPI triggered
35  * via command queue and LPI triggered via gits_translater write.
36  */
37 typedef enum ItsCmdType {
38     NONE = 0, /* internal indication for GITS_TRANSLATER write */
39     CLEAR = 1,
40     DISCARD = 2,
41     INTERRUPT = 3,
42 } ItsCmdType;
43 
44 typedef struct DTEntry {
45     bool valid;
46     unsigned size;
47     uint64_t ittaddr;
48 } DTEntry;
49 
50 typedef struct CTEntry {
51     bool valid;
52     uint32_t rdbase;
53 } CTEntry;
54 
55 typedef struct ITEntry {
56     bool valid;
57     int inttype;
58     uint32_t intid;
59     uint32_t doorbell;
60     uint32_t icid;
61     uint32_t vpeid;
62 } ITEntry;
63 
64 
65 /*
66  * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
67  * if a command parameter is not correct. These include both "stall
68  * processing of the command queue" and "ignore this command, and
69  * keep processing the queue". In our implementation we choose that
70  * memory transaction errors reading the command packet provoke a
71  * stall, but errors in parameters cause us to ignore the command
72  * and continue processing.
73  * The process_* functions which handle individual ITS commands all
74  * return an ItsCmdResult which tells process_cmdq() whether it should
75  * stall or keep going.
76  */
77 typedef enum ItsCmdResult {
78     CMD_STALL = 0,
79     CMD_CONTINUE = 1,
80 } ItsCmdResult;
81 
82 /* True if the ITS supports the GICv4 virtual LPI feature */
83 static bool its_feature_virtual(GICv3ITSState *s)
84 {
85     return s->typer & R_GITS_TYPER_VIRTUAL_MASK;
86 }
87 
88 static inline bool intid_in_lpi_range(uint32_t id)
89 {
90     return id >= GICV3_LPI_INTID_START &&
91         id < (1 << (GICD_TYPER_IDBITS + 1));
92 }
93 
94 static inline bool valid_doorbell(uint32_t id)
95 {
96     /* Doorbell fields may be an LPI, or 1023 to mean "no doorbell" */
97     return id == INTID_SPURIOUS || intid_in_lpi_range(id);
98 }
99 
100 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
101 {
102     uint64_t result = 0;
103 
104     switch (page_sz) {
105     case GITS_PAGE_SIZE_4K:
106     case GITS_PAGE_SIZE_16K:
107         result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
108         break;
109 
110     case GITS_PAGE_SIZE_64K:
111         result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
112         result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
113         break;
114 
115     default:
116         break;
117     }
118     return result;
119 }
120 
121 static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
122                                  uint32_t idx, MemTxResult *res)
123 {
124     /*
125      * Given a TableDesc describing one of the ITS in-guest-memory
126      * tables and an index into it, return the guest address
127      * corresponding to that table entry.
128      * If there was a memory error reading the L1 table of an
129      * indirect table, *res is set accordingly, and we return -1.
130      * If the L1 table entry is marked not valid, we return -1 with
131      * *res set to MEMTX_OK.
132      *
133      * The specification defines the format of level 1 entries of a
134      * 2-level table, but the format of level 2 entries and the format
135      * of flat-mapped tables is IMPDEF.
136      */
137     AddressSpace *as = &s->gicv3->dma_as;
138     uint32_t l2idx;
139     uint64_t l2;
140     uint32_t num_l2_entries;
141 
142     *res = MEMTX_OK;
143 
144     if (!td->indirect) {
145         /* Single level table */
146         return td->base_addr + idx * td->entry_sz;
147     }
148 
149     /* Two level table */
150     l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);
151 
152     l2 = address_space_ldq_le(as,
153                               td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
154                               MEMTXATTRS_UNSPECIFIED, res);
155     if (*res != MEMTX_OK) {
156         return -1;
157     }
158     if (!(l2 & L2_TABLE_VALID_MASK)) {
159         return -1;
160     }
161 
162     num_l2_entries = td->page_sz / td->entry_sz;
163     return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
164 }
165 
166 /*
167  * Read the Collection Table entry at index @icid. On success (including
168  * successfully determining that there is no valid CTE for this index),
169  * we return MEMTX_OK and populate the CTEntry struct @cte accordingly.
170  * If there is an error reading memory then we return the error code.
171  */
172 static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte)
173 {
174     AddressSpace *as = &s->gicv3->dma_as;
175     MemTxResult res = MEMTX_OK;
176     uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, &res);
177     uint64_t cteval;
178 
179     if (entry_addr == -1) {
180         /* No L2 table entry, i.e. no valid CTE, or a memory error */
181         cte->valid = false;
182         goto out;
183     }
184 
185     cteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
186     if (res != MEMTX_OK) {
187         goto out;
188     }
189     cte->valid = FIELD_EX64(cteval, CTE, VALID);
190     cte->rdbase = FIELD_EX64(cteval, CTE, RDBASE);
191 out:
192     if (res != MEMTX_OK) {
193         trace_gicv3_its_cte_read_fault(icid);
194     } else {
195         trace_gicv3_its_cte_read(icid, cte->valid, cte->rdbase);
196     }
197     return res;
198 }
199 
200 /*
201  * Update the Interrupt Table entry at index @evinted in the table specified
202  * by the dte @dte. Returns true on success, false if there was a memory
203  * access error.
204  */
205 static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
206                        const ITEntry *ite)
207 {
208     AddressSpace *as = &s->gicv3->dma_as;
209     MemTxResult res = MEMTX_OK;
210     hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
211     uint64_t itel = 0;
212     uint32_t iteh = 0;
213 
214     trace_gicv3_its_ite_write(dte->ittaddr, eventid, ite->valid,
215                               ite->inttype, ite->intid, ite->icid,
216                               ite->vpeid, ite->doorbell);
217 
218     if (ite->valid) {
219         itel = FIELD_DP64(itel, ITE_L, VALID, 1);
220         itel = FIELD_DP64(itel, ITE_L, INTTYPE, ite->inttype);
221         itel = FIELD_DP64(itel, ITE_L, INTID, ite->intid);
222         itel = FIELD_DP64(itel, ITE_L, ICID, ite->icid);
223         itel = FIELD_DP64(itel, ITE_L, VPEID, ite->vpeid);
224         iteh = FIELD_DP32(iteh, ITE_H, DOORBELL, ite->doorbell);
225     }
226 
227     address_space_stq_le(as, iteaddr, itel, MEMTXATTRS_UNSPECIFIED, &res);
228     if (res != MEMTX_OK) {
229         return false;
230     }
231     address_space_stl_le(as, iteaddr + 8, iteh, MEMTXATTRS_UNSPECIFIED, &res);
232     return res == MEMTX_OK;
233 }
234 
235 /*
236  * Read the Interrupt Table entry at index @eventid from the table specified
237  * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry
238  * struct @ite accordingly. If there is an error reading memory then we return
239  * the error code.
240  */
241 static MemTxResult get_ite(GICv3ITSState *s, uint32_t eventid,
242                            const DTEntry *dte, ITEntry *ite)
243 {
244     AddressSpace *as = &s->gicv3->dma_as;
245     MemTxResult res = MEMTX_OK;
246     uint64_t itel;
247     uint32_t iteh;
248     hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
249 
250     itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, &res);
251     if (res != MEMTX_OK) {
252         trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
253         return res;
254     }
255 
256     iteh = address_space_ldl_le(as, iteaddr + 8, MEMTXATTRS_UNSPECIFIED, &res);
257     if (res != MEMTX_OK) {
258         trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
259         return res;
260     }
261 
262     ite->valid = FIELD_EX64(itel, ITE_L, VALID);
263     ite->inttype = FIELD_EX64(itel, ITE_L, INTTYPE);
264     ite->intid = FIELD_EX64(itel, ITE_L, INTID);
265     ite->icid = FIELD_EX64(itel, ITE_L, ICID);
266     ite->vpeid = FIELD_EX64(itel, ITE_L, VPEID);
267     ite->doorbell = FIELD_EX64(iteh, ITE_H, DOORBELL);
268     trace_gicv3_its_ite_read(dte->ittaddr, eventid, ite->valid,
269                              ite->inttype, ite->intid, ite->icid,
270                              ite->vpeid, ite->doorbell);
271     return MEMTX_OK;
272 }
273 
274 /*
275  * Read the Device Table entry at index @devid. On success (including
276  * successfully determining that there is no valid DTE for this index),
277  * we return MEMTX_OK and populate the DTEntry struct accordingly.
278  * If there is an error reading memory then we return the error code.
279  */
280 static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte)
281 {
282     MemTxResult res = MEMTX_OK;
283     AddressSpace *as = &s->gicv3->dma_as;
284     uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res);
285     uint64_t dteval;
286 
287     if (entry_addr == -1) {
288         /* No L2 table entry, i.e. no valid DTE, or a memory error */
289         dte->valid = false;
290         goto out;
291     }
292     dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
293     if (res != MEMTX_OK) {
294         goto out;
295     }
296     dte->valid = FIELD_EX64(dteval, DTE, VALID);
297     dte->size = FIELD_EX64(dteval, DTE, SIZE);
298     /* DTE word field stores bits [51:8] of the ITT address */
299     dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT;
300 out:
301     if (res != MEMTX_OK) {
302         trace_gicv3_its_dte_read_fault(devid);
303     } else {
304         trace_gicv3_its_dte_read(devid, dte->valid, dte->size, dte->ittaddr);
305     }
306     return res;
307 }
308 
309 /*
310  * This function handles the processing of following commands based on
311  * the ItsCmdType parameter passed:-
312  * 1. triggering of lpi interrupt translation via ITS INT command
313  * 2. triggering of lpi interrupt translation via gits_translater register
314  * 3. handling of ITS CLEAR command
315  * 4. handling of ITS DISCARD command
316  */
317 static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
318                                        uint32_t eventid, ItsCmdType cmd)
319 {
320     uint64_t num_eventids;
321     DTEntry dte;
322     CTEntry cte;
323     ITEntry ite;
324 
325     if (devid >= s->dt.num_entries) {
326         qemu_log_mask(LOG_GUEST_ERROR,
327                       "%s: invalid command attributes: devid %d>=%d",
328                       __func__, devid, s->dt.num_entries);
329         return CMD_CONTINUE;
330     }
331 
332     if (get_dte(s, devid, &dte) != MEMTX_OK) {
333         return CMD_STALL;
334     }
335     if (!dte.valid) {
336         qemu_log_mask(LOG_GUEST_ERROR,
337                       "%s: invalid command attributes: "
338                       "invalid dte for %d\n", __func__, devid);
339         return CMD_CONTINUE;
340     }
341 
342     num_eventids = 1ULL << (dte.size + 1);
343     if (eventid >= num_eventids) {
344         qemu_log_mask(LOG_GUEST_ERROR,
345                       "%s: invalid command attributes: eventid %d >= %"
346                       PRId64 "\n",
347                       __func__, eventid, num_eventids);
348         return CMD_CONTINUE;
349     }
350 
351     if (get_ite(s, eventid, &dte, &ite) != MEMTX_OK) {
352         return CMD_STALL;
353     }
354 
355     if (!ite.valid || ite.inttype != ITE_INTTYPE_PHYSICAL) {
356         qemu_log_mask(LOG_GUEST_ERROR,
357                       "%s: invalid command attributes: invalid ITE\n",
358                       __func__);
359         return CMD_CONTINUE;
360     }
361 
362     if (ite.icid >= s->ct.num_entries) {
363         qemu_log_mask(LOG_GUEST_ERROR,
364                       "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
365                       __func__, ite.icid);
366         return CMD_CONTINUE;
367     }
368 
369     if (get_cte(s, ite.icid, &cte) != MEMTX_OK) {
370         return CMD_STALL;
371     }
372     if (!cte.valid) {
373         qemu_log_mask(LOG_GUEST_ERROR,
374                       "%s: invalid command attributes: invalid CTE\n",
375                       __func__);
376         return CMD_CONTINUE;
377     }
378 
379     /*
380      * Current implementation only supports rdbase == procnum
381      * Hence rdbase physical address is ignored
382      */
383     if (cte.rdbase >= s->gicv3->num_cpu) {
384         return CMD_CONTINUE;
385     }
386 
387     if ((cmd == CLEAR) || (cmd == DISCARD)) {
388         gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 0);
389     } else {
390         gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 1);
391     }
392 
393     if (cmd == DISCARD) {
394         ITEntry ite = {};
395         /* remove mapping from interrupt translation table */
396         ite.valid = false;
397         return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
398     }
399     return CMD_CONTINUE;
400 }
401 
402 static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt,
403                                     ItsCmdType cmd)
404 {
405     uint32_t devid, eventid;
406 
407     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
408     eventid = cmdpkt[1] & EVENTID_MASK;
409     switch (cmd) {
410     case INTERRUPT:
411         trace_gicv3_its_cmd_int(devid, eventid);
412         break;
413     case CLEAR:
414         trace_gicv3_its_cmd_clear(devid, eventid);
415         break;
416     case DISCARD:
417         trace_gicv3_its_cmd_discard(devid, eventid);
418         break;
419     default:
420         g_assert_not_reached();
421     }
422     return do_process_its_cmd(s, devid, eventid, cmd);
423 }
424 
425 static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
426                                   bool ignore_pInt)
427 {
428     uint32_t devid, eventid;
429     uint32_t pIntid = 0;
430     uint64_t num_eventids;
431     uint16_t icid = 0;
432     DTEntry dte;
433     ITEntry ite;
434 
435     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
436     eventid = cmdpkt[1] & EVENTID_MASK;
437     icid = cmdpkt[2] & ICID_MASK;
438 
439     if (ignore_pInt) {
440         pIntid = eventid;
441         trace_gicv3_its_cmd_mapi(devid, eventid, icid);
442     } else {
443         pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT;
444         trace_gicv3_its_cmd_mapti(devid, eventid, icid, pIntid);
445     }
446 
447     if (devid >= s->dt.num_entries) {
448         qemu_log_mask(LOG_GUEST_ERROR,
449                       "%s: invalid command attributes: devid %d>=%d",
450                       __func__, devid, s->dt.num_entries);
451         return CMD_CONTINUE;
452     }
453 
454     if (get_dte(s, devid, &dte) != MEMTX_OK) {
455         return CMD_STALL;
456     }
457     num_eventids = 1ULL << (dte.size + 1);
458 
459     if (icid >= s->ct.num_entries) {
460         qemu_log_mask(LOG_GUEST_ERROR,
461                       "%s: invalid ICID 0x%x >= 0x%x\n",
462                       __func__, icid, s->ct.num_entries);
463         return CMD_CONTINUE;
464     }
465 
466     if (!dte.valid) {
467         qemu_log_mask(LOG_GUEST_ERROR,
468                       "%s: no valid DTE for devid 0x%x\n", __func__, devid);
469         return CMD_CONTINUE;
470     }
471 
472     if (eventid >= num_eventids) {
473         qemu_log_mask(LOG_GUEST_ERROR,
474                       "%s: invalid event ID 0x%x >= 0x%" PRIx64 "\n",
475                       __func__, eventid, num_eventids);
476         return CMD_CONTINUE;
477     }
478 
479     if (!intid_in_lpi_range(pIntid)) {
480         qemu_log_mask(LOG_GUEST_ERROR,
481                       "%s: invalid interrupt ID 0x%x\n", __func__, pIntid);
482         return CMD_CONTINUE;
483     }
484 
485     /* add ite entry to interrupt translation table */
486     ite.valid = true;
487     ite.inttype = ITE_INTTYPE_PHYSICAL;
488     ite.intid = pIntid;
489     ite.icid = icid;
490     ite.doorbell = INTID_SPURIOUS;
491     ite.vpeid = 0;
492     return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
493 }
494 
495 static ItsCmdResult process_vmapti(GICv3ITSState *s, const uint64_t *cmdpkt,
496                                    bool ignore_vintid)
497 {
498     uint32_t devid, eventid, vintid, doorbell, vpeid;
499     uint32_t num_eventids;
500     DTEntry dte;
501     ITEntry ite;
502 
503     if (!its_feature_virtual(s)) {
504         return CMD_CONTINUE;
505     }
506 
507     devid = FIELD_EX64(cmdpkt[0], VMAPTI_0, DEVICEID);
508     eventid = FIELD_EX64(cmdpkt[1], VMAPTI_1, EVENTID);
509     vpeid = FIELD_EX64(cmdpkt[1], VMAPTI_1, VPEID);
510     doorbell = FIELD_EX64(cmdpkt[2], VMAPTI_2, DOORBELL);
511     if (ignore_vintid) {
512         vintid = eventid;
513         trace_gicv3_its_cmd_vmapi(devid, eventid, vpeid, doorbell);
514     } else {
515         vintid = FIELD_EX64(cmdpkt[2], VMAPTI_2, VINTID);
516         trace_gicv3_its_cmd_vmapti(devid, eventid, vpeid, vintid, doorbell);
517     }
518 
519     if (devid >= s->dt.num_entries) {
520         qemu_log_mask(LOG_GUEST_ERROR,
521                       "%s: invalid DeviceID 0x%x (must be less than 0x%x)\n",
522                       __func__, devid, s->dt.num_entries);
523         return CMD_CONTINUE;
524     }
525 
526     if (get_dte(s, devid, &dte) != MEMTX_OK) {
527         return CMD_STALL;
528     }
529 
530     if (!dte.valid) {
531         qemu_log_mask(LOG_GUEST_ERROR,
532                       "%s: no entry in device table for DeviceID 0x%x\n",
533                       __func__, devid);
534         return CMD_CONTINUE;
535     }
536 
537     num_eventids = 1ULL << (dte.size + 1);
538 
539     if (eventid >= num_eventids) {
540         qemu_log_mask(LOG_GUEST_ERROR,
541                       "%s: EventID 0x%x too large for DeviceID 0x%x "
542                       "(must be less than 0x%x)\n",
543                       __func__, eventid, devid, num_eventids);
544         return CMD_CONTINUE;
545     }
546     if (!intid_in_lpi_range(vintid)) {
547         qemu_log_mask(LOG_GUEST_ERROR,
548                       "%s: VIntID 0x%x not a valid LPI\n",
549                       __func__, vintid);
550         return CMD_CONTINUE;
551     }
552     if (!valid_doorbell(doorbell)) {
553         qemu_log_mask(LOG_GUEST_ERROR,
554                       "%s: Doorbell %d not 1023 and not a valid LPI\n",
555                       __func__, doorbell);
556         return CMD_CONTINUE;
557     }
558     if (vpeid >= s->vpet.num_entries) {
559         qemu_log_mask(LOG_GUEST_ERROR,
560                       "%s: VPEID 0x%x out of range (must be less than 0x%x)\n",
561                       __func__, vpeid, s->vpet.num_entries);
562         return CMD_CONTINUE;
563     }
564     /* add ite entry to interrupt translation table */
565     ite.valid = true;
566     ite.inttype = ITE_INTTYPE_VIRTUAL;
567     ite.intid = vintid;
568     ite.icid = 0;
569     ite.doorbell = doorbell;
570     ite.vpeid = vpeid;
571     return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
572 }
573 
574 /*
575  * Update the Collection Table entry for @icid to @cte. Returns true
576  * on success, false if there was a memory access error.
577  */
578 static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte)
579 {
580     AddressSpace *as = &s->gicv3->dma_as;
581     uint64_t entry_addr;
582     uint64_t cteval = 0;
583     MemTxResult res = MEMTX_OK;
584 
585     trace_gicv3_its_cte_write(icid, cte->valid, cte->rdbase);
586 
587     if (cte->valid) {
588         /* add mapping entry to collection table */
589         cteval = FIELD_DP64(cteval, CTE, VALID, 1);
590         cteval = FIELD_DP64(cteval, CTE, RDBASE, cte->rdbase);
591     }
592 
593     entry_addr = table_entry_addr(s, &s->ct, icid, &res);
594     if (res != MEMTX_OK) {
595         /* memory access error: stall */
596         return false;
597     }
598     if (entry_addr == -1) {
599         /* No L2 table for this index: discard write and continue */
600         return true;
601     }
602 
603     address_space_stq_le(as, entry_addr, cteval, MEMTXATTRS_UNSPECIFIED, &res);
604     return res == MEMTX_OK;
605 }
606 
607 static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
608 {
609     uint16_t icid;
610     CTEntry cte;
611 
612     icid = cmdpkt[2] & ICID_MASK;
613     cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
614     if (cte.valid) {
615         cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
616         cte.rdbase &= RDBASE_PROCNUM_MASK;
617     } else {
618         cte.rdbase = 0;
619     }
620     trace_gicv3_its_cmd_mapc(icid, cte.rdbase, cte.valid);
621 
622     if (icid >= s->ct.num_entries) {
623         qemu_log_mask(LOG_GUEST_ERROR, "ITS MAPC: invalid ICID 0x%x\n", icid);
624         return CMD_CONTINUE;
625     }
626     if (cte.valid && cte.rdbase >= s->gicv3->num_cpu) {
627         qemu_log_mask(LOG_GUEST_ERROR,
628                       "ITS MAPC: invalid RDBASE %u\n", cte.rdbase);
629         return CMD_CONTINUE;
630     }
631 
632     return update_cte(s, icid, &cte) ? CMD_CONTINUE : CMD_STALL;
633 }
634 
635 /*
636  * Update the Device Table entry for @devid to @dte. Returns true
637  * on success, false if there was a memory access error.
638  */
639 static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte)
640 {
641     AddressSpace *as = &s->gicv3->dma_as;
642     uint64_t entry_addr;
643     uint64_t dteval = 0;
644     MemTxResult res = MEMTX_OK;
645 
646     trace_gicv3_its_dte_write(devid, dte->valid, dte->size, dte->ittaddr);
647 
648     if (dte->valid) {
649         /* add mapping entry to device table */
650         dteval = FIELD_DP64(dteval, DTE, VALID, 1);
651         dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size);
652         dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr);
653     }
654 
655     entry_addr = table_entry_addr(s, &s->dt, devid, &res);
656     if (res != MEMTX_OK) {
657         /* memory access error: stall */
658         return false;
659     }
660     if (entry_addr == -1) {
661         /* No L2 table for this index: discard write and continue */
662         return true;
663     }
664     address_space_stq_le(as, entry_addr, dteval, MEMTXATTRS_UNSPECIFIED, &res);
665     return res == MEMTX_OK;
666 }
667 
668 static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
669 {
670     uint32_t devid;
671     DTEntry dte;
672 
673     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
674     dte.size = cmdpkt[1] & SIZE_MASK;
675     dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
676     dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
677 
678     trace_gicv3_its_cmd_mapd(devid, dte.size, dte.ittaddr, dte.valid);
679 
680     if (devid >= s->dt.num_entries) {
681         qemu_log_mask(LOG_GUEST_ERROR,
682                       "ITS MAPD: invalid device ID field 0x%x >= 0x%x\n",
683                       devid, s->dt.num_entries);
684         return CMD_CONTINUE;
685     }
686 
687     if (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
688         qemu_log_mask(LOG_GUEST_ERROR,
689                       "ITS MAPD: invalid size %d\n", dte.size);
690         return CMD_CONTINUE;
691     }
692 
693     return update_dte(s, devid, &dte) ? CMD_CONTINUE : CMD_STALL;
694 }
695 
696 static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
697 {
698     uint64_t rd1, rd2;
699 
700     rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1);
701     rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2);
702 
703     trace_gicv3_its_cmd_movall(rd1, rd2);
704 
705     if (rd1 >= s->gicv3->num_cpu) {
706         qemu_log_mask(LOG_GUEST_ERROR,
707                       "%s: RDBASE1 %" PRId64
708                       " out of range (must be less than %d)\n",
709                       __func__, rd1, s->gicv3->num_cpu);
710         return CMD_CONTINUE;
711     }
712     if (rd2 >= s->gicv3->num_cpu) {
713         qemu_log_mask(LOG_GUEST_ERROR,
714                       "%s: RDBASE2 %" PRId64
715                       " out of range (must be less than %d)\n",
716                       __func__, rd2, s->gicv3->num_cpu);
717         return CMD_CONTINUE;
718     }
719 
720     if (rd1 == rd2) {
721         /* Move to same target must succeed as a no-op */
722         return CMD_CONTINUE;
723     }
724 
725     /* Move all pending LPIs from redistributor 1 to redistributor 2 */
726     gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]);
727 
728     return CMD_CONTINUE;
729 }
730 
731 static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
732 {
733     uint32_t devid, eventid;
734     uint16_t new_icid;
735     uint64_t num_eventids;
736     DTEntry dte;
737     CTEntry old_cte, new_cte;
738     ITEntry old_ite;
739 
740     devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
741     eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
742     new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID);
743 
744     trace_gicv3_its_cmd_movi(devid, eventid, new_icid);
745 
746     if (devid >= s->dt.num_entries) {
747         qemu_log_mask(LOG_GUEST_ERROR,
748                       "%s: invalid command attributes: devid %d>=%d",
749                       __func__, devid, s->dt.num_entries);
750         return CMD_CONTINUE;
751     }
752     if (get_dte(s, devid, &dte) != MEMTX_OK) {
753         return CMD_STALL;
754     }
755 
756     if (!dte.valid) {
757         qemu_log_mask(LOG_GUEST_ERROR,
758                       "%s: invalid command attributes: "
759                       "invalid dte for %d\n", __func__, devid);
760         return CMD_CONTINUE;
761     }
762 
763     num_eventids = 1ULL << (dte.size + 1);
764     if (eventid >= num_eventids) {
765         qemu_log_mask(LOG_GUEST_ERROR,
766                       "%s: invalid command attributes: eventid %d >= %"
767                       PRId64 "\n",
768                       __func__, eventid, num_eventids);
769         return CMD_CONTINUE;
770     }
771 
772     if (get_ite(s, eventid, &dte, &old_ite) != MEMTX_OK) {
773         return CMD_STALL;
774     }
775 
776     if (!old_ite.valid || old_ite.inttype != ITE_INTTYPE_PHYSICAL) {
777         qemu_log_mask(LOG_GUEST_ERROR,
778                       "%s: invalid command attributes: invalid ITE\n",
779                       __func__);
780         return CMD_CONTINUE;
781     }
782 
783     if (old_ite.icid >= s->ct.num_entries) {
784         qemu_log_mask(LOG_GUEST_ERROR,
785                       "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
786                       __func__, old_ite.icid);
787         return CMD_CONTINUE;
788     }
789 
790     if (new_icid >= s->ct.num_entries) {
791         qemu_log_mask(LOG_GUEST_ERROR,
792                       "%s: invalid command attributes: ICID 0x%x\n",
793                       __func__, new_icid);
794         return CMD_CONTINUE;
795     }
796 
797     if (get_cte(s, old_ite.icid, &old_cte) != MEMTX_OK) {
798         return CMD_STALL;
799     }
800     if (!old_cte.valid) {
801         qemu_log_mask(LOG_GUEST_ERROR,
802                       "%s: invalid command attributes: "
803                       "invalid CTE for old ICID 0x%x\n",
804                       __func__, old_ite.icid);
805         return CMD_CONTINUE;
806     }
807 
808     if (get_cte(s, new_icid, &new_cte) != MEMTX_OK) {
809         return CMD_STALL;
810     }
811     if (!new_cte.valid) {
812         qemu_log_mask(LOG_GUEST_ERROR,
813                       "%s: invalid command attributes: "
814                       "invalid CTE for new ICID 0x%x\n",
815                       __func__, new_icid);
816         return CMD_CONTINUE;
817     }
818 
819     if (old_cte.rdbase >= s->gicv3->num_cpu) {
820         qemu_log_mask(LOG_GUEST_ERROR,
821                       "%s: CTE has invalid rdbase 0x%x\n",
822                       __func__, old_cte.rdbase);
823         return CMD_CONTINUE;
824     }
825 
826     if (new_cte.rdbase >= s->gicv3->num_cpu) {
827         qemu_log_mask(LOG_GUEST_ERROR,
828                       "%s: CTE has invalid rdbase 0x%x\n",
829                       __func__, new_cte.rdbase);
830         return CMD_CONTINUE;
831     }
832 
833     if (old_cte.rdbase != new_cte.rdbase) {
834         /* Move the LPI from the old redistributor to the new one */
835         gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase],
836                              &s->gicv3->cpu[new_cte.rdbase],
837                              old_ite.intid);
838     }
839 
840     /* Update the ICID field in the interrupt translation table entry */
841     old_ite.icid = new_icid;
842     return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE : CMD_STALL;
843 }
844 
845 /*
846  * Current implementation blocks until all
847  * commands are processed
848  */
849 static void process_cmdq(GICv3ITSState *s)
850 {
851     uint32_t wr_offset = 0;
852     uint32_t rd_offset = 0;
853     uint32_t cq_offset = 0;
854     AddressSpace *as = &s->gicv3->dma_as;
855     uint8_t cmd;
856     int i;
857 
858     if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
859         return;
860     }
861 
862     wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
863 
864     if (wr_offset >= s->cq.num_entries) {
865         qemu_log_mask(LOG_GUEST_ERROR,
866                       "%s: invalid write offset "
867                       "%d\n", __func__, wr_offset);
868         return;
869     }
870 
871     rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
872 
873     if (rd_offset >= s->cq.num_entries) {
874         qemu_log_mask(LOG_GUEST_ERROR,
875                       "%s: invalid read offset "
876                       "%d\n", __func__, rd_offset);
877         return;
878     }
879 
880     while (wr_offset != rd_offset) {
881         ItsCmdResult result = CMD_CONTINUE;
882         void *hostmem;
883         hwaddr buflen;
884         uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];
885 
886         cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
887 
888         buflen = GITS_CMDQ_ENTRY_SIZE;
889         hostmem = address_space_map(as, s->cq.base_addr + cq_offset,
890                                     &buflen, false, MEMTXATTRS_UNSPECIFIED);
891         if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) {
892             if (hostmem) {
893                 address_space_unmap(as, hostmem, buflen, false, 0);
894             }
895             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
896             qemu_log_mask(LOG_GUEST_ERROR,
897                           "%s: could not read command at 0x%" PRIx64 "\n",
898                           __func__, s->cq.base_addr + cq_offset);
899             break;
900         }
901         for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) {
902             cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t));
903         }
904         address_space_unmap(as, hostmem, buflen, false, 0);
905 
906         cmd = cmdpkt[0] & CMD_MASK;
907 
908         trace_gicv3_its_process_command(rd_offset, cmd);
909 
910         switch (cmd) {
911         case GITS_CMD_INT:
912             result = process_its_cmd(s, cmdpkt, INTERRUPT);
913             break;
914         case GITS_CMD_CLEAR:
915             result = process_its_cmd(s, cmdpkt, CLEAR);
916             break;
917         case GITS_CMD_SYNC:
918             /*
919              * Current implementation makes a blocking synchronous call
920              * for every command issued earlier, hence the internal state
921              * is already consistent by the time SYNC command is executed.
922              * Hence no further processing is required for SYNC command.
923              */
924             trace_gicv3_its_cmd_sync();
925             break;
926         case GITS_CMD_MAPD:
927             result = process_mapd(s, cmdpkt);
928             break;
929         case GITS_CMD_MAPC:
930             result = process_mapc(s, cmdpkt);
931             break;
932         case GITS_CMD_MAPTI:
933             result = process_mapti(s, cmdpkt, false);
934             break;
935         case GITS_CMD_MAPI:
936             result = process_mapti(s, cmdpkt, true);
937             break;
938         case GITS_CMD_DISCARD:
939             result = process_its_cmd(s, cmdpkt, DISCARD);
940             break;
941         case GITS_CMD_INV:
942         case GITS_CMD_INVALL:
943             /*
944              * Current implementation doesn't cache any ITS tables,
945              * but the calculated lpi priority information. We only
946              * need to trigger lpi priority re-calculation to be in
947              * sync with LPI config table or pending table changes.
948              */
949             trace_gicv3_its_cmd_inv();
950             for (i = 0; i < s->gicv3->num_cpu; i++) {
951                 gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
952             }
953             break;
954         case GITS_CMD_MOVI:
955             result = process_movi(s, cmdpkt);
956             break;
957         case GITS_CMD_MOVALL:
958             result = process_movall(s, cmdpkt);
959             break;
960         case GITS_CMD_VMAPTI:
961             result = process_vmapti(s, cmdpkt, false);
962             break;
963         case GITS_CMD_VMAPI:
964             result = process_vmapti(s, cmdpkt, true);
965             break;
966         default:
967             trace_gicv3_its_cmd_unknown(cmd);
968             break;
969         }
970         if (result == CMD_CONTINUE) {
971             rd_offset++;
972             rd_offset %= s->cq.num_entries;
973             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
974         } else {
975             /* CMD_STALL */
976             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
977             qemu_log_mask(LOG_GUEST_ERROR,
978                           "%s: 0x%x cmd processing failed, stalling\n",
979                           __func__, cmd);
980             break;
981         }
982     }
983 }
984 
985 /*
986  * This function extracts the ITS Device and Collection table specific
987  * parameters (like base_addr, size etc) from GITS_BASER register.
988  * It is called during ITS enable and also during post_load migration
989  */
990 static void extract_table_params(GICv3ITSState *s)
991 {
992     uint16_t num_pages = 0;
993     uint8_t  page_sz_type;
994     uint8_t type;
995     uint32_t page_sz = 0;
996     uint64_t value;
997 
998     for (int i = 0; i < 8; i++) {
999         TableDesc *td;
1000         int idbits;
1001 
1002         value = s->baser[i];
1003 
1004         if (!value) {
1005             continue;
1006         }
1007 
1008         page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);
1009 
1010         switch (page_sz_type) {
1011         case 0:
1012             page_sz = GITS_PAGE_SIZE_4K;
1013             break;
1014 
1015         case 1:
1016             page_sz = GITS_PAGE_SIZE_16K;
1017             break;
1018 
1019         case 2:
1020         case 3:
1021             page_sz = GITS_PAGE_SIZE_64K;
1022             break;
1023 
1024         default:
1025             g_assert_not_reached();
1026         }
1027 
1028         num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;
1029 
1030         type = FIELD_EX64(value, GITS_BASER, TYPE);
1031 
1032         switch (type) {
1033         case GITS_BASER_TYPE_DEVICE:
1034             td = &s->dt;
1035             idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
1036             break;
1037         case GITS_BASER_TYPE_COLLECTION:
1038             td = &s->ct;
1039             if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
1040                 idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
1041             } else {
1042                 /* 16-bit CollectionId supported when CIL == 0 */
1043                 idbits = 16;
1044             }
1045             break;
1046         case GITS_BASER_TYPE_VPE:
1047             td = &s->vpet;
1048             /*
1049              * For QEMU vPEIDs are always 16 bits. (GICv4.1 allows an
1050              * implementation to implement fewer bits and report this
1051              * via GICD_TYPER2.)
1052              */
1053             idbits = 16;
1054             break;
1055         default:
1056             /*
1057              * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
1058              * ensures we will only see type values corresponding to
1059              * the values set up in gicv3_its_reset().
1060              */
1061             g_assert_not_reached();
1062         }
1063 
1064         memset(td, 0, sizeof(*td));
1065         /*
1066          * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
1067          * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
1068          * do not have a special case where the GITS_BASER<n>.Valid bit is 0
1069          * for the register corresponding to the Collection table but we
1070          * still have to process interrupts using non-memory-backed
1071          * Collection table entries.)
1072          * The specification makes it UNPREDICTABLE to enable the ITS without
1073          * marking each BASER<n> as valid. We choose to handle these as if
1074          * the table was zero-sized, so commands using the table will fail
1075          * and interrupts requested via GITS_TRANSLATER writes will be ignored.
1076          * This happens automatically by leaving the num_entries field at
1077          * zero, which will be caught by the bounds checks we have before
1078          * every table lookup anyway.
1079          */
1080         if (!FIELD_EX64(value, GITS_BASER, VALID)) {
1081             continue;
1082         }
1083         td->page_sz = page_sz;
1084         td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
1085         td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
1086         td->base_addr = baser_base_addr(value, page_sz);
1087         if (!td->indirect) {
1088             td->num_entries = (num_pages * page_sz) / td->entry_sz;
1089         } else {
1090             td->num_entries = (((num_pages * page_sz) /
1091                                   L1TABLE_ENTRY_SIZE) *
1092                                  (page_sz / td->entry_sz));
1093         }
1094         td->num_entries = MIN(td->num_entries, 1ULL << idbits);
1095     }
1096 }
1097 
1098 static void extract_cmdq_params(GICv3ITSState *s)
1099 {
1100     uint16_t num_pages = 0;
1101     uint64_t value = s->cbaser;
1102 
1103     num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
1104 
1105     memset(&s->cq, 0 , sizeof(s->cq));
1106 
1107     if (FIELD_EX64(value, GITS_CBASER, VALID)) {
1108         s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
1109                              GITS_CMDQ_ENTRY_SIZE;
1110         s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
1111         s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
1112     }
1113 }
1114 
1115 static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
1116                                               uint64_t *data, unsigned size,
1117                                               MemTxAttrs attrs)
1118 {
1119     /*
1120      * GITS_TRANSLATER is write-only, and all other addresses
1121      * in the interrupt translation space frame are RES0.
1122      */
1123     *data = 0;
1124     return MEMTX_OK;
1125 }
1126 
1127 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
1128                                                uint64_t data, unsigned size,
1129                                                MemTxAttrs attrs)
1130 {
1131     GICv3ITSState *s = (GICv3ITSState *)opaque;
1132     bool result = true;
1133 
1134     trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);
1135 
1136     switch (offset) {
1137     case GITS_TRANSLATER:
1138         if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1139             result = do_process_its_cmd(s, attrs.requester_id, data, NONE);
1140         }
1141         break;
1142     default:
1143         break;
1144     }
1145 
1146     if (result) {
1147         return MEMTX_OK;
1148     } else {
1149         return MEMTX_ERROR;
1150     }
1151 }
1152 
1153 static bool its_writel(GICv3ITSState *s, hwaddr offset,
1154                               uint64_t value, MemTxAttrs attrs)
1155 {
1156     bool result = true;
1157     int index;
1158 
1159     switch (offset) {
1160     case GITS_CTLR:
1161         if (value & R_GITS_CTLR_ENABLED_MASK) {
1162             s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
1163             extract_table_params(s);
1164             extract_cmdq_params(s);
1165             process_cmdq(s);
1166         } else {
1167             s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
1168         }
1169         break;
1170     case GITS_CBASER:
1171         /*
1172          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1173          *                 already enabled
1174          */
1175         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1176             s->cbaser = deposit64(s->cbaser, 0, 32, value);
1177             s->creadr = 0;
1178         }
1179         break;
1180     case GITS_CBASER + 4:
1181         /*
1182          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1183          *                 already enabled
1184          */
1185         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1186             s->cbaser = deposit64(s->cbaser, 32, 32, value);
1187             s->creadr = 0;
1188         }
1189         break;
1190     case GITS_CWRITER:
1191         s->cwriter = deposit64(s->cwriter, 0, 32,
1192                                (value & ~R_GITS_CWRITER_RETRY_MASK));
1193         if (s->cwriter != s->creadr) {
1194             process_cmdq(s);
1195         }
1196         break;
1197     case GITS_CWRITER + 4:
1198         s->cwriter = deposit64(s->cwriter, 32, 32, value);
1199         break;
1200     case GITS_CREADR:
1201         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1202             s->creadr = deposit64(s->creadr, 0, 32,
1203                                   (value & ~R_GITS_CREADR_STALLED_MASK));
1204         } else {
1205             /* RO register, ignore the write */
1206             qemu_log_mask(LOG_GUEST_ERROR,
1207                           "%s: invalid guest write to RO register at offset "
1208                           TARGET_FMT_plx "\n", __func__, offset);
1209         }
1210         break;
1211     case GITS_CREADR + 4:
1212         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1213             s->creadr = deposit64(s->creadr, 32, 32, value);
1214         } else {
1215             /* RO register, ignore the write */
1216             qemu_log_mask(LOG_GUEST_ERROR,
1217                           "%s: invalid guest write to RO register at offset "
1218                           TARGET_FMT_plx "\n", __func__, offset);
1219         }
1220         break;
1221     case GITS_BASER ... GITS_BASER + 0x3f:
1222         /*
1223          * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
1224          *                 already enabled
1225          */
1226         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1227             index = (offset - GITS_BASER) / 8;
1228 
1229             if (s->baser[index] == 0) {
1230                 /* Unimplemented GITS_BASERn: RAZ/WI */
1231                 break;
1232             }
1233             if (offset & 7) {
1234                 value <<= 32;
1235                 value &= ~GITS_BASER_RO_MASK;
1236                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
1237                 s->baser[index] |= value;
1238             } else {
1239                 value &= ~GITS_BASER_RO_MASK;
1240                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
1241                 s->baser[index] |= value;
1242             }
1243         }
1244         break;
1245     case GITS_IIDR:
1246     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1247         /* RO registers, ignore the write */
1248         qemu_log_mask(LOG_GUEST_ERROR,
1249                       "%s: invalid guest write to RO register at offset "
1250                       TARGET_FMT_plx "\n", __func__, offset);
1251         break;
1252     default:
1253         result = false;
1254         break;
1255     }
1256     return result;
1257 }
1258 
1259 static bool its_readl(GICv3ITSState *s, hwaddr offset,
1260                              uint64_t *data, MemTxAttrs attrs)
1261 {
1262     bool result = true;
1263     int index;
1264 
1265     switch (offset) {
1266     case GITS_CTLR:
1267         *data = s->ctlr;
1268         break;
1269     case GITS_IIDR:
1270         *data = gicv3_iidr();
1271         break;
1272     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1273         /* ID registers */
1274         *data = gicv3_idreg(offset - GITS_IDREGS, GICV3_PIDR0_ITS);
1275         break;
1276     case GITS_TYPER:
1277         *data = extract64(s->typer, 0, 32);
1278         break;
1279     case GITS_TYPER + 4:
1280         *data = extract64(s->typer, 32, 32);
1281         break;
1282     case GITS_CBASER:
1283         *data = extract64(s->cbaser, 0, 32);
1284         break;
1285     case GITS_CBASER + 4:
1286         *data = extract64(s->cbaser, 32, 32);
1287         break;
1288     case GITS_CREADR:
1289         *data = extract64(s->creadr, 0, 32);
1290         break;
1291     case GITS_CREADR + 4:
1292         *data = extract64(s->creadr, 32, 32);
1293         break;
1294     case GITS_CWRITER:
1295         *data = extract64(s->cwriter, 0, 32);
1296         break;
1297     case GITS_CWRITER + 4:
1298         *data = extract64(s->cwriter, 32, 32);
1299         break;
1300     case GITS_BASER ... GITS_BASER + 0x3f:
1301         index = (offset - GITS_BASER) / 8;
1302         if (offset & 7) {
1303             *data = extract64(s->baser[index], 32, 32);
1304         } else {
1305             *data = extract64(s->baser[index], 0, 32);
1306         }
1307         break;
1308     default:
1309         result = false;
1310         break;
1311     }
1312     return result;
1313 }
1314 
1315 static bool its_writell(GICv3ITSState *s, hwaddr offset,
1316                                uint64_t value, MemTxAttrs attrs)
1317 {
1318     bool result = true;
1319     int index;
1320 
1321     switch (offset) {
1322     case GITS_BASER ... GITS_BASER + 0x3f:
1323         /*
1324          * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
1325          *                 already enabled
1326          */
1327         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1328             index = (offset - GITS_BASER) / 8;
1329             if (s->baser[index] == 0) {
1330                 /* Unimplemented GITS_BASERn: RAZ/WI */
1331                 break;
1332             }
1333             s->baser[index] &= GITS_BASER_RO_MASK;
1334             s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
1335         }
1336         break;
1337     case GITS_CBASER:
1338         /*
1339          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1340          *                 already enabled
1341          */
1342         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1343             s->cbaser = value;
1344             s->creadr = 0;
1345         }
1346         break;
1347     case GITS_CWRITER:
1348         s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
1349         if (s->cwriter != s->creadr) {
1350             process_cmdq(s);
1351         }
1352         break;
1353     case GITS_CREADR:
1354         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1355             s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
1356         } else {
1357             /* RO register, ignore the write */
1358             qemu_log_mask(LOG_GUEST_ERROR,
1359                           "%s: invalid guest write to RO register at offset "
1360                           TARGET_FMT_plx "\n", __func__, offset);
1361         }
1362         break;
1363     case GITS_TYPER:
1364         /* RO registers, ignore the write */
1365         qemu_log_mask(LOG_GUEST_ERROR,
1366                       "%s: invalid guest write to RO register at offset "
1367                       TARGET_FMT_plx "\n", __func__, offset);
1368         break;
1369     default:
1370         result = false;
1371         break;
1372     }
1373     return result;
1374 }
1375 
1376 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1377                               uint64_t *data, MemTxAttrs attrs)
1378 {
1379     bool result = true;
1380     int index;
1381 
1382     switch (offset) {
1383     case GITS_TYPER:
1384         *data = s->typer;
1385         break;
1386     case GITS_BASER ... GITS_BASER + 0x3f:
1387         index = (offset - GITS_BASER) / 8;
1388         *data = s->baser[index];
1389         break;
1390     case GITS_CBASER:
1391         *data = s->cbaser;
1392         break;
1393     case GITS_CREADR:
1394         *data = s->creadr;
1395         break;
1396     case GITS_CWRITER:
1397         *data = s->cwriter;
1398         break;
1399     default:
1400         result = false;
1401         break;
1402     }
1403     return result;
1404 }
1405 
1406 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1407                                   unsigned size, MemTxAttrs attrs)
1408 {
1409     GICv3ITSState *s = (GICv3ITSState *)opaque;
1410     bool result;
1411 
1412     switch (size) {
1413     case 4:
1414         result = its_readl(s, offset, data, attrs);
1415         break;
1416     case 8:
1417         result = its_readll(s, offset, data, attrs);
1418         break;
1419     default:
1420         result = false;
1421         break;
1422     }
1423 
1424     if (!result) {
1425         qemu_log_mask(LOG_GUEST_ERROR,
1426                       "%s: invalid guest read at offset " TARGET_FMT_plx
1427                       " size %u\n", __func__, offset, size);
1428         trace_gicv3_its_badread(offset, size);
1429         /*
1430          * The spec requires that reserved registers are RAZ/WI;
1431          * so use false returns from leaf functions as a way to
1432          * trigger the guest-error logging but don't return it to
1433          * the caller, or we'll cause a spurious guest data abort.
1434          */
1435         *data = 0;
1436     } else {
1437         trace_gicv3_its_read(offset, *data, size);
1438     }
1439     return MEMTX_OK;
1440 }
1441 
1442 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1443                                    unsigned size, MemTxAttrs attrs)
1444 {
1445     GICv3ITSState *s = (GICv3ITSState *)opaque;
1446     bool result;
1447 
1448     switch (size) {
1449     case 4:
1450         result = its_writel(s, offset, data, attrs);
1451         break;
1452     case 8:
1453         result = its_writell(s, offset, data, attrs);
1454         break;
1455     default:
1456         result = false;
1457         break;
1458     }
1459 
1460     if (!result) {
1461         qemu_log_mask(LOG_GUEST_ERROR,
1462                       "%s: invalid guest write at offset " TARGET_FMT_plx
1463                       " size %u\n", __func__, offset, size);
1464         trace_gicv3_its_badwrite(offset, data, size);
1465         /*
1466          * The spec requires that reserved registers are RAZ/WI;
1467          * so use false returns from leaf functions as a way to
1468          * trigger the guest-error logging but don't return it to
1469          * the caller, or we'll cause a spurious guest data abort.
1470          */
1471     } else {
1472         trace_gicv3_its_write(offset, data, size);
1473     }
1474     return MEMTX_OK;
1475 }
1476 
1477 static const MemoryRegionOps gicv3_its_control_ops = {
1478     .read_with_attrs = gicv3_its_read,
1479     .write_with_attrs = gicv3_its_write,
1480     .valid.min_access_size = 4,
1481     .valid.max_access_size = 8,
1482     .impl.min_access_size = 4,
1483     .impl.max_access_size = 8,
1484     .endianness = DEVICE_NATIVE_ENDIAN,
1485 };
1486 
1487 static const MemoryRegionOps gicv3_its_translation_ops = {
1488     .read_with_attrs = gicv3_its_translation_read,
1489     .write_with_attrs = gicv3_its_translation_write,
1490     .valid.min_access_size = 2,
1491     .valid.max_access_size = 4,
1492     .impl.min_access_size = 2,
1493     .impl.max_access_size = 4,
1494     .endianness = DEVICE_NATIVE_ENDIAN,
1495 };
1496 
1497 static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
1498 {
1499     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1500     int i;
1501 
1502     for (i = 0; i < s->gicv3->num_cpu; i++) {
1503         if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
1504             error_setg(errp, "Physical LPI not supported by CPU %d", i);
1505             return;
1506         }
1507     }
1508 
1509     gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
1510 
1511     /* set the ITS default features supported */
1512     s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
1513     s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
1514                           ITS_ITT_ENTRY_SIZE - 1);
1515     s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
1516     s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
1517     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
1518     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
1519 }
1520 
1521 static void gicv3_its_reset(DeviceState *dev)
1522 {
1523     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1524     GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);
1525 
1526     c->parent_reset(dev);
1527 
1528     /* Quiescent bit reset to 1 */
1529     s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);
1530 
1531     /*
1532      * setting GITS_BASER0.Type = 0b001 (Device)
1533      *         GITS_BASER1.Type = 0b100 (Collection Table)
1534      *         GITS_BASER2.Type = 0b010 (vPE) for GICv4 and later
1535      *         GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented)
1536      *         GITS_BASER<0,1>.Page_Size = 64KB
1537      * and default translation table entry size to 16 bytes
1538      */
1539     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
1540                              GITS_BASER_TYPE_DEVICE);
1541     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
1542                              GITS_BASER_PAGESIZE_64K);
1543     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
1544                              GITS_DTE_SIZE - 1);
1545 
1546     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
1547                              GITS_BASER_TYPE_COLLECTION);
1548     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
1549                              GITS_BASER_PAGESIZE_64K);
1550     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
1551                              GITS_CTE_SIZE - 1);
1552 
1553     if (its_feature_virtual(s)) {
1554         s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, TYPE,
1555                                  GITS_BASER_TYPE_VPE);
1556         s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, PAGESIZE,
1557                                  GITS_BASER_PAGESIZE_64K);
1558         s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, ENTRYSIZE,
1559                                  GITS_VPE_SIZE - 1);
1560     }
1561 }
1562 
1563 static void gicv3_its_post_load(GICv3ITSState *s)
1564 {
1565     if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1566         extract_table_params(s);
1567         extract_cmdq_params(s);
1568     }
1569 }
1570 
1571 static Property gicv3_its_props[] = {
1572     DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
1573                      GICv3State *),
1574     DEFINE_PROP_END_OF_LIST(),
1575 };
1576 
1577 static void gicv3_its_class_init(ObjectClass *klass, void *data)
1578 {
1579     DeviceClass *dc = DEVICE_CLASS(klass);
1580     GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
1581     GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);
1582 
1583     dc->realize = gicv3_arm_its_realize;
1584     device_class_set_props(dc, gicv3_its_props);
1585     device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
1586     icc->post_load = gicv3_its_post_load;
1587 }
1588 
1589 static const TypeInfo gicv3_its_info = {
1590     .name = TYPE_ARM_GICV3_ITS,
1591     .parent = TYPE_ARM_GICV3_ITS_COMMON,
1592     .instance_size = sizeof(GICv3ITSState),
1593     .class_init = gicv3_its_class_init,
1594     .class_size = sizeof(GICv3ITSClass),
1595 };
1596 
1597 static void gicv3_its_register_types(void)
1598 {
1599     type_register_static(&gicv3_its_info);
1600 }
1601 
1602 type_init(gicv3_its_register_types)
1603