xref: /openbmc/qemu/hw/intc/arm_gicv3_its.c (revision 0cdf7a5dc8d4e49b19d91219dc3e3cc65d6d8c60)
1 /*
2  * ITS emulation for a GICv3-based system
3  *
4  * Copyright Linaro.org 2021
5  *
6  * Authors:
7  *  Shashi Mallela <shashi.mallela@linaro.org>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10  * option) any later version.  See the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qemu/log.h"
16 #include "trace.h"
17 #include "hw/qdev-properties.h"
18 #include "hw/intc/arm_gicv3_its_common.h"
19 #include "gicv3_internal.h"
20 #include "qom/object.h"
21 #include "qapi/error.h"
22 
23 typedef struct GICv3ITSClass GICv3ITSClass;
24 /* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
25 DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
26                      ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)
27 
28 struct GICv3ITSClass {
29     GICv3ITSCommonClass parent_class;
30     void (*parent_reset)(DeviceState *dev);
31 };
32 
33 /*
34  * This is an internal enum used to distinguish between LPI triggered
35  * via command queue and LPI triggered via gits_translater write.
36  */
37 typedef enum ItsCmdType {
38     NONE = 0, /* internal indication for GITS_TRANSLATER write */
39     CLEAR = 1,
40     DISCARD = 2,
41     INTERRUPT = 3,
42 } ItsCmdType;
43 
44 typedef struct DTEntry {
45     bool valid;
46     unsigned size;
47     uint64_t ittaddr;
48 } DTEntry;
49 
50 typedef struct CTEntry {
51     bool valid;
52     uint32_t rdbase;
53 } CTEntry;
54 
55 typedef struct ITEntry {
56     bool valid;
57     int inttype;
58     uint32_t intid;
59     uint32_t doorbell;
60     uint32_t icid;
61     uint32_t vpeid;
62 } ITEntry;
63 
64 typedef struct VTEntry {
65     bool valid;
66     unsigned vptsize;
67     uint32_t rdbase;
68     uint64_t vptaddr;
69 } VTEntry;
70 
71 /*
72  * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
73  * if a command parameter is not correct. These include both "stall
74  * processing of the command queue" and "ignore this command, and
75  * keep processing the queue". In our implementation we choose that
76  * memory transaction errors reading the command packet provoke a
77  * stall, but errors in parameters cause us to ignore the command
78  * and continue processing.
79  * The process_* functions which handle individual ITS commands all
80  * return an ItsCmdResult which tells process_cmdq() whether it should
81  * stall or keep going.
82  */
83 typedef enum ItsCmdResult {
84     CMD_STALL = 0,
85     CMD_CONTINUE = 1,
86 } ItsCmdResult;
87 
88 /* True if the ITS supports the GICv4 virtual LPI feature */
89 static bool its_feature_virtual(GICv3ITSState *s)
90 {
91     return s->typer & R_GITS_TYPER_VIRTUAL_MASK;
92 }
93 
94 static inline bool intid_in_lpi_range(uint32_t id)
95 {
96     return id >= GICV3_LPI_INTID_START &&
97         id < (1 << (GICD_TYPER_IDBITS + 1));
98 }
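
/*
 * Worked example (illustrative, not in the original source): with the values
 * defined in gicv3_internal.h -- GICV3_LPI_INTID_START == 8192 and
 * GICD_TYPER_IDBITS == 0xf -- the valid LPI INTID range is [8192, 1 << 16):
 *
 *   intid_in_lpi_range(8191);   // false: below the LPI range
 *   intid_in_lpi_range(8192);   // true:  first LPI
 *   intid_in_lpi_range(65535);  // true:  last LPI
 *   intid_in_lpi_range(65536);  // false: needs more than IDBITS + 1 bits
 */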
99 
100 static inline bool valid_doorbell(uint32_t id)
101 {
102     /* Doorbell fields may be an LPI, or 1023 to mean "no doorbell" */
103     return id == INTID_SPURIOUS || intid_in_lpi_range(id);
104 }
105 
106 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
107 {
108     uint64_t result = 0;
109 
110     switch (page_sz) {
111     case GITS_PAGE_SIZE_4K:
112     case GITS_PAGE_SIZE_16K:
113         result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
114         break;
115 
116     case GITS_PAGE_SIZE_64K:
117         result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
118         result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
119         break;
120 
121     default:
122         break;
123     }
124     return result;
125 }
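
/*
 * Worked example (a sketch, not in the original source): for 64K pages the
 * table base address is reassembled from two register fields, as the code
 * above shows: PHYADDRL_64K supplies address bits [47:16] and PHYADDRH_64K
 * supplies bits [51:48]. So if PHYADDRL_64K == 0x12345 and
 * PHYADDRH_64K == 0x3:
 *
 *   result = (0x12345ULL << 16) | (0x3ULL << 48);   // 0x0003000123450000
 *
 * For 4K and 16K pages the PHYADDR field already holds bits [47:12] and is
 * simply shifted left by 12.
 */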
126 
127 static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
128                                  uint32_t idx, MemTxResult *res)
129 {
130     /*
131      * Given a TableDesc describing one of the ITS in-guest-memory
132      * tables and an index into it, return the guest address
133      * corresponding to that table entry.
134      * If there was a memory error reading the L1 table of an
135      * indirect table, *res is set accordingly, and we return -1.
136      * If the L1 table entry is marked not valid, we return -1 with
137      * *res set to MEMTX_OK.
138      *
139      * The specification defines the format of level 1 entries of a
140      * 2-level table, but the format of level 2 entries and the format
141  * of flat-mapped tables are IMPDEF.
142      */
143     AddressSpace *as = &s->gicv3->dma_as;
144     uint32_t l2idx;
145     uint64_t l2;
146     uint32_t num_l2_entries;
147 
148     *res = MEMTX_OK;
149 
150     if (!td->indirect) {
151         /* Single level table */
152         return td->base_addr + idx * td->entry_sz;
153     }
154 
155     /* Two level table */
156     l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);
157 
158     l2 = address_space_ldq_le(as,
159                               td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
160                               MEMTXATTRS_UNSPECIFIED, res);
161     if (*res != MEMTX_OK) {
162         return -1;
163     }
164     if (!(l2 & L2_TABLE_VALID_MASK)) {
165         return -1;
166     }
167 
168     num_l2_entries = td->page_sz / td->entry_sz;
169     return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
170 }
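
/*
 * Worked example for the two-level case (illustrative; assumes 4K pages and
 * the 8-byte table entries used by this implementation): with
 * page_sz == 4096 and entry_sz == 8, each L1 entry covers
 * 4096 / 8 == 512 table entries, so for idx == 1000:
 *
 *   l2idx          = 1000 / 512;        // 1: second L1 entry
 *   l1_entry_addr  = base_addr + 1 * L1TABLE_ENTRY_SIZE;
 *   num_l2_entries = 4096 / 8;          // 512
 *   entry_addr     = l2_page_base + (1000 % 512) * 8;  // l2_page_base + 3904
 *
 * where l2_page_base is the (valid) address read from the L1 entry.
 */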
171 
172 /*
173  * Read the Collection Table entry at index @icid. On success (including
174  * successfully determining that there is no valid CTE for this index),
175  * we return MEMTX_OK and populate the CTEntry struct @cte accordingly.
176  * If there is an error reading memory then we return the error code.
177  */
178 static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte)
179 {
180     AddressSpace *as = &s->gicv3->dma_as;
181     MemTxResult res = MEMTX_OK;
182     uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, &res);
183     uint64_t cteval;
184 
185     if (entry_addr == -1) {
186         /* No L2 table entry, i.e. no valid CTE, or a memory error */
187         cte->valid = false;
188         goto out;
189     }
190 
191     cteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
192     if (res != MEMTX_OK) {
193         goto out;
194     }
195     cte->valid = FIELD_EX64(cteval, CTE, VALID);
196     cte->rdbase = FIELD_EX64(cteval, CTE, RDBASE);
197 out:
198     if (res != MEMTX_OK) {
199         trace_gicv3_its_cte_read_fault(icid);
200     } else {
201         trace_gicv3_its_cte_read(icid, cte->valid, cte->rdbase);
202     }
203     return res;
204 }
205 
206 /*
207  * Update the Interrupt Table entry at index @eventid in the table specified
208  * by the DTE @dte. Returns true on success, false if there was a memory
209  * access error.
210  */
211 static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
212                        const ITEntry *ite)
213 {
214     AddressSpace *as = &s->gicv3->dma_as;
215     MemTxResult res = MEMTX_OK;
216     hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
217     uint64_t itel = 0;
218     uint32_t iteh = 0;
219 
220     trace_gicv3_its_ite_write(dte->ittaddr, eventid, ite->valid,
221                               ite->inttype, ite->intid, ite->icid,
222                               ite->vpeid, ite->doorbell);
223 
224     if (ite->valid) {
225         itel = FIELD_DP64(itel, ITE_L, VALID, 1);
226         itel = FIELD_DP64(itel, ITE_L, INTTYPE, ite->inttype);
227         itel = FIELD_DP64(itel, ITE_L, INTID, ite->intid);
228         itel = FIELD_DP64(itel, ITE_L, ICID, ite->icid);
229         itel = FIELD_DP64(itel, ITE_L, VPEID, ite->vpeid);
230         iteh = FIELD_DP32(iteh, ITE_H, DOORBELL, ite->doorbell);
231     }
232 
233     address_space_stq_le(as, iteaddr, itel, MEMTXATTRS_UNSPECIFIED, &res);
234     if (res != MEMTX_OK) {
235         return false;
236     }
237     address_space_stl_le(as, iteaddr + 8, iteh, MEMTXATTRS_UNSPECIFIED, &res);
238     return res == MEMTX_OK;
239 }
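
/*
 * Note (informational, not in the original source): with the IMPDEF ITE
 * layout used above, each ITT entry is ITS_ITT_ENTRY_SIZE bytes: a 64-bit
 * "low" word at offset 0 holding VALID/INTTYPE/INTID/ICID/VPEID and a 32-bit
 * "high" word at offset 8 holding the doorbell. Because an invalid entry is
 * written as all-zeroes, unmapping an event (e.g. for DISCARD) is simply:
 *
 *   ITEntry ite = { .valid = false };
 *   update_ite(s, eventid, &dte, &ite);   // stores 0 to both words
 */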
240 
241 /*
242  * Read the Interrupt Table entry at index @eventid from the table specified
243  * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry
244  * struct @ite accordingly. If there is an error reading memory then we return
245  * the error code.
246  */
247 static MemTxResult get_ite(GICv3ITSState *s, uint32_t eventid,
248                            const DTEntry *dte, ITEntry *ite)
249 {
250     AddressSpace *as = &s->gicv3->dma_as;
251     MemTxResult res = MEMTX_OK;
252     uint64_t itel;
253     uint32_t iteh;
254     hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
255 
256     itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, &res);
257     if (res != MEMTX_OK) {
258         trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
259         return res;
260     }
261 
262     iteh = address_space_ldl_le(as, iteaddr + 8, MEMTXATTRS_UNSPECIFIED, &res);
263     if (res != MEMTX_OK) {
264         trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
265         return res;
266     }
267 
268     ite->valid = FIELD_EX64(itel, ITE_L, VALID);
269     ite->inttype = FIELD_EX64(itel, ITE_L, INTTYPE);
270     ite->intid = FIELD_EX64(itel, ITE_L, INTID);
271     ite->icid = FIELD_EX64(itel, ITE_L, ICID);
272     ite->vpeid = FIELD_EX64(itel, ITE_L, VPEID);
273     ite->doorbell = FIELD_EX64(iteh, ITE_H, DOORBELL);
274     trace_gicv3_its_ite_read(dte->ittaddr, eventid, ite->valid,
275                              ite->inttype, ite->intid, ite->icid,
276                              ite->vpeid, ite->doorbell);
277     return MEMTX_OK;
278 }
279 
280 /*
281  * Read the Device Table entry at index @devid. On success (including
282  * successfully determining that there is no valid DTE for this index),
283  * we return MEMTX_OK and populate the DTEntry struct @dte accordingly.
284  * If there is an error reading memory then we return the error code.
285  */
286 static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte)
287 {
288     MemTxResult res = MEMTX_OK;
289     AddressSpace *as = &s->gicv3->dma_as;
290     uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res);
291     uint64_t dteval;
292 
293     if (entry_addr == -1) {
294         /* No L2 table entry, i.e. no valid DTE, or a memory error */
295         dte->valid = false;
296         goto out;
297     }
298     dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
299     if (res != MEMTX_OK) {
300         goto out;
301     }
302     dte->valid = FIELD_EX64(dteval, DTE, VALID);
303     dte->size = FIELD_EX64(dteval, DTE, SIZE);
304     /* DTE word field stores bits [51:8] of the ITT address */
305     dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT;
306 out:
307     if (res != MEMTX_OK) {
308         trace_gicv3_its_dte_read_fault(devid);
309     } else {
310         trace_gicv3_its_dte_read(devid, dte->valid, dte->size, dte->ittaddr);
311     }
312     return res;
313 }
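
/*
 * Worked example (illustrative): a DTE describes one device's Interrupt
 * Translation Table. dte->size is the number of EventID bits minus one, so
 * the device may use 1 << (size + 1) EventIDs, and dte->ittaddr (stored as
 * bits [51:8], hence the ITTADDR_SHIFT) is the guest address of the ITT.
 * For example, a DTE with size == 7 allows EventIDs 0..255 and its ITT
 * occupies 256 * ITS_ITT_ENTRY_SIZE bytes of guest memory.
 */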
314 
315 /*
316  * This function handles the processing of the following commands, based
317  * on the ItsCmdType parameter passed in:
318  * 1. triggering of an LPI interrupt translation via the ITS INT command
319  * 2. triggering of an LPI interrupt translation via a GITS_TRANSLATER write
320  * 3. handling of the ITS CLEAR command
321  * 4. handling of the ITS DISCARD command
322  */
323 static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
324                                        uint32_t eventid, ItsCmdType cmd)
325 {
326     uint64_t num_eventids;
327     DTEntry dte;
328     CTEntry cte;
329     ITEntry ite;
330 
331     if (devid >= s->dt.num_entries) {
332         qemu_log_mask(LOG_GUEST_ERROR,
333                       "%s: invalid command attributes: devid %d >= %d\n",
334                       __func__, devid, s->dt.num_entries);
335         return CMD_CONTINUE;
336     }
337 
338     if (get_dte(s, devid, &dte) != MEMTX_OK) {
339         return CMD_STALL;
340     }
341     if (!dte.valid) {
342         qemu_log_mask(LOG_GUEST_ERROR,
343                       "%s: invalid command attributes: "
344                       "invalid dte for %d\n", __func__, devid);
345         return CMD_CONTINUE;
346     }
347 
348     num_eventids = 1ULL << (dte.size + 1);
349     if (eventid >= num_eventids) {
350         qemu_log_mask(LOG_GUEST_ERROR,
351                       "%s: invalid command attributes: eventid %d >= %"
352                       PRId64 "\n",
353                       __func__, eventid, num_eventids);
354         return CMD_CONTINUE;
355     }
356 
357     if (get_ite(s, eventid, &dte, &ite) != MEMTX_OK) {
358         return CMD_STALL;
359     }
360 
361     if (!ite.valid || ite.inttype != ITE_INTTYPE_PHYSICAL) {
362         qemu_log_mask(LOG_GUEST_ERROR,
363                       "%s: invalid command attributes: invalid ITE\n",
364                       __func__);
365         return CMD_CONTINUE;
366     }
367 
368     if (ite.icid >= s->ct.num_entries) {
369         qemu_log_mask(LOG_GUEST_ERROR,
370                       "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
371                       __func__, ite.icid);
372         return CMD_CONTINUE;
373     }
374 
375     if (get_cte(s, ite.icid, &cte) != MEMTX_OK) {
376         return CMD_STALL;
377     }
378     if (!cte.valid) {
379         qemu_log_mask(LOG_GUEST_ERROR,
380                       "%s: invalid command attributes: invalid CTE\n",
381                       __func__);
382         return CMD_CONTINUE;
383     }
384 
385     /*
386      * Current implementation only supports rdbase == procnum
387      * Hence rdbase physical address is ignored
388      */
389     if (cte.rdbase >= s->gicv3->num_cpu) {
390         return CMD_CONTINUE;
391     }
392 
393     if ((cmd == CLEAR) || (cmd == DISCARD)) {
394         gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 0);
395     } else {
396         gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 1);
397     }
398 
399     if (cmd == DISCARD) {
400         ITEntry ite = {};
401         /* remove mapping from interrupt translation table */
402         ite.valid = false;
403         return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
404     }
405     return CMD_CONTINUE;
406 }
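
/*
 * Summary of the lookup chain above (informational): a (DeviceID, EventID)
 * pair is resolved as
 *
 *   devid   -> DTE  (Device Table)     : gives the ITT base and size
 *   eventid -> ITE  (per-device ITT)   : gives the physical INTID and ICID
 *   icid    -> CTE  (Collection Table) : gives rdbase, i.e. the target CPU
 *
 * and then the LPI is set pending (INT / GITS_TRANSLATER) or cleared
 * (CLEAR / DISCARD) on that CPU's redistributor; DISCARD additionally
 * invalidates the ITE.
 */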
407 
408 static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt,
409                                     ItsCmdType cmd)
410 {
411     uint32_t devid, eventid;
412 
413     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
414     eventid = cmdpkt[1] & EVENTID_MASK;
415     switch (cmd) {
416     case INTERRUPT:
417         trace_gicv3_its_cmd_int(devid, eventid);
418         break;
419     case CLEAR:
420         trace_gicv3_its_cmd_clear(devid, eventid);
421         break;
422     case DISCARD:
423         trace_gicv3_its_cmd_discard(devid, eventid);
424         break;
425     default:
426         g_assert_not_reached();
427     }
428     return do_process_its_cmd(s, devid, eventid, cmd);
429 }
430 
431 static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
432                                   bool ignore_pInt)
433 {
434     uint32_t devid, eventid;
435     uint32_t pIntid = 0;
436     uint64_t num_eventids;
437     uint16_t icid = 0;
438     DTEntry dte;
439     ITEntry ite;
440 
441     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
442     eventid = cmdpkt[1] & EVENTID_MASK;
443     icid = cmdpkt[2] & ICID_MASK;
444 
445     if (ignore_pInt) {
446         pIntid = eventid;
447         trace_gicv3_its_cmd_mapi(devid, eventid, icid);
448     } else {
449         pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT;
450         trace_gicv3_its_cmd_mapti(devid, eventid, icid, pIntid);
451     }
452 
453     if (devid >= s->dt.num_entries) {
454         qemu_log_mask(LOG_GUEST_ERROR,
455                       "%s: invalid command attributes: devid %d >= %d\n",
456                       __func__, devid, s->dt.num_entries);
457         return CMD_CONTINUE;
458     }
459 
460     if (get_dte(s, devid, &dte) != MEMTX_OK) {
461         return CMD_STALL;
462     }
463     num_eventids = 1ULL << (dte.size + 1);
464 
465     if (icid >= s->ct.num_entries) {
466         qemu_log_mask(LOG_GUEST_ERROR,
467                       "%s: invalid ICID 0x%x >= 0x%x\n",
468                       __func__, icid, s->ct.num_entries);
469         return CMD_CONTINUE;
470     }
471 
472     if (!dte.valid) {
473         qemu_log_mask(LOG_GUEST_ERROR,
474                       "%s: no valid DTE for devid 0x%x\n", __func__, devid);
475         return CMD_CONTINUE;
476     }
477 
478     if (eventid >= num_eventids) {
479         qemu_log_mask(LOG_GUEST_ERROR,
480                       "%s: invalid event ID 0x%x >= 0x%" PRIx64 "\n",
481                       __func__, eventid, num_eventids);
482         return CMD_CONTINUE;
483     }
484 
485     if (!intid_in_lpi_range(pIntid)) {
486         qemu_log_mask(LOG_GUEST_ERROR,
487                       "%s: invalid interrupt ID 0x%x\n", __func__, pIntid);
488         return CMD_CONTINUE;
489     }
490 
491     /* add an ITE to the interrupt translation table */
492     ite.valid = true;
493     ite.inttype = ITE_INTTYPE_PHYSICAL;
494     ite.intid = pIntid;
495     ite.icid = icid;
496     ite.doorbell = INTID_SPURIOUS;
497     ite.vpeid = 0;
498     return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
499 }
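
/*
 * Example command encoding (a sketch, assuming the field positions implied
 * by the masks above: DeviceID in bits [63:32] of word 0, EventID in
 * bits [31:0] and pINTID in bits [63:32] of word 1, ICID in bits [15:0] of
 * word 2). A MAPTI mapping event 5 of device 2 to physical LPI 8200 on
 * collection 1 would reach this function as:
 *
 *   uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS] = {
 *       [0] = ((uint64_t)2 << 32) | GITS_CMD_MAPTI,
 *       [1] = ((uint64_t)8200 << 32) | 5,
 *       [2] = 1,
 *       [3] = 0,
 *   };
 *
 * MAPI is identical except that the pINTID field is absent and the EventID
 * is reused as the physical INTID (ignore_pInt == true).
 */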
500 
501 static ItsCmdResult process_vmapti(GICv3ITSState *s, const uint64_t *cmdpkt,
502                                    bool ignore_vintid)
503 {
504     uint32_t devid, eventid, vintid, doorbell, vpeid;
505     uint32_t num_eventids;
506     DTEntry dte;
507     ITEntry ite;
508 
509     if (!its_feature_virtual(s)) {
510         return CMD_CONTINUE;
511     }
512 
513     devid = FIELD_EX64(cmdpkt[0], VMAPTI_0, DEVICEID);
514     eventid = FIELD_EX64(cmdpkt[1], VMAPTI_1, EVENTID);
515     vpeid = FIELD_EX64(cmdpkt[1], VMAPTI_1, VPEID);
516     doorbell = FIELD_EX64(cmdpkt[2], VMAPTI_2, DOORBELL);
517     if (ignore_vintid) {
518         vintid = eventid;
519         trace_gicv3_its_cmd_vmapi(devid, eventid, vpeid, doorbell);
520     } else {
521         vintid = FIELD_EX64(cmdpkt[2], VMAPTI_2, VINTID);
522         trace_gicv3_its_cmd_vmapti(devid, eventid, vpeid, vintid, doorbell);
523     }
524 
525     if (devid >= s->dt.num_entries) {
526         qemu_log_mask(LOG_GUEST_ERROR,
527                       "%s: invalid DeviceID 0x%x (must be less than 0x%x)\n",
528                       __func__, devid, s->dt.num_entries);
529         return CMD_CONTINUE;
530     }
531 
532     if (get_dte(s, devid, &dte) != MEMTX_OK) {
533         return CMD_STALL;
534     }
535 
536     if (!dte.valid) {
537         qemu_log_mask(LOG_GUEST_ERROR,
538                       "%s: no entry in device table for DeviceID 0x%x\n",
539                       __func__, devid);
540         return CMD_CONTINUE;
541     }
542 
543     num_eventids = 1ULL << (dte.size + 1);
544 
545     if (eventid >= num_eventids) {
546         qemu_log_mask(LOG_GUEST_ERROR,
547                       "%s: EventID 0x%x too large for DeviceID 0x%x "
548                       "(must be less than 0x%x)\n",
549                       __func__, eventid, devid, num_eventids);
550         return CMD_CONTINUE;
551     }
552     if (!intid_in_lpi_range(vintid)) {
553         qemu_log_mask(LOG_GUEST_ERROR,
554                       "%s: VIntID 0x%x not a valid LPI\n",
555                       __func__, vintid);
556         return CMD_CONTINUE;
557     }
558     if (!valid_doorbell(doorbell)) {
559         qemu_log_mask(LOG_GUEST_ERROR,
560                       "%s: Doorbell %d not 1023 and not a valid LPI\n",
561                       __func__, doorbell);
562         return CMD_CONTINUE;
563     }
564     if (vpeid >= s->vpet.num_entries) {
565         qemu_log_mask(LOG_GUEST_ERROR,
566                       "%s: VPEID 0x%x out of range (must be less than 0x%x)\n",
567                       __func__, vpeid, s->vpet.num_entries);
568         return CMD_CONTINUE;
569     }
570     /* add an ITE to the interrupt translation table */
571     ite.valid = true;
572     ite.inttype = ITE_INTTYPE_VIRTUAL;
573     ite.intid = vintid;
574     ite.icid = 0;
575     ite.doorbell = doorbell;
576     ite.vpeid = vpeid;
577     return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
578 }
579 
580 /*
581  * Update the Collection Table entry for @icid to @cte. Returns true
582  * on success, false if there was a memory access error.
583  */
584 static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte)
585 {
586     AddressSpace *as = &s->gicv3->dma_as;
587     uint64_t entry_addr;
588     uint64_t cteval = 0;
589     MemTxResult res = MEMTX_OK;
590 
591     trace_gicv3_its_cte_write(icid, cte->valid, cte->rdbase);
592 
593     if (cte->valid) {
594         /* add mapping entry to collection table */
595         cteval = FIELD_DP64(cteval, CTE, VALID, 1);
596         cteval = FIELD_DP64(cteval, CTE, RDBASE, cte->rdbase);
597     }
598 
599     entry_addr = table_entry_addr(s, &s->ct, icid, &res);
600     if (res != MEMTX_OK) {
601         /* memory access error: stall */
602         return false;
603     }
604     if (entry_addr == -1) {
605         /* No L2 table for this index: discard write and continue */
606         return true;
607     }
608 
609     address_space_stq_le(as, entry_addr, cteval, MEMTXATTRS_UNSPECIFIED, &res);
610     return res == MEMTX_OK;
611 }
612 
613 static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
614 {
615     uint16_t icid;
616     CTEntry cte;
617 
618     icid = cmdpkt[2] & ICID_MASK;
619     cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
620     if (cte.valid) {
621         cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
622         cte.rdbase &= RDBASE_PROCNUM_MASK;
623     } else {
624         cte.rdbase = 0;
625     }
626     trace_gicv3_its_cmd_mapc(icid, cte.rdbase, cte.valid);
627 
628     if (icid >= s->ct.num_entries) {
629         qemu_log_mask(LOG_GUEST_ERROR, "ITS MAPC: invalid ICID 0x%x\n", icid);
630         return CMD_CONTINUE;
631     }
632     if (cte.valid && cte.rdbase >= s->gicv3->num_cpu) {
633         qemu_log_mask(LOG_GUEST_ERROR,
634                       "ITS MAPC: invalid RDBASE %u\n", cte.rdbase);
635         return CMD_CONTINUE;
636     }
637 
638     return update_cte(s, icid, &cte) ? CMD_CONTINUE : CMD_STALL;
639 }
640 
641 /*
642  * Update the Device Table entry for @devid to @dte. Returns true
643  * on success, false if there was a memory access error.
644  */
645 static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte)
646 {
647     AddressSpace *as = &s->gicv3->dma_as;
648     uint64_t entry_addr;
649     uint64_t dteval = 0;
650     MemTxResult res = MEMTX_OK;
651 
652     trace_gicv3_its_dte_write(devid, dte->valid, dte->size, dte->ittaddr);
653 
654     if (dte->valid) {
655         /* add mapping entry to device table */
656         dteval = FIELD_DP64(dteval, DTE, VALID, 1);
657         dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size);
658         dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr);
659     }
660 
661     entry_addr = table_entry_addr(s, &s->dt, devid, &res);
662     if (res != MEMTX_OK) {
663         /* memory access error: stall */
664         return false;
665     }
666     if (entry_addr == -1) {
667         /* No L2 table for this index: discard write and continue */
668         return true;
669     }
670     address_space_stq_le(as, entry_addr, dteval, MEMTXATTRS_UNSPECIFIED, &res);
671     return res == MEMTX_OK;
672 }
673 
674 static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
675 {
676     uint32_t devid;
677     DTEntry dte;
678 
679     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
680     dte.size = cmdpkt[1] & SIZE_MASK;
681     dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
682     dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
683 
684     trace_gicv3_its_cmd_mapd(devid, dte.size, dte.ittaddr, dte.valid);
685 
686     if (devid >= s->dt.num_entries) {
687         qemu_log_mask(LOG_GUEST_ERROR,
688                       "ITS MAPD: invalid device ID field 0x%x >= 0x%x\n",
689                       devid, s->dt.num_entries);
690         return CMD_CONTINUE;
691     }
692 
693     if (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
694         qemu_log_mask(LOG_GUEST_ERROR,
695                       "ITS MAPD: invalid size %d\n", dte.size);
696         return CMD_CONTINUE;
697     }
698 
699     return update_dte(s, devid, &dte) ? CMD_CONTINUE : CMD_STALL;
700 }
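
/*
 * Worked example (illustrative): the MAPD Size field is "number of EventID
 * bits minus one", so a device that needs 32 events programs size == 4 and
 * must supply an ITT covering 1 << (4 + 1) == 32 entries, i.e.
 * 32 * ITS_ITT_ENTRY_SIZE bytes, at an address that fits in the ITTADDR
 * field (which holds address bits [51:8], i.e. 256-byte alignment).
 */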
701 
702 static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
703 {
704     uint64_t rd1, rd2;
705 
706     rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1);
707     rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2);
708 
709     trace_gicv3_its_cmd_movall(rd1, rd2);
710 
711     if (rd1 >= s->gicv3->num_cpu) {
712         qemu_log_mask(LOG_GUEST_ERROR,
713                       "%s: RDBASE1 %" PRId64
714                       " out of range (must be less than %d)\n",
715                       __func__, rd1, s->gicv3->num_cpu);
716         return CMD_CONTINUE;
717     }
718     if (rd2 >= s->gicv3->num_cpu) {
719         qemu_log_mask(LOG_GUEST_ERROR,
720                       "%s: RDBASE2 %" PRId64
721                       " out of range (must be less than %d)\n",
722                       __func__, rd2, s->gicv3->num_cpu);
723         return CMD_CONTINUE;
724     }
725 
726     if (rd1 == rd2) {
727         /* Move to same target must succeed as a no-op */
728         return CMD_CONTINUE;
729     }
730 
731     /* Move all pending LPIs from redistributor 1 to redistributor 2 */
732     gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]);
733 
734     return CMD_CONTINUE;
735 }
736 
737 static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
738 {
739     uint32_t devid, eventid;
740     uint16_t new_icid;
741     uint64_t num_eventids;
742     DTEntry dte;
743     CTEntry old_cte, new_cte;
744     ITEntry old_ite;
745 
746     devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
747     eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
748     new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID);
749 
750     trace_gicv3_its_cmd_movi(devid, eventid, new_icid);
751 
752     if (devid >= s->dt.num_entries) {
753         qemu_log_mask(LOG_GUEST_ERROR,
754                       "%s: invalid command attributes: devid %d >= %d\n",
755                       __func__, devid, s->dt.num_entries);
756         return CMD_CONTINUE;
757     }
758     if (get_dte(s, devid, &dte) != MEMTX_OK) {
759         return CMD_STALL;
760     }
761 
762     if (!dte.valid) {
763         qemu_log_mask(LOG_GUEST_ERROR,
764                       "%s: invalid command attributes: "
765                       "invalid dte for %d\n", __func__, devid);
766         return CMD_CONTINUE;
767     }
768 
769     num_eventids = 1ULL << (dte.size + 1);
770     if (eventid >= num_eventids) {
771         qemu_log_mask(LOG_GUEST_ERROR,
772                       "%s: invalid command attributes: eventid %d >= %"
773                       PRId64 "\n",
774                       __func__, eventid, num_eventids);
775         return CMD_CONTINUE;
776     }
777 
778     if (get_ite(s, eventid, &dte, &old_ite) != MEMTX_OK) {
779         return CMD_STALL;
780     }
781 
782     if (!old_ite.valid || old_ite.inttype != ITE_INTTYPE_PHYSICAL) {
783         qemu_log_mask(LOG_GUEST_ERROR,
784                       "%s: invalid command attributes: invalid ITE\n",
785                       __func__);
786         return CMD_CONTINUE;
787     }
788 
789     if (old_ite.icid >= s->ct.num_entries) {
790         qemu_log_mask(LOG_GUEST_ERROR,
791                       "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
792                       __func__, old_ite.icid);
793         return CMD_CONTINUE;
794     }
795 
796     if (new_icid >= s->ct.num_entries) {
797         qemu_log_mask(LOG_GUEST_ERROR,
798                       "%s: invalid command attributes: ICID 0x%x\n",
799                       __func__, new_icid);
800         return CMD_CONTINUE;
801     }
802 
803     if (get_cte(s, old_ite.icid, &old_cte) != MEMTX_OK) {
804         return CMD_STALL;
805     }
806     if (!old_cte.valid) {
807         qemu_log_mask(LOG_GUEST_ERROR,
808                       "%s: invalid command attributes: "
809                       "invalid CTE for old ICID 0x%x\n",
810                       __func__, old_ite.icid);
811         return CMD_CONTINUE;
812     }
813 
814     if (get_cte(s, new_icid, &new_cte) != MEMTX_OK) {
815         return CMD_STALL;
816     }
817     if (!new_cte.valid) {
818         qemu_log_mask(LOG_GUEST_ERROR,
819                       "%s: invalid command attributes: "
820                       "invalid CTE for new ICID 0x%x\n",
821                       __func__, new_icid);
822         return CMD_CONTINUE;
823     }
824 
825     if (old_cte.rdbase >= s->gicv3->num_cpu) {
826         qemu_log_mask(LOG_GUEST_ERROR,
827                       "%s: CTE has invalid rdbase 0x%x\n",
828                       __func__, old_cte.rdbase);
829         return CMD_CONTINUE;
830     }
831 
832     if (new_cte.rdbase >= s->gicv3->num_cpu) {
833         qemu_log_mask(LOG_GUEST_ERROR,
834                       "%s: CTE has invalid rdbase 0x%x\n",
835                       __func__, new_cte.rdbase);
836         return CMD_CONTINUE;
837     }
838 
839     if (old_cte.rdbase != new_cte.rdbase) {
840         /* Move the LPI from the old redistributor to the new one */
841         gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase],
842                              &s->gicv3->cpu[new_cte.rdbase],
843                              old_ite.intid);
844     }
845 
846     /* Update the ICID field in the interrupt translation table entry */
847     old_ite.icid = new_icid;
848     return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE : CMD_STALL;
849 }
850 
851 /*
852  * Update the vPE Table entry at index @vpeid with the entry @vte.
853  * Returns true on success, false if there was a memory access error.
854  */
855 static bool update_vte(GICv3ITSState *s, uint32_t vpeid, const VTEntry *vte)
856 {
857     AddressSpace *as = &s->gicv3->dma_as;
858     uint64_t entry_addr;
859     uint64_t vteval = 0;
860     MemTxResult res = MEMTX_OK;
861 
862     trace_gicv3_its_vte_write(vpeid, vte->valid, vte->vptsize, vte->vptaddr,
863                               vte->rdbase);
864 
865     if (vte->valid) {
866         vteval = FIELD_DP64(vteval, VTE, VALID, 1);
867         vteval = FIELD_DP64(vteval, VTE, VPTSIZE, vte->vptsize);
868         vteval = FIELD_DP64(vteval, VTE, VPTADDR, vte->vptaddr);
869         vteval = FIELD_DP64(vteval, VTE, RDBASE, vte->rdbase);
870     }
871 
872     entry_addr = table_entry_addr(s, &s->vpet, vpeid, &res);
873     if (res != MEMTX_OK) {
874         return false;
875     }
876     if (entry_addr == -1) {
877         /* No L2 table for this index: discard write and continue */
878         return true;
879     }
880     address_space_stq_le(as, entry_addr, vteval, MEMTXATTRS_UNSPECIFIED, &res);
881     return res == MEMTX_OK;
882 }
883 
884 static ItsCmdResult process_vmapp(GICv3ITSState *s, const uint64_t *cmdpkt)
885 {
886     VTEntry vte;
887     uint32_t vpeid;
888 
889     if (!its_feature_virtual(s)) {
890         return CMD_CONTINUE;
891     }
892 
893     vpeid = FIELD_EX64(cmdpkt[1], VMAPP_1, VPEID);
894     vte.rdbase = FIELD_EX64(cmdpkt[2], VMAPP_2, RDBASE);
895     vte.valid = FIELD_EX64(cmdpkt[2], VMAPP_2, V);
896     vte.vptsize = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTSIZE);
897     vte.vptaddr = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTADDR);
898 
899     trace_gicv3_its_cmd_vmapp(vpeid, vte.rdbase, vte.valid,
900                               vte.vptaddr, vte.vptsize);
901 
902     /*
903      * For GICv4.0 the VPT_size field is only 5 bits, whereas we
904      * define our field macros to include the full GICv4.1 8 bits.
905      * The range check on VPT_size will catch the cases where
906      * the guest set the RES0-in-GICv4.0 bits [7:6].
907      */
908     if (vte.vptsize > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
909         qemu_log_mask(LOG_GUEST_ERROR,
910                       "%s: invalid VPT_size 0x%x\n", __func__, vte.vptsize);
911         return CMD_CONTINUE;
912     }
913 
914     if (vte.valid && vte.rdbase >= s->gicv3->num_cpu) {
915         qemu_log_mask(LOG_GUEST_ERROR,
916                       "%s: invalid rdbase 0x%x\n", __func__, vte.rdbase);
917         return CMD_CONTINUE;
918     }
919 
920     if (vpeid >= s->vpet.num_entries) {
921         qemu_log_mask(LOG_GUEST_ERROR,
922                       "%s: VPEID 0x%x out of range (must be less than 0x%x)\n",
923                       __func__, vpeid, s->vpet.num_entries);
924         return CMD_CONTINUE;
925     }
926 
927     return update_vte(s, vpeid, &vte) ? CMD_CONTINUE : CMD_STALL;
928 }
929 
930 /*
931  * Current implementation blocks until all
932  * commands are processed
933  */
934 static void process_cmdq(GICv3ITSState *s)
935 {
936     uint32_t wr_offset = 0;
937     uint32_t rd_offset = 0;
938     uint32_t cq_offset = 0;
939     AddressSpace *as = &s->gicv3->dma_as;
940     uint8_t cmd;
941     int i;
942 
943     if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
944         return;
945     }
946 
947     wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
948 
949     if (wr_offset >= s->cq.num_entries) {
950         qemu_log_mask(LOG_GUEST_ERROR,
951                       "%s: invalid write offset "
952                       "%d\n", __func__, wr_offset);
953         return;
954     }
955 
956     rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
957 
958     if (rd_offset >= s->cq.num_entries) {
959         qemu_log_mask(LOG_GUEST_ERROR,
960                       "%s: invalid read offset "
961                       "%d\n", __func__, rd_offset);
962         return;
963     }
964 
965     while (wr_offset != rd_offset) {
966         ItsCmdResult result = CMD_CONTINUE;
967         void *hostmem;
968         hwaddr buflen;
969         uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];
970 
971         cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
972 
973         buflen = GITS_CMDQ_ENTRY_SIZE;
974         hostmem = address_space_map(as, s->cq.base_addr + cq_offset,
975                                     &buflen, false, MEMTXATTRS_UNSPECIFIED);
976         if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) {
977             if (hostmem) {
978                 address_space_unmap(as, hostmem, buflen, false, 0);
979             }
980             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
981             qemu_log_mask(LOG_GUEST_ERROR,
982                           "%s: could not read command at 0x%" PRIx64 "\n",
983                           __func__, s->cq.base_addr + cq_offset);
984             break;
985         }
986         for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) {
987             cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t));
988         }
989         address_space_unmap(as, hostmem, buflen, false, 0);
990 
991         cmd = cmdpkt[0] & CMD_MASK;
992 
993         trace_gicv3_its_process_command(rd_offset, cmd);
994 
995         switch (cmd) {
996         case GITS_CMD_INT:
997             result = process_its_cmd(s, cmdpkt, INTERRUPT);
998             break;
999         case GITS_CMD_CLEAR:
1000             result = process_its_cmd(s, cmdpkt, CLEAR);
1001             break;
1002         case GITS_CMD_SYNC:
1003             /*
1004              * Current implementation makes a blocking synchronous call
1005              * for every command issued earlier, so the internal state
1006              * is already consistent by the time the SYNC command is
1007              * executed. Hence no further processing is required for it.
1008              */
1009             trace_gicv3_its_cmd_sync();
1010             break;
1011         case GITS_CMD_MAPD:
1012             result = process_mapd(s, cmdpkt);
1013             break;
1014         case GITS_CMD_MAPC:
1015             result = process_mapc(s, cmdpkt);
1016             break;
1017         case GITS_CMD_MAPTI:
1018             result = process_mapti(s, cmdpkt, false);
1019             break;
1020         case GITS_CMD_MAPI:
1021             result = process_mapti(s, cmdpkt, true);
1022             break;
1023         case GITS_CMD_DISCARD:
1024             result = process_its_cmd(s, cmdpkt, DISCARD);
1025             break;
1026         case GITS_CMD_INV:
1027         case GITS_CMD_INVALL:
1028             /*
1029              * Current implementation doesn't cache any ITS tables,
1030              * only the calculated LPI priority information. We only need
1031              * to trigger an LPI priority re-calculation to stay in sync
1032              * with LPI config table or pending table changes.
1033              */
1034             trace_gicv3_its_cmd_inv();
1035             for (i = 0; i < s->gicv3->num_cpu; i++) {
1036                 gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
1037             }
1038             break;
1039         case GITS_CMD_MOVI:
1040             result = process_movi(s, cmdpkt);
1041             break;
1042         case GITS_CMD_MOVALL:
1043             result = process_movall(s, cmdpkt);
1044             break;
1045         case GITS_CMD_VMAPTI:
1046             result = process_vmapti(s, cmdpkt, false);
1047             break;
1048         case GITS_CMD_VMAPI:
1049             result = process_vmapti(s, cmdpkt, true);
1050             break;
1051         case GITS_CMD_VMAPP:
1052             result = process_vmapp(s, cmdpkt);
1053             break;
1054         default:
1055             trace_gicv3_its_cmd_unknown(cmd);
1056             break;
1057         }
1058         if (result == CMD_CONTINUE) {
1059             rd_offset++;
1060             rd_offset %= s->cq.num_entries;
1061             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
1062         } else {
1063             /* CMD_STALL */
1064             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
1065             qemu_log_mask(LOG_GUEST_ERROR,
1066                           "%s: 0x%x cmd processing failed, stalling\n",
1067                           __func__, cmd);
1068             break;
1069         }
1070     }
1071 }
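
/*
 * Worked example of the read/write pointer handling above (illustrative):
 * CREADR.OFFSET and CWRITER.OFFSET index GITS_CMDQ_ENTRY_SIZE-byte slots in
 * the command queue. With a one-page queue (128 slots), if the guest has
 * written commands into slots 126, 127, 0 and 1 and then sets
 * CWRITER.OFFSET = 2 while CREADR.OFFSET = 126, the loop processes slots
 * 126, 127, 0, 1 (wrapping modulo cq.num_entries) and stops when
 * rd_offset == wr_offset. A command that returns CMD_STALL instead sets
 * GITS_CREADR.Stalled and leaves CREADR pointing at the failing command.
 */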
1072 
1073 /*
1074  * This function extracts the ITS Device and Collection table specific
1075  * parameters (like base_addr, size etc) from the GITS_BASER<n> registers.
1076  * It is called during ITS enable and also during post_load migration.
1077  */
1078 static void extract_table_params(GICv3ITSState *s)
1079 {
1080     uint16_t num_pages = 0;
1081     uint8_t  page_sz_type;
1082     uint8_t type;
1083     uint32_t page_sz = 0;
1084     uint64_t value;
1085 
1086     for (int i = 0; i < 8; i++) {
1087         TableDesc *td;
1088         int idbits;
1089 
1090         value = s->baser[i];
1091 
1092         if (!value) {
1093             continue;
1094         }
1095 
1096         page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);
1097 
1098         switch (page_sz_type) {
1099         case 0:
1100             page_sz = GITS_PAGE_SIZE_4K;
1101             break;
1102 
1103         case 1:
1104             page_sz = GITS_PAGE_SIZE_16K;
1105             break;
1106 
1107         case 2:
1108         case 3:
1109             page_sz = GITS_PAGE_SIZE_64K;
1110             break;
1111 
1112         default:
1113             g_assert_not_reached();
1114         }
1115 
1116         num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;
1117 
1118         type = FIELD_EX64(value, GITS_BASER, TYPE);
1119 
1120         switch (type) {
1121         case GITS_BASER_TYPE_DEVICE:
1122             td = &s->dt;
1123             idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
1124             break;
1125         case GITS_BASER_TYPE_COLLECTION:
1126             td = &s->ct;
1127             if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
1128                 idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
1129             } else {
1130                 /* 16-bit CollectionId supported when CIL == 0 */
1131                 idbits = 16;
1132             }
1133             break;
1134         case GITS_BASER_TYPE_VPE:
1135             td = &s->vpet;
1136             /*
1137              * For QEMU vPEIDs are always 16 bits. (GICv4.1 allows an
1138              * implementation to implement fewer bits and report this
1139              * via GICD_TYPER2.)
1140              */
1141             idbits = 16;
1142             break;
1143         default:
1144             /*
1145              * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
1146              * ensures we will only see type values corresponding to
1147              * the values set up in gicv3_its_reset().
1148              */
1149             g_assert_not_reached();
1150         }
1151 
1152         memset(td, 0, sizeof(*td));
1153         /*
1154          * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
1155          * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
1156          * do not have a special case where the GITS_BASER<n>.Valid bit is 0
1157          * for the register corresponding to the Collection table but we
1158          * still have to process interrupts using non-memory-backed
1159          * Collection table entries.)
1160          * The specification makes it UNPREDICTABLE to enable the ITS without
1161          * marking each BASER<n> as valid. We choose to handle these as if
1162          * the table was zero-sized, so commands using the table will fail
1163          * and interrupts requested via GITS_TRANSLATER writes will be ignored.
1164          * This happens automatically by leaving the num_entries field at
1165          * zero, which will be caught by the bounds checks we have before
1166          * every table lookup anyway.
1167          */
1168         if (!FIELD_EX64(value, GITS_BASER, VALID)) {
1169             continue;
1170         }
1171         td->page_sz = page_sz;
1172         td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
1173         td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
1174         td->base_addr = baser_base_addr(value, page_sz);
1175         if (!td->indirect) {
1176             td->num_entries = (num_pages * page_sz) / td->entry_sz;
1177         } else {
1178             td->num_entries = (((num_pages * page_sz) /
1179                                   L1TABLE_ENTRY_SIZE) *
1180                                  (page_sz / td->entry_sz));
1181         }
1182         td->num_entries = MIN(td->num_entries, 1ULL << idbits);
1183     }
1184 }
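
/*
 * Worked example (illustrative): for a flat table with two 64K pages and
 * 8-byte entries, num_entries = (2 * 65536) / 8 = 16384. For an indirect
 * (two-level) table with the same GITS_BASER programming, the two pages hold
 * (2 * 65536) / L1TABLE_ENTRY_SIZE = 16384 level-1 entries, each pointing at
 * a 64K level-2 page of 65536 / 8 = 8192 entries, giving 16384 * 8192
 * entries before the MIN() against 1 << idbits caps it at the number of IDs
 * the ITS actually supports.
 */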
1185 
1186 static void extract_cmdq_params(GICv3ITSState *s)
1187 {
1188     uint16_t num_pages = 0;
1189     uint64_t value = s->cbaser;
1190 
1191     num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
1192 
1193     memset(&s->cq, 0 , sizeof(s->cq));
1194 
1195     if (FIELD_EX64(value, GITS_CBASER, VALID)) {
1196         s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
1197                              GITS_CMDQ_ENTRY_SIZE;
1198         s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
1199         s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
1200     }
1201 }
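
/*
 * Worked example (illustrative): GITS_CBASER.Size is the number of 4KB pages
 * minus one and each command slot is GITS_CMDQ_ENTRY_SIZE (32) bytes, so
 * Size == 3 gives a queue of 4 * 4096 / 32 = 512 commands starting at the
 * (4KB-aligned) address in the PHYADDR field.
 */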
1202 
1203 static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
1204                                               uint64_t *data, unsigned size,
1205                                               MemTxAttrs attrs)
1206 {
1207     /*
1208      * GITS_TRANSLATER is write-only, and all other addresses
1209      * in the interrupt translation space frame are RES0.
1210      */
1211     *data = 0;
1212     return MEMTX_OK;
1213 }
1214 
1215 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
1216                                                uint64_t data, unsigned size,
1217                                                MemTxAttrs attrs)
1218 {
1219     GICv3ITSState *s = (GICv3ITSState *)opaque;
1220     bool result = true;
1221 
1222     trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);
1223 
1224     switch (offset) {
1225     case GITS_TRANSLATER:
1226         if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1227             result = do_process_its_cmd(s, attrs.requester_id, data, NONE);
1228         }
1229         break;
1230     default:
1231         break;
1232     }
1233 
1234     if (result) {
1235         return MEMTX_OK;
1236     } else {
1237         return MEMTX_ERROR;
1238     }
1239 }
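
/*
 * Note (informational): this is how a device raises an LPI without going
 * through the command queue. A write of the EventID to GITS_TRANSLATER is
 * translated using the writer's requester ID (from MemTxAttrs) as the
 * DeviceID, exactly like an INT command for that (DeviceID, EventID) pair,
 * except that a memory error during translation is reported back as
 * MEMTX_ERROR rather than stalling the command queue.
 */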
1240 
1241 static bool its_writel(GICv3ITSState *s, hwaddr offset,
1242                               uint64_t value, MemTxAttrs attrs)
1243 {
1244     bool result = true;
1245     int index;
1246 
1247     switch (offset) {
1248     case GITS_CTLR:
1249         if (value & R_GITS_CTLR_ENABLED_MASK) {
1250             s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
1251             extract_table_params(s);
1252             extract_cmdq_params(s);
1253             process_cmdq(s);
1254         } else {
1255             s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
1256         }
1257         break;
1258     case GITS_CBASER:
1259         /*
1260          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1261          *                 already enabled
1262          */
1263         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1264             s->cbaser = deposit64(s->cbaser, 0, 32, value);
1265             s->creadr = 0;
1266         }
1267         break;
1268     case GITS_CBASER + 4:
1269         /*
1270          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1271          *                 already enabled
1272          */
1273         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1274             s->cbaser = deposit64(s->cbaser, 32, 32, value);
1275             s->creadr = 0;
1276         }
1277         break;
1278     case GITS_CWRITER:
1279         s->cwriter = deposit64(s->cwriter, 0, 32,
1280                                (value & ~R_GITS_CWRITER_RETRY_MASK));
1281         if (s->cwriter != s->creadr) {
1282             process_cmdq(s);
1283         }
1284         break;
1285     case GITS_CWRITER + 4:
1286         s->cwriter = deposit64(s->cwriter, 32, 32, value);
1287         break;
1288     case GITS_CREADR:
1289         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1290             s->creadr = deposit64(s->creadr, 0, 32,
1291                                   (value & ~R_GITS_CREADR_STALLED_MASK));
1292         } else {
1293             /* RO register, ignore the write */
1294             qemu_log_mask(LOG_GUEST_ERROR,
1295                           "%s: invalid guest write to RO register at offset "
1296                           TARGET_FMT_plx "\n", __func__, offset);
1297         }
1298         break;
1299     case GITS_CREADR + 4:
1300         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1301             s->creadr = deposit64(s->creadr, 32, 32, value);
1302         } else {
1303             /* RO register, ignore the write */
1304             qemu_log_mask(LOG_GUEST_ERROR,
1305                           "%s: invalid guest write to RO register at offset "
1306                           TARGET_FMT_plx "\n", __func__, offset);
1307         }
1308         break;
1309     case GITS_BASER ... GITS_BASER + 0x3f:
1310         /*
1311          * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
1312          *                 already enabled
1313          */
1314         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1315             index = (offset - GITS_BASER) / 8;
1316 
1317             if (s->baser[index] == 0) {
1318                 /* Unimplemented GITS_BASERn: RAZ/WI */
1319                 break;
1320             }
1321             if (offset & 7) {
1322                 value <<= 32;
1323                 value &= ~GITS_BASER_RO_MASK;
1324                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
1325                 s->baser[index] |= value;
1326             } else {
1327                 value &= ~GITS_BASER_RO_MASK;
1328                 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
1329                 s->baser[index] |= value;
1330             }
1331         }
1332         break;
1333     case GITS_IIDR:
1334     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1335         /* RO registers, ignore the write */
1336         qemu_log_mask(LOG_GUEST_ERROR,
1337                       "%s: invalid guest write to RO register at offset "
1338                       TARGET_FMT_plx "\n", __func__, offset);
1339         break;
1340     default:
1341         result = false;
1342         break;
1343     }
1344     return result;
1345 }
1346 
1347 static bool its_readl(GICv3ITSState *s, hwaddr offset,
1348                              uint64_t *data, MemTxAttrs attrs)
1349 {
1350     bool result = true;
1351     int index;
1352 
1353     switch (offset) {
1354     case GITS_CTLR:
1355         *data = s->ctlr;
1356         break;
1357     case GITS_IIDR:
1358         *data = gicv3_iidr();
1359         break;
1360     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1361         /* ID registers */
1362         *data = gicv3_idreg(offset - GITS_IDREGS, GICV3_PIDR0_ITS);
1363         break;
1364     case GITS_TYPER:
1365         *data = extract64(s->typer, 0, 32);
1366         break;
1367     case GITS_TYPER + 4:
1368         *data = extract64(s->typer, 32, 32);
1369         break;
1370     case GITS_CBASER:
1371         *data = extract64(s->cbaser, 0, 32);
1372         break;
1373     case GITS_CBASER + 4:
1374         *data = extract64(s->cbaser, 32, 32);
1375         break;
1376     case GITS_CREADR:
1377         *data = extract64(s->creadr, 0, 32);
1378         break;
1379     case GITS_CREADR + 4:
1380         *data = extract64(s->creadr, 32, 32);
1381         break;
1382     case GITS_CWRITER:
1383         *data = extract64(s->cwriter, 0, 32);
1384         break;
1385     case GITS_CWRITER + 4:
1386         *data = extract64(s->cwriter, 32, 32);
1387         break;
1388     case GITS_BASER ... GITS_BASER + 0x3f:
1389         index = (offset - GITS_BASER) / 8;
1390         if (offset & 7) {
1391             *data = extract64(s->baser[index], 32, 32);
1392         } else {
1393             *data = extract64(s->baser[index], 0, 32);
1394         }
1395         break;
1396     default:
1397         result = false;
1398         break;
1399     }
1400     return result;
1401 }
1402 
1403 static bool its_writell(GICv3ITSState *s, hwaddr offset,
1404                                uint64_t value, MemTxAttrs attrs)
1405 {
1406     bool result = true;
1407     int index;
1408 
1409     switch (offset) {
1410     case GITS_BASER ... GITS_BASER + 0x3f:
1411         /*
1412          * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
1413          *                 already enabled
1414          */
1415         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1416             index = (offset - GITS_BASER) / 8;
1417             if (s->baser[index] == 0) {
1418                 /* Unimplemented GITS_BASERn: RAZ/WI */
1419                 break;
1420             }
1421             s->baser[index] &= GITS_BASER_RO_MASK;
1422             s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
1423         }
1424         break;
1425     case GITS_CBASER:
1426         /*
1427          * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1428          *                 already enabled
1429          */
1430         if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1431             s->cbaser = value;
1432             s->creadr = 0;
1433         }
1434         break;
1435     case GITS_CWRITER:
1436         s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
1437         if (s->cwriter != s->creadr) {
1438             process_cmdq(s);
1439         }
1440         break;
1441     case GITS_CREADR:
1442         if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1443             s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
1444         } else {
1445             /* RO register, ignore the write */
1446             qemu_log_mask(LOG_GUEST_ERROR,
1447                           "%s: invalid guest write to RO register at offset "
1448                           TARGET_FMT_plx "\n", __func__, offset);
1449         }
1450         break;
1451     case GITS_TYPER:
1452         /* RO registers, ignore the write */
1453         qemu_log_mask(LOG_GUEST_ERROR,
1454                       "%s: invalid guest write to RO register at offset "
1455                       TARGET_FMT_plx "\n", __func__, offset);
1456         break;
1457     default:
1458         result = false;
1459         break;
1460     }
1461     return result;
1462 }
1463 
1464 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1465                               uint64_t *data, MemTxAttrs attrs)
1466 {
1467     bool result = true;
1468     int index;
1469 
1470     switch (offset) {
1471     case GITS_TYPER:
1472         *data = s->typer;
1473         break;
1474     case GITS_BASER ... GITS_BASER + 0x3f:
1475         index = (offset - GITS_BASER) / 8;
1476         *data = s->baser[index];
1477         break;
1478     case GITS_CBASER:
1479         *data = s->cbaser;
1480         break;
1481     case GITS_CREADR:
1482         *data = s->creadr;
1483         break;
1484     case GITS_CWRITER:
1485         *data = s->cwriter;
1486         break;
1487     default:
1488         result = false;
1489         break;
1490     }
1491     return result;
1492 }
1493 
1494 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1495                                   unsigned size, MemTxAttrs attrs)
1496 {
1497     GICv3ITSState *s = (GICv3ITSState *)opaque;
1498     bool result;
1499 
1500     switch (size) {
1501     case 4:
1502         result = its_readl(s, offset, data, attrs);
1503         break;
1504     case 8:
1505         result = its_readll(s, offset, data, attrs);
1506         break;
1507     default:
1508         result = false;
1509         break;
1510     }
1511 
1512     if (!result) {
1513         qemu_log_mask(LOG_GUEST_ERROR,
1514                       "%s: invalid guest read at offset " TARGET_FMT_plx
1515                       " size %u\n", __func__, offset, size);
1516         trace_gicv3_its_badread(offset, size);
1517         /*
1518          * The spec requires that reserved registers are RAZ/WI;
1519          * so use false returns from leaf functions as a way to
1520          * trigger the guest-error logging but don't return it to
1521          * the caller, or we'll cause a spurious guest data abort.
1522          */
1523         *data = 0;
1524     } else {
1525         trace_gicv3_its_read(offset, *data, size);
1526     }
1527     return MEMTX_OK;
1528 }
1529 
1530 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1531                                    unsigned size, MemTxAttrs attrs)
1532 {
1533     GICv3ITSState *s = (GICv3ITSState *)opaque;
1534     bool result;
1535 
1536     switch (size) {
1537     case 4:
1538         result = its_writel(s, offset, data, attrs);
1539         break;
1540     case 8:
1541         result = its_writell(s, offset, data, attrs);
1542         break;
1543     default:
1544         result = false;
1545         break;
1546     }
1547 
1548     if (!result) {
1549         qemu_log_mask(LOG_GUEST_ERROR,
1550                       "%s: invalid guest write at offset " TARGET_FMT_plx
1551                       " size %u\n", __func__, offset, size);
1552         trace_gicv3_its_badwrite(offset, data, size);
1553         /*
1554          * The spec requires that reserved registers are RAZ/WI;
1555          * so use false returns from leaf functions as a way to
1556          * trigger the guest-error logging but don't return it to
1557          * the caller, or we'll cause a spurious guest data abort.
1558          */
1559     } else {
1560         trace_gicv3_its_write(offset, data, size);
1561     }
1562     return MEMTX_OK;
1563 }
1564 
1565 static const MemoryRegionOps gicv3_its_control_ops = {
1566     .read_with_attrs = gicv3_its_read,
1567     .write_with_attrs = gicv3_its_write,
1568     .valid.min_access_size = 4,
1569     .valid.max_access_size = 8,
1570     .impl.min_access_size = 4,
1571     .impl.max_access_size = 8,
1572     .endianness = DEVICE_NATIVE_ENDIAN,
1573 };
1574 
1575 static const MemoryRegionOps gicv3_its_translation_ops = {
1576     .read_with_attrs = gicv3_its_translation_read,
1577     .write_with_attrs = gicv3_its_translation_write,
1578     .valid.min_access_size = 2,
1579     .valid.max_access_size = 4,
1580     .impl.min_access_size = 2,
1581     .impl.max_access_size = 4,
1582     .endianness = DEVICE_NATIVE_ENDIAN,
1583 };
1584 
1585 static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
1586 {
1587     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1588     int i;
1589 
1590     for (i = 0; i < s->gicv3->num_cpu; i++) {
1591         if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
1592             error_setg(errp, "Physical LPI not supported by CPU %d", i);
1593             return;
1594         }
1595     }
1596 
1597     gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
1598 
1599     /* set the ITS default features supported */
1600     s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
1601     s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
1602                           ITS_ITT_ENTRY_SIZE - 1);
1603     s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
1604     s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
1605     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
1606     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
1607 }
1608 
1609 static void gicv3_its_reset(DeviceState *dev)
1610 {
1611     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1612     GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);
1613 
1614     c->parent_reset(dev);
1615 
1616     /* Quiescent bit reset to 1 */
1617     s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);
1618 
1619     /*
1620      * setting GITS_BASER0.Type = 0b001 (Device)
1621      *         GITS_BASER1.Type = 0b100 (Collection Table)
1622      *         GITS_BASER2.Type = 0b010 (vPE) for GICv4 and later
1623      *         GITS_BASER<n>.Type, where n = 3 to 7, is 0b000 (Unimplemented)
1624      *         GITS_BASER<0,1>.Page_Size = 64KB
1625      * and default translation table entry size to 16 bytes
1626      */
1627     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
1628                              GITS_BASER_TYPE_DEVICE);
1629     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
1630                              GITS_BASER_PAGESIZE_64K);
1631     s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
1632                              GITS_DTE_SIZE - 1);
1633 
1634     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
1635                              GITS_BASER_TYPE_COLLECTION);
1636     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
1637                              GITS_BASER_PAGESIZE_64K);
1638     s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
1639                              GITS_CTE_SIZE - 1);
1640 
1641     if (its_feature_virtual(s)) {
1642         s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, TYPE,
1643                                  GITS_BASER_TYPE_VPE);
1644         s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, PAGESIZE,
1645                                  GITS_BASER_PAGESIZE_64K);
1646         s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, ENTRYSIZE,
1647                                  GITS_VPE_SIZE - 1);
1648     }
1649 }
1650 
1651 static void gicv3_its_post_load(GICv3ITSState *s)
1652 {
1653     if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1654         extract_table_params(s);
1655         extract_cmdq_params(s);
1656     }
1657 }
1658 
1659 static Property gicv3_its_props[] = {
1660     DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
1661                      GICv3State *),
1662     DEFINE_PROP_END_OF_LIST(),
1663 };
1664 
1665 static void gicv3_its_class_init(ObjectClass *klass, void *data)
1666 {
1667     DeviceClass *dc = DEVICE_CLASS(klass);
1668     GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
1669     GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);
1670 
1671     dc->realize = gicv3_arm_its_realize;
1672     device_class_set_props(dc, gicv3_its_props);
1673     device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
1674     icc->post_load = gicv3_its_post_load;
1675 }
1676 
1677 static const TypeInfo gicv3_its_info = {
1678     .name = TYPE_ARM_GICV3_ITS,
1679     .parent = TYPE_ARM_GICV3_ITS_COMMON,
1680     .instance_size = sizeof(GICv3ITSState),
1681     .class_init = gicv3_its_class_init,
1682     .class_size = sizeof(GICv3ITSClass),
1683 };
1684 
1685 static void gicv3_its_register_types(void)
1686 {
1687     type_register_static(&gicv3_its_info);
1688 }
1689 
1690 type_init(gicv3_its_register_types)
1691