/*
 * ITS emulation for a GICv3-based system
 *
 * Copyright Linaro.org 2021
 *
 * Authors:
 *  Shashi Mallela <shashi.mallela@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "hw/qdev-properties.h"
#include "hw/intc/arm_gicv3_its_common.h"
#include "gicv3_internal.h"
#include "qom/object.h"
#include "qapi/error.h"

typedef struct GICv3ITSClass GICv3ITSClass;
/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
                     ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)

struct GICv3ITSClass {
    GICv3ITSCommonClass parent_class;
    ResettablePhases parent_phases;
};

/*
 * This is an internal enum used to distinguish between an LPI triggered
 * via the command queue and an LPI triggered via a GITS_TRANSLATER write.
 */
typedef enum ItsCmdType {
    NONE = 0, /* internal indication for GITS_TRANSLATER write */
    CLEAR = 1,
    DISCARD = 2,
    INTERRUPT = 3,
} ItsCmdType;

typedef struct DTEntry {
    bool valid;
    unsigned size;
    uint64_t ittaddr;
} DTEntry;

typedef struct CTEntry {
    bool valid;
    uint32_t rdbase;
} CTEntry;

typedef struct ITEntry {
    bool valid;
    int inttype;
    uint32_t intid;
    uint32_t doorbell;
    uint32_t icid;
    uint32_t vpeid;
} ITEntry;

typedef struct VTEntry {
    bool valid;
    unsigned vptsize;
    uint32_t rdbase;
    uint64_t vptaddr;
} VTEntry;

/*
 * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
 * if a command parameter is not correct. These include both "stall
 * processing of the command queue" and "ignore this command, and
 * keep processing the queue". In our implementation we choose that
 * memory transaction errors reading the command packet provoke a
 * stall, but errors in parameters cause us to ignore the command
 * and continue processing.
 * The process_* functions which handle individual ITS commands all
 * return an ItsCmdResult which tells process_cmdq() whether it should
 * stall, keep going because of an error, or keep going because the
 * command was a success.
 */
typedef enum ItsCmdResult {
    CMD_STALL = 0,
    CMD_CONTINUE = 1,
    CMD_CONTINUE_OK = 2,
} ItsCmdResult;

/* True if the ITS supports the GICv4 virtual LPI feature */
static bool its_feature_virtual(GICv3ITSState *s)
{
    return s->typer & R_GITS_TYPER_VIRTUAL_MASK;
}

static inline bool intid_in_lpi_range(uint32_t id)
{
    return id >= GICV3_LPI_INTID_START &&
        id < (1 << (GICD_TYPER_IDBITS + 1));
}

static inline bool valid_doorbell(uint32_t id)
{
    /* Doorbell fields may be an LPI, or 1023 to mean "no doorbell" */
    return id == INTID_SPURIOUS || intid_in_lpi_range(id);
}

static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
{
    uint64_t result = 0;

    switch (page_sz) {
    case GITS_PAGE_SIZE_4K:
    case GITS_PAGE_SIZE_16K:
        result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
        break;

    case GITS_PAGE_SIZE_64K:
        result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
        result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
        break;

    default:
        break;
    }
    return result;
}

static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
                                 uint32_t idx, MemTxResult *res)
{
    /*
     * Given a TableDesc describing one of the ITS in-guest-memory
     * tables and an index into it, return the guest address
     * corresponding to that table entry.
     * If there was a memory error reading the L1 table of an
     * indirect table, *res is set accordingly, and we return -1.
     * If the L1 table entry is marked not valid, we return -1 with
     * *res set to MEMTX_OK.
     *
     * The specification defines the format of level 1 entries of a
     * 2-level table, but the format of level 2 entries and the format
     * of flat-mapped tables is IMPDEF.
     */
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t l2idx;
    uint64_t l2;
    uint32_t num_l2_entries;

    *res = MEMTX_OK;

    if (!td->indirect) {
        /* Single level table */
        return td->base_addr + idx * td->entry_sz;
    }

    /* Two level table */
    l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);
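    /*
     * Each L1 entry covers one page of L2 entries; for example, with
     * 4K pages and 8-byte L1 entries, indexes 0..511 map to L1 entry
     * 0, indexes 512..1023 to L1 entry 1, and so on. (The divisor
     * above agrees with the idx % num_l2_entries calculation below
     * provided td->entry_sz equals L1TABLE_ENTRY_SIZE.)
     */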

    l2 = address_space_ldq_le(as,
                              td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
                              MEMTXATTRS_UNSPECIFIED, res);
    if (*res != MEMTX_OK) {
        return -1;
    }
    if (!(l2 & L2_TABLE_VALID_MASK)) {
        return -1;
    }

    num_l2_entries = td->page_sz / td->entry_sz;
    return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
}

/*
 * Read the Collection Table entry at index @icid. On success (including
 * successfully determining that there is no valid CTE for this index),
 * we return MEMTX_OK and populate the CTEntry struct @cte accordingly.
 * If there is an error reading memory then we return the error code.
 */
static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, &res);
    uint64_t cteval;

    if (entry_addr == -1) {
        /* No L2 table entry, i.e. no valid CTE, or a memory error */
        cte->valid = false;
        goto out;
    }

    cteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        goto out;
    }
    cte->valid = FIELD_EX64(cteval, CTE, VALID);
    cte->rdbase = FIELD_EX64(cteval, CTE, RDBASE);
out:
    if (res != MEMTX_OK) {
        trace_gicv3_its_cte_read_fault(icid);
    } else {
        trace_gicv3_its_cte_read(icid, cte->valid, cte->rdbase);
    }
    return res;
}

/*
 * Update the Interrupt Table entry at index @eventid in the table specified
 * by the DTE @dte. Returns true on success, false if there was a memory
 * access error.
 */
static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
                       const ITEntry *ite)
{
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
    uint64_t itel = 0;
    uint32_t iteh = 0;

    trace_gicv3_its_ite_write(dte->ittaddr, eventid, ite->valid,
                              ite->inttype, ite->intid, ite->icid,
                              ite->vpeid, ite->doorbell);

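    /*
     * The ITT entry format is IMPDEF; the layout used here is a 64-bit
     * word (ITE_L: valid, type, intid, icid, vpeid) followed by a
     * 32-bit word (ITE_H: doorbell), stored little-endian.
     */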
    if (ite->valid) {
        itel = FIELD_DP64(itel, ITE_L, VALID, 1);
        itel = FIELD_DP64(itel, ITE_L, INTTYPE, ite->inttype);
        itel = FIELD_DP64(itel, ITE_L, INTID, ite->intid);
        itel = FIELD_DP64(itel, ITE_L, ICID, ite->icid);
        itel = FIELD_DP64(itel, ITE_L, VPEID, ite->vpeid);
        iteh = FIELD_DP32(iteh, ITE_H, DOORBELL, ite->doorbell);
    }

    address_space_stq_le(as, iteaddr, itel, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        return false;
    }
    address_space_stl_le(as, iteaddr + 8, iteh, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}

/*
 * Read the Interrupt Table entry at index @eventid from the table specified
 * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry
 * struct @ite accordingly. If there is an error reading memory then we return
 * the error code.
 */
static MemTxResult get_ite(GICv3ITSState *s, uint32_t eventid,
                           const DTEntry *dte, ITEntry *ite)
{
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    uint64_t itel;
    uint32_t iteh;
    hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;

    itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
        return res;
    }

    iteh = address_space_ldl_le(as, iteaddr + 8, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
        return res;
    }

    ite->valid = FIELD_EX64(itel, ITE_L, VALID);
    ite->inttype = FIELD_EX64(itel, ITE_L, INTTYPE);
    ite->intid = FIELD_EX64(itel, ITE_L, INTID);
    ite->icid = FIELD_EX64(itel, ITE_L, ICID);
    ite->vpeid = FIELD_EX64(itel, ITE_L, VPEID);
    ite->doorbell = FIELD_EX64(iteh, ITE_H, DOORBELL);
    trace_gicv3_its_ite_read(dte->ittaddr, eventid, ite->valid,
                             ite->inttype, ite->intid, ite->icid,
                             ite->vpeid, ite->doorbell);
    return MEMTX_OK;
}

/*
 * Read the Device Table entry at index @devid. On success (including
 * successfully determining that there is no valid DTE for this index),
 * we return MEMTX_OK and populate the DTEntry struct accordingly.
 * If there is an error reading memory then we return the error code.
 */
static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte)
{
    MemTxResult res = MEMTX_OK;
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res);
    uint64_t dteval;

    if (entry_addr == -1) {
        /* No L2 table entry, i.e. no valid DTE, or a memory error */
        dte->valid = false;
        goto out;
    }
    dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        goto out;
    }
    dte->valid = FIELD_EX64(dteval, DTE, VALID);
    dte->size = FIELD_EX64(dteval, DTE, SIZE);
    /* DTE word field stores bits [51:8] of the ITT address */
    dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT;
out:
    if (res != MEMTX_OK) {
        trace_gicv3_its_dte_read_fault(devid);
    } else {
        trace_gicv3_its_dte_read(devid, dte->valid, dte->size, dte->ittaddr);
    }
    return res;
}

/*
 * Read the vPE Table entry at index @vpeid. On success (including
 * successfully determining that there is no valid entry for this index),
 * we return MEMTX_OK and populate the VTEntry struct accordingly.
 * If there is an error reading memory then we return the error code.
 */
static MemTxResult get_vte(GICv3ITSState *s, uint32_t vpeid, VTEntry *vte)
{
    MemTxResult res = MEMTX_OK;
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr = table_entry_addr(s, &s->vpet, vpeid, &res);
    uint64_t vteval;

    if (entry_addr == -1) {
        /* No L2 table entry, i.e. no valid VTE, or a memory error */
        vte->valid = false;
        trace_gicv3_its_vte_read_fault(vpeid);
        return res;
    }
    vteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        trace_gicv3_its_vte_read_fault(vpeid);
        return res;
    }
    vte->valid = FIELD_EX64(vteval, VTE, VALID);
    vte->vptsize = FIELD_EX64(vteval, VTE, VPTSIZE);
    vte->vptaddr = FIELD_EX64(vteval, VTE, VPTADDR);
    vte->rdbase = FIELD_EX64(vteval, VTE, RDBASE);
    trace_gicv3_its_vte_read(vpeid, vte->valid, vte->vptsize,
                             vte->vptaddr, vte->rdbase);
    return res;
}

/*
 * Given a (DeviceID, EventID), look up the corresponding ITE, including
 * checking for the various invalid-value cases. If we find a valid ITE,
 * fill in @ite and @dte and return CMD_CONTINUE_OK. Otherwise return
 * CMD_STALL or CMD_CONTINUE as appropriate (and the contents of @ite
 * should not be relied on).
 *
 * The string @who is purely for the LOG_GUEST_ERROR messages,
 * and should indicate the name of the calling function or similar.
 */
static ItsCmdResult lookup_ite(GICv3ITSState *s, const char *who,
                               uint32_t devid, uint32_t eventid, ITEntry *ite,
                               DTEntry *dte)
{
    uint64_t num_eventids;

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: devid %d >= %d\n",
                      who, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }

    if (get_dte(s, devid, dte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!dte->valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid dte for %d\n", who, devid);
        return CMD_CONTINUE;
    }

    num_eventids = 1ULL << (dte->size + 1);
    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: eventid %d >= %"
                      PRId64 "\n", who, eventid, num_eventids);
        return CMD_CONTINUE;
    }

    if (get_ite(s, eventid, dte, ite) != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!ite->valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: invalid ITE\n", who);
        return CMD_CONTINUE;
    }

    return CMD_CONTINUE_OK;
}

/*
 * Given an ICID, look up the corresponding CTE, including checking for various
 * invalid-value cases. If we find a valid CTE, fill in @cte and return
 * CMD_CONTINUE_OK; otherwise return CMD_STALL or CMD_CONTINUE (and the
 * contents of @cte should not be relied on).
 *
 * The string @who is purely for the LOG_GUEST_ERROR messages,
 * and should indicate the name of the calling function or similar.
 */
static ItsCmdResult lookup_cte(GICv3ITSState *s, const char *who,
                               uint32_t icid, CTEntry *cte)
{
    if (icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid ICID 0x%x\n", who, icid);
        return CMD_CONTINUE;
    }
    if (get_cte(s, icid, cte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!cte->valid) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid CTE\n", who);
        return CMD_CONTINUE;
    }
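    /*
     * A valid CTE with an out-of-range rdbase shouldn't normally occur,
     * since process_mapc() rejects such RDBASE values; if one shows up
     * anyway we ignore the command rather than stalling.
     */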
    if (cte->rdbase >= s->gicv3->num_cpu) {
        return CMD_CONTINUE;
    }
    return CMD_CONTINUE_OK;
}

/*
 * Given a VPEID, look up the corresponding VTE, including checking
 * for various invalid-value cases. If we find a valid VTE, fill in @vte
 * and return CMD_CONTINUE_OK; otherwise return CMD_STALL or CMD_CONTINUE
 * (and the contents of @vte should not be relied on).
 *
 * The string @who is purely for the LOG_GUEST_ERROR messages,
 * and should indicate the name of the calling function or similar.
 */
static ItsCmdResult lookup_vte(GICv3ITSState *s, const char *who,
                               uint32_t vpeid, VTEntry *vte)
{
    if (vpeid >= s->vpet.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid VPEID 0x%x\n", who, vpeid);
        return CMD_CONTINUE;
    }

    if (get_vte(s, vpeid, vte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!vte->valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid VTE for VPEID 0x%x\n", who, vpeid);
        return CMD_CONTINUE;
    }

    if (vte->rdbase >= s->gicv3->num_cpu) {
        return CMD_CONTINUE;
    }
    return CMD_CONTINUE_OK;
}

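/*
 * Deliver or clear a physical LPI: look up the CTE for the ITE's
 * collection and, if it is valid, tell the corresponding redistributor
 * to set the LPI's pending state to @irqlevel.
 */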
static ItsCmdResult process_its_cmd_phys(GICv3ITSState *s, const ITEntry *ite,
                                         int irqlevel)
{
    CTEntry cte;
    ItsCmdResult cmdres;

    cmdres = lookup_cte(s, __func__, ite->icid, &cte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }
    gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite->intid, irqlevel);
    return CMD_CONTINUE_OK;
}

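/*
 * Deliver or clear a virtual LPI: look up the VTE for the ITE's vPE,
 * range-check the vINTID against the virtual pending table size, and
 * hand the level change to the redistributor.
 */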
static ItsCmdResult process_its_cmd_virt(GICv3ITSState *s, const ITEntry *ite,
                                         int irqlevel)
{
    VTEntry vte;
    ItsCmdResult cmdres;

    cmdres = lookup_vte(s, __func__, ite->vpeid, &vte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    if (!intid_in_lpi_range(ite->intid) ||
        ite->intid >= (1ULL << (vte.vptsize + 1))) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: intid 0x%x out of range\n",
                      __func__, ite->intid);
        return CMD_CONTINUE;
    }

    /*
     * For QEMU the actual pending of the vLPI is handled in the
     * redistributor code
     */
    gicv3_redist_process_vlpi(&s->gicv3->cpu[vte.rdbase], ite->intid,
                              vte.vptaddr << 16, ite->doorbell, irqlevel);
    return CMD_CONTINUE_OK;
}

/*
 * This function handles the processing of the following commands, based
 * on the ItsCmdType parameter passed in:
 * 1. triggering of LPI interrupt translation via ITS INT command
 * 2. triggering of LPI interrupt translation via GITS_TRANSLATER register
 * 3. handling of ITS CLEAR command
 * 4. handling of ITS DISCARD command
 */
static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
                                       uint32_t eventid, ItsCmdType cmd)
{
    DTEntry dte;
    ITEntry ite;
    ItsCmdResult cmdres;
    int irqlevel;

    cmdres = lookup_ite(s, __func__, devid, eventid, &ite, &dte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    irqlevel = (cmd == CLEAR || cmd == DISCARD) ? 0 : 1;

    switch (ite.inttype) {
    case ITE_INTTYPE_PHYSICAL:
        cmdres = process_its_cmd_phys(s, &ite, irqlevel);
        break;
    case ITE_INTTYPE_VIRTUAL:
        if (!its_feature_virtual(s)) {
            /* Can't happen unless guest is illegally writing to table memory */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid type %d in ITE (table corrupted?)\n",
                          __func__, ite.inttype);
            return CMD_CONTINUE;
        }
        cmdres = process_its_cmd_virt(s, &ite, irqlevel);
        break;
    default:
        g_assert_not_reached();
    }

    if (cmdres == CMD_CONTINUE_OK && cmd == DISCARD) {
        ITEntry i = {};
        /* remove mapping from interrupt translation table */
        i.valid = false;
        return update_ite(s, eventid, &dte, &i) ? CMD_CONTINUE_OK : CMD_STALL;
    }
    return CMD_CONTINUE_OK;
}

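/*
 * Handle the INT, CLEAR and DISCARD commands: decode the DeviceID and
 * EventID from the command packet and defer to do_process_its_cmd().
 */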
static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt,
                                    ItsCmdType cmd)
{
    uint32_t devid, eventid;

    devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
    eventid = cmdpkt[1] & EVENTID_MASK;
    switch (cmd) {
    case INTERRUPT:
        trace_gicv3_its_cmd_int(devid, eventid);
        break;
    case CLEAR:
        trace_gicv3_its_cmd_clear(devid, eventid);
        break;
    case DISCARD:
        trace_gicv3_its_cmd_discard(devid, eventid);
        break;
    default:
        g_assert_not_reached();
    }
    return do_process_its_cmd(s, devid, eventid, cmd);
}

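/*
 * Handle the MAPTI and MAPI commands (@ignore_pInt is true for MAPI,
 * where the physical interrupt ID is taken to be the EventID): map a
 * (DeviceID, EventID) pair to a physical LPI and collection by writing
 * an entry into the device's interrupt translation table.
 */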
static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
                                  bool ignore_pInt)
{
    uint32_t devid, eventid;
    uint32_t pIntid = 0;
    uint64_t num_eventids;
    uint16_t icid = 0;
    DTEntry dte;
    ITEntry ite;

    devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
    eventid = cmdpkt[1] & EVENTID_MASK;
    icid = cmdpkt[2] & ICID_MASK;

    if (ignore_pInt) {
        pIntid = eventid;
        trace_gicv3_its_cmd_mapi(devid, eventid, icid);
    } else {
        pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT;
        trace_gicv3_its_cmd_mapti(devid, eventid, icid, pIntid);
    }

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: devid %d >= %d\n",
                      __func__, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }

    if (get_dte(s, devid, &dte) != MEMTX_OK) {
        return CMD_STALL;
    }
    num_eventids = 1ULL << (dte.size + 1);

    if (icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid ICID 0x%x >= 0x%x\n",
                      __func__, icid, s->ct.num_entries);
        return CMD_CONTINUE;
    }

    if (!dte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: no valid DTE for devid 0x%x\n", __func__, devid);
        return CMD_CONTINUE;
    }

    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid event ID 0x%x >= 0x%" PRIx64 "\n",
                      __func__, eventid, num_eventids);
        return CMD_CONTINUE;
    }

    if (!intid_in_lpi_range(pIntid)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid interrupt ID 0x%x\n", __func__, pIntid);
        return CMD_CONTINUE;
    }

    /* add ite entry to interrupt translation table */
    ite.valid = true;
    ite.inttype = ITE_INTTYPE_PHYSICAL;
    ite.intid = pIntid;
    ite.icid = icid;
    ite.doorbell = INTID_SPURIOUS;
    ite.vpeid = 0;
    return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
}

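/*
 * Handle the VMAPTI and VMAPI commands (@ignore_vintid is true for
 * VMAPI, where the vINTID is taken to be the EventID): map a
 * (DeviceID, EventID) pair to a virtual LPI, vPE and doorbell by
 * writing an entry into the device's interrupt translation table.
 */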
static ItsCmdResult process_vmapti(GICv3ITSState *s, const uint64_t *cmdpkt,
                                   bool ignore_vintid)
{
    uint32_t devid, eventid, vintid, doorbell, vpeid;
    uint32_t num_eventids;
    DTEntry dte;
    ITEntry ite;

    if (!its_feature_virtual(s)) {
        return CMD_CONTINUE;
    }

    devid = FIELD_EX64(cmdpkt[0], VMAPTI_0, DEVICEID);
    eventid = FIELD_EX64(cmdpkt[1], VMAPTI_1, EVENTID);
    vpeid = FIELD_EX64(cmdpkt[1], VMAPTI_1, VPEID);
    doorbell = FIELD_EX64(cmdpkt[2], VMAPTI_2, DOORBELL);
    if (ignore_vintid) {
        vintid = eventid;
        trace_gicv3_its_cmd_vmapi(devid, eventid, vpeid, doorbell);
    } else {
        vintid = FIELD_EX64(cmdpkt[2], VMAPTI_2, VINTID);
        trace_gicv3_its_cmd_vmapti(devid, eventid, vpeid, vintid, doorbell);
    }

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid DeviceID 0x%x (must be less than 0x%x)\n",
                      __func__, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }

    if (get_dte(s, devid, &dte) != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!dte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: no entry in device table for DeviceID 0x%x\n",
                      __func__, devid);
        return CMD_CONTINUE;
    }

    num_eventids = 1ULL << (dte.size + 1);

    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: EventID 0x%x too large for DeviceID 0x%x "
                      "(must be less than 0x%x)\n",
                      __func__, eventid, devid, num_eventids);
        return CMD_CONTINUE;
    }
    if (!intid_in_lpi_range(vintid)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: VIntID 0x%x not a valid LPI\n",
                      __func__, vintid);
        return CMD_CONTINUE;
    }
    if (!valid_doorbell(doorbell)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Doorbell %d not 1023 and not a valid LPI\n",
                      __func__, doorbell);
        return CMD_CONTINUE;
    }
    if (vpeid >= s->vpet.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: VPEID 0x%x out of range (must be less than 0x%x)\n",
                      __func__, vpeid, s->vpet.num_entries);
        return CMD_CONTINUE;
    }
    /* add ite entry to interrupt translation table */
    ite.valid = true;
    ite.inttype = ITE_INTTYPE_VIRTUAL;
    ite.intid = vintid;
    ite.icid = 0;
    ite.doorbell = doorbell;
    ite.vpeid = vpeid;
    return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
}

/*
 * Update the Collection Table entry for @icid to @cte. Returns true
 * on success, false if there was a memory access error.
 */
static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr;
    uint64_t cteval = 0;
    MemTxResult res = MEMTX_OK;

    trace_gicv3_its_cte_write(icid, cte->valid, cte->rdbase);

    if (cte->valid) {
        /* add mapping entry to collection table */
        cteval = FIELD_DP64(cteval, CTE, VALID, 1);
        cteval = FIELD_DP64(cteval, CTE, RDBASE, cte->rdbase);
    }

    entry_addr = table_entry_addr(s, &s->ct, icid, &res);
    if (res != MEMTX_OK) {
        /* memory access error: stall */
        return false;
    }
    if (entry_addr == -1) {
        /* No L2 table for this index: discard write and continue */
        return true;
    }

    address_space_stq_le(as, entry_addr, cteval, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}

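/*
 * Handle the MAPC command: map a collection (ICID) to a target
 * redistributor.
 */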
static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint16_t icid;
    CTEntry cte;

    icid = cmdpkt[2] & ICID_MASK;
    cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
    if (cte.valid) {
        cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
        cte.rdbase &= RDBASE_PROCNUM_MASK;
    } else {
        cte.rdbase = 0;
    }
    trace_gicv3_its_cmd_mapc(icid, cte.rdbase, cte.valid);

    if (icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR, "ITS MAPC: invalid ICID 0x%x\n", icid);
        return CMD_CONTINUE;
    }
    if (cte.valid && cte.rdbase >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPC: invalid RDBASE %u\n", cte.rdbase);
        return CMD_CONTINUE;
    }

    return update_cte(s, icid, &cte) ? CMD_CONTINUE_OK : CMD_STALL;
}

/*
 * Update the Device Table entry for @devid to @dte. Returns true
 * on success, false if there was a memory access error.
 */
static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr;
    uint64_t dteval = 0;
    MemTxResult res = MEMTX_OK;

    trace_gicv3_its_dte_write(devid, dte->valid, dte->size, dte->ittaddr);

    if (dte->valid) {
        /* add mapping entry to device table */
        dteval = FIELD_DP64(dteval, DTE, VALID, 1);
        dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size);
        dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr);
    }

    entry_addr = table_entry_addr(s, &s->dt, devid, &res);
    if (res != MEMTX_OK) {
        /* memory access error: stall */
        return false;
    }
    if (entry_addr == -1) {
        /* No L2 table for this index: discard write and continue */
        return true;
    }
    address_space_stq_le(as, entry_addr, dteval, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}

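/*
 * Handle the MAPD command: map a DeviceID to the address and size of
 * its interrupt translation table, by writing the device table entry.
 */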
static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid;
    DTEntry dte;

    devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
    dte.size = cmdpkt[1] & SIZE_MASK;
    dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
    dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;

    trace_gicv3_its_cmd_mapd(devid, dte.size, dte.ittaddr, dte.valid);

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPD: invalid device ID field 0x%x >= 0x%x\n",
                      devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }

    if (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPD: invalid size %d\n", dte.size);
        return CMD_CONTINUE;
    }

    return update_dte(s, devid, &dte) ? CMD_CONTINUE_OK : CMD_STALL;
}

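/*
 * Handle the MOVALL command: move all pending LPIs from one
 * redistributor to another.
 */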
static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint64_t rd1, rd2;

    rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1);
    rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2);

    trace_gicv3_its_cmd_movall(rd1, rd2);

    if (rd1 >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: RDBASE1 %" PRId64
                      " out of range (must be less than %d)\n",
                      __func__, rd1, s->gicv3->num_cpu);
        return CMD_CONTINUE;
    }
    if (rd2 >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: RDBASE2 %" PRId64
                      " out of range (must be less than %d)\n",
                      __func__, rd2, s->gicv3->num_cpu);
        return CMD_CONTINUE;
    }

    if (rd1 == rd2) {
        /* Move to same target must succeed as a no-op */
        return CMD_CONTINUE_OK;
    }

    /* Move all pending LPIs from redistributor 1 to redistributor 2 */
    gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]);

    return CMD_CONTINUE_OK;
}

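/*
 * Handle the MOVI command: retarget a mapped physical LPI to a new
 * collection, moving any pending state from the old redistributor to
 * the new one and rewriting the ITE's ICID field.
 */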
static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid, eventid;
    uint16_t new_icid;
    DTEntry dte;
    CTEntry old_cte, new_cte;
    ITEntry old_ite;
    ItsCmdResult cmdres;

    devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
    eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
    new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID);

    trace_gicv3_its_cmd_movi(devid, eventid, new_icid);

    cmdres = lookup_ite(s, __func__, devid, eventid, &old_ite, &dte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    if (old_ite.inttype != ITE_INTTYPE_PHYSICAL) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: invalid ITE\n",
                      __func__);
        return CMD_CONTINUE;
    }

    cmdres = lookup_cte(s, __func__, old_ite.icid, &old_cte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }
    cmdres = lookup_cte(s, __func__, new_icid, &new_cte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    if (old_cte.rdbase != new_cte.rdbase) {
        /* Move the LPI from the old redistributor to the new one */
        gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase],
                             &s->gicv3->cpu[new_cte.rdbase],
                             old_ite.intid);
    }

    /* Update the ICID field in the interrupt translation table entry */
    old_ite.icid = new_icid;
    return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE_OK : CMD_STALL;
}

/*
 * Update the vPE Table entry at index @vpeid with the entry @vte.
 * Returns true on success, false if there was a memory access error.
 */
static bool update_vte(GICv3ITSState *s, uint32_t vpeid, const VTEntry *vte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr;
    uint64_t vteval = 0;
    MemTxResult res = MEMTX_OK;

    trace_gicv3_its_vte_write(vpeid, vte->valid, vte->vptsize, vte->vptaddr,
                              vte->rdbase);

    if (vte->valid) {
        vteval = FIELD_DP64(vteval, VTE, VALID, 1);
        vteval = FIELD_DP64(vteval, VTE, VPTSIZE, vte->vptsize);
        vteval = FIELD_DP64(vteval, VTE, VPTADDR, vte->vptaddr);
        vteval = FIELD_DP64(vteval, VTE, RDBASE, vte->rdbase);
    }

    entry_addr = table_entry_addr(s, &s->vpet, vpeid, &res);
    if (res != MEMTX_OK) {
        return false;
    }
    if (entry_addr == -1) {
        /* No L2 table for this index: discard write and continue */
        return true;
    }
    address_space_stq_le(as, entry_addr, vteval, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}

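/*
 * Handle the VMAPP command: map a vPE to a redistributor and a virtual
 * LPI pending table.
 */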
static ItsCmdResult process_vmapp(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    VTEntry vte;
    uint32_t vpeid;

    if (!its_feature_virtual(s)) {
        return CMD_CONTINUE;
    }

    vpeid = FIELD_EX64(cmdpkt[1], VMAPP_1, VPEID);
    vte.rdbase = FIELD_EX64(cmdpkt[2], VMAPP_2, RDBASE);
    vte.valid = FIELD_EX64(cmdpkt[2], VMAPP_2, V);
    vte.vptsize = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTSIZE);
    vte.vptaddr = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTADDR);

    trace_gicv3_its_cmd_vmapp(vpeid, vte.rdbase, vte.valid,
                              vte.vptaddr, vte.vptsize);

    /*
     * For GICv4.0 the VPT_size field is only 5 bits, whereas we
     * define our field macros to include the full GICv4.1 8 bits.
     * The range check on VPT_size will catch the cases where
     * the guest set the RES0-in-GICv4.0 bits [7:6].
     */
    if (vte.vptsize > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid VPT_size 0x%x\n", __func__, vte.vptsize);
        return CMD_CONTINUE;
    }

    if (vte.valid && vte.rdbase >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid rdbase 0x%x\n", __func__, vte.rdbase);
        return CMD_CONTINUE;
    }

    if (vpeid >= s->vpet.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: VPEID 0x%x out of range (must be less than 0x%x)\n",
                      __func__, vpeid, s->vpet.num_entries);
        return CMD_CONTINUE;
    }

    return update_vte(s, vpeid, &vte) ? CMD_CONTINUE_OK : CMD_STALL;
}

typedef struct VmovpCallbackData {
    uint64_t rdbase;
    uint32_t vpeid;
    /*
     * Overall command result. If more than one callback finds an
     * error, STALL beats CONTINUE.
     */
    ItsCmdResult result;
} VmovpCallbackData;

static void vmovp_callback(gpointer data, gpointer opaque)
{
    /*
     * This function is called to update the RDBASE field in a VPE
     * table entry for this ITS. This might be because of a VMOVP
     * command executed on any ITS that is connected to the same GIC
     * as this ITS. We need to read the VPE table entry for the VPEID
     * and update its RDBASE field.
     */
    GICv3ITSState *s = data;
    VmovpCallbackData *cbdata = opaque;
    VTEntry vte;
    ItsCmdResult cmdres;

    cmdres = lookup_vte(s, __func__, cbdata->vpeid, &vte);
    switch (cmdres) {
    case CMD_STALL:
        cbdata->result = CMD_STALL;
        return;
    case CMD_CONTINUE:
        if (cbdata->result != CMD_STALL) {
            cbdata->result = CMD_CONTINUE;
        }
        return;
    case CMD_CONTINUE_OK:
        break;
    }

    vte.rdbase = cbdata->rdbase;
    if (!update_vte(s, cbdata->vpeid, &vte)) {
        cbdata->result = CMD_STALL;
    }
}

static ItsCmdResult process_vmovp(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    VmovpCallbackData cbdata;

    if (!its_feature_virtual(s)) {
        return CMD_CONTINUE;
    }

    cbdata.vpeid = FIELD_EX64(cmdpkt[1], VMOVP_1, VPEID);
    cbdata.rdbase = FIELD_EX64(cmdpkt[2], VMOVP_2, RDBASE);

    trace_gicv3_its_cmd_vmovp(cbdata.vpeid, cbdata.rdbase);

    if (cbdata.rdbase >= s->gicv3->num_cpu) {
        return CMD_CONTINUE;
    }

    /*
     * Our ITS implementation reports GITS_TYPER.VMOVP == 1, which means
     * that when the VMOVP command is executed on an ITS to change the
     * RDBASE field in a VPE table entry the change must be propagated
     * to all the ITSes connected to the same GIC.
     */
    cbdata.result = CMD_CONTINUE_OK;
    gicv3_foreach_its(s->gicv3, vmovp_callback, &cbdata);
    return cbdata.result;
}

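/*
 * Handle the VMOVI command: move a mapped vLPI to a different vPE
 * (optionally updating its doorbell as well), moving its pending state
 * between the old and new virtual pending tables.
 */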
static ItsCmdResult process_vmovi(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid, eventid, vpeid, doorbell;
    bool doorbell_valid;
    DTEntry dte;
    ITEntry ite;
    VTEntry old_vte, new_vte;
    ItsCmdResult cmdres;

    if (!its_feature_virtual(s)) {
        return CMD_CONTINUE;
    }

    devid = FIELD_EX64(cmdpkt[0], VMOVI_0, DEVICEID);
    eventid = FIELD_EX64(cmdpkt[1], VMOVI_1, EVENTID);
    vpeid = FIELD_EX64(cmdpkt[1], VMOVI_1, VPEID);
    doorbell_valid = FIELD_EX64(cmdpkt[2], VMOVI_2, D);
    doorbell = FIELD_EX64(cmdpkt[2], VMOVI_2, DOORBELL);

    trace_gicv3_its_cmd_vmovi(devid, eventid, vpeid, doorbell_valid, doorbell);

    if (doorbell_valid && !valid_doorbell(doorbell)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid doorbell 0x%x\n", __func__, doorbell);
        return CMD_CONTINUE;
    }

    cmdres = lookup_ite(s, __func__, devid, eventid, &ite, &dte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    if (ite.inttype != ITE_INTTYPE_VIRTUAL) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: ITE is not for virtual interrupt\n",
                      __func__);
        return CMD_CONTINUE;
    }

    cmdres = lookup_vte(s, __func__, ite.vpeid, &old_vte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }
    cmdres = lookup_vte(s, __func__, vpeid, &new_vte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    if (!intid_in_lpi_range(ite.intid) ||
        ite.intid >= (1ULL << (old_vte.vptsize + 1)) ||
        ite.intid >= (1ULL << (new_vte.vptsize + 1))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: ITE intid 0x%x out of range\n",
                      __func__, ite.intid);
        return CMD_CONTINUE;
    }

    ite.vpeid = vpeid;
    if (doorbell_valid) {
        ite.doorbell = doorbell;
    }

    /*
     * Move the LPI from the old redistributor to the new one. We don't
     * need to do anything if the guest somehow specified the
     * same pending table for source and destination.
     */
    if (old_vte.vptaddr != new_vte.vptaddr) {
        gicv3_redist_mov_vlpi(&s->gicv3->cpu[old_vte.rdbase],
                              old_vte.vptaddr << 16,
                              &s->gicv3->cpu[new_vte.rdbase],
                              new_vte.vptaddr << 16,
                              ite.intid,
                              ite.doorbell);
    }

    /* Update the ITE to the new VPEID and possibly doorbell values */
    return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
}

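/*
 * Handle the VINVALL command: forward the "invalidate cached vLPI
 * configuration" operation to the redistributor owning the vPE.
 */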
static ItsCmdResult process_vinvall(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    VTEntry vte;
    uint32_t vpeid;
    ItsCmdResult cmdres;

    if (!its_feature_virtual(s)) {
        return CMD_CONTINUE;
    }

    vpeid = FIELD_EX64(cmdpkt[1], VINVALL_1, VPEID);

    trace_gicv3_its_cmd_vinvall(vpeid);

    cmdres = lookup_vte(s, __func__, vpeid, &vte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    gicv3_redist_vinvall(&s->gicv3->cpu[vte.rdbase], vte.vptaddr << 16);
    return CMD_CONTINUE_OK;
}

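/*
 * Handle the INV command: tell the relevant redistributor to re-read
 * the configuration (enable/priority) of one physical or virtual LPI.
 */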
static ItsCmdResult process_inv(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid, eventid;
    ITEntry ite;
    DTEntry dte;
    CTEntry cte;
    VTEntry vte;
    ItsCmdResult cmdres;

    devid = FIELD_EX64(cmdpkt[0], INV_0, DEVICEID);
    eventid = FIELD_EX64(cmdpkt[1], INV_1, EVENTID);

    trace_gicv3_its_cmd_inv(devid, eventid);

    cmdres = lookup_ite(s, __func__, devid, eventid, &ite, &dte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    switch (ite.inttype) {
    case ITE_INTTYPE_PHYSICAL:
        cmdres = lookup_cte(s, __func__, ite.icid, &cte);
        if (cmdres != CMD_CONTINUE_OK) {
            return cmdres;
        }
        gicv3_redist_inv_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid);
        break;
    case ITE_INTTYPE_VIRTUAL:
        if (!its_feature_virtual(s)) {
            /* Can't happen unless guest is illegally writing to table memory */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid type %d in ITE (table corrupted?)\n",
                          __func__, ite.inttype);
            return CMD_CONTINUE;
        }

        cmdres = lookup_vte(s, __func__, ite.vpeid, &vte);
        if (cmdres != CMD_CONTINUE_OK) {
            return cmdres;
        }
        if (!intid_in_lpi_range(ite.intid) ||
            ite.intid >= (1ULL << (vte.vptsize + 1))) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: intid 0x%x out of range\n",
                          __func__, ite.intid);
            return CMD_CONTINUE;
        }
        gicv3_redist_inv_vlpi(&s->gicv3->cpu[vte.rdbase], ite.intid,
                              vte.vptaddr << 16);
        break;
    default:
        g_assert_not_reached();
    }

    return CMD_CONTINUE_OK;
}

/*
 * Current implementation blocks until all
 * commands are processed
 */
static void process_cmdq(GICv3ITSState *s)
{
    uint32_t wr_offset = 0;
    uint32_t rd_offset = 0;
    uint32_t cq_offset = 0;
    AddressSpace *as = &s->gicv3->dma_as;
    uint8_t cmd;
    int i;

    if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
        return;
    }

    wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);

    if (wr_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid write offset "
                      "%d\n", __func__, wr_offset);
        return;
    }

    rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);

    if (rd_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid read offset "
                      "%d\n", __func__, rd_offset);
        return;
    }

    while (wr_offset != rd_offset) {
        ItsCmdResult result = CMD_CONTINUE_OK;
        void *hostmem;
        hwaddr buflen;
        uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];

        cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);

        buflen = GITS_CMDQ_ENTRY_SIZE;
        hostmem = address_space_map(as, s->cq.base_addr + cq_offset,
                                    &buflen, false, MEMTXATTRS_UNSPECIFIED);
        if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) {
            if (hostmem) {
                address_space_unmap(as, hostmem, buflen, false, 0);
            }
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: could not read command at 0x%" PRIx64 "\n",
                          __func__, s->cq.base_addr + cq_offset);
            break;
        }
        for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) {
            cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t));
        }
        address_space_unmap(as, hostmem, buflen, false, 0);

        cmd = cmdpkt[0] & CMD_MASK;

        trace_gicv3_its_process_command(rd_offset, cmd);

        switch (cmd) {
        case GITS_CMD_INT:
            result = process_its_cmd(s, cmdpkt, INTERRUPT);
            break;
        case GITS_CMD_CLEAR:
            result = process_its_cmd(s, cmdpkt, CLEAR);
            break;
        case GITS_CMD_SYNC:
            /*
             * Current implementation makes a blocking synchronous call
             * for every command issued earlier, hence the internal state
             * is already consistent by the time SYNC command is executed.
             * Hence no further processing is required for SYNC command.
             */
            trace_gicv3_its_cmd_sync();
            break;
        case GITS_CMD_VSYNC:
            /*
             * VSYNC also is a nop, because our implementation is always
             * in sync.
             */
            if (!its_feature_virtual(s)) {
                result = CMD_CONTINUE;
                break;
            }
            trace_gicv3_its_cmd_vsync();
            break;
        case GITS_CMD_MAPD:
            result = process_mapd(s, cmdpkt);
            break;
        case GITS_CMD_MAPC:
            result = process_mapc(s, cmdpkt);
            break;
        case GITS_CMD_MAPTI:
            result = process_mapti(s, cmdpkt, false);
            break;
        case GITS_CMD_MAPI:
            result = process_mapti(s, cmdpkt, true);
            break;
        case GITS_CMD_DISCARD:
            result = process_its_cmd(s, cmdpkt, DISCARD);
            break;
        case GITS_CMD_INV:
            result = process_inv(s, cmdpkt);
            break;
        case GITS_CMD_INVALL:
            /*
             * The current implementation doesn't cache any ITS tables,
             * only the calculated LPI priority information. We only
             * need to trigger LPI priority re-calculation to be in
             * sync with LPI config table or pending table changes.
             * INVALL operates on a collection specified by ICID so
             * it only affects physical LPIs.
             */
            trace_gicv3_its_cmd_invall();
            for (i = 0; i < s->gicv3->num_cpu; i++) {
                gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
            }
            break;
        case GITS_CMD_MOVI:
            result = process_movi(s, cmdpkt);
            break;
        case GITS_CMD_MOVALL:
            result = process_movall(s, cmdpkt);
            break;
        case GITS_CMD_VMAPTI:
            result = process_vmapti(s, cmdpkt, false);
            break;
        case GITS_CMD_VMAPI:
            result = process_vmapti(s, cmdpkt, true);
            break;
        case GITS_CMD_VMAPP:
            result = process_vmapp(s, cmdpkt);
            break;
        case GITS_CMD_VMOVP:
            result = process_vmovp(s, cmdpkt);
            break;
        case GITS_CMD_VMOVI:
            result = process_vmovi(s, cmdpkt);
            break;
        case GITS_CMD_VINVALL:
            result = process_vinvall(s, cmdpkt);
            break;
        default:
            trace_gicv3_its_cmd_unknown(cmd);
            break;
        }
        if (result != CMD_STALL) {
            /* CMD_CONTINUE or CMD_CONTINUE_OK */
            rd_offset++;
            rd_offset %= s->cq.num_entries;
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
        } else {
            /* CMD_STALL */
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: 0x%x cmd processing failed, stalling\n",
                          __func__, cmd);
            break;
        }
    }
}

/*
 * This function extracts the ITS table-specific parameters (like
 * base_addr, size etc) for the Device, Collection and vPE tables from
 * the GITS_BASER<n> registers. It is called during ITS enable and also
 * during post_load migration.
 */
static void extract_table_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint8_t page_sz_type;
    uint8_t type;
    uint32_t page_sz = 0;
    uint64_t value;

    for (int i = 0; i < 8; i++) {
        TableDesc *td;
        int idbits;

        value = s->baser[i];

        if (!value) {
            continue;
        }

        page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);

        switch (page_sz_type) {
        case 0:
            page_sz = GITS_PAGE_SIZE_4K;
            break;

        case 1:
            page_sz = GITS_PAGE_SIZE_16K;
            break;

        case 2:
        case 3:
            page_sz = GITS_PAGE_SIZE_64K;
            break;

        default:
            g_assert_not_reached();
        }

        num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;

        type = FIELD_EX64(value, GITS_BASER, TYPE);

        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
            td = &s->dt;
            idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
            break;
        case GITS_BASER_TYPE_COLLECTION:
            td = &s->ct;
            if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
                idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
            } else {
                /* 16-bit CollectionId supported when CIL == 0 */
                idbits = 16;
            }
            break;
        case GITS_BASER_TYPE_VPE:
            td = &s->vpet;
            /*
             * For QEMU vPEIDs are always 16 bits. (GICv4.1 allows an
             * implementation to implement fewer bits and report this
             * via GICD_TYPER2.)
             */
            idbits = 16;
            break;
        default:
            /*
             * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
             * ensures we will only see type values corresponding to
             * the values set up in gicv3_its_reset().
             */
            g_assert_not_reached();
        }

        memset(td, 0, sizeof(*td));
        /*
         * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
         * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
         * do not have a special case where the GITS_BASER<n>.Valid bit is 0
         * for the register corresponding to the Collection table but we
         * still have to process interrupts using non-memory-backed
         * Collection table entries.)
         * The specification makes it UNPREDICTABLE to enable the ITS without
         * marking each BASER<n> as valid. We choose to handle these as if
         * the table was zero-sized, so commands using the table will fail
         * and interrupts requested via GITS_TRANSLATER writes will be ignored.
         * This happens automatically by leaving the num_entries field at
         * zero, which will be caught by the bounds checks we have before
         * every table lookup anyway.
         */
        if (!FIELD_EX64(value, GITS_BASER, VALID)) {
            continue;
        }
        td->page_sz = page_sz;
        td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
        td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
        td->base_addr = baser_base_addr(value, page_sz);
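        /*
         * For example (assuming 4K pages and 8-byte entries): a
         * two-page flat table holds 1024 entries, while a two-page
         * indirect L1 table points at 1024 L2 pages of 512 entries
         * each, i.e. 524288 entries before the 1 << idbits clamp below.
         */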
        if (!td->indirect) {
            td->num_entries = (num_pages * page_sz) / td->entry_sz;
        } else {
            td->num_entries = (((num_pages * page_sz) /
                                L1TABLE_ENTRY_SIZE) *
                               (page_sz / td->entry_sz));
        }
        td->num_entries = MIN(td->num_entries, 1ULL << idbits);
    }
}

static void extract_cmdq_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint64_t value = s->cbaser;

    num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;

    memset(&s->cq, 0, sizeof(s->cq));

    if (FIELD_EX64(value, GITS_CBASER, VALID)) {
        s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
                            GITS_CMDQ_ENTRY_SIZE;
        s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
        s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
    }
}

static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
                                              uint64_t *data, unsigned size,
                                              MemTxAttrs attrs)
{
    /*
     * GITS_TRANSLATER is write-only, and all other addresses
     * in the interrupt translation space frame are RES0.
     */
    *data = 0;
    return MEMTX_OK;
}

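/*
 * A write to GITS_TRANSLATER raises an interrupt: the EventID is the
 * data written and the DeviceID is the requester ID taken from the
 * memory transaction attributes (i.e. which device did the write).
 */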
static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
                                               uint64_t data, unsigned size,
                                               MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result = true;

    trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);

    switch (offset) {
    case GITS_TRANSLATER:
        if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
            result = do_process_its_cmd(s, attrs.requester_id, data, NONE);
        }
        break;
    default:
        break;
    }

    if (result) {
        return MEMTX_OK;
    } else {
        return MEMTX_ERROR;
    }
}

static bool its_writel(GICv3ITSState *s, hwaddr offset,
                       uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        if (value & R_GITS_CTLR_ENABLED_MASK) {
            s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
            extract_table_params(s);
            extract_cmdq_params(s);
            process_cmdq(s);
        } else {
            s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice: GITS_CBASER register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 0, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CBASER + 4:
        /*
         * IMPDEF choice: GITS_CBASER register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 32, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = deposit64(s->cwriter, 0, 32,
                               (value & ~R_GITS_CWRITER_RETRY_MASK));
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CWRITER + 4:
        s->cwriter = deposit64(s->cwriter, 32, 32, value);
        break;
    case GITS_CREADR:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 0, 32,
                                  (value & ~R_GITS_CREADR_STALLED_MASK));
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          HWADDR_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_CREADR + 4:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 32, 32, value);
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          HWADDR_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice: GITS_BASERn register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;

            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            if (offset & 7) {
                value <<= 32;
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
                s->baser[index] |= value;
            } else {
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
                s->baser[index] |= value;
            }
        }
        break;
    case GITS_IIDR:
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      HWADDR_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static bool its_readl(GICv3ITSState *s, hwaddr offset,
                      uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        *data = s->ctlr;
        break;
    case GITS_IIDR:
        *data = gicv3_iidr();
        break;
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* ID registers */
        *data = gicv3_idreg(s->gicv3, offset - GITS_IDREGS, GICV3_PIDR0_ITS);
        break;
    case GITS_TYPER:
        *data = extract64(s->typer, 0, 32);
        break;
    case GITS_TYPER + 4:
        *data = extract64(s->typer, 32, 32);
        break;
    case GITS_CBASER:
        *data = extract64(s->cbaser, 0, 32);
        break;
    case GITS_CBASER + 4:
        *data = extract64(s->cbaser, 32, 32);
        break;
    case GITS_CREADR:
        *data = extract64(s->creadr, 0, 32);
        break;
    case GITS_CREADR + 4:
        *data = extract64(s->creadr, 32, 32);
        break;
    case GITS_CWRITER:
        *data = extract64(s->cwriter, 0, 32);
        break;
    case GITS_CWRITER + 4:
        *data = extract64(s->cwriter, 32, 32);
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        if (offset & 7) {
            *data = extract64(s->baser[index], 32, 32);
        } else {
            *data = extract64(s->baser[index], 0, 32);
        }
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static bool its_writell(GICv3ITSState *s, hwaddr offset,
                        uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice: GITS_BASERn register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;
            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            s->baser[index] &= GITS_BASER_RO_MASK;
            s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice: GITS_CBASER register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = value;
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CREADR:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          HWADDR_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_TYPER:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      HWADDR_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static bool its_readll(GICv3ITSState *s, hwaddr offset,
                       uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_TYPER:
        *data = s->typer;
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        *data = s->baser[index];
        break;
    case GITS_CBASER:
        *data = s->cbaser;
        break;
    case GITS_CREADR:
        *data = s->creadr;
        break;
    case GITS_CWRITER:
        *data = s->cwriter;
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_readl(s, offset, data, attrs);
        break;
    case 8:
        result = its_readll(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " HWADDR_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_its_badread(offset, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        *data = 0;
    } else {
        trace_gicv3_its_read(offset, *data, size);
    }
    return MEMTX_OK;
}

static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_writel(s, offset, data, attrs);
        break;
    case 8:
        result = its_writell(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " HWADDR_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_its_badwrite(offset, data, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
    } else {
        trace_gicv3_its_write(offset, data, size);
    }
    return MEMTX_OK;
}

static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

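/*
 * The translation frame contains GITS_TRANSLATER, which the spec
 * permits to be written with 16-bit as well as 32-bit accesses,
 * hence the minimum access size of 2 here.
 */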
static const MemoryRegionOps gicv3_its_translation_ops = {
    .read_with_attrs = gicv3_its_translation_read,
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

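/*
 * Realize: check that every redistributor supports physical LPIs,
 * register this ITS with its GIC, map the control and translation
 * register frames, and advertise the implemented features in
 * GITS_TYPER.
 */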
static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    int i;

    for (i = 0; i < s->gicv3->num_cpu; i++) {
        if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
            error_setg(errp, "Physical LPI not supported by CPU %d", i);
            return;
        }
    }

    gicv3_add_its(s->gicv3, dev);

    gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);

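    /*
     * Note the GITS_TYPER encoding convention: size fields such as
     * ITT_ENTRY_SIZE hold the value minus one.
     */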
    /* set the ITS default features supported */
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
                          ITS_ITT_ENTRY_SIZE - 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
    if (s->gicv3->revision >= 4) {
        /* Our VMOVP handles cross-ITS synchronization itself */
        s->typer = FIELD_DP64(s->typer, GITS_TYPER, VMOVP, 1);
        s->typer = FIELD_DP64(s->typer, GITS_TYPER, VIRTUAL, 1);
    }
}

static void gicv3_its_reset_hold(Object *obj, ResetType type)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(obj);
    GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);

    if (c->parent_phases.hold) {
        c->parent_phases.hold(obj, type);
    }

    /* Quiescent bit resets to 1 */
    s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);

    /*
     * Set GITS_BASER0.Type = 0b001 (Device)
     *     GITS_BASER1.Type = 0b100 (Collection Table)
     *     GITS_BASER2.Type = 0b010 (vPE) for GICv4 and later
     *     GITS_BASER<n>.Type, where n = 3 to 7, to 0b00 (Unimplemented)
     *     GITS_BASER<0,1>.Page_Size = 64KB
     * and default the translation table entry size to 16 bytes
     */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_DEVICE);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
                             GITS_DTE_SIZE - 1);

    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_COLLECTION);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
                             GITS_CTE_SIZE - 1);

    if (its_feature_virtual(s)) {
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, TYPE,
                                 GITS_BASER_TYPE_VPE);
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, PAGESIZE,
                                 GITS_BASER_PAGESIZE_64K);
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, ENTRYSIZE,
                                 GITS_VPE_SIZE - 1);
    }
}

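/*
 * After migration, re-derive the cached table and command queue
 * parameters from the (already migrated) register state, just as the
 * register write paths would have done.
 */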
static void gicv3_its_post_load(GICv3ITSState *s)
{
    if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
        extract_table_params(s);
        extract_cmdq_params(s);
    }
}

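/*
 * The "parent-gicv3" link must be wired up by the board or SoC code
 * before the device is realized, e.g. (sketch):
 *   object_property_set_link(OBJECT(its), "parent-gicv3", OBJECT(gic),
 *                            &error_abort);
 */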
static Property gicv3_its_props[] = {
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};

static void gicv3_its_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
    GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);

    dc->realize = gicv3_arm_its_realize;
    device_class_set_props(dc, gicv3_its_props);
    resettable_class_set_parent_phases(rc, NULL, gicv3_its_reset_hold, NULL,
                                       &ic->parent_phases);
    icc->post_load = gicv3_its_post_load;
}

static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};

static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)