/*
 * ITS emulation for a GICv3-based system
 *
 * Copyright Linaro.org 2021
 *
 * Authors:
 *  Shashi Mallela <shashi.mallela@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "hw/qdev-properties.h"
#include "hw/intc/arm_gicv3_its_common.h"
#include "gicv3_internal.h"
#include "qom/object.h"
#include "qapi/error.h"

typedef struct GICv3ITSClass GICv3ITSClass;
/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
                     ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)

struct GICv3ITSClass {
    GICv3ITSCommonClass parent_class;
    void (*parent_reset)(DeviceState *dev);
};

/*
 * This is an internal enum used to distinguish between LPI triggered
 * via command queue and LPI triggered via gits_translater write.
 */
typedef enum ItsCmdType {
    NONE = 0, /* internal indication for GITS_TRANSLATER write */
    CLEAR = 1,
    DISCARD = 2,
    INTERRUPT = 3,
} ItsCmdType;

typedef struct DTEntry {
    bool valid;
    unsigned size;
    uint64_t ittaddr;
} DTEntry;

typedef struct CTEntry {
    bool valid;
    uint32_t rdbase;
} CTEntry;

typedef struct ITEntry {
    bool valid;
    int inttype;
    uint32_t intid;
    uint32_t doorbell;
    uint32_t icid;
    uint32_t vpeid;
} ITEntry;

typedef struct VTEntry {
    bool valid;
    unsigned vptsize;
    uint32_t rdbase;
    uint64_t vptaddr;
} VTEntry;

/*
 * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
 * if a command parameter is not correct. These include both "stall
 * processing of the command queue" and "ignore this command, and
 * keep processing the queue". In our implementation we choose that
 * memory transaction errors reading the command packet provoke a
 * stall, but errors in parameters cause us to ignore the command
 * and continue processing.
 * The process_* functions which handle individual ITS commands all
 * return an ItsCmdResult which tells process_cmdq() whether it should
 * stall, keep going because of an error, or keep going because the
 * command was a success.
 */
typedef enum ItsCmdResult {
    CMD_STALL = 0,
    CMD_CONTINUE = 1,
    CMD_CONTINUE_OK = 2,
} ItsCmdResult;
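/*
 * How process_cmdq() consumes these results: CMD_CONTINUE and
 * CMD_CONTINUE_OK both advance GITS_CREADR to the next command, while
 * CMD_STALL sets GITS_CREADR.Stalled and stops processing. For example,
 * a MAPD with an out-of-range DeviceID yields CMD_CONTINUE (logged as a
 * guest error, command ignored), whereas a DMA error writing the DTE
 * back to guest memory yields CMD_STALL.
 */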
/* True if the ITS supports the GICv4 virtual LPI feature */
static bool its_feature_virtual(GICv3ITSState *s)
{
    return s->typer & R_GITS_TYPER_VIRTUAL_MASK;
}

static inline bool intid_in_lpi_range(uint32_t id)
{
    return id >= GICV3_LPI_INTID_START &&
           id < (1 << (GICD_TYPER_IDBITS + 1));
}

static inline bool valid_doorbell(uint32_t id)
{
    /* Doorbell fields may be an LPI, or 1023 to mean "no doorbell" */
    return id == INTID_SPURIOUS || intid_in_lpi_range(id);
}

static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
{
    uint64_t result = 0;

    switch (page_sz) {
    case GITS_PAGE_SIZE_4K:
    case GITS_PAGE_SIZE_16K:
        result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
        break;

    case GITS_PAGE_SIZE_64K:
        result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
        result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
        break;

    default:
        break;
    }
    return result;
}

static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
                                 uint32_t idx, MemTxResult *res)
{
    /*
     * Given a TableDesc describing one of the ITS in-guest-memory
     * tables and an index into it, return the guest address
     * corresponding to that table entry.
     * If there was a memory error reading the L1 table of an
     * indirect table, *res is set accordingly, and we return -1.
     * If the L1 table entry is marked not valid, we return -1 with
     * *res set to MEMTX_OK.
     *
     * The specification defines the format of level 1 entries of a
     * 2-level table, but the format of level 2 entries and the format
     * of flat-mapped tables is IMPDEF.
     */
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t l2idx;
    uint64_t l2;
    uint32_t num_l2_entries;

    *res = MEMTX_OK;

    if (!td->indirect) {
        /* Single level table */
        return td->base_addr + idx * td->entry_sz;
    }

    /* Two level table */
    l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);

    l2 = address_space_ldq_le(as,
                              td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
                              MEMTXATTRS_UNSPECIFIED, res);
    if (*res != MEMTX_OK) {
        return -1;
    }
    if (!(l2 & L2_TABLE_VALID_MASK)) {
        return -1;
    }

    num_l2_entries = td->page_sz / td->entry_sz;
    return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
}
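/*
 * Example of the two-level walk above, assuming a 4K page size and
 * 8-byte table entries: each L2 page holds 4096 / 8 = 512 entries, so
 * for idx = 1000 we read the L1 doubleword at base_addr + 8 (l2idx =
 * 1000 / 512 = 1), and if that entry is valid the wanted entry lives
 * at (L1 entry address bits) + (1000 % 512) * 8 within the L2 page.
 */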
/*
 * Read the Collection Table entry at index @icid. On success (including
 * successfully determining that there is no valid CTE for this index),
 * we return MEMTX_OK and populate the CTEntry struct @cte accordingly.
 * If there is an error reading memory then we return the error code.
 */
static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, &res);
    uint64_t cteval;

    if (entry_addr == -1) {
        /* No L2 table entry, i.e. no valid CTE, or a memory error */
        cte->valid = false;
        goto out;
    }

    cteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        goto out;
    }
    cte->valid = FIELD_EX64(cteval, CTE, VALID);
    cte->rdbase = FIELD_EX64(cteval, CTE, RDBASE);
out:
    if (res != MEMTX_OK) {
        trace_gicv3_its_cte_read_fault(icid);
    } else {
        trace_gicv3_its_cte_read(icid, cte->valid, cte->rdbase);
    }
    return res;
}

/*
 * Update the Interrupt Table entry at index @eventid in the table specified
 * by the dte @dte. Returns true on success, false if there was a memory
 * access error.
 */
static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
                       const ITEntry *ite)
{
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
    uint64_t itel = 0;
    uint32_t iteh = 0;

    trace_gicv3_its_ite_write(dte->ittaddr, eventid, ite->valid,
                              ite->inttype, ite->intid, ite->icid,
                              ite->vpeid, ite->doorbell);

    if (ite->valid) {
        itel = FIELD_DP64(itel, ITE_L, VALID, 1);
        itel = FIELD_DP64(itel, ITE_L, INTTYPE, ite->inttype);
        itel = FIELD_DP64(itel, ITE_L, INTID, ite->intid);
        itel = FIELD_DP64(itel, ITE_L, ICID, ite->icid);
        itel = FIELD_DP64(itel, ITE_L, VPEID, ite->vpeid);
        iteh = FIELD_DP32(iteh, ITE_H, DOORBELL, ite->doorbell);
    }

    address_space_stq_le(as, iteaddr, itel, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        return false;
    }
    address_space_stl_le(as, iteaddr + 8, iteh, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}

/*
 * Read the Interrupt Table entry at index @eventid from the table specified
 * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry
 * struct @ite accordingly. If there is an error reading memory then we return
 * the error code.
 */
static MemTxResult get_ite(GICv3ITSState *s, uint32_t eventid,
                           const DTEntry *dte, ITEntry *ite)
{
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    uint64_t itel;
    uint32_t iteh;
    hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;

    itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
        return res;
    }

    iteh = address_space_ldl_le(as, iteaddr + 8, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
        return res;
    }

    ite->valid = FIELD_EX64(itel, ITE_L, VALID);
    ite->inttype = FIELD_EX64(itel, ITE_L, INTTYPE);
    ite->intid = FIELD_EX64(itel, ITE_L, INTID);
    ite->icid = FIELD_EX64(itel, ITE_L, ICID);
    ite->vpeid = FIELD_EX64(itel, ITE_L, VPEID);
    ite->doorbell = FIELD_EX64(iteh, ITE_H, DOORBELL);
    trace_gicv3_its_ite_read(dte->ittaddr, eventid, ite->valid,
                             ite->inttype, ite->intid, ite->icid,
                             ite->vpeid, ite->doorbell);
    return MEMTX_OK;
}
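/*
 * Note on the in-memory ITE format used above: each ITE occupies
 * ITS_ITT_ENTRY_SIZE (12) bytes, split into a 64-bit word (ITE_L:
 * valid, inttype, intid, icid, vpeid) at offset 0 and a 32-bit word
 * (ITE_H: doorbell) at offset 8, which is why the accessors issue one
 * ldq/stq followed by one ldl/stl.
 */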
/*
 * Read the Device Table entry at index @devid. On success (including
 * successfully determining that there is no valid DTE for this index),
 * we return MEMTX_OK and populate the DTEntry struct accordingly.
 * If there is an error reading memory then we return the error code.
 */
static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte)
{
    MemTxResult res = MEMTX_OK;
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res);
    uint64_t dteval;

    if (entry_addr == -1) {
        /* No L2 table entry, i.e. no valid DTE, or a memory error */
        dte->valid = false;
        goto out;
    }
    dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        goto out;
    }
    dte->valid = FIELD_EX64(dteval, DTE, VALID);
    dte->size = FIELD_EX64(dteval, DTE, SIZE);
    /* DTE word field stores bits [51:8] of the ITT address */
    dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT;
out:
    if (res != MEMTX_OK) {
        trace_gicv3_its_dte_read_fault(devid);
    } else {
        trace_gicv3_its_dte_read(devid, dte->valid, dte->size, dte->ittaddr);
    }
    return res;
}

/*
 * Given a (DeviceID, EventID), look up the corresponding ITE, including
 * checking for the various invalid-value cases. If we find a valid ITE,
 * fill in @ite and @dte and return CMD_CONTINUE_OK. Otherwise return
 * CMD_STALL or CMD_CONTINUE as appropriate (and the contents of @ite
 * should not be relied on).
 *
 * The string @who is purely for the LOG_GUEST_ERROR messages,
 * and should indicate the name of the calling function or similar.
 */
static ItsCmdResult lookup_ite(GICv3ITSState *s, const char *who,
                               uint32_t devid, uint32_t eventid, ITEntry *ite,
                               DTEntry *dte)
{
    uint64_t num_eventids;

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: devid %d >= %d\n",
                      who, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }

    if (get_dte(s, devid, dte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!dte->valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid dte for %d\n", who, devid);
        return CMD_CONTINUE;
    }

    num_eventids = 1ULL << (dte->size + 1);
    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: eventid %d >= %"
                      PRId64 "\n", who, eventid, num_eventids);
        return CMD_CONTINUE;
    }

    if (get_ite(s, eventid, dte, ite) != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!ite->valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: invalid ITE\n", who);
        return CMD_CONTINUE;
    }

    return CMD_CONTINUE_OK;
}
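/*
 * Example of the EventID bound computed above: DTE.SIZE holds the
 * number of EventID bits minus one, so a DTE with size == 4 gives
 * num_eventids = 1ULL << 5 = 32, i.e. valid EventIDs 0..31.
 */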
/*
 * Given an ICID, look up the corresponding CTE, including checking for various
 * invalid-value cases. If we find a valid CTE, fill in @cte and return
 * CMD_CONTINUE_OK; otherwise return CMD_STALL or CMD_CONTINUE (and the
 * contents of @cte should not be relied on).
 *
 * The string @who is purely for the LOG_GUEST_ERROR messages,
 * and should indicate the name of the calling function or similar.
 */
static ItsCmdResult lookup_cte(GICv3ITSState *s, const char *who,
                               uint32_t icid, CTEntry *cte)
{
    if (icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid ICID 0x%x\n", who, icid);
        return CMD_CONTINUE;
    }
    if (get_cte(s, icid, cte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!cte->valid) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid CTE\n", who);
        return CMD_CONTINUE;
    }
    if (cte->rdbase >= s->gicv3->num_cpu) {
        return CMD_CONTINUE;
    }
    return CMD_CONTINUE_OK;
}

static ItsCmdResult process_its_cmd_phys(GICv3ITSState *s, const ITEntry *ite,
                                         int irqlevel)
{
    CTEntry cte;
    ItsCmdResult cmdres;

    cmdres = lookup_cte(s, __func__, ite->icid, &cte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }
    gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite->intid, irqlevel);
    return CMD_CONTINUE_OK;
}

/*
 * This function handles the processing of the following commands based on
 * the ItsCmdType parameter passed:
 * 1. triggering of lpi interrupt translation via ITS INT command
 * 2. triggering of lpi interrupt translation via gits_translater register
 * 3. handling of ITS CLEAR command
 * 4. handling of ITS DISCARD command
 */
static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
                                       uint32_t eventid, ItsCmdType cmd)
{
    DTEntry dte;
    ITEntry ite;
    ItsCmdResult cmdres;
    int irqlevel;

    cmdres = lookup_ite(s, __func__, devid, eventid, &ite, &dte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    irqlevel = (cmd == CLEAR || cmd == DISCARD) ? 0 : 1;

    switch (ite.inttype) {
    case ITE_INTTYPE_PHYSICAL:
        cmdres = process_its_cmd_phys(s, &ite, irqlevel);
        break;
    case ITE_INTTYPE_VIRTUAL:
        if (!its_feature_virtual(s)) {
            /* Can't happen unless guest is illegally writing to table memory */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid type %d in ITE (table corrupted?)\n",
                          __func__, ite.inttype);
            return CMD_CONTINUE;
        }
        /* The GICv4 virtual interrupt handling will go here */
        g_assert_not_reached();
    default:
        g_assert_not_reached();
    }

    if (cmdres == CMD_CONTINUE_OK && cmd == DISCARD) {
        ITEntry ite = {};
        /* remove mapping from interrupt translation table */
        ite.valid = false;
        return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
    }
    return CMD_CONTINUE_OK;
}
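/*
 * The irqlevel mapping above gives the command semantics: INT and
 * GITS_TRANSLATER writes assert the translated LPI (level 1), while
 * CLEAR and DISCARD deassert any pending state (level 0); DISCARD
 * additionally writes back an invalid ITE to drop the mapping.
 */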
static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt,
                                    ItsCmdType cmd)
{
    uint32_t devid, eventid;

    devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
    eventid = cmdpkt[1] & EVENTID_MASK;
    switch (cmd) {
    case INTERRUPT:
        trace_gicv3_its_cmd_int(devid, eventid);
        break;
    case CLEAR:
        trace_gicv3_its_cmd_clear(devid, eventid);
        break;
    case DISCARD:
        trace_gicv3_its_cmd_discard(devid, eventid);
        break;
    default:
        g_assert_not_reached();
    }
    return do_process_its_cmd(s, devid, eventid, cmd);
}

static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
                                  bool ignore_pInt)
{
    uint32_t devid, eventid;
    uint32_t pIntid = 0;
    uint64_t num_eventids;
    uint16_t icid = 0;
    DTEntry dte;
    ITEntry ite;

    devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
    eventid = cmdpkt[1] & EVENTID_MASK;
    icid = cmdpkt[2] & ICID_MASK;

    if (ignore_pInt) {
        pIntid = eventid;
        trace_gicv3_its_cmd_mapi(devid, eventid, icid);
    } else {
        pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT;
        trace_gicv3_its_cmd_mapti(devid, eventid, icid, pIntid);
    }

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: devid %d >= %d\n",
                      __func__, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }

    if (get_dte(s, devid, &dte) != MEMTX_OK) {
        return CMD_STALL;
    }
    num_eventids = 1ULL << (dte.size + 1);

    if (icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid ICID 0x%x >= 0x%x\n",
                      __func__, icid, s->ct.num_entries);
        return CMD_CONTINUE;
    }

    if (!dte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: no valid DTE for devid 0x%x\n", __func__, devid);
        return CMD_CONTINUE;
    }

    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid event ID 0x%x >= 0x%" PRIx64 "\n",
                      __func__, eventid, num_eventids);
        return CMD_CONTINUE;
    }

    if (!intid_in_lpi_range(pIntid)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid interrupt ID 0x%x\n", __func__, pIntid);
        return CMD_CONTINUE;
    }

    /* add ite entry to interrupt translation table */
    ite.valid = true;
    ite.inttype = ITE_INTTYPE_PHYSICAL;
    ite.intid = pIntid;
    ite.icid = icid;
    ite.doorbell = INTID_SPURIOUS;
    ite.vpeid = 0;
    return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
}
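/*
 * MAPI is handled above as a variant of MAPTI: with ignore_pInt set,
 * the command carries no pINTID field and the EventID doubles as the
 * physical interrupt ID. For example, MAPI with DeviceID 3 and
 * EventID 8192 maps event 8192 of device 3 to LPI 8192.
 */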
static ItsCmdResult process_vmapti(GICv3ITSState *s, const uint64_t *cmdpkt,
                                   bool ignore_vintid)
{
    uint32_t devid, eventid, vintid, doorbell, vpeid;
    uint64_t num_eventids;
    DTEntry dte;
    ITEntry ite;

    if (!its_feature_virtual(s)) {
        return CMD_CONTINUE;
    }

    devid = FIELD_EX64(cmdpkt[0], VMAPTI_0, DEVICEID);
    eventid = FIELD_EX64(cmdpkt[1], VMAPTI_1, EVENTID);
    vpeid = FIELD_EX64(cmdpkt[1], VMAPTI_1, VPEID);
    doorbell = FIELD_EX64(cmdpkt[2], VMAPTI_2, DOORBELL);
    if (ignore_vintid) {
        vintid = eventid;
        trace_gicv3_its_cmd_vmapi(devid, eventid, vpeid, doorbell);
    } else {
        vintid = FIELD_EX64(cmdpkt[2], VMAPTI_2, VINTID);
        trace_gicv3_its_cmd_vmapti(devid, eventid, vpeid, vintid, doorbell);
    }

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid DeviceID 0x%x (must be less than 0x%x)\n",
                      __func__, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }

    if (get_dte(s, devid, &dte) != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!dte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: no entry in device table for DeviceID 0x%x\n",
                      __func__, devid);
        return CMD_CONTINUE;
    }

    num_eventids = 1ULL << (dte.size + 1);

    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: EventID 0x%x too large for DeviceID 0x%x "
                      "(must be less than 0x%" PRIx64 ")\n",
                      __func__, eventid, devid, num_eventids);
        return CMD_CONTINUE;
    }
    if (!intid_in_lpi_range(vintid)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: VIntID 0x%x not a valid LPI\n",
                      __func__, vintid);
        return CMD_CONTINUE;
    }
    if (!valid_doorbell(doorbell)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Doorbell %d not 1023 and not a valid LPI\n",
                      __func__, doorbell);
        return CMD_CONTINUE;
    }
    if (vpeid >= s->vpet.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: VPEID 0x%x out of range (must be less than 0x%x)\n",
                      __func__, vpeid, s->vpet.num_entries);
        return CMD_CONTINUE;
    }
    /* add ite entry to interrupt translation table */
    ite.valid = true;
    ite.inttype = ITE_INTTYPE_VIRTUAL;
    ite.intid = vintid;
    ite.icid = 0;
    ite.doorbell = doorbell;
    ite.vpeid = vpeid;
    return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
}
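/*
 * A virtual ITE records the (vINTID, vPEID, doorbell) triple rather
 * than an ICID; a doorbell of 1023 (INTID_SPURIOUS) means "no doorbell
 * LPI". E.g. VMAPTI DeviceID 3, EventID 0, vPEID 2, vINTID 8200,
 * doorbell 1023 stores ITE {valid, VIRTUAL, intid 8200, vpeid 2,
 * no doorbell}.
 */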
/*
 * Update the Collection Table entry for @icid to @cte. Returns true
 * on success, false if there was a memory access error.
 */
static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr;
    uint64_t cteval = 0;
    MemTxResult res = MEMTX_OK;

    trace_gicv3_its_cte_write(icid, cte->valid, cte->rdbase);

    if (cte->valid) {
        /* add mapping entry to collection table */
        cteval = FIELD_DP64(cteval, CTE, VALID, 1);
        cteval = FIELD_DP64(cteval, CTE, RDBASE, cte->rdbase);
    }

    entry_addr = table_entry_addr(s, &s->ct, icid, &res);
    if (res != MEMTX_OK) {
        /* memory access error: stall */
        return false;
    }
    if (entry_addr == -1) {
        /* No L2 table for this index: discard write and continue */
        return true;
    }

    address_space_stq_le(as, entry_addr, cteval, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}

static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint16_t icid;
    CTEntry cte;

    icid = cmdpkt[2] & ICID_MASK;
    cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
    if (cte.valid) {
        cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
        cte.rdbase &= RDBASE_PROCNUM_MASK;
    } else {
        cte.rdbase = 0;
    }
    trace_gicv3_its_cmd_mapc(icid, cte.rdbase, cte.valid);

    if (icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR, "ITS MAPC: invalid ICID 0x%x\n", icid);
        return CMD_CONTINUE;
    }
    if (cte.valid && cte.rdbase >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPC: invalid RDBASE %u\n", cte.rdbase);
        return CMD_CONTINUE;
    }

    return update_cte(s, icid, &cte) ? CMD_CONTINUE_OK : CMD_STALL;
}
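/*
 * Note that rdbase above is interpreted as a redistributor (CPU)
 * index, masked with RDBASE_PROCNUM_MASK and bounds-checked against
 * num_cpu: GITS_TYPER.PTA is left at 0 here, i.e. target addresses
 * are processor numbers rather than physical addresses.
 */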
/*
 * Update the Device Table entry for @devid to @dte. Returns true
 * on success, false if there was a memory access error.
 */
static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr;
    uint64_t dteval = 0;
    MemTxResult res = MEMTX_OK;

    trace_gicv3_its_dte_write(devid, dte->valid, dte->size, dte->ittaddr);

    if (dte->valid) {
        /* add mapping entry to device table */
        dteval = FIELD_DP64(dteval, DTE, VALID, 1);
        dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size);
        dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr);
    }

    entry_addr = table_entry_addr(s, &s->dt, devid, &res);
    if (res != MEMTX_OK) {
        /* memory access error: stall */
        return false;
    }
    if (entry_addr == -1) {
        /* No L2 table for this index: discard write and continue */
        return true;
    }
    address_space_stq_le(as, entry_addr, dteval, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}

static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid;
    DTEntry dte;

    devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
    dte.size = cmdpkt[1] & SIZE_MASK;
    dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
    dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;

    trace_gicv3_its_cmd_mapd(devid, dte.size, dte.ittaddr, dte.valid);

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPD: invalid device ID field 0x%x >= 0x%x\n",
                      devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }

    if (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPD: invalid size %d\n", dte.size);
        return CMD_CONTINUE;
    }

    return update_dte(s, devid, &dte) ? CMD_CONTINUE_OK : CMD_STALL;
}

static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint64_t rd1, rd2;

    rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1);
    rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2);

    trace_gicv3_its_cmd_movall(rd1, rd2);

    if (rd1 >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: RDBASE1 %" PRId64
                      " out of range (must be less than %d)\n",
                      __func__, rd1, s->gicv3->num_cpu);
        return CMD_CONTINUE;
    }
    if (rd2 >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: RDBASE2 %" PRId64
                      " out of range (must be less than %d)\n",
                      __func__, rd2, s->gicv3->num_cpu);
        return CMD_CONTINUE;
    }

    if (rd1 == rd2) {
        /* Move to same target must succeed as a no-op */
        return CMD_CONTINUE_OK;
    }

    /* Move all pending LPIs from redistributor 1 to redistributor 2 */
    gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]);

    return CMD_CONTINUE_OK;
}
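/*
 * MOVALL above only transfers the pending state of LPIs between the
 * two redistributors; the ITEs themselves are untouched, so the guest
 * is expected to follow up by remapping the relevant collections
 * (e.g. with MAPC) so that future translations target the new
 * redistributor.
 */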
static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid, eventid;
    uint16_t new_icid;
    DTEntry dte;
    CTEntry old_cte, new_cte;
    ITEntry old_ite;
    ItsCmdResult cmdres;

    devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
    eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
    new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID);

    trace_gicv3_its_cmd_movi(devid, eventid, new_icid);

    cmdres = lookup_ite(s, __func__, devid, eventid, &old_ite, &dte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    if (old_ite.inttype != ITE_INTTYPE_PHYSICAL) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: invalid ITE\n",
                      __func__);
        return CMD_CONTINUE;
    }

    cmdres = lookup_cte(s, __func__, old_ite.icid, &old_cte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }
    cmdres = lookup_cte(s, __func__, new_icid, &new_cte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    if (old_cte.rdbase != new_cte.rdbase) {
        /* Move the LPI from the old redistributor to the new one */
        gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase],
                             &s->gicv3->cpu[new_cte.rdbase],
                             old_ite.intid);
    }

    /* Update the ICID field in the interrupt translation table entry */
    old_ite.icid = new_icid;
    return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE_OK : CMD_STALL;
}

/*
 * Update the vPE Table entry at index @vpeid with the entry @vte.
 * Returns true on success, false if there was a memory access error.
 */
static bool update_vte(GICv3ITSState *s, uint32_t vpeid, const VTEntry *vte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr;
    uint64_t vteval = 0;
    MemTxResult res = MEMTX_OK;

    trace_gicv3_its_vte_write(vpeid, vte->valid, vte->vptsize, vte->vptaddr,
                              vte->rdbase);

    if (vte->valid) {
        vteval = FIELD_DP64(vteval, VTE, VALID, 1);
        vteval = FIELD_DP64(vteval, VTE, VPTSIZE, vte->vptsize);
        vteval = FIELD_DP64(vteval, VTE, VPTADDR, vte->vptaddr);
        vteval = FIELD_DP64(vteval, VTE, RDBASE, vte->rdbase);
    }

    entry_addr = table_entry_addr(s, &s->vpet, vpeid, &res);
    if (res != MEMTX_OK) {
        return false;
    }
    if (entry_addr == -1) {
        /* No L2 table for this index: discard write and continue */
        return true;
    }
    address_space_stq_le(as, entry_addr, vteval, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}

static ItsCmdResult process_vmapp(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    VTEntry vte;
    uint32_t vpeid;

    if (!its_feature_virtual(s)) {
        return CMD_CONTINUE;
    }

    vpeid = FIELD_EX64(cmdpkt[1], VMAPP_1, VPEID);
    vte.rdbase = FIELD_EX64(cmdpkt[2], VMAPP_2, RDBASE);
    vte.valid = FIELD_EX64(cmdpkt[2], VMAPP_2, V);
    vte.vptsize = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTSIZE);
    vte.vptaddr = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTADDR);

    trace_gicv3_its_cmd_vmapp(vpeid, vte.rdbase, vte.valid,
                              vte.vptaddr, vte.vptsize);

    /*
     * For GICv4.0 the VPT_size field is only 5 bits, whereas we
     * define our field macros to include the full GICv4.1 8 bits.
     * The range check on VPT_size will catch the cases where
     * the guest set the RES0-in-GICv4.0 bits [7:6].
     */
    if (vte.vptsize > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid VPT_size 0x%x\n", __func__, vte.vptsize);
        return CMD_CONTINUE;
    }

    if (vte.valid && vte.rdbase >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid rdbase 0x%x\n", __func__, vte.rdbase);
        return CMD_CONTINUE;
    }

    if (vpeid >= s->vpet.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: VPEID 0x%x out of range (must be less than 0x%x)\n",
                      __func__, vpeid, s->vpet.num_entries);
        return CMD_CONTINUE;
    }

    return update_vte(s, vpeid, &vte) ? CMD_CONTINUE_OK : CMD_STALL;
}
/*
 * Current implementation blocks until all
 * commands are processed
 */
static void process_cmdq(GICv3ITSState *s)
{
    uint32_t wr_offset = 0;
    uint32_t rd_offset = 0;
    uint32_t cq_offset = 0;
    AddressSpace *as = &s->gicv3->dma_as;
    uint8_t cmd;
    int i;

    if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
        return;
    }

    wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);

    if (wr_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid write offset "
                      "%d\n", __func__, wr_offset);
        return;
    }

    rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);

    if (rd_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid read offset "
                      "%d\n", __func__, rd_offset);
        return;
    }

    while (wr_offset != rd_offset) {
        ItsCmdResult result = CMD_CONTINUE_OK;
        void *hostmem;
        hwaddr buflen;
        uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];

        cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);

        buflen = GITS_CMDQ_ENTRY_SIZE;
        hostmem = address_space_map(as, s->cq.base_addr + cq_offset,
                                    &buflen, false, MEMTXATTRS_UNSPECIFIED);
        if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) {
            if (hostmem) {
                address_space_unmap(as, hostmem, buflen, false, 0);
            }
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: could not read command at 0x%" PRIx64 "\n",
                          __func__, s->cq.base_addr + cq_offset);
            break;
        }
        for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) {
            cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t));
        }
        address_space_unmap(as, hostmem, buflen, false, 0);
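        /*
         * Each command queue entry is GITS_CMDQ_ENTRY_SIZE (four
         * doublewords, 32 bytes), read above as little-endian 64-bit
         * words; the command opcode lives in the low byte of the
         * first doubleword.
         */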
        cmd = cmdpkt[0] & CMD_MASK;

        trace_gicv3_its_process_command(rd_offset, cmd);

        switch (cmd) {
        case GITS_CMD_INT:
            result = process_its_cmd(s, cmdpkt, INTERRUPT);
            break;
        case GITS_CMD_CLEAR:
            result = process_its_cmd(s, cmdpkt, CLEAR);
            break;
        case GITS_CMD_SYNC:
            /*
             * Current implementation makes a blocking synchronous call
             * for every command issued earlier, hence the internal state
             * is already consistent by the time SYNC command is executed.
             * Hence no further processing is required for SYNC command.
             */
            trace_gicv3_its_cmd_sync();
            break;
        case GITS_CMD_MAPD:
            result = process_mapd(s, cmdpkt);
            break;
        case GITS_CMD_MAPC:
            result = process_mapc(s, cmdpkt);
            break;
        case GITS_CMD_MAPTI:
            result = process_mapti(s, cmdpkt, false);
            break;
        case GITS_CMD_MAPI:
            result = process_mapti(s, cmdpkt, true);
            break;
        case GITS_CMD_DISCARD:
            result = process_its_cmd(s, cmdpkt, DISCARD);
            break;
        case GITS_CMD_INV:
        case GITS_CMD_INVALL:
            /*
             * Current implementation doesn't cache any ITS tables,
             * only the calculated LPI priority information. We only
             * need to trigger LPI priority re-calculation to be in
             * sync with LPI config table or pending table changes.
             */
            trace_gicv3_its_cmd_inv();
            for (i = 0; i < s->gicv3->num_cpu; i++) {
                gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
            }
            break;
        case GITS_CMD_MOVI:
            result = process_movi(s, cmdpkt);
            break;
        case GITS_CMD_MOVALL:
            result = process_movall(s, cmdpkt);
            break;
        case GITS_CMD_VMAPTI:
            result = process_vmapti(s, cmdpkt, false);
            break;
        case GITS_CMD_VMAPI:
            result = process_vmapti(s, cmdpkt, true);
            break;
        case GITS_CMD_VMAPP:
            result = process_vmapp(s, cmdpkt);
            break;
        default:
            trace_gicv3_its_cmd_unknown(cmd);
            break;
        }
        if (result != CMD_STALL) {
            /* CMD_CONTINUE or CMD_CONTINUE_OK */
            rd_offset++;
            rd_offset %= s->cq.num_entries;
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
        } else {
            /* CMD_STALL */
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: 0x%x cmd processing failed, stalling\n",
                          __func__, cmd);
            break;
        }
    }
}
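/*
 * A stall is not fatal: it leaves GITS_CREADR.Stalled set with CREADR
 * still pointing at the failed command, and a subsequent guest write
 * to GITS_CWRITER re-enters process_cmdq() to retry from that point.
 */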
/*
 * This function extracts the ITS Device and Collection table specific
 * parameters (like base_addr, size etc) from GITS_BASER register.
 * It is called during ITS enable and also during post_load migration
 */
static void extract_table_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint8_t page_sz_type;
    uint8_t type;
    uint32_t page_sz = 0;
    uint64_t value;

    for (int i = 0; i < 8; i++) {
        TableDesc *td;
        int idbits;

        value = s->baser[i];

        if (!value) {
            continue;
        }

        page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);

        switch (page_sz_type) {
        case 0:
            page_sz = GITS_PAGE_SIZE_4K;
            break;

        case 1:
            page_sz = GITS_PAGE_SIZE_16K;
            break;

        case 2:
        case 3:
            page_sz = GITS_PAGE_SIZE_64K;
            break;

        default:
            g_assert_not_reached();
        }

        num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;

        type = FIELD_EX64(value, GITS_BASER, TYPE);

        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
            td = &s->dt;
            idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
            break;
        case GITS_BASER_TYPE_COLLECTION:
            td = &s->ct;
            if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
                idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
            } else {
                /* 16-bit CollectionId supported when CIL == 0 */
                idbits = 16;
            }
            break;
        case GITS_BASER_TYPE_VPE:
            td = &s->vpet;
            /*
             * For QEMU vPEIDs are always 16 bits. (GICv4.1 allows an
             * implementation to implement fewer bits and report this
             * via GICD_TYPER2.)
             */
            idbits = 16;
            break;
        default:
            /*
             * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
             * ensures we will only see type values corresponding to
             * the values set up in gicv3_its_reset().
             */
            g_assert_not_reached();
        }

        memset(td, 0, sizeof(*td));
        /*
         * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
         * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
         * do not have a special case where the GITS_BASER<n>.Valid bit is 0
         * for the register corresponding to the Collection table but we
         * still have to process interrupts using non-memory-backed
         * Collection table entries.)
         * The specification makes it UNPREDICTABLE to enable the ITS without
         * marking each BASER<n> as valid. We choose to handle these as if
         * the table was zero-sized, so commands using the table will fail
         * and interrupts requested via GITS_TRANSLATER writes will be ignored.
         * This happens automatically by leaving the num_entries field at
         * zero, which will be caught by the bounds checks we have before
         * every table lookup anyway.
         */
        if (!FIELD_EX64(value, GITS_BASER, VALID)) {
            continue;
        }
        td->page_sz = page_sz;
        td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
        td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
        td->base_addr = baser_base_addr(value, page_sz);
        if (!td->indirect) {
            td->num_entries = (num_pages * page_sz) / td->entry_sz;
        } else {
            td->num_entries = (((num_pages * page_sz) /
                                L1TABLE_ENTRY_SIZE) *
                               (page_sz / td->entry_sz));
        }
        td->num_entries = MIN(td->num_entries, 1ULL << idbits);
    }
}

static void extract_cmdq_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint64_t value = s->cbaser;

    num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;

    memset(&s->cq, 0, sizeof(s->cq));

    if (FIELD_EX64(value, GITS_CBASER, VALID)) {
        s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
                            GITS_CMDQ_ENTRY_SIZE;
        s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
        s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
    }
}
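/*
 * Sizing examples for the two functions above, assuming 8-byte table
 * entries: a flat GITS_BASER<n> with Size == 0 and 64K pages gives
 * 65536 / 8 = 8192 entries (an indirect one gives 8192 L1 entries
 * covering 8192 entries each, capped at 1 << idbits); a GITS_CBASER
 * with Size == 0 gives one 4K page, i.e. 4096 / 32 = 128 command
 * slots.
 */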
static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
                                              uint64_t *data, unsigned size,
                                              MemTxAttrs attrs)
{
    /*
     * GITS_TRANSLATER is write-only, and all other addresses
     * in the interrupt translation space frame are RES0.
     */
    *data = 0;
    return MEMTX_OK;
}

static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
                                               uint64_t data, unsigned size,
                                               MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result = true;

    trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);

    switch (offset) {
    case GITS_TRANSLATER:
        if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
            result = do_process_its_cmd(s, attrs.requester_id, data, NONE);
        }
        break;
    default:
        break;
    }

    if (result) {
        return MEMTX_OK;
    } else {
        return MEMTX_ERROR;
    }
}
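/*
 * E.g. a device whose MSI address points at GITS_TRANSLATER and whose
 * requester ID is 0x42 writing data 5 is handled above as
 * do_process_its_cmd(s, 0x42, 5, NONE), i.e. DeviceID 0x42, EventID 5.
 * Note that CMD_STALL (== 0) converts to a false result and hence
 * MEMTX_ERROR back to the device.
 */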
static bool its_writel(GICv3ITSState *s, hwaddr offset,
                       uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        if (value & R_GITS_CTLR_ENABLED_MASK) {
            s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
            extract_table_params(s);
            extract_cmdq_params(s);
            process_cmdq(s);
        } else {
            s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 0, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CBASER + 4:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 32, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = deposit64(s->cwriter, 0, 32,
                               (value & ~R_GITS_CWRITER_RETRY_MASK));
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CWRITER + 4:
        s->cwriter = deposit64(s->cwriter, 32, 32, value);
        break;
    case GITS_CREADR:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 0, 32,
                                  (value & ~R_GITS_CREADR_STALLED_MASK));
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_CREADR + 4:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 32, 32, value);
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;

            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            if (offset & 7) {
                value <<= 32;
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
                s->baser[index] |= value;
            } else {
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
                s->baser[index] |= value;
            }
        }
        break;
    case GITS_IIDR:
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static bool its_readl(GICv3ITSState *s, hwaddr offset,
                      uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        *data = s->ctlr;
        break;
    case GITS_IIDR:
        *data = gicv3_iidr();
        break;
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* ID registers */
        *data = gicv3_idreg(offset - GITS_IDREGS, GICV3_PIDR0_ITS);
        break;
    case GITS_TYPER:
        *data = extract64(s->typer, 0, 32);
        break;
    case GITS_TYPER + 4:
        *data = extract64(s->typer, 32, 32);
        break;
    case GITS_CBASER:
        *data = extract64(s->cbaser, 0, 32);
        break;
    case GITS_CBASER + 4:
        *data = extract64(s->cbaser, 32, 32);
        break;
    case GITS_CREADR:
        *data = extract64(s->creadr, 0, 32);
        break;
    case GITS_CREADR + 4:
        *data = extract64(s->creadr, 32, 32);
        break;
    case GITS_CWRITER:
        *data = extract64(s->cwriter, 0, 32);
        break;
    case GITS_CWRITER + 4:
        *data = extract64(s->cwriter, 32, 32);
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        if (offset & 7) {
            *data = extract64(s->baser[index], 32, 32);
        } else {
            *data = extract64(s->baser[index], 0, 32);
        }
        break;
    default:
        result = false;
        break;
    }
    return result;
}
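/*
 * The 64-bit ITS registers (GITS_TYPER, GITS_BASER<n>, GITS_CBASER,
 * GITS_CREADR, GITS_CWRITER) are also accessible as pairs of 32-bit
 * halves: the writel/readl accessors above select the half via
 * "offset & 7" with deposit64()/extract64(), while the writell/readll
 * accessors below handle whole-doubleword accesses.
 */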
static bool its_writell(GICv3ITSState *s, hwaddr offset,
                        uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;
            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            s->baser[index] &= GITS_BASER_RO_MASK;
            s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = value;
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CREADR:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_TYPER:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static bool its_readll(GICv3ITSState *s, hwaddr offset,
                       uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_TYPER:
        *data = s->typer;
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        *data = s->baser[index];
        break;
    case GITS_CBASER:
        *data = s->cbaser;
        break;
    case GITS_CREADR:
        *data = s->creadr;
        break;
    case GITS_CWRITER:
        *data = s->cwriter;
        break;
    default:
        result = false;
        break;
    }
    return result;
}
static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_readl(s, offset, data, attrs);
        break;
    case 8:
        result = its_readll(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_its_badread(offset, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        *data = 0;
    } else {
        trace_gicv3_its_read(offset, *data, size);
    }
    return MEMTX_OK;
}

static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_writel(s, offset, data, attrs);
        break;
    case 8:
        result = its_writell(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_its_badwrite(offset, data, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
    } else {
        trace_gicv3_its_write(offset, data, size);
    }
    return MEMTX_OK;
}

static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps gicv3_its_translation_ops = {
    .read_with_attrs = gicv3_its_translation_read,
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    int i;

    for (i = 0; i < s->gicv3->num_cpu; i++) {
        if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
            error_setg(errp, "Physical LPI not supported by CPU %d", i);
            return;
        }
    }

    gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);

    /* set the ITS default features supported */
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
                          ITS_ITT_ENTRY_SIZE - 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
}
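/*
 * Note the minus-one encodings used above and in the reset function
 * below: GITS_TYPER.ITT_entry_size holds the ITT entry size in bytes
 * minus one (so 12-byte entries are reported as 0xb), and the
 * GITS_BASER<n>.ENTRYSIZE fields likewise hold the table entry size
 * minus one.
 */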
static void gicv3_its_reset(DeviceState *dev)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);

    c->parent_reset(dev);

    /* Quiescent bit reset to 1 */
    s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);

    /*
     * setting GITS_BASER0.Type = 0b001 (Device)
     *         GITS_BASER1.Type = 0b100 (Collection Table)
     *         GITS_BASER2.Type = 0b010 (vPE) for GICv4 and later
     *         GITS_BASER<n>.Type, where n = 3 to 7, is 0b00 (Unimplemented)
     *         GITS_BASER<0,1>.Page_Size = 64KB
     * and the default translation table entry sizes
     */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_DEVICE);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
                             GITS_DTE_SIZE - 1);

    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_COLLECTION);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
                             GITS_CTE_SIZE - 1);

    if (its_feature_virtual(s)) {
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, TYPE,
                                 GITS_BASER_TYPE_VPE);
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, PAGESIZE,
                                 GITS_BASER_PAGESIZE_64K);
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, ENTRYSIZE,
                                 GITS_VPE_SIZE - 1);
    }
}

static void gicv3_its_post_load(GICv3ITSState *s)
{
    if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
        extract_table_params(s);
        extract_cmdq_params(s);
    }
}

static Property gicv3_its_props[] = {
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};

static void gicv3_its_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
    GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);

    dc->realize = gicv3_arm_its_realize;
    device_class_set_props(dc, gicv3_its_props);
    device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
    icc->post_load = gicv3_its_post_load;
}

static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};

static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)