1 /* 2 * ITS emulation for a GICv3-based system 3 * 4 * Copyright Linaro.org 2021 5 * 6 * Authors: 7 * Shashi Mallela <shashi.mallela@linaro.org> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2 or (at your 10 * option) any later version. See the COPYING file in the top-level directory. 11 * 12 */ 13 14 #include "qemu/osdep.h" 15 #include "qemu/log.h" 16 #include "trace.h" 17 #include "hw/qdev-properties.h" 18 #include "hw/intc/arm_gicv3_its_common.h" 19 #include "gicv3_internal.h" 20 #include "qom/object.h" 21 #include "qapi/error.h" 22 23 typedef struct GICv3ITSClass GICv3ITSClass; 24 /* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */ 25 DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass, 26 ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS) 27 28 struct GICv3ITSClass { 29 GICv3ITSCommonClass parent_class; 30 void (*parent_reset)(DeviceState *dev); 31 }; 32 33 /* 34 * This is an internal enum used to distinguish between LPI triggered 35 * via command queue and LPI triggered via gits_translater write. 36 */ 37 typedef enum ItsCmdType { 38 NONE = 0, /* internal indication for GITS_TRANSLATER write */ 39 CLEAR = 1, 40 DISCARD = 2, 41 INTERRUPT = 3, 42 } ItsCmdType; 43 44 typedef struct DTEntry { 45 bool valid; 46 unsigned size; 47 uint64_t ittaddr; 48 } DTEntry; 49 50 typedef struct CTEntry { 51 bool valid; 52 uint32_t rdbase; 53 } CTEntry; 54 55 typedef struct ITEntry { 56 bool valid; 57 int inttype; 58 uint32_t intid; 59 uint32_t doorbell; 60 uint32_t icid; 61 uint32_t vpeid; 62 } ITEntry; 63 64 65 /* 66 * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options 67 * if a command parameter is not correct. These include both "stall 68 * processing of the command queue" and "ignore this command, and 69 * keep processing the queue". In our implementation we choose that 70 * memory transaction errors reading the command packet provoke a 71 * stall, but errors in parameters cause us to ignore the command 72 * and continue processing. 73 * The process_* functions which handle individual ITS commands all 74 * return an ItsCmdResult which tells process_cmdq() whether it should 75 * stall or keep going. 76 */ 77 typedef enum ItsCmdResult { 78 CMD_STALL = 0, 79 CMD_CONTINUE = 1, 80 } ItsCmdResult; 81 82 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz) 83 { 84 uint64_t result = 0; 85 86 switch (page_sz) { 87 case GITS_PAGE_SIZE_4K: 88 case GITS_PAGE_SIZE_16K: 89 result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12; 90 break; 91 92 case GITS_PAGE_SIZE_64K: 93 result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16; 94 result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48; 95 break; 96 97 default: 98 break; 99 } 100 return result; 101 } 102 103 static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td, 104 uint32_t idx, MemTxResult *res) 105 { 106 /* 107 * Given a TableDesc describing one of the ITS in-guest-memory 108 * tables and an index into it, return the guest address 109 * corresponding to that table entry. 110 * If there was a memory error reading the L1 table of an 111 * indirect table, *res is set accordingly, and we return -1. 112 * If the L1 table entry is marked not valid, we return -1 with 113 * *res set to MEMTX_OK. 114 * 115 * The specification defines the format of level 1 entries of a 116 * 2-level table, but the format of level 2 entries and the format 117 * of flat-mapped tables is IMPDEF. 118 */ 119 AddressSpace *as = &s->gicv3->dma_as; 120 uint32_t l2idx; 121 uint64_t l2; 122 uint32_t num_l2_entries; 123 124 *res = MEMTX_OK; 125 126 if (!td->indirect) { 127 /* Single level table */ 128 return td->base_addr + idx * td->entry_sz; 129 } 130 131 /* Two level table */ 132 l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE); 133 134 l2 = address_space_ldq_le(as, 135 td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE), 136 MEMTXATTRS_UNSPECIFIED, res); 137 if (*res != MEMTX_OK) { 138 return -1; 139 } 140 if (!(l2 & L2_TABLE_VALID_MASK)) { 141 return -1; 142 } 143 144 num_l2_entries = td->page_sz / td->entry_sz; 145 return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz; 146 } 147 148 /* 149 * Read the Collection Table entry at index @icid. On success (including 150 * successfully determining that there is no valid CTE for this index), 151 * we return MEMTX_OK and populate the CTEntry struct @cte accordingly. 152 * If there is an error reading memory then we return the error code. 153 */ 154 static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte) 155 { 156 AddressSpace *as = &s->gicv3->dma_as; 157 MemTxResult res = MEMTX_OK; 158 uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, &res); 159 uint64_t cteval; 160 161 if (entry_addr == -1) { 162 /* No L2 table entry, i.e. no valid CTE, or a memory error */ 163 cte->valid = false; 164 goto out; 165 } 166 167 cteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res); 168 if (res != MEMTX_OK) { 169 goto out; 170 } 171 cte->valid = FIELD_EX64(cteval, CTE, VALID); 172 cte->rdbase = FIELD_EX64(cteval, CTE, RDBASE); 173 out: 174 if (res != MEMTX_OK) { 175 trace_gicv3_its_cte_read_fault(icid); 176 } else { 177 trace_gicv3_its_cte_read(icid, cte->valid, cte->rdbase); 178 } 179 return res; 180 } 181 182 /* 183 * Update the Interrupt Table entry at index @evinted in the table specified 184 * by the dte @dte. Returns true on success, false if there was a memory 185 * access error. 186 */ 187 static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte, 188 const ITEntry *ite) 189 { 190 AddressSpace *as = &s->gicv3->dma_as; 191 MemTxResult res = MEMTX_OK; 192 hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE; 193 uint64_t itel = 0; 194 uint32_t iteh = 0; 195 196 trace_gicv3_its_ite_write(dte->ittaddr, eventid, ite->valid, 197 ite->inttype, ite->intid, ite->icid, 198 ite->vpeid, ite->doorbell); 199 200 if (ite->valid) { 201 itel = FIELD_DP64(itel, ITE_L, VALID, 1); 202 itel = FIELD_DP64(itel, ITE_L, INTTYPE, ite->inttype); 203 itel = FIELD_DP64(itel, ITE_L, INTID, ite->intid); 204 itel = FIELD_DP64(itel, ITE_L, ICID, ite->icid); 205 itel = FIELD_DP64(itel, ITE_L, VPEID, ite->vpeid); 206 iteh = FIELD_DP32(iteh, ITE_H, DOORBELL, ite->doorbell); 207 } 208 209 address_space_stq_le(as, iteaddr, itel, MEMTXATTRS_UNSPECIFIED, &res); 210 if (res != MEMTX_OK) { 211 return false; 212 } 213 address_space_stl_le(as, iteaddr + 8, iteh, MEMTXATTRS_UNSPECIFIED, &res); 214 return res == MEMTX_OK; 215 } 216 217 /* 218 * Read the Interrupt Table entry at index @eventid from the table specified 219 * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry 220 * struct @ite accordingly. If there is an error reading memory then we return 221 * the error code. 222 */ 223 static MemTxResult get_ite(GICv3ITSState *s, uint32_t eventid, 224 const DTEntry *dte, ITEntry *ite) 225 { 226 AddressSpace *as = &s->gicv3->dma_as; 227 MemTxResult res = MEMTX_OK; 228 uint64_t itel; 229 uint32_t iteh; 230 hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE; 231 232 itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, &res); 233 if (res != MEMTX_OK) { 234 trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid); 235 return res; 236 } 237 238 iteh = address_space_ldl_le(as, iteaddr + 8, MEMTXATTRS_UNSPECIFIED, &res); 239 if (res != MEMTX_OK) { 240 trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid); 241 return res; 242 } 243 244 ite->valid = FIELD_EX64(itel, ITE_L, VALID); 245 ite->inttype = FIELD_EX64(itel, ITE_L, INTTYPE); 246 ite->intid = FIELD_EX64(itel, ITE_L, INTID); 247 ite->icid = FIELD_EX64(itel, ITE_L, ICID); 248 ite->vpeid = FIELD_EX64(itel, ITE_L, VPEID); 249 ite->doorbell = FIELD_EX64(iteh, ITE_H, DOORBELL); 250 trace_gicv3_its_ite_read(dte->ittaddr, eventid, ite->valid, 251 ite->inttype, ite->intid, ite->icid, 252 ite->vpeid, ite->doorbell); 253 return MEMTX_OK; 254 } 255 256 /* 257 * Read the Device Table entry at index @devid. On success (including 258 * successfully determining that there is no valid DTE for this index), 259 * we return MEMTX_OK and populate the DTEntry struct accordingly. 260 * If there is an error reading memory then we return the error code. 261 */ 262 static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte) 263 { 264 MemTxResult res = MEMTX_OK; 265 AddressSpace *as = &s->gicv3->dma_as; 266 uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res); 267 uint64_t dteval; 268 269 if (entry_addr == -1) { 270 /* No L2 table entry, i.e. no valid DTE, or a memory error */ 271 dte->valid = false; 272 goto out; 273 } 274 dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res); 275 if (res != MEMTX_OK) { 276 goto out; 277 } 278 dte->valid = FIELD_EX64(dteval, DTE, VALID); 279 dte->size = FIELD_EX64(dteval, DTE, SIZE); 280 /* DTE word field stores bits [51:8] of the ITT address */ 281 dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT; 282 out: 283 if (res != MEMTX_OK) { 284 trace_gicv3_its_dte_read_fault(devid); 285 } else { 286 trace_gicv3_its_dte_read(devid, dte->valid, dte->size, dte->ittaddr); 287 } 288 return res; 289 } 290 291 /* 292 * This function handles the processing of following commands based on 293 * the ItsCmdType parameter passed:- 294 * 1. triggering of lpi interrupt translation via ITS INT command 295 * 2. triggering of lpi interrupt translation via gits_translater register 296 * 3. handling of ITS CLEAR command 297 * 4. handling of ITS DISCARD command 298 */ 299 static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid, 300 uint32_t eventid, ItsCmdType cmd) 301 { 302 uint64_t num_eventids; 303 DTEntry dte; 304 CTEntry cte; 305 ITEntry ite; 306 307 if (devid >= s->dt.num_entries) { 308 qemu_log_mask(LOG_GUEST_ERROR, 309 "%s: invalid command attributes: devid %d>=%d", 310 __func__, devid, s->dt.num_entries); 311 return CMD_CONTINUE; 312 } 313 314 if (get_dte(s, devid, &dte) != MEMTX_OK) { 315 return CMD_STALL; 316 } 317 if (!dte.valid) { 318 qemu_log_mask(LOG_GUEST_ERROR, 319 "%s: invalid command attributes: " 320 "invalid dte for %d\n", __func__, devid); 321 return CMD_CONTINUE; 322 } 323 324 num_eventids = 1ULL << (dte.size + 1); 325 if (eventid >= num_eventids) { 326 qemu_log_mask(LOG_GUEST_ERROR, 327 "%s: invalid command attributes: eventid %d >= %" 328 PRId64 "\n", 329 __func__, eventid, num_eventids); 330 return CMD_CONTINUE; 331 } 332 333 if (get_ite(s, eventid, &dte, &ite) != MEMTX_OK) { 334 return CMD_STALL; 335 } 336 337 if (!ite.valid || ite.inttype != ITE_INTTYPE_PHYSICAL) { 338 qemu_log_mask(LOG_GUEST_ERROR, 339 "%s: invalid command attributes: invalid ITE\n", 340 __func__); 341 return CMD_CONTINUE; 342 } 343 344 if (ite.icid >= s->ct.num_entries) { 345 qemu_log_mask(LOG_GUEST_ERROR, 346 "%s: invalid ICID 0x%x in ITE (table corrupted?)\n", 347 __func__, ite.icid); 348 return CMD_CONTINUE; 349 } 350 351 if (get_cte(s, ite.icid, &cte) != MEMTX_OK) { 352 return CMD_STALL; 353 } 354 if (!cte.valid) { 355 qemu_log_mask(LOG_GUEST_ERROR, 356 "%s: invalid command attributes: invalid CTE\n", 357 __func__); 358 return CMD_CONTINUE; 359 } 360 361 /* 362 * Current implementation only supports rdbase == procnum 363 * Hence rdbase physical address is ignored 364 */ 365 if (cte.rdbase >= s->gicv3->num_cpu) { 366 return CMD_CONTINUE; 367 } 368 369 if ((cmd == CLEAR) || (cmd == DISCARD)) { 370 gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 0); 371 } else { 372 gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 1); 373 } 374 375 if (cmd == DISCARD) { 376 ITEntry ite = {}; 377 /* remove mapping from interrupt translation table */ 378 ite.valid = false; 379 return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL; 380 } 381 return CMD_CONTINUE; 382 } 383 384 static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt, 385 ItsCmdType cmd) 386 { 387 uint32_t devid, eventid; 388 389 devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT; 390 eventid = cmdpkt[1] & EVENTID_MASK; 391 switch (cmd) { 392 case INTERRUPT: 393 trace_gicv3_its_cmd_int(devid, eventid); 394 break; 395 case CLEAR: 396 trace_gicv3_its_cmd_clear(devid, eventid); 397 break; 398 case DISCARD: 399 trace_gicv3_its_cmd_discard(devid, eventid); 400 break; 401 default: 402 g_assert_not_reached(); 403 } 404 return do_process_its_cmd(s, devid, eventid, cmd); 405 } 406 407 static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt, 408 bool ignore_pInt) 409 { 410 uint32_t devid, eventid; 411 uint32_t pIntid = 0; 412 uint64_t num_eventids; 413 uint32_t num_intids; 414 uint16_t icid = 0; 415 DTEntry dte; 416 ITEntry ite; 417 418 devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT; 419 eventid = cmdpkt[1] & EVENTID_MASK; 420 icid = cmdpkt[2] & ICID_MASK; 421 422 if (ignore_pInt) { 423 pIntid = eventid; 424 trace_gicv3_its_cmd_mapi(devid, eventid, icid); 425 } else { 426 pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT; 427 trace_gicv3_its_cmd_mapti(devid, eventid, icid, pIntid); 428 } 429 430 if (devid >= s->dt.num_entries) { 431 qemu_log_mask(LOG_GUEST_ERROR, 432 "%s: invalid command attributes: devid %d>=%d", 433 __func__, devid, s->dt.num_entries); 434 return CMD_CONTINUE; 435 } 436 437 if (get_dte(s, devid, &dte) != MEMTX_OK) { 438 return CMD_STALL; 439 } 440 num_eventids = 1ULL << (dte.size + 1); 441 num_intids = 1ULL << (GICD_TYPER_IDBITS + 1); 442 443 if (icid >= s->ct.num_entries) { 444 qemu_log_mask(LOG_GUEST_ERROR, 445 "%s: invalid ICID 0x%x >= 0x%x\n", 446 __func__, icid, s->ct.num_entries); 447 return CMD_CONTINUE; 448 } 449 450 if (!dte.valid) { 451 qemu_log_mask(LOG_GUEST_ERROR, 452 "%s: no valid DTE for devid 0x%x\n", __func__, devid); 453 return CMD_CONTINUE; 454 } 455 456 if (eventid >= num_eventids) { 457 qemu_log_mask(LOG_GUEST_ERROR, 458 "%s: invalid event ID 0x%x >= 0x%" PRIx64 "\n", 459 __func__, eventid, num_eventids); 460 return CMD_CONTINUE; 461 } 462 463 if (pIntid < GICV3_LPI_INTID_START || pIntid >= num_intids) { 464 qemu_log_mask(LOG_GUEST_ERROR, 465 "%s: invalid interrupt ID 0x%x\n", __func__, pIntid); 466 return CMD_CONTINUE; 467 } 468 469 /* add ite entry to interrupt translation table */ 470 ite.valid = true; 471 ite.inttype = ITE_INTTYPE_PHYSICAL; 472 ite.intid = pIntid; 473 ite.icid = icid; 474 ite.doorbell = INTID_SPURIOUS; 475 ite.vpeid = 0; 476 return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL; 477 } 478 479 /* 480 * Update the Collection Table entry for @icid to @cte. Returns true 481 * on success, false if there was a memory access error. 482 */ 483 static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte) 484 { 485 AddressSpace *as = &s->gicv3->dma_as; 486 uint64_t entry_addr; 487 uint64_t cteval = 0; 488 MemTxResult res = MEMTX_OK; 489 490 trace_gicv3_its_cte_write(icid, cte->valid, cte->rdbase); 491 492 if (cte->valid) { 493 /* add mapping entry to collection table */ 494 cteval = FIELD_DP64(cteval, CTE, VALID, 1); 495 cteval = FIELD_DP64(cteval, CTE, RDBASE, cte->rdbase); 496 } 497 498 entry_addr = table_entry_addr(s, &s->ct, icid, &res); 499 if (res != MEMTX_OK) { 500 /* memory access error: stall */ 501 return false; 502 } 503 if (entry_addr == -1) { 504 /* No L2 table for this index: discard write and continue */ 505 return true; 506 } 507 508 address_space_stq_le(as, entry_addr, cteval, MEMTXATTRS_UNSPECIFIED, &res); 509 return res == MEMTX_OK; 510 } 511 512 static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt) 513 { 514 uint16_t icid; 515 CTEntry cte; 516 517 icid = cmdpkt[2] & ICID_MASK; 518 cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK; 519 if (cte.valid) { 520 cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT; 521 cte.rdbase &= RDBASE_PROCNUM_MASK; 522 } else { 523 cte.rdbase = 0; 524 } 525 trace_gicv3_its_cmd_mapc(icid, cte.rdbase, cte.valid); 526 527 if (icid >= s->ct.num_entries) { 528 qemu_log_mask(LOG_GUEST_ERROR, "ITS MAPC: invalid ICID 0x%x\n", icid); 529 return CMD_CONTINUE; 530 } 531 if (cte.valid && cte.rdbase >= s->gicv3->num_cpu) { 532 qemu_log_mask(LOG_GUEST_ERROR, 533 "ITS MAPC: invalid RDBASE %u\n", cte.rdbase); 534 return CMD_CONTINUE; 535 } 536 537 return update_cte(s, icid, &cte) ? CMD_CONTINUE : CMD_STALL; 538 } 539 540 /* 541 * Update the Device Table entry for @devid to @dte. Returns true 542 * on success, false if there was a memory access error. 543 */ 544 static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte) 545 { 546 AddressSpace *as = &s->gicv3->dma_as; 547 uint64_t entry_addr; 548 uint64_t dteval = 0; 549 MemTxResult res = MEMTX_OK; 550 551 trace_gicv3_its_dte_write(devid, dte->valid, dte->size, dte->ittaddr); 552 553 if (dte->valid) { 554 /* add mapping entry to device table */ 555 dteval = FIELD_DP64(dteval, DTE, VALID, 1); 556 dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size); 557 dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr); 558 } 559 560 entry_addr = table_entry_addr(s, &s->dt, devid, &res); 561 if (res != MEMTX_OK) { 562 /* memory access error: stall */ 563 return false; 564 } 565 if (entry_addr == -1) { 566 /* No L2 table for this index: discard write and continue */ 567 return true; 568 } 569 address_space_stq_le(as, entry_addr, dteval, MEMTXATTRS_UNSPECIFIED, &res); 570 return res == MEMTX_OK; 571 } 572 573 static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt) 574 { 575 uint32_t devid; 576 DTEntry dte; 577 578 devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT; 579 dte.size = cmdpkt[1] & SIZE_MASK; 580 dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT; 581 dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK; 582 583 trace_gicv3_its_cmd_mapd(devid, dte.size, dte.ittaddr, dte.valid); 584 585 if (devid >= s->dt.num_entries) { 586 qemu_log_mask(LOG_GUEST_ERROR, 587 "ITS MAPD: invalid device ID field 0x%x >= 0x%x\n", 588 devid, s->dt.num_entries); 589 return CMD_CONTINUE; 590 } 591 592 if (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) { 593 qemu_log_mask(LOG_GUEST_ERROR, 594 "ITS MAPD: invalid size %d\n", dte.size); 595 return CMD_CONTINUE; 596 } 597 598 return update_dte(s, devid, &dte) ? CMD_CONTINUE : CMD_STALL; 599 } 600 601 static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt) 602 { 603 uint64_t rd1, rd2; 604 605 rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1); 606 rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2); 607 608 trace_gicv3_its_cmd_movall(rd1, rd2); 609 610 if (rd1 >= s->gicv3->num_cpu) { 611 qemu_log_mask(LOG_GUEST_ERROR, 612 "%s: RDBASE1 %" PRId64 613 " out of range (must be less than %d)\n", 614 __func__, rd1, s->gicv3->num_cpu); 615 return CMD_CONTINUE; 616 } 617 if (rd2 >= s->gicv3->num_cpu) { 618 qemu_log_mask(LOG_GUEST_ERROR, 619 "%s: RDBASE2 %" PRId64 620 " out of range (must be less than %d)\n", 621 __func__, rd2, s->gicv3->num_cpu); 622 return CMD_CONTINUE; 623 } 624 625 if (rd1 == rd2) { 626 /* Move to same target must succeed as a no-op */ 627 return CMD_CONTINUE; 628 } 629 630 /* Move all pending LPIs from redistributor 1 to redistributor 2 */ 631 gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]); 632 633 return CMD_CONTINUE; 634 } 635 636 static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt) 637 { 638 uint32_t devid, eventid; 639 uint16_t new_icid; 640 uint64_t num_eventids; 641 DTEntry dte; 642 CTEntry old_cte, new_cte; 643 ITEntry old_ite; 644 645 devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID); 646 eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID); 647 new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID); 648 649 trace_gicv3_its_cmd_movi(devid, eventid, new_icid); 650 651 if (devid >= s->dt.num_entries) { 652 qemu_log_mask(LOG_GUEST_ERROR, 653 "%s: invalid command attributes: devid %d>=%d", 654 __func__, devid, s->dt.num_entries); 655 return CMD_CONTINUE; 656 } 657 if (get_dte(s, devid, &dte) != MEMTX_OK) { 658 return CMD_STALL; 659 } 660 661 if (!dte.valid) { 662 qemu_log_mask(LOG_GUEST_ERROR, 663 "%s: invalid command attributes: " 664 "invalid dte for %d\n", __func__, devid); 665 return CMD_CONTINUE; 666 } 667 668 num_eventids = 1ULL << (dte.size + 1); 669 if (eventid >= num_eventids) { 670 qemu_log_mask(LOG_GUEST_ERROR, 671 "%s: invalid command attributes: eventid %d >= %" 672 PRId64 "\n", 673 __func__, eventid, num_eventids); 674 return CMD_CONTINUE; 675 } 676 677 if (get_ite(s, eventid, &dte, &old_ite) != MEMTX_OK) { 678 return CMD_STALL; 679 } 680 681 if (!old_ite.valid || old_ite.inttype != ITE_INTTYPE_PHYSICAL) { 682 qemu_log_mask(LOG_GUEST_ERROR, 683 "%s: invalid command attributes: invalid ITE\n", 684 __func__); 685 return CMD_CONTINUE; 686 } 687 688 if (old_ite.icid >= s->ct.num_entries) { 689 qemu_log_mask(LOG_GUEST_ERROR, 690 "%s: invalid ICID 0x%x in ITE (table corrupted?)\n", 691 __func__, old_ite.icid); 692 return CMD_CONTINUE; 693 } 694 695 if (new_icid >= s->ct.num_entries) { 696 qemu_log_mask(LOG_GUEST_ERROR, 697 "%s: invalid command attributes: ICID 0x%x\n", 698 __func__, new_icid); 699 return CMD_CONTINUE; 700 } 701 702 if (get_cte(s, old_ite.icid, &old_cte) != MEMTX_OK) { 703 return CMD_STALL; 704 } 705 if (!old_cte.valid) { 706 qemu_log_mask(LOG_GUEST_ERROR, 707 "%s: invalid command attributes: " 708 "invalid CTE for old ICID 0x%x\n", 709 __func__, old_ite.icid); 710 return CMD_CONTINUE; 711 } 712 713 if (get_cte(s, new_icid, &new_cte) != MEMTX_OK) { 714 return CMD_STALL; 715 } 716 if (!new_cte.valid) { 717 qemu_log_mask(LOG_GUEST_ERROR, 718 "%s: invalid command attributes: " 719 "invalid CTE for new ICID 0x%x\n", 720 __func__, new_icid); 721 return CMD_CONTINUE; 722 } 723 724 if (old_cte.rdbase >= s->gicv3->num_cpu) { 725 qemu_log_mask(LOG_GUEST_ERROR, 726 "%s: CTE has invalid rdbase 0x%x\n", 727 __func__, old_cte.rdbase); 728 return CMD_CONTINUE; 729 } 730 731 if (new_cte.rdbase >= s->gicv3->num_cpu) { 732 qemu_log_mask(LOG_GUEST_ERROR, 733 "%s: CTE has invalid rdbase 0x%x\n", 734 __func__, new_cte.rdbase); 735 return CMD_CONTINUE; 736 } 737 738 if (old_cte.rdbase != new_cte.rdbase) { 739 /* Move the LPI from the old redistributor to the new one */ 740 gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase], 741 &s->gicv3->cpu[new_cte.rdbase], 742 old_ite.intid); 743 } 744 745 /* Update the ICID field in the interrupt translation table entry */ 746 old_ite.icid = new_icid; 747 return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE : CMD_STALL; 748 } 749 750 /* 751 * Current implementation blocks until all 752 * commands are processed 753 */ 754 static void process_cmdq(GICv3ITSState *s) 755 { 756 uint32_t wr_offset = 0; 757 uint32_t rd_offset = 0; 758 uint32_t cq_offset = 0; 759 AddressSpace *as = &s->gicv3->dma_as; 760 uint8_t cmd; 761 int i; 762 763 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) { 764 return; 765 } 766 767 wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET); 768 769 if (wr_offset >= s->cq.num_entries) { 770 qemu_log_mask(LOG_GUEST_ERROR, 771 "%s: invalid write offset " 772 "%d\n", __func__, wr_offset); 773 return; 774 } 775 776 rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET); 777 778 if (rd_offset >= s->cq.num_entries) { 779 qemu_log_mask(LOG_GUEST_ERROR, 780 "%s: invalid read offset " 781 "%d\n", __func__, rd_offset); 782 return; 783 } 784 785 while (wr_offset != rd_offset) { 786 ItsCmdResult result = CMD_CONTINUE; 787 void *hostmem; 788 hwaddr buflen; 789 uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS]; 790 791 cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE); 792 793 buflen = GITS_CMDQ_ENTRY_SIZE; 794 hostmem = address_space_map(as, s->cq.base_addr + cq_offset, 795 &buflen, false, MEMTXATTRS_UNSPECIFIED); 796 if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) { 797 if (hostmem) { 798 address_space_unmap(as, hostmem, buflen, false, 0); 799 } 800 s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1); 801 qemu_log_mask(LOG_GUEST_ERROR, 802 "%s: could not read command at 0x%" PRIx64 "\n", 803 __func__, s->cq.base_addr + cq_offset); 804 break; 805 } 806 for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) { 807 cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t)); 808 } 809 address_space_unmap(as, hostmem, buflen, false, 0); 810 811 cmd = cmdpkt[0] & CMD_MASK; 812 813 trace_gicv3_its_process_command(rd_offset, cmd); 814 815 switch (cmd) { 816 case GITS_CMD_INT: 817 result = process_its_cmd(s, cmdpkt, INTERRUPT); 818 break; 819 case GITS_CMD_CLEAR: 820 result = process_its_cmd(s, cmdpkt, CLEAR); 821 break; 822 case GITS_CMD_SYNC: 823 /* 824 * Current implementation makes a blocking synchronous call 825 * for every command issued earlier, hence the internal state 826 * is already consistent by the time SYNC command is executed. 827 * Hence no further processing is required for SYNC command. 828 */ 829 trace_gicv3_its_cmd_sync(); 830 break; 831 case GITS_CMD_MAPD: 832 result = process_mapd(s, cmdpkt); 833 break; 834 case GITS_CMD_MAPC: 835 result = process_mapc(s, cmdpkt); 836 break; 837 case GITS_CMD_MAPTI: 838 result = process_mapti(s, cmdpkt, false); 839 break; 840 case GITS_CMD_MAPI: 841 result = process_mapti(s, cmdpkt, true); 842 break; 843 case GITS_CMD_DISCARD: 844 result = process_its_cmd(s, cmdpkt, DISCARD); 845 break; 846 case GITS_CMD_INV: 847 case GITS_CMD_INVALL: 848 /* 849 * Current implementation doesn't cache any ITS tables, 850 * but the calculated lpi priority information. We only 851 * need to trigger lpi priority re-calculation to be in 852 * sync with LPI config table or pending table changes. 853 */ 854 trace_gicv3_its_cmd_inv(); 855 for (i = 0; i < s->gicv3->num_cpu; i++) { 856 gicv3_redist_update_lpi(&s->gicv3->cpu[i]); 857 } 858 break; 859 case GITS_CMD_MOVI: 860 result = process_movi(s, cmdpkt); 861 break; 862 case GITS_CMD_MOVALL: 863 result = process_movall(s, cmdpkt); 864 break; 865 default: 866 trace_gicv3_its_cmd_unknown(cmd); 867 break; 868 } 869 if (result == CMD_CONTINUE) { 870 rd_offset++; 871 rd_offset %= s->cq.num_entries; 872 s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset); 873 } else { 874 /* CMD_STALL */ 875 s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1); 876 qemu_log_mask(LOG_GUEST_ERROR, 877 "%s: 0x%x cmd processing failed, stalling\n", 878 __func__, cmd); 879 break; 880 } 881 } 882 } 883 884 /* 885 * This function extracts the ITS Device and Collection table specific 886 * parameters (like base_addr, size etc) from GITS_BASER register. 887 * It is called during ITS enable and also during post_load migration 888 */ 889 static void extract_table_params(GICv3ITSState *s) 890 { 891 uint16_t num_pages = 0; 892 uint8_t page_sz_type; 893 uint8_t type; 894 uint32_t page_sz = 0; 895 uint64_t value; 896 897 for (int i = 0; i < 8; i++) { 898 TableDesc *td; 899 int idbits; 900 901 value = s->baser[i]; 902 903 if (!value) { 904 continue; 905 } 906 907 page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE); 908 909 switch (page_sz_type) { 910 case 0: 911 page_sz = GITS_PAGE_SIZE_4K; 912 break; 913 914 case 1: 915 page_sz = GITS_PAGE_SIZE_16K; 916 break; 917 918 case 2: 919 case 3: 920 page_sz = GITS_PAGE_SIZE_64K; 921 break; 922 923 default: 924 g_assert_not_reached(); 925 } 926 927 num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1; 928 929 type = FIELD_EX64(value, GITS_BASER, TYPE); 930 931 switch (type) { 932 case GITS_BASER_TYPE_DEVICE: 933 td = &s->dt; 934 idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1; 935 break; 936 case GITS_BASER_TYPE_COLLECTION: 937 td = &s->ct; 938 if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) { 939 idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1; 940 } else { 941 /* 16-bit CollectionId supported when CIL == 0 */ 942 idbits = 16; 943 } 944 break; 945 default: 946 /* 947 * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK 948 * ensures we will only see type values corresponding to 949 * the values set up in gicv3_its_reset(). 950 */ 951 g_assert_not_reached(); 952 } 953 954 memset(td, 0, sizeof(*td)); 955 /* 956 * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process 957 * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we 958 * do not have a special case where the GITS_BASER<n>.Valid bit is 0 959 * for the register corresponding to the Collection table but we 960 * still have to process interrupts using non-memory-backed 961 * Collection table entries.) 962 * The specification makes it UNPREDICTABLE to enable the ITS without 963 * marking each BASER<n> as valid. We choose to handle these as if 964 * the table was zero-sized, so commands using the table will fail 965 * and interrupts requested via GITS_TRANSLATER writes will be ignored. 966 * This happens automatically by leaving the num_entries field at 967 * zero, which will be caught by the bounds checks we have before 968 * every table lookup anyway. 969 */ 970 if (!FIELD_EX64(value, GITS_BASER, VALID)) { 971 continue; 972 } 973 td->page_sz = page_sz; 974 td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT); 975 td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1; 976 td->base_addr = baser_base_addr(value, page_sz); 977 if (!td->indirect) { 978 td->num_entries = (num_pages * page_sz) / td->entry_sz; 979 } else { 980 td->num_entries = (((num_pages * page_sz) / 981 L1TABLE_ENTRY_SIZE) * 982 (page_sz / td->entry_sz)); 983 } 984 td->num_entries = MIN(td->num_entries, 1ULL << idbits); 985 } 986 } 987 988 static void extract_cmdq_params(GICv3ITSState *s) 989 { 990 uint16_t num_pages = 0; 991 uint64_t value = s->cbaser; 992 993 num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1; 994 995 memset(&s->cq, 0 , sizeof(s->cq)); 996 997 if (FIELD_EX64(value, GITS_CBASER, VALID)) { 998 s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) / 999 GITS_CMDQ_ENTRY_SIZE; 1000 s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR); 1001 s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT; 1002 } 1003 } 1004 1005 static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset, 1006 uint64_t *data, unsigned size, 1007 MemTxAttrs attrs) 1008 { 1009 /* 1010 * GITS_TRANSLATER is write-only, and all other addresses 1011 * in the interrupt translation space frame are RES0. 1012 */ 1013 *data = 0; 1014 return MEMTX_OK; 1015 } 1016 1017 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset, 1018 uint64_t data, unsigned size, 1019 MemTxAttrs attrs) 1020 { 1021 GICv3ITSState *s = (GICv3ITSState *)opaque; 1022 bool result = true; 1023 1024 trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id); 1025 1026 switch (offset) { 1027 case GITS_TRANSLATER: 1028 if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) { 1029 result = do_process_its_cmd(s, attrs.requester_id, data, NONE); 1030 } 1031 break; 1032 default: 1033 break; 1034 } 1035 1036 if (result) { 1037 return MEMTX_OK; 1038 } else { 1039 return MEMTX_ERROR; 1040 } 1041 } 1042 1043 static bool its_writel(GICv3ITSState *s, hwaddr offset, 1044 uint64_t value, MemTxAttrs attrs) 1045 { 1046 bool result = true; 1047 int index; 1048 1049 switch (offset) { 1050 case GITS_CTLR: 1051 if (value & R_GITS_CTLR_ENABLED_MASK) { 1052 s->ctlr |= R_GITS_CTLR_ENABLED_MASK; 1053 extract_table_params(s); 1054 extract_cmdq_params(s); 1055 process_cmdq(s); 1056 } else { 1057 s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK; 1058 } 1059 break; 1060 case GITS_CBASER: 1061 /* 1062 * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is 1063 * already enabled 1064 */ 1065 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) { 1066 s->cbaser = deposit64(s->cbaser, 0, 32, value); 1067 s->creadr = 0; 1068 } 1069 break; 1070 case GITS_CBASER + 4: 1071 /* 1072 * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is 1073 * already enabled 1074 */ 1075 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) { 1076 s->cbaser = deposit64(s->cbaser, 32, 32, value); 1077 s->creadr = 0; 1078 } 1079 break; 1080 case GITS_CWRITER: 1081 s->cwriter = deposit64(s->cwriter, 0, 32, 1082 (value & ~R_GITS_CWRITER_RETRY_MASK)); 1083 if (s->cwriter != s->creadr) { 1084 process_cmdq(s); 1085 } 1086 break; 1087 case GITS_CWRITER + 4: 1088 s->cwriter = deposit64(s->cwriter, 32, 32, value); 1089 break; 1090 case GITS_CREADR: 1091 if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) { 1092 s->creadr = deposit64(s->creadr, 0, 32, 1093 (value & ~R_GITS_CREADR_STALLED_MASK)); 1094 } else { 1095 /* RO register, ignore the write */ 1096 qemu_log_mask(LOG_GUEST_ERROR, 1097 "%s: invalid guest write to RO register at offset " 1098 TARGET_FMT_plx "\n", __func__, offset); 1099 } 1100 break; 1101 case GITS_CREADR + 4: 1102 if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) { 1103 s->creadr = deposit64(s->creadr, 32, 32, value); 1104 } else { 1105 /* RO register, ignore the write */ 1106 qemu_log_mask(LOG_GUEST_ERROR, 1107 "%s: invalid guest write to RO register at offset " 1108 TARGET_FMT_plx "\n", __func__, offset); 1109 } 1110 break; 1111 case GITS_BASER ... GITS_BASER + 0x3f: 1112 /* 1113 * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is 1114 * already enabled 1115 */ 1116 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) { 1117 index = (offset - GITS_BASER) / 8; 1118 1119 if (s->baser[index] == 0) { 1120 /* Unimplemented GITS_BASERn: RAZ/WI */ 1121 break; 1122 } 1123 if (offset & 7) { 1124 value <<= 32; 1125 value &= ~GITS_BASER_RO_MASK; 1126 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32); 1127 s->baser[index] |= value; 1128 } else { 1129 value &= ~GITS_BASER_RO_MASK; 1130 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32); 1131 s->baser[index] |= value; 1132 } 1133 } 1134 break; 1135 case GITS_IIDR: 1136 case GITS_IDREGS ... GITS_IDREGS + 0x2f: 1137 /* RO registers, ignore the write */ 1138 qemu_log_mask(LOG_GUEST_ERROR, 1139 "%s: invalid guest write to RO register at offset " 1140 TARGET_FMT_plx "\n", __func__, offset); 1141 break; 1142 default: 1143 result = false; 1144 break; 1145 } 1146 return result; 1147 } 1148 1149 static bool its_readl(GICv3ITSState *s, hwaddr offset, 1150 uint64_t *data, MemTxAttrs attrs) 1151 { 1152 bool result = true; 1153 int index; 1154 1155 switch (offset) { 1156 case GITS_CTLR: 1157 *data = s->ctlr; 1158 break; 1159 case GITS_IIDR: 1160 *data = gicv3_iidr(); 1161 break; 1162 case GITS_IDREGS ... GITS_IDREGS + 0x2f: 1163 /* ID registers */ 1164 *data = gicv3_idreg(offset - GITS_IDREGS, GICV3_PIDR0_ITS); 1165 break; 1166 case GITS_TYPER: 1167 *data = extract64(s->typer, 0, 32); 1168 break; 1169 case GITS_TYPER + 4: 1170 *data = extract64(s->typer, 32, 32); 1171 break; 1172 case GITS_CBASER: 1173 *data = extract64(s->cbaser, 0, 32); 1174 break; 1175 case GITS_CBASER + 4: 1176 *data = extract64(s->cbaser, 32, 32); 1177 break; 1178 case GITS_CREADR: 1179 *data = extract64(s->creadr, 0, 32); 1180 break; 1181 case GITS_CREADR + 4: 1182 *data = extract64(s->creadr, 32, 32); 1183 break; 1184 case GITS_CWRITER: 1185 *data = extract64(s->cwriter, 0, 32); 1186 break; 1187 case GITS_CWRITER + 4: 1188 *data = extract64(s->cwriter, 32, 32); 1189 break; 1190 case GITS_BASER ... GITS_BASER + 0x3f: 1191 index = (offset - GITS_BASER) / 8; 1192 if (offset & 7) { 1193 *data = extract64(s->baser[index], 32, 32); 1194 } else { 1195 *data = extract64(s->baser[index], 0, 32); 1196 } 1197 break; 1198 default: 1199 result = false; 1200 break; 1201 } 1202 return result; 1203 } 1204 1205 static bool its_writell(GICv3ITSState *s, hwaddr offset, 1206 uint64_t value, MemTxAttrs attrs) 1207 { 1208 bool result = true; 1209 int index; 1210 1211 switch (offset) { 1212 case GITS_BASER ... GITS_BASER + 0x3f: 1213 /* 1214 * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is 1215 * already enabled 1216 */ 1217 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) { 1218 index = (offset - GITS_BASER) / 8; 1219 if (s->baser[index] == 0) { 1220 /* Unimplemented GITS_BASERn: RAZ/WI */ 1221 break; 1222 } 1223 s->baser[index] &= GITS_BASER_RO_MASK; 1224 s->baser[index] |= (value & ~GITS_BASER_RO_MASK); 1225 } 1226 break; 1227 case GITS_CBASER: 1228 /* 1229 * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is 1230 * already enabled 1231 */ 1232 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) { 1233 s->cbaser = value; 1234 s->creadr = 0; 1235 } 1236 break; 1237 case GITS_CWRITER: 1238 s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK; 1239 if (s->cwriter != s->creadr) { 1240 process_cmdq(s); 1241 } 1242 break; 1243 case GITS_CREADR: 1244 if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) { 1245 s->creadr = value & ~R_GITS_CREADR_STALLED_MASK; 1246 } else { 1247 /* RO register, ignore the write */ 1248 qemu_log_mask(LOG_GUEST_ERROR, 1249 "%s: invalid guest write to RO register at offset " 1250 TARGET_FMT_plx "\n", __func__, offset); 1251 } 1252 break; 1253 case GITS_TYPER: 1254 /* RO registers, ignore the write */ 1255 qemu_log_mask(LOG_GUEST_ERROR, 1256 "%s: invalid guest write to RO register at offset " 1257 TARGET_FMT_plx "\n", __func__, offset); 1258 break; 1259 default: 1260 result = false; 1261 break; 1262 } 1263 return result; 1264 } 1265 1266 static bool its_readll(GICv3ITSState *s, hwaddr offset, 1267 uint64_t *data, MemTxAttrs attrs) 1268 { 1269 bool result = true; 1270 int index; 1271 1272 switch (offset) { 1273 case GITS_TYPER: 1274 *data = s->typer; 1275 break; 1276 case GITS_BASER ... GITS_BASER + 0x3f: 1277 index = (offset - GITS_BASER) / 8; 1278 *data = s->baser[index]; 1279 break; 1280 case GITS_CBASER: 1281 *data = s->cbaser; 1282 break; 1283 case GITS_CREADR: 1284 *data = s->creadr; 1285 break; 1286 case GITS_CWRITER: 1287 *data = s->cwriter; 1288 break; 1289 default: 1290 result = false; 1291 break; 1292 } 1293 return result; 1294 } 1295 1296 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data, 1297 unsigned size, MemTxAttrs attrs) 1298 { 1299 GICv3ITSState *s = (GICv3ITSState *)opaque; 1300 bool result; 1301 1302 switch (size) { 1303 case 4: 1304 result = its_readl(s, offset, data, attrs); 1305 break; 1306 case 8: 1307 result = its_readll(s, offset, data, attrs); 1308 break; 1309 default: 1310 result = false; 1311 break; 1312 } 1313 1314 if (!result) { 1315 qemu_log_mask(LOG_GUEST_ERROR, 1316 "%s: invalid guest read at offset " TARGET_FMT_plx 1317 " size %u\n", __func__, offset, size); 1318 trace_gicv3_its_badread(offset, size); 1319 /* 1320 * The spec requires that reserved registers are RAZ/WI; 1321 * so use false returns from leaf functions as a way to 1322 * trigger the guest-error logging but don't return it to 1323 * the caller, or we'll cause a spurious guest data abort. 1324 */ 1325 *data = 0; 1326 } else { 1327 trace_gicv3_its_read(offset, *data, size); 1328 } 1329 return MEMTX_OK; 1330 } 1331 1332 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data, 1333 unsigned size, MemTxAttrs attrs) 1334 { 1335 GICv3ITSState *s = (GICv3ITSState *)opaque; 1336 bool result; 1337 1338 switch (size) { 1339 case 4: 1340 result = its_writel(s, offset, data, attrs); 1341 break; 1342 case 8: 1343 result = its_writell(s, offset, data, attrs); 1344 break; 1345 default: 1346 result = false; 1347 break; 1348 } 1349 1350 if (!result) { 1351 qemu_log_mask(LOG_GUEST_ERROR, 1352 "%s: invalid guest write at offset " TARGET_FMT_plx 1353 " size %u\n", __func__, offset, size); 1354 trace_gicv3_its_badwrite(offset, data, size); 1355 /* 1356 * The spec requires that reserved registers are RAZ/WI; 1357 * so use false returns from leaf functions as a way to 1358 * trigger the guest-error logging but don't return it to 1359 * the caller, or we'll cause a spurious guest data abort. 1360 */ 1361 } else { 1362 trace_gicv3_its_write(offset, data, size); 1363 } 1364 return MEMTX_OK; 1365 } 1366 1367 static const MemoryRegionOps gicv3_its_control_ops = { 1368 .read_with_attrs = gicv3_its_read, 1369 .write_with_attrs = gicv3_its_write, 1370 .valid.min_access_size = 4, 1371 .valid.max_access_size = 8, 1372 .impl.min_access_size = 4, 1373 .impl.max_access_size = 8, 1374 .endianness = DEVICE_NATIVE_ENDIAN, 1375 }; 1376 1377 static const MemoryRegionOps gicv3_its_translation_ops = { 1378 .read_with_attrs = gicv3_its_translation_read, 1379 .write_with_attrs = gicv3_its_translation_write, 1380 .valid.min_access_size = 2, 1381 .valid.max_access_size = 4, 1382 .impl.min_access_size = 2, 1383 .impl.max_access_size = 4, 1384 .endianness = DEVICE_NATIVE_ENDIAN, 1385 }; 1386 1387 static void gicv3_arm_its_realize(DeviceState *dev, Error **errp) 1388 { 1389 GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev); 1390 int i; 1391 1392 for (i = 0; i < s->gicv3->num_cpu; i++) { 1393 if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) { 1394 error_setg(errp, "Physical LPI not supported by CPU %d", i); 1395 return; 1396 } 1397 } 1398 1399 gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops); 1400 1401 /* set the ITS default features supported */ 1402 s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1); 1403 s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE, 1404 ITS_ITT_ENTRY_SIZE - 1); 1405 s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS); 1406 s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS); 1407 s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1); 1408 s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS); 1409 } 1410 1411 static void gicv3_its_reset(DeviceState *dev) 1412 { 1413 GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev); 1414 GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s); 1415 1416 c->parent_reset(dev); 1417 1418 /* Quiescent bit reset to 1 */ 1419 s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1); 1420 1421 /* 1422 * setting GITS_BASER0.Type = 0b001 (Device) 1423 * GITS_BASER1.Type = 0b100 (Collection Table) 1424 * GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented) 1425 * GITS_BASER<0,1>.Page_Size = 64KB 1426 * and default translation table entry size to 16 bytes 1427 */ 1428 s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE, 1429 GITS_BASER_TYPE_DEVICE); 1430 s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE, 1431 GITS_BASER_PAGESIZE_64K); 1432 s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE, 1433 GITS_DTE_SIZE - 1); 1434 1435 s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE, 1436 GITS_BASER_TYPE_COLLECTION); 1437 s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE, 1438 GITS_BASER_PAGESIZE_64K); 1439 s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE, 1440 GITS_CTE_SIZE - 1); 1441 } 1442 1443 static void gicv3_its_post_load(GICv3ITSState *s) 1444 { 1445 if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) { 1446 extract_table_params(s); 1447 extract_cmdq_params(s); 1448 } 1449 } 1450 1451 static Property gicv3_its_props[] = { 1452 DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3", 1453 GICv3State *), 1454 DEFINE_PROP_END_OF_LIST(), 1455 }; 1456 1457 static void gicv3_its_class_init(ObjectClass *klass, void *data) 1458 { 1459 DeviceClass *dc = DEVICE_CLASS(klass); 1460 GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass); 1461 GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass); 1462 1463 dc->realize = gicv3_arm_its_realize; 1464 device_class_set_props(dc, gicv3_its_props); 1465 device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset); 1466 icc->post_load = gicv3_its_post_load; 1467 } 1468 1469 static const TypeInfo gicv3_its_info = { 1470 .name = TYPE_ARM_GICV3_ITS, 1471 .parent = TYPE_ARM_GICV3_ITS_COMMON, 1472 .instance_size = sizeof(GICv3ITSState), 1473 .class_init = gicv3_its_class_init, 1474 .class_size = sizeof(GICv3ITSClass), 1475 }; 1476 1477 static void gicv3_its_register_types(void) 1478 { 1479 type_register_static(&gicv3_its_info); 1480 } 1481 1482 type_init(gicv3_its_register_types) 1483