/*
 * ITS emulation for a GICv3-based system
 *
 * Copyright Linaro.org 2021
 *
 * Authors:
 *  Shashi Mallela <shashi.mallela@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/qdev-properties.h"
#include "hw/intc/arm_gicv3_its_common.h"
#include "gicv3_internal.h"
#include "qom/object.h"
#include "qapi/error.h"

typedef struct GICv3ITSClass GICv3ITSClass;
/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
                     ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)

struct GICv3ITSClass {
    GICv3ITSCommonClass parent_class;
    void (*parent_reset)(DeviceState *dev);
};

/*
 * This is an internal enum used to distinguish between an LPI triggered
 * via the command queue and an LPI triggered via a GITS_TRANSLATER write.
 */
typedef enum ItsCmdType {
    NONE = 0, /* internal indication for GITS_TRANSLATER write */
    CLEAR = 1,
    DISCARD = 2,
    INTERRUPT = 3,
} ItsCmdType;

typedef struct {
    uint32_t iteh;
    uint64_t itel;
} IteEntry;

/*
 * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
 * if a command parameter is not correct. These include both "stall
 * processing of the command queue" and "ignore this command, and
 * keep processing the queue". In our implementation we choose that
 * memory transaction errors reading the command packet provoke a
 * stall, but errors in parameters cause us to ignore the command
 * and continue processing.
 * The process_* functions which handle individual ITS commands all
 * return an ItsCmdResult which tells process_cmdq() whether it should
 * stall or keep going.
 */
typedef enum ItsCmdResult {
    CMD_STALL = 0,
    CMD_CONTINUE = 1,
} ItsCmdResult;

static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
{
    uint64_t result = 0;

    switch (page_sz) {
    case GITS_PAGE_SIZE_4K:
    case GITS_PAGE_SIZE_16K:
        result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
        break;

    case GITS_PAGE_SIZE_64K:
        result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
        result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
        break;

    default:
        break;
    }
    return result;
}

static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
                                 uint32_t idx, MemTxResult *res)
{
    /*
     * Given a TableDesc describing one of the ITS in-guest-memory
     * tables and an index into it, return the guest address
     * corresponding to that table entry.
     * If there was a memory error reading the L1 table of an
     * indirect table, *res is set accordingly, and we return -1.
     * If the L1 table entry is marked not valid, we return -1 with
     * *res set to MEMTX_OK.
     *
     * The specification defines the format of level 1 entries of a
     * 2-level table, but the format of level 2 entries and the format
     * of flat-mapped tables is IMPDEF.
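     *
     * For a two-level (indirect) table the walk below loads the 8-byte
     * L1 entry covering the requested index; that entry holds a Valid
     * bit plus the physical address of the L2 page, and the final entry
     * address is obtained by indexing into that L2 page.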
     */
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t l2idx;
    uint64_t l2;
    uint32_t num_l2_entries;

    *res = MEMTX_OK;

    if (!td->indirect) {
        /* Single level table */
        return td->base_addr + idx * td->entry_sz;
    }

    /* Two level table */
    l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);

    l2 = address_space_ldq_le(as,
                              td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
                              MEMTXATTRS_UNSPECIFIED, res);
    if (*res != MEMTX_OK) {
        return -1;
    }
    if (!(l2 & L2_TABLE_VALID_MASK)) {
        return -1;
    }

    num_l2_entries = td->page_sz / td->entry_sz;
    return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
}

static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
                    MemTxResult *res)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, res);

    if (entry_addr == -1) {
        return false; /* not valid */
    }

    *cte = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, res);
    return FIELD_EX64(*cte, CTE, VALID);
}
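
/*
 * Each ITE occupies 12 bytes in the guest's Interrupt Translation Table,
 * matching the ITT entry size advertised via GITS_TYPER. The in-memory
 * encoding of an ITE is IMPDEF; update_ite() and get_ite() below are the
 * only accessors of that format.
 */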
static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
                       IteEntry ite)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t itt_addr;
    MemTxResult res = MEMTX_OK;

    itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
    itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */

    address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
                         sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED,
                         &res);

    if (res == MEMTX_OK) {
        address_space_stl_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
                             sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh,
                             MEMTXATTRS_UNSPECIFIED, &res);
    }
    if (res != MEMTX_OK) {
        return false;
    } else {
        return true;
    }
}

static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
                    uint16_t *icid, uint32_t *pIntid, MemTxResult *res)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t itt_addr;
    bool status = false;
    IteEntry ite = {};

    itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
    itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */

    ite.itel = address_space_ldq_le(as, itt_addr +
                                    (eventid * (sizeof(uint64_t) +
                                    sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED,
                                    res);

    if (*res == MEMTX_OK) {
        ite.iteh = address_space_ldl_le(as, itt_addr +
                                        (eventid * (sizeof(uint64_t) +
                                        sizeof(uint32_t))) + sizeof(uint32_t),
                                        MEMTXATTRS_UNSPECIFIED, res);

        if (*res == MEMTX_OK) {
            if (FIELD_EX64(ite.itel, ITE_L, VALID)) {
                int inttype = FIELD_EX64(ite.itel, ITE_L, INTTYPE);
                if (inttype == ITE_INTTYPE_PHYSICAL) {
                    *pIntid = FIELD_EX64(ite.itel, ITE_L, INTID);
                    *icid = FIELD_EX32(ite.iteh, ITE_H, ICID);
                    status = true;
                }
            }
        }
    }
    return status;
}

static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, res);

    if (entry_addr == -1) {
        return 0; /* a DTE entry with the Valid bit clear */
    }
    return address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, res);
}

/*
 * This function handles the processing of the following commands, based on
 * the ItsCmdType parameter passed in:
 * 1. triggering of LPI interrupt translation via the ITS INT command
 * 2. triggering of LPI interrupt translation via a GITS_TRANSLATER write
 * 3. handling of the ITS CLEAR command
 * 4. handling of the ITS DISCARD command
 */
static ItsCmdResult process_its_cmd(GICv3ITSState *s, uint64_t value,
                                    uint32_t offset, ItsCmdType cmd)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t devid, eventid;
    MemTxResult res = MEMTX_OK;
    bool dte_valid;
    uint64_t dte = 0;
    uint64_t num_eventids;
    uint16_t icid = 0;
    uint32_t pIntid = 0;
    bool ite_valid = false;
    uint64_t cte = 0;
    bool cte_valid = false;
    uint64_t rdbase;

    if (cmd == NONE) {
        devid = offset;
    } else {
        devid = ((value & DEVID_MASK) >> DEVID_SHIFT);

        offset += NUM_BYTES_IN_DW;
        value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                     MEMTXATTRS_UNSPECIFIED, &res);
    }

    if (res != MEMTX_OK) {
        return CMD_STALL;
    }

    eventid = (value & EVENTID_MASK);

    dte = get_dte(s, devid, &res);

    if (res != MEMTX_OK) {
        return CMD_STALL;
    }
    dte_valid = FIELD_EX64(dte, DTE, VALID);

    if (!dte_valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid dte: %"PRIx64" for %d\n",
                      __func__, dte, devid);
        return CMD_CONTINUE;
    }

    num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1);

    ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res);
    if (res != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!ite_valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: invalid ITE\n",
                      __func__);
        return CMD_CONTINUE;
    }

    cte_valid = get_cte(s, icid, &cte, &res);
    if (res != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!cte_valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid cte: %"PRIx64"\n",
                      __func__, cte);
        return CMD_CONTINUE;
    }

    if (devid >= s->dt.num_ids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: devid %d >= %d\n",
                      __func__, devid, s->dt.num_ids);
        return CMD_CONTINUE;
    }
    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: eventid %d >= %"
                      PRId64 "\n",
                      __func__, eventid, num_eventids);
        return CMD_CONTINUE;
    }

    /*
     * Current implementation only supports rdbase == procnum.
     * Hence the rdbase physical address is ignored.
     */
    rdbase = FIELD_EX64(cte, CTE, RDBASE);

    if (rdbase >= s->gicv3->num_cpu) {
        return CMD_CONTINUE;
    }

    if ((cmd == CLEAR) || (cmd == DISCARD)) {
        gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0);
    } else {
        gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1);
    }

    if (cmd == DISCARD) {
        IteEntry ite = {};
        /* remove mapping from interrupt translation table */
        return update_ite(s, eventid, dte, ite) ? CMD_CONTINUE : CMD_STALL;
    }
    return CMD_CONTINUE;
}
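
/*
 * Handle the MAPTI and MAPI commands: for MAPI (ignore_pInt == true)
 * the physical interrupt ID is taken to be the same as the event ID.
 */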
static ItsCmdResult process_mapti(GICv3ITSState *s, uint64_t value,
                                  uint32_t offset, bool ignore_pInt)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t devid, eventid;
    uint32_t pIntid = 0;
    uint64_t num_eventids;
    uint32_t num_intids;
    bool dte_valid;
    MemTxResult res = MEMTX_OK;
    uint16_t icid = 0;
    uint64_t dte = 0;
    IteEntry ite = {};

    devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
    offset += NUM_BYTES_IN_DW;
    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return CMD_STALL;
    }

    eventid = (value & EVENTID_MASK);

    if (ignore_pInt) {
        pIntid = eventid;
    } else {
        pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT);
    }

    offset += NUM_BYTES_IN_DW;
    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return CMD_STALL;
    }

    icid = value & ICID_MASK;

    dte = get_dte(s, devid, &res);

    if (res != MEMTX_OK) {
        return CMD_STALL;
    }
    dte_valid = FIELD_EX64(dte, DTE, VALID);
    num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1);
    num_intids = 1ULL << (GICD_TYPER_IDBITS + 1);

    if ((devid >= s->dt.num_ids) || (icid >= s->ct.num_ids)
        || !dte_valid || (eventid >= num_eventids) ||
        (((pIntid < GICV3_LPI_INTID_START) || (pIntid >= num_intids)) &&
         (pIntid != INTID_SPURIOUS))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes "
                      "devid %d or icid %d or eventid %d or pIntid %d or "
                      "unmapped dte %d\n", __func__, devid, icid, eventid,
                      pIntid, dte_valid);
        /*
         * in this implementation, in case of error
         * we ignore this command and move on to the next
         * command in the queue
         */
        return CMD_CONTINUE;
    }

    /* add ite entry to interrupt translation table */
    ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, dte_valid);
    ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
    ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, pIntid);
    ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS);
    ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, icid);

    return update_ite(s, eventid, dte, ite) ? CMD_CONTINUE : CMD_STALL;
}
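
/*
 * Write (or invalidate, when valid is false) the Collection Table entry
 * for icid. Returns false only on a memory transaction error, in which
 * case the caller stalls command processing.
 */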
static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
                       uint64_t rdbase)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr;
    uint64_t cte = 0;
    MemTxResult res = MEMTX_OK;

    if (!s->ct.valid) {
        return true;
    }

    if (valid) {
        /* add mapping entry to collection table */
        cte = FIELD_DP64(cte, CTE, VALID, 1);
        cte = FIELD_DP64(cte, CTE, RDBASE, rdbase);
    }

    entry_addr = table_entry_addr(s, &s->ct, icid, &res);
    if (res != MEMTX_OK) {
        /* memory access error: stall */
        return false;
    }
    if (entry_addr == -1) {
        /* No L2 table for this index: discard write and continue */
        return true;
    }

    address_space_stq_le(as, entry_addr, cte, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}

static ItsCmdResult process_mapc(GICv3ITSState *s, uint32_t offset)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint16_t icid;
    uint64_t rdbase;
    bool valid;
    MemTxResult res = MEMTX_OK;
    uint64_t value;

    offset += NUM_BYTES_IN_DW;
    offset += NUM_BYTES_IN_DW;

    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return CMD_STALL;
    }

    icid = value & ICID_MASK;

    rdbase = (value & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
    rdbase &= RDBASE_PROCNUM_MASK;

    valid = (value & CMD_FIELD_VALID_MASK);

    if ((icid >= s->ct.num_ids) || (rdbase >= s->gicv3->num_cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPC: invalid collection table attributes "
                      "icid %d rdbase %" PRIu64 "\n", icid, rdbase);
        /*
         * in this implementation, in case of error
         * we ignore this command and move on to the next
         * command in the queue
         */
        return CMD_CONTINUE;
    }

    return update_cte(s, icid, valid, rdbase) ? CMD_CONTINUE : CMD_STALL;
}
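
/*
 * Write (or invalidate, when valid is false) the Device Table entry for
 * devid. Returns false only on a memory transaction error, in which
 * case the caller stalls command processing.
 */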
static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
                       uint8_t size, uint64_t itt_addr)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr;
    uint64_t dte = 0;
    MemTxResult res = MEMTX_OK;

    if (s->dt.valid) {
        if (valid) {
            /* add mapping entry to device table */
            dte = FIELD_DP64(dte, DTE, VALID, 1);
            dte = FIELD_DP64(dte, DTE, SIZE, size);
            dte = FIELD_DP64(dte, DTE, ITTADDR, itt_addr);
        }
    } else {
        return true;
    }

    entry_addr = table_entry_addr(s, &s->dt, devid, &res);
    if (res != MEMTX_OK) {
        /* memory access error: stall */
        return false;
    }
    if (entry_addr == -1) {
        /* No L2 table for this index: discard write and continue */
        return true;
    }
    address_space_stq_le(as, entry_addr, dte, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}

static ItsCmdResult process_mapd(GICv3ITSState *s, uint64_t value,
                                 uint32_t offset)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t devid;
    uint8_t size;
    uint64_t itt_addr;
    bool valid;
    MemTxResult res = MEMTX_OK;

    devid = ((value & DEVID_MASK) >> DEVID_SHIFT);

    offset += NUM_BYTES_IN_DW;
    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return CMD_STALL;
    }

    size = (value & SIZE_MASK);

    offset += NUM_BYTES_IN_DW;
    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return CMD_STALL;
    }

    itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT;

    valid = (value & CMD_FIELD_VALID_MASK);

    if ((devid >= s->dt.num_ids) ||
        (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPD: invalid device table attributes "
                      "devid %d or size %d\n", devid, size);
        /*
         * in this implementation, in case of error
         * we ignore this command and move on to the next
         * command in the queue
         */
        return CMD_CONTINUE;
    }

    return update_dte(s, devid, valid, size, itt_addr) ? CMD_CONTINUE :
           CMD_STALL;
}
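
/*
 * Each entry in the command queue is GITS_CMDQ_ENTRY_SIZE (32) bytes.
 * process_cmdq() reads the first doubleword of an entry to find the
 * command encoding (CMD_MASK); the individual command handlers fetch
 * any further doublewords they need from the same entry.
 */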
/*
 * Current implementation blocks until all
 * commands are processed
 */
static void process_cmdq(GICv3ITSState *s)
{
    uint32_t wr_offset = 0;
    uint32_t rd_offset = 0;
    uint32_t cq_offset = 0;
    uint64_t data;
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    uint8_t cmd;
    int i;

    if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
        return;
    }

    wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);

    if (wr_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid write offset "
                      "%d\n", __func__, wr_offset);
        return;
    }

    rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);

    if (rd_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid read offset "
                      "%d\n", __func__, rd_offset);
        return;
    }

    while (wr_offset != rd_offset) {
        ItsCmdResult result = CMD_CONTINUE;

        cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
        data = address_space_ldq_le(as, s->cq.base_addr + cq_offset,
                                    MEMTXATTRS_UNSPECIFIED, &res);
        if (res != MEMTX_OK) {
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: could not read command at 0x%" PRIx64 "\n",
                          __func__, s->cq.base_addr + cq_offset);
            break;
        }

        cmd = (data & CMD_MASK);

        switch (cmd) {
        case GITS_CMD_INT:
            result = process_its_cmd(s, data, cq_offset, INTERRUPT);
            break;
        case GITS_CMD_CLEAR:
            result = process_its_cmd(s, data, cq_offset, CLEAR);
            break;
        case GITS_CMD_SYNC:
            /*
             * Current implementation makes a blocking synchronous call
             * for every command issued earlier, so the internal state
             * is already consistent by the time the SYNC command is
             * executed; no further processing is required for SYNC.
             */
            break;
        case GITS_CMD_MAPD:
            result = process_mapd(s, data, cq_offset);
            break;
        case GITS_CMD_MAPC:
            result = process_mapc(s, cq_offset);
            break;
        case GITS_CMD_MAPTI:
            result = process_mapti(s, data, cq_offset, false);
            break;
        case GITS_CMD_MAPI:
            result = process_mapti(s, data, cq_offset, true);
            break;
        case GITS_CMD_DISCARD:
            result = process_its_cmd(s, data, cq_offset, DISCARD);
            break;
        case GITS_CMD_INV:
        case GITS_CMD_INVALL:
            /*
             * Current implementation doesn't cache any ITS tables,
             * only the calculated LPI priority information. We only
             * need to trigger an LPI priority re-calculation to stay
             * in sync with LPI config table or pending table changes.
             */
            for (i = 0; i < s->gicv3->num_cpu; i++) {
                gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
            }
            break;
        default:
            break;
        }
        if (result == CMD_CONTINUE) {
            rd_offset++;
            rd_offset %= s->cq.num_entries;
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
        } else {
            /* CMD_STALL */
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: 0x%x cmd processing failed, stalling\n",
                          __func__, cmd);
            break;
        }
    }
}

/*
 * This function extracts the ITS Device and Collection table specific
 * parameters (such as base_addr and size) from the GITS_BASER registers.
 * It is called when the ITS is enabled and also during post_load migration.
 */
static void extract_table_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint8_t page_sz_type;
    uint8_t type;
    uint32_t page_sz = 0;
    uint64_t value;

    for (int i = 0; i < 8; i++) {
        TableDesc *td;
        int idbits;

        value = s->baser[i];

        if (!value) {
            continue;
        }

        page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);

        switch (page_sz_type) {
        case 0:
            page_sz = GITS_PAGE_SIZE_4K;
            break;

        case 1:
            page_sz = GITS_PAGE_SIZE_16K;
            break;

        case 2:
        case 3:
            page_sz = GITS_PAGE_SIZE_64K;
            break;

        default:
            g_assert_not_reached();
        }

        num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;

        type = FIELD_EX64(value, GITS_BASER, TYPE);

        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
            td = &s->dt;
            idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
            break;
        case GITS_BASER_TYPE_COLLECTION:
            td = &s->ct;
            if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
                idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
            } else {
                /* 16-bit CollectionId supported when CIL == 0 */
                idbits = 16;
            }
            break;
        default:
            /*
             * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
             * ensures we will only see type values corresponding to
             * the values set up in gicv3_its_reset().
             */
            g_assert_not_reached();
        }

        memset(td, 0, sizeof(*td));
        td->valid = FIELD_EX64(value, GITS_BASER, VALID);
        /*
         * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
         * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
         * do not have a special case where the GITS_BASER<n>.Valid bit is 0
         * for the register corresponding to the Collection table but we
         * still have to process interrupts using non-memory-backed
         * Collection table entries.)
         */
        if (!td->valid) {
            continue;
        }
        td->page_sz = page_sz;
        td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
        td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
        td->base_addr = baser_base_addr(value, page_sz);
        if (!td->indirect) {
            td->num_entries = (num_pages * page_sz) / td->entry_sz;
        } else {
            td->num_entries = (((num_pages * page_sz) /
                               L1TABLE_ENTRY_SIZE) *
                               (page_sz / td->entry_sz));
        }
        td->num_ids = 1ULL << idbits;
    }
}

static void extract_cmdq_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint64_t value = s->cbaser;

    num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;

    memset(&s->cq, 0, sizeof(s->cq));
    s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);

    if (s->cq.valid) {
        s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
                            GITS_CMDQ_ENTRY_SIZE;
        s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
        s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
    }
}
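
/*
 * Handle a write to the GITS_TRANSLATER register in the translation
 * frame: the data written is the EventID, and the DeviceID is taken
 * from the requester ID of the bus transaction. The write is ignored
 * unless the ITS is enabled.
 */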
static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
                                               uint64_t data, unsigned size,
                                               MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result = true;
    uint32_t devid = 0;

    switch (offset) {
    case GITS_TRANSLATER:
        if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
            devid = attrs.requester_id;
            result = process_its_cmd(s, data, devid, NONE);
        }
        break;
    default:
        break;
    }

    if (result) {
        return MEMTX_OK;
    } else {
        return MEMTX_ERROR;
    }
}

static bool its_writel(GICv3ITSState *s, hwaddr offset,
                       uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        if (value & R_GITS_CTLR_ENABLED_MASK) {
            s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
            extract_table_params(s);
            extract_cmdq_params(s);
            s->creadr = 0;
            process_cmdq(s);
        } else {
            s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice: the GITS_CBASER register becomes RO if the ITS
         * is already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 0, 32, value);
            s->creadr = 0;
            s->cwriter = s->creadr;
        }
        break;
    case GITS_CBASER + 4:
        /*
         * IMPDEF choice: the GITS_CBASER register becomes RO if the ITS
         * is already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 32, 32, value);
            s->creadr = 0;
            s->cwriter = s->creadr;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = deposit64(s->cwriter, 0, 32,
                               (value & ~R_GITS_CWRITER_RETRY_MASK));
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CWRITER + 4:
        s->cwriter = deposit64(s->cwriter, 32, 32, value);
        break;
    case GITS_CREADR:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 0, 32,
                                  (value & ~R_GITS_CREADR_STALLED_MASK));
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_CREADR + 4:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 32, 32, value);
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice: the GITS_BASERn registers become RO if the ITS
         * is already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;

            if (offset & 7) {
                value <<= 32;
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
                s->baser[index] |= value;
            } else {
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
                s->baser[index] |= value;
            }
        }
        break;
    case GITS_IIDR:
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static bool its_readl(GICv3ITSState *s, hwaddr offset,
                      uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        *data = s->ctlr;
        break;
    case GITS_IIDR:
        *data = gicv3_iidr();
        break;
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* ID registers */
        *data = gicv3_idreg(offset - GITS_IDREGS);
        break;
    case GITS_TYPER:
        *data = extract64(s->typer, 0, 32);
        break;
    case GITS_TYPER + 4:
        *data = extract64(s->typer, 32, 32);
        break;
    case GITS_CBASER:
        *data = extract64(s->cbaser, 0, 32);
        break;
    case GITS_CBASER + 4:
        *data = extract64(s->cbaser, 32, 32);
        break;
    case GITS_CREADR:
        *data = extract64(s->creadr, 0, 32);
        break;
    case GITS_CREADR + 4:
        *data = extract64(s->creadr, 32, 32);
        break;
    case GITS_CWRITER:
        *data = extract64(s->cwriter, 0, 32);
        break;
    case GITS_CWRITER + 4:
        *data = extract64(s->cwriter, 32, 32);
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        if (offset & 7) {
            *data = extract64(s->baser[index], 32, 32);
        } else {
            *data = extract64(s->baser[index], 0, 32);
        }
        break;
    default:
        result = false;
        break;
    }
    return result;
}
static bool its_writell(GICv3ITSState *s, hwaddr offset,
                        uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice: the GITS_BASERn registers become RO if the ITS
         * is already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;
            s->baser[index] &= GITS_BASER_RO_MASK;
            s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice: the GITS_CBASER register becomes RO if the ITS
         * is already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = value;
            s->creadr = 0;
            s->cwriter = s->creadr;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CREADR:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_TYPER:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static bool its_readll(GICv3ITSState *s, hwaddr offset,
                       uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_TYPER:
        *data = s->typer;
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        *data = s->baser[index];
        break;
    case GITS_CBASER:
        *data = s->cbaser;
        break;
    case GITS_CREADR:
        *data = s->creadr;
        break;
    case GITS_CWRITER:
        *data = s->cwriter;
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_readl(s, offset, data, attrs);
        break;
    case 8:
        result = its_readll(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        *data = 0;
    }
    return MEMTX_OK;
}

static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_writel(s, offset, data, attrs);
        break;
    case 8:
        result = its_writell(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
    }
    return MEMTX_OK;
}
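
/*
 * MMIO operations for the two ITS register frames: the control frame
 * (GITS_* registers) accepts 4- and 8-byte accesses, while the
 * translation frame only implements writes to GITS_TRANSLATER, which
 * may be 2 or 4 bytes wide.
 */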
static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps gicv3_its_translation_ops = {
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    int i;

    for (i = 0; i < s->gicv3->num_cpu; i++) {
        if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
            error_setg(errp, "Physical LPI not supported by CPU %d", i);
            return;
        }
    }

    gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);

    address_space_init(&s->gicv3->dma_as, s->gicv3->dma,
                       "gicv3-its-sysmem");

    /* set the ITS default features supported */
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
                          ITS_ITT_ENTRY_SIZE - 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
}

static void gicv3_its_reset(DeviceState *dev)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);

    c->parent_reset(dev);

    /* Quiescent bit reset to 1 */
    s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);

    /*
     * setting GITS_BASER0.Type = 0b001 (Device)
     *         GITS_BASER1.Type = 0b100 (Collection Table)
     *         GITS_BASER<n>.Type, where n = 2 to 7, to 0b000 (Unimplemented)
     *         GITS_BASER<0,1>.Page_Size = 64KB
     * and default translation table entry size to 16 bytes
     */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_DEVICE);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
                             GITS_DTE_SIZE - 1);

    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_COLLECTION);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
                             GITS_CTE_SIZE - 1);
}
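
/*
 * Called after migration: if the ITS was enabled in the incoming state,
 * re-derive the cached table and command queue parameters from the
 * migrated GITS_BASER<n> and GITS_CBASER values.
 */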
static void gicv3_its_post_load(GICv3ITSState *s)
{
    if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
        extract_table_params(s);
        extract_cmdq_params(s);
    }
}

static Property gicv3_its_props[] = {
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};

static void gicv3_its_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
    GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);

    dc->realize = gicv3_arm_its_realize;
    device_class_set_props(dc, gicv3_its_props);
    device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
    icc->post_load = gicv3_its_post_load;
}

static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};

static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)