/*
 * ITS emulation for a GICv3-based system
 *
 * Copyright Linaro.org 2021
 *
 * Authors:
 *  Shashi Mallela <shashi.mallela@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/qdev-properties.h"
#include "hw/intc/arm_gicv3_its_common.h"
#include "gicv3_internal.h"
#include "qom/object.h"
#include "qapi/error.h"

typedef struct GICv3ITSClass GICv3ITSClass;
/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
                     ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)

struct GICv3ITSClass {
    GICv3ITSCommonClass parent_class;
    /* Parent class's reset handler, saved so gicv3_its_reset() can chain it */
    void (*parent_reset)(DeviceState *dev);
};

/*
 * This is an internal enum used to distinguish between LPI triggered
 * via command queue and LPI triggered via gits_translater write.
 */
typedef enum ItsCmdType {
    NONE = 0, /* internal indication for GITS_TRANSLATER write */
    CLEAR = 1,
    DISCARD = 2,
    INTERRUPT = 3,
} ItsCmdType;

/* In-memory Interrupt Translation Entry: 64-bit low word + 32-bit high word */
typedef struct {
    uint32_t iteh;
    uint64_t itel;
} IteEntry;

/*
 * Extract the physical base address encoded in a GITS_BASER<n> value.
 * The field layout depends on the programmed page size; unknown page
 * size encodings yield 0.
 */
static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
{
    uint64_t result = 0;

    switch (page_sz) {
    case GITS_PAGE_SIZE_4K:
    case GITS_PAGE_SIZE_16K:
        result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
        break;

    case GITS_PAGE_SIZE_64K:
        result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
        result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
        break;

    default:
        break;
    }
    return result;
}

/*
 * Read the Collection Table Entry for @icid into *@cte (flat or 2-level
 * table, per GITS_BASER programming) and return its VALID field.
 * NOTE(review): on an invalid L1 entry *cte is left untouched, so callers
 * must pre-initialise it (process_its_cmd() initialises cte to 0) — confirm.
 */
static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
                    MemTxResult *res)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t l2t_addr;
    uint64_t value;
    bool valid_l2t;
    uint32_t l2t_id;
    uint32_t num_l2_entries;

    if (s->ct.indirect) {
        /* 2-level table: locate the level-1 entry covering this icid */
        l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);

        value = address_space_ldq_le(as,
                                     s->ct.base_addr +
                                     (l2t_id * L1TABLE_ENTRY_SIZE),
                                     MEMTXATTRS_UNSPECIFIED, res);

        if (*res == MEMTX_OK) {
            valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;

            if (valid_l2t) {
                num_l2_entries = s->ct.page_sz / s->ct.entry_sz;

                l2t_addr = value & ((1ULL << 51) - 1);

                *cte = address_space_ldq_le(as, l2t_addr +
                                    ((icid % num_l2_entries) * GITS_CTE_SIZE),
                                    MEMTXATTRS_UNSPECIFIED, res);
            }
        }
    } else {
        /* Flat level table */
        *cte = address_space_ldq_le(as, s->ct.base_addr +
                                    (icid * GITS_CTE_SIZE),
                                    MEMTXATTRS_UNSPECIFIED, res);
    }

    return FIELD_EX64(*cte, CTE, VALID);
}

/*
 * Write interrupt translation entry @ite for @eventid into the ITT whose
 * base address is carried in @dte. Returns false on DMA error.
 */
static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
                       IteEntry ite)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t itt_addr;
    MemTxResult res = MEMTX_OK;

    itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
    itt_addr <<=
ITTADDR_SHIFT; /* 256 byte aligned */

    address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
                         sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED,
                         &res);

    if (res == MEMTX_OK) {
        /*
         * NOTE(review): this 32-bit iteh store lands at offset
         * + sizeof(uint32_t), overlapping the upper half of the 64-bit
         * itel just written. get_ite() reads it back at the same offset,
         * so the emulation is self-consistent, but the DOORBELL field of
         * itel is clobbered in guest memory — verify against the intended
         * 12-byte ITE layout.
         */
        address_space_stl_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
                             sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh,
                             MEMTXATTRS_UNSPECIFIED, &res);
    }
    if (res != MEMTX_OK) {
        return false;
    } else {
        return true;
    }
}

/*
 * Read the interrupt translation entry for @eventid from the ITT referenced
 * by @dte. On a valid physical-interrupt entry, fill in *@icid and *@pIntid
 * and return true; otherwise return false (DMA status in *@res).
 */
static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
                    uint16_t *icid, uint32_t *pIntid, MemTxResult *res)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t itt_addr;
    bool status = false;
    IteEntry ite = {};

    itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
    itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */

    ite.itel = address_space_ldq_le(as, itt_addr +
                                    (eventid * (sizeof(uint64_t) +
                                    sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED,
                                    res);

    if (*res == MEMTX_OK) {
        ite.iteh = address_space_ldl_le(as, itt_addr +
                                        (eventid * (sizeof(uint64_t) +
                                        sizeof(uint32_t))) + sizeof(uint32_t),
                                        MEMTXATTRS_UNSPECIFIED, res);

        if (*res == MEMTX_OK) {
            if (FIELD_EX64(ite.itel, ITE_L, VALID)) {
                int inttype = FIELD_EX64(ite.itel, ITE_L, INTTYPE);
                if (inttype == ITE_INTTYPE_PHYSICAL) {
                    *pIntid = FIELD_EX64(ite.itel, ITE_L, INTID);
                    *icid = FIELD_EX32(ite.iteh, ITE_H, ICID);
                    status = true;
                }
            }
        }
    }
    return status;
}

/*
 * Read and return the Device Table Entry for @devid (flat or 2-level
 * table, per GITS_BASER programming). DMA status is reported in *@res.
 */
static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t l2t_addr;
    uint64_t value;
    bool valid_l2t;
    uint32_t l2t_id;
    uint32_t num_l2_entries;

    if (s->dt.indirect) {
        /* 2-level table: locate the level-1 entry covering this devid */
        l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);

        value = address_space_ldq_le(as,
                                     s->dt.base_addr +
                                     (l2t_id * L1TABLE_ENTRY_SIZE),
                                     MEMTXATTRS_UNSPECIFIED, res);

        if (*res ==
MEMTX_OK) { 190 valid_l2t = (value & L2_TABLE_VALID_MASK) != 0; 191 192 if (valid_l2t) { 193 num_l2_entries = s->dt.page_sz / s->dt.entry_sz; 194 195 l2t_addr = value & ((1ULL << 51) - 1); 196 197 value = address_space_ldq_le(as, l2t_addr + 198 ((devid % num_l2_entries) * GITS_DTE_SIZE), 199 MEMTXATTRS_UNSPECIFIED, res); 200 } 201 } 202 } else { 203 /* Flat level table */ 204 value = address_space_ldq_le(as, s->dt.base_addr + 205 (devid * GITS_DTE_SIZE), 206 MEMTXATTRS_UNSPECIFIED, res); 207 } 208 209 return value; 210 } 211 212 /* 213 * This function handles the processing of following commands based on 214 * the ItsCmdType parameter passed:- 215 * 1. triggering of lpi interrupt translation via ITS INT command 216 * 2. triggering of lpi interrupt translation via gits_translater register 217 * 3. handling of ITS CLEAR command 218 * 4. handling of ITS DISCARD command 219 */ 220 static bool process_its_cmd(GICv3ITSState *s, uint64_t value, uint32_t offset, 221 ItsCmdType cmd) 222 { 223 AddressSpace *as = &s->gicv3->dma_as; 224 uint32_t devid, eventid; 225 MemTxResult res = MEMTX_OK; 226 bool dte_valid; 227 uint64_t dte = 0; 228 uint32_t max_eventid; 229 uint16_t icid = 0; 230 uint32_t pIntid = 0; 231 bool ite_valid = false; 232 uint64_t cte = 0; 233 bool cte_valid = false; 234 bool result = false; 235 uint64_t rdbase; 236 237 if (cmd == NONE) { 238 devid = offset; 239 } else { 240 devid = ((value & DEVID_MASK) >> DEVID_SHIFT); 241 242 offset += NUM_BYTES_IN_DW; 243 value = address_space_ldq_le(as, s->cq.base_addr + offset, 244 MEMTXATTRS_UNSPECIFIED, &res); 245 } 246 247 if (res != MEMTX_OK) { 248 return result; 249 } 250 251 eventid = (value & EVENTID_MASK); 252 253 dte = get_dte(s, devid, &res); 254 255 if (res != MEMTX_OK) { 256 return result; 257 } 258 dte_valid = FIELD_EX64(dte, DTE, VALID); 259 260 if (dte_valid) { 261 max_eventid = 1UL << (FIELD_EX64(dte, DTE, SIZE) + 1); 262 263 ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res); 264 265 if (res != 
MEMTX_OK) { 266 return result; 267 } 268 269 if (ite_valid) { 270 cte_valid = get_cte(s, icid, &cte, &res); 271 } 272 273 if (res != MEMTX_OK) { 274 return result; 275 } 276 } else { 277 qemu_log_mask(LOG_GUEST_ERROR, 278 "%s: invalid command attributes: " 279 "invalid dte: %"PRIx64" for %d (MEM_TX: %d)\n", 280 __func__, dte, devid, res); 281 return result; 282 } 283 284 285 /* 286 * In this implementation, in case of guest errors we ignore the 287 * command and move onto the next command in the queue. 288 */ 289 if (devid >= s->dt.num_ids) { 290 qemu_log_mask(LOG_GUEST_ERROR, 291 "%s: invalid command attributes: devid %d>=%d", 292 __func__, devid, s->dt.num_ids); 293 294 } else if (!dte_valid || !ite_valid || !cte_valid) { 295 qemu_log_mask(LOG_GUEST_ERROR, 296 "%s: invalid command attributes: " 297 "dte: %s, ite: %s, cte: %s\n", 298 __func__, 299 dte_valid ? "valid" : "invalid", 300 ite_valid ? "valid" : "invalid", 301 cte_valid ? "valid" : "invalid"); 302 } else if (eventid > max_eventid) { 303 qemu_log_mask(LOG_GUEST_ERROR, 304 "%s: invalid command attributes: eventid %d > %d\n", 305 __func__, eventid, max_eventid); 306 } else { 307 /* 308 * Current implementation only supports rdbase == procnum 309 * Hence rdbase physical address is ignored 310 */ 311 rdbase = FIELD_EX64(cte, CTE, RDBASE); 312 313 if (rdbase >= s->gicv3->num_cpu) { 314 return result; 315 } 316 317 if ((cmd == CLEAR) || (cmd == DISCARD)) { 318 gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0); 319 } else { 320 gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1); 321 } 322 323 if (cmd == DISCARD) { 324 IteEntry ite = {}; 325 /* remove mapping from interrupt translation table */ 326 result = update_ite(s, eventid, dte, ite); 327 } 328 } 329 330 return result; 331 } 332 333 static bool process_mapti(GICv3ITSState *s, uint64_t value, uint32_t offset, 334 bool ignore_pInt) 335 { 336 AddressSpace *as = &s->gicv3->dma_as; 337 uint32_t devid, eventid; 338 uint32_t pIntid = 0; 339 
uint32_t max_eventid, max_Intid; 340 bool dte_valid; 341 MemTxResult res = MEMTX_OK; 342 uint16_t icid = 0; 343 uint64_t dte = 0; 344 bool result = false; 345 346 devid = ((value & DEVID_MASK) >> DEVID_SHIFT); 347 offset += NUM_BYTES_IN_DW; 348 value = address_space_ldq_le(as, s->cq.base_addr + offset, 349 MEMTXATTRS_UNSPECIFIED, &res); 350 351 if (res != MEMTX_OK) { 352 return result; 353 } 354 355 eventid = (value & EVENTID_MASK); 356 357 if (ignore_pInt) { 358 pIntid = eventid; 359 } else { 360 pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT); 361 } 362 363 offset += NUM_BYTES_IN_DW; 364 value = address_space_ldq_le(as, s->cq.base_addr + offset, 365 MEMTXATTRS_UNSPECIFIED, &res); 366 367 if (res != MEMTX_OK) { 368 return result; 369 } 370 371 icid = value & ICID_MASK; 372 373 dte = get_dte(s, devid, &res); 374 375 if (res != MEMTX_OK) { 376 return result; 377 } 378 dte_valid = FIELD_EX64(dte, DTE, VALID); 379 max_eventid = 1UL << (FIELD_EX64(dte, DTE, SIZE) + 1); 380 max_Intid = (1ULL << (GICD_TYPER_IDBITS + 1)) - 1; 381 382 if ((devid >= s->dt.num_ids) || (icid >= s->ct.num_ids) 383 || !dte_valid || (eventid > max_eventid) || 384 (((pIntid < GICV3_LPI_INTID_START) || (pIntid > max_Intid)) && 385 (pIntid != INTID_SPURIOUS))) { 386 qemu_log_mask(LOG_GUEST_ERROR, 387 "%s: invalid command attributes " 388 "devid %d or icid %d or eventid %d or pIntid %d or" 389 "unmapped dte %d\n", __func__, devid, icid, eventid, 390 pIntid, dte_valid); 391 /* 392 * in this implementation, in case of error 393 * we ignore this command and move onto the next 394 * command in the queue 395 */ 396 } else { 397 /* add ite entry to interrupt translation table */ 398 IteEntry ite = {}; 399 ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, dte_valid); 400 ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL); 401 ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, pIntid); 402 ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS); 403 ite.iteh = FIELD_DP32(ite.iteh, ITE_H, 
ICID, icid);

        result = update_ite(s, eventid, dte, ite);
    }

    return result;
}

/*
 * Write (or, with @valid == false, invalidate) the Collection Table Entry
 * for @icid, mapping the collection to redistributor @rdbase.
 * Returns false only on DMA error; a disabled/invalid CT is a no-op success.
 */
static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
                       uint64_t rdbase)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t value;
    uint64_t l2t_addr;
    bool valid_l2t;
    uint32_t l2t_id;
    uint32_t num_l2_entries;
    uint64_t cte = 0;
    MemTxResult res = MEMTX_OK;

    if (!s->ct.valid) {
        return true;
    }

    if (valid) {
        /* add mapping entry to collection table */
        cte = FIELD_DP64(cte, CTE, VALID, 1);
        cte = FIELD_DP64(cte, CTE, RDBASE, rdbase);
    }

    /*
     * The specification defines the format of level 1 entries of a
     * 2-level table, but the format of level 2 entries and the format
     * of flat-mapped tables is IMPDEF.
     */
    if (s->ct.indirect) {
        l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);

        value = address_space_ldq_le(as,
                                     s->ct.base_addr +
                                     (l2t_id * L1TABLE_ENTRY_SIZE),
                                     MEMTXATTRS_UNSPECIFIED, &res);

        if (res != MEMTX_OK) {
            return false;
        }

        valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;

        if (valid_l2t) {
            num_l2_entries = s->ct.page_sz / s->ct.entry_sz;

            l2t_addr = value & ((1ULL << 51) - 1);

            address_space_stq_le(as, l2t_addr +
                                 ((icid % num_l2_entries) * GITS_CTE_SIZE),
                                 cte, MEMTXATTRS_UNSPECIFIED, &res);
        }
    } else {
        /* Flat level table */
        address_space_stq_le(as, s->ct.base_addr + (icid * GITS_CTE_SIZE),
                             cte, MEMTXATTRS_UNSPECIFIED, &res);
    }
    if (res != MEMTX_OK) {
        return false;
    } else {
        return true;
    }
}

/*
 * Handle MAPC: map a collection (ICID) to a redistributor.
 * @offset points at the first doubleword of the command in the queue.
 */
static bool process_mapc(GICv3ITSState *s, uint32_t offset)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint16_t icid;
    uint64_t rdbase;
    bool valid;
    MemTxResult res = MEMTX_OK;
    bool result = false;
    uint64_t value;

    /* ICID/RDbase/V live in the third doubleword of the command */
    offset += NUM_BYTES_IN_DW;
    offset += NUM_BYTES_IN_DW;

    value =
address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return result;
    }

    icid = value & ICID_MASK;

    /* Only the processor number portion of RDbase is used here */
    rdbase = (value & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
    rdbase &= RDBASE_PROCNUM_MASK;

    valid = (value & CMD_FIELD_VALID_MASK);

    if ((icid >= s->ct.num_ids) || (rdbase >= s->gicv3->num_cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPC: invalid collection table attributes "
                      "icid %d rdbase %" PRIu64 "\n", icid, rdbase);
        /*
         * in this implementation, in case of error
         * we ignore this command and move onto the next
         * command in the queue
         */
    } else {
        result = update_cte(s, icid, valid, rdbase);
    }

    return result;
}

/*
 * Write (or, with @valid == false, invalidate) the Device Table Entry for
 * @devid, recording the ITT address and event-ID size.
 * Returns false only on DMA error; a disabled/invalid DT is a no-op success.
 */
static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
                       uint8_t size, uint64_t itt_addr)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t value;
    uint64_t l2t_addr;
    bool valid_l2t;
    uint32_t l2t_id;
    uint32_t num_l2_entries;
    uint64_t dte = 0;
    MemTxResult res = MEMTX_OK;

    if (s->dt.valid) {
        if (valid) {
            /* add mapping entry to device table */
            dte = FIELD_DP64(dte, DTE, VALID, 1);
            dte = FIELD_DP64(dte, DTE, SIZE, size);
            dte = FIELD_DP64(dte, DTE, ITTADDR, itt_addr);
        }
    } else {
        return true;
    }

    /*
     * The specification defines the format of level 1 entries of a
     * 2-level table, but the format of level 2 entries and the format
     * of flat-mapped tables is IMPDEF.
     */
    if (s->dt.indirect) {
        l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);

        value = address_space_ldq_le(as,
                                     s->dt.base_addr +
                                     (l2t_id * L1TABLE_ENTRY_SIZE),
                                     MEMTXATTRS_UNSPECIFIED, &res);

        if (res != MEMTX_OK) {
            return false;
        }

        valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;

        if (valid_l2t) {
            num_l2_entries = s->dt.page_sz / s->dt.entry_sz;

            l2t_addr = value & ((1ULL << 51) - 1);

            address_space_stq_le(as, l2t_addr +
                                 ((devid % num_l2_entries) * GITS_DTE_SIZE),
                                 dte, MEMTXATTRS_UNSPECIFIED, &res);
        }
    } else {
        /* Flat level table */
        address_space_stq_le(as, s->dt.base_addr + (devid * GITS_DTE_SIZE),
                             dte, MEMTXATTRS_UNSPECIFIED, &res);
    }
    if (res != MEMTX_OK) {
        return false;
    } else {
        return true;
    }
}

/*
 * Handle MAPD: map a DeviceID to an ITT (address + event-ID size).
 * @value holds the first doubleword; the rest is fetched from the queue.
 */
static bool process_mapd(GICv3ITSState *s, uint64_t value, uint32_t offset)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t devid;
    uint8_t size;
    uint64_t itt_addr;
    bool valid;
    MemTxResult res = MEMTX_OK;
    bool result = false;

    devid = ((value & DEVID_MASK) >> DEVID_SHIFT);

    /* Second doubleword: Size field */
    offset += NUM_BYTES_IN_DW;
    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return result;
    }

    size = (value & SIZE_MASK);

    /* Third doubleword: ITT address and Valid bit */
    offset += NUM_BYTES_IN_DW;
    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return result;
    }

    itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT;

    valid = (value & CMD_FIELD_VALID_MASK);

    if ((devid >= s->dt.num_ids) ||
        (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPD: invalid device table attributes "
                      "devid %d or size %d\n", devid, size);
        /*
         * in this implementation, in case of error
         * we ignore this command and
move onto the next
         * command in the queue
         */
    } else {
        result = update_dte(s, devid, valid, size, itt_addr);
    }

    return result;
}

/*
 * Drain the command queue from CREADR up to CWRITER.
 * Current implementation blocks until all
 * commands are processed
 */
static void process_cmdq(GICv3ITSState *s)
{
    uint32_t wr_offset = 0;
    uint32_t rd_offset = 0;
    uint32_t cq_offset = 0;
    uint64_t data;
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    bool result = true;
    uint8_t cmd;
    int i;

    if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
        return;
    }

    wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);

    if (wr_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid write offset "
                      "%d\n", __func__, wr_offset);
        return;
    }

    rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);

    if (rd_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid read offset "
                      "%d\n", __func__, rd_offset);
        return;
    }

    while (wr_offset != rd_offset) {
        cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
        data = address_space_ldq_le(as, s->cq.base_addr + cq_offset,
                                    MEMTXATTRS_UNSPECIFIED, &res);
        if (res != MEMTX_OK) {
            result = false;
        }
        cmd = (data & CMD_MASK);

        switch (cmd) {
        case GITS_CMD_INT:
            /*
             * NOTE(review): process_its_cmd() returns bool but is assigned
             * to 'res' (MemTxResult) here and for CLEAR below, so failures
             * of these commands never clear 'result' and cannot stall the
             * queue; 'res' itself is overwritten by the next queue read.
             * This should be fixed together with the return-value
             * convention of process_its_cmd() — confirm intended behaviour.
             */
            res = process_its_cmd(s, data, cq_offset, INTERRUPT);
            break;
        case GITS_CMD_CLEAR:
            res = process_its_cmd(s, data, cq_offset, CLEAR);
            break;
        case GITS_CMD_SYNC:
            /*
             * Current implementation makes a blocking synchronous call
             * for every command issued earlier, hence the internal state
             * is already consistent by the time SYNC command is executed.
             * Hence no further processing is required for SYNC command.
             */
            break;
        case GITS_CMD_MAPD:
            result = process_mapd(s, data, cq_offset);
            break;
        case GITS_CMD_MAPC:
            result = process_mapc(s, cq_offset);
            break;
        case GITS_CMD_MAPTI:
            result = process_mapti(s, data, cq_offset, false);
            break;
        case GITS_CMD_MAPI:
            result = process_mapti(s, data, cq_offset, true);
            break;
        case GITS_CMD_DISCARD:
            result = process_its_cmd(s, data, cq_offset, DISCARD);
            break;
        case GITS_CMD_INV:
        case GITS_CMD_INVALL:
            /*
             * Current implementation doesn't cache any ITS tables,
             * but the calculated lpi priority information. We only
             * need to trigger lpi priority re-calculation to be in
             * sync with LPI config table or pending table changes.
             */
            for (i = 0; i < s->gicv3->num_cpu; i++) {
                gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
            }
            break;
        default:
            break;
        }
        if (result) {
            /* Command consumed: advance CREADR (wrapping at queue size) */
            rd_offset++;
            rd_offset %= s->cq.num_entries;
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
        } else {
            /*
             * in this implementation, in case of dma read/write error
             * we stall the command processing
             */
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: %x cmd processing failed\n", __func__, cmd);
            break;
        }
    }
}

/*
 * This function extracts the ITS Device and Collection table specific
 * parameters (like base_addr, size etc) from GITS_BASER register.
 * It is called during ITS enable and also during post_load migration
 */
static void extract_table_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint8_t page_sz_type;
    uint8_t type;
    uint32_t page_sz = 0;
    uint64_t value;

    /* One GITS_BASER<n> register per potential table, n = 0..7 */
    for (int i = 0; i < 8; i++) {
        TableDesc *td;
        int idbits;

        value = s->baser[i];

        if (!value) {
            continue;
        }

        page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);

        switch (page_sz_type) {
        case 0:
            page_sz = GITS_PAGE_SIZE_4K;
            break;

        case 1:
            page_sz = GITS_PAGE_SIZE_16K;
            break;

        case 2:
        case 3:
            /* Encoding 3 is reserved and behaves as 64K */
            page_sz = GITS_PAGE_SIZE_64K;
            break;

        default:
            g_assert_not_reached();
        }

        num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;

        type = FIELD_EX64(value, GITS_BASER, TYPE);

        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
            td = &s->dt;
            idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
            break;
        case GITS_BASER_TYPE_COLLECTION:
            td = &s->ct;
            if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
                idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
            } else {
                /* 16-bit CollectionId supported when CIL == 0 */
                idbits = 16;
            }
            break;
        default:
            /*
             * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
             * ensures we will only see type values corresponding to
             * the values set up in gicv3_its_reset().
             */
            g_assert_not_reached();
        }

        memset(td, 0, sizeof(*td));
        td->valid = FIELD_EX64(value, GITS_BASER, VALID);
        /*
         * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
         * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
         * do not have a special case where the GITS_BASER<n>.Valid bit is 0
         * for the register corresponding to the Collection table but we
         * still have to process interrupts using non-memory-backed
         * Collection table entries.)
         */
        if (!td->valid) {
            continue;
        }
        td->page_sz = page_sz;
        td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
        td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
        td->base_addr = baser_base_addr(value, page_sz);
        if (!td->indirect) {
            td->num_entries = (num_pages * page_sz) / td->entry_sz;
        } else {
            /* 2-level: each L1 entry covers one page of L2 entries */
            td->num_entries = (((num_pages * page_sz) /
                                L1TABLE_ENTRY_SIZE) *
                               (page_sz / td->entry_sz));
        }
        td->num_ids = 1ULL << idbits;
    }
}

/*
 * Extract the command queue parameters (validity, size, base address)
 * from GITS_CBASER into s->cq.
 */
static void extract_cmdq_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint64_t value = s->cbaser;

    num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;

    memset(&s->cq, 0 , sizeof(s->cq));
    s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);

    if (s->cq.valid) {
        /* The command queue is always allocated in 4K pages */
        s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
                            GITS_CMDQ_ENTRY_SIZE;
        s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
        s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
    }
}

/*
 * MMIO write handler for the translation register frame: a write to
 * GITS_TRANSLATER triggers an LPI, using the bus requester ID as DeviceID.
 */
static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
                                               uint64_t data, unsigned size,
                                               MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result = true;
    uint32_t devid = 0;

    switch (offset) {
    case GITS_TRANSLATER:
        if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
            devid = attrs.requester_id;
            result = process_its_cmd(s, data, devid, NONE);
        }
        break;
    default:
        break;
    }

    if (result) {
        return MEMTX_OK;
    } else {
        return MEMTX_ERROR;
    }
}

/* Handle a 32-bit write of an ITS control register; false if unhandled */
static bool its_writel(GICv3ITSState *s, hwaddr offset,
                       uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        if (value & R_GITS_CTLR_ENABLED_MASK) {
            /* Enabling the ITS caches table/queue params and drains the CQ */
            s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
            extract_table_params(s);
            extract_cmdq_params(s);
            s->creadr = 0;
            process_cmdq(s);
        } else {
            s->ctlr &=
~R_GITS_CTLR_ENABLED_MASK;
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 0, 32, value);
            s->creadr = 0;
            s->cwriter = s->creadr;
        }
        break;
    case GITS_CBASER + 4:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 32, 32, value);
            s->creadr = 0;
            s->cwriter = s->creadr;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = deposit64(s->cwriter, 0, 32,
                               (value & ~R_GITS_CWRITER_RETRY_MASK));
        /* Advancing CWRITER past CREADR kicks off command processing */
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CWRITER + 4:
        s->cwriter = deposit64(s->cwriter, 32, 32, value);
        break;
    case GITS_CREADR:
        /* CREADR is writable only when GICD_CTLR.DS == 1 */
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 0, 32,
                                  (value & ~R_GITS_CREADR_STALLED_MASK));
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_CREADR + 4:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 32, 32, value);
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_BASER ...
GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;

            if (offset & 7) {
                /* Write to the upper 32 bits of a GITS_BASER<n> */
                value <<= 32;
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
                s->baser[index] |= value;
            } else {
                /* Write to the lower 32 bits of a GITS_BASER<n> */
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
                s->baser[index] |= value;
            }
        }
        break;
    case GITS_IIDR:
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}

/* Handle a 32-bit read of an ITS control register; false if unhandled */
static bool its_readl(GICv3ITSState *s, hwaddr offset,
                      uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        *data = s->ctlr;
        break;
    case GITS_IIDR:
        *data = gicv3_iidr();
        break;
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* ID registers */
        *data = gicv3_idreg(offset - GITS_IDREGS);
        break;
    case GITS_TYPER:
        *data = extract64(s->typer, 0, 32);
        break;
    case GITS_TYPER + 4:
        *data = extract64(s->typer, 32, 32);
        break;
    case GITS_CBASER:
        *data = extract64(s->cbaser, 0, 32);
        break;
    case GITS_CBASER + 4:
        *data = extract64(s->cbaser, 32, 32);
        break;
    case GITS_CREADR:
        *data = extract64(s->creadr, 0, 32);
        break;
    case GITS_CREADR + 4:
        *data = extract64(s->creadr, 32, 32);
        break;
    case GITS_CWRITER:
        *data = extract64(s->cwriter, 0, 32);
        break;
    case GITS_CWRITER + 4:
        *data = extract64(s->cwriter, 32, 32);
        break;
    case GITS_BASER ...
GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        if (offset & 7) {
            *data = extract64(s->baser[index], 32, 32);
        } else {
            *data = extract64(s->baser[index], 0, 32);
        }
        break;
    default:
        result = false;
        break;
    }
    return result;
}

/* Handle a 64-bit write of an ITS control register; false if unhandled */
static bool its_writell(GICv3ITSState *s, hwaddr offset,
                        uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;
            s->baser[index] &= GITS_BASER_RO_MASK;
            s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = value;
            s->creadr = 0;
            s->cwriter = s->creadr;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
        /* Advancing CWRITER past CREADR kicks off command processing */
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CREADR:
        /* CREADR is writable only when GICD_CTLR.DS == 1 */
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_TYPER:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}

/* Handle a 64-bit read of an ITS control register; false if unhandled */
static bool its_readll(GICv3ITSState *s, hwaddr offset,
                       uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_TYPER:
        *data = s->typer;
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        *data = s->baser[index];
        break;
    case GITS_CBASER:
        *data = s->cbaser;
        break;
    case GITS_CREADR:
        *data = s->creadr;
        break;
    case GITS_CWRITER:
        *data = s->cwriter;
        break;
    default:
        result = false;
        break;
    }
    return result;
}

/* MMIO read dispatcher for the ITS control register frame */
static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_readl(s, offset, data, attrs);
        break;
    case 8:
        result = its_readll(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " TARGET_FMT_plx
                      "size %u\n", __func__, offset, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        *data = 0;
    }
    return MEMTX_OK;
}

/* MMIO write dispatcher for the ITS control register frame */
static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_writel(s, offset, data, attrs);
        break;
    case 8:
        result = its_writell(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " TARGET_FMT_plx
                      "size %u\n", __func__, offset, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
    }
    return MEMTX_OK;
}

static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps gicv3_its_translation_ops = {
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/*
 * Realize: check that every CPU's redistributor supports physical LPIs,
 * map the MMIO regions, set up the DMA address space and advertise the
 * default ITS features in GITS_TYPER.
 */
static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    int i;

    for (i = 0; i < s->gicv3->num_cpu; i++) {
        if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
            error_setg(errp, "Physical LPI not supported by CPU %d", i);
            return;
        }
    }

    gicv3_its_init_mmio(s, &gicv3_its_control_ops,
&gicv3_its_translation_ops);

    address_space_init(&s->gicv3->dma_as, s->gicv3->dma,
                       "gicv3-its-sysmem");

    /* set the ITS default features supported */
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
                          ITS_ITT_ENTRY_SIZE - 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
}

/* Device reset: chain to the parent reset, then apply our reset values */
static void gicv3_its_reset(DeviceState *dev)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);

    c->parent_reset(dev);

    /* Quiescent bit reset to 1 */
    s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);

    /*
     * setting GITS_BASER0.Type = 0b001 (Device)
     *         GITS_BASER1.Type = 0b100 (Collection Table)
     *         GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented)
     *         GITS_BASER<0,1>.Page_Size = 64KB
     * and default translation table entry size to 16 bytes
     */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_DEVICE);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
                             GITS_DTE_SIZE - 1);

    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_COLLECTION);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
                             GITS_CTE_SIZE - 1);
}

/* Re-derive cached table/queue parameters after migration if ITS enabled */
static void gicv3_its_post_load(GICv3ITSState *s)
{
    if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
        extract_table_params(s);
        extract_cmdq_params(s);
    }
}

static Property
gicv3_its_props[] = {
    /* Link to the GICv3 this ITS translates interrupts for */
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};

static void gicv3_its_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
    GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);

    dc->realize = gicv3_arm_its_realize;
    device_class_set_props(dc, gicv3_its_props);
    device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
    icc->post_load = gicv3_its_post_load;
}

static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};

static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)