1 /* 2 * ITS emulation for a GICv3-based system 3 * 4 * Copyright Linaro.org 2021 5 * 6 * Authors: 7 * Shashi Mallela <shashi.mallela@linaro.org> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2 or (at your 10 * option) any later version. See the COPYING file in the top-level directory. 11 * 12 */ 13 14 #include "qemu/osdep.h" 15 #include "qemu/log.h" 16 #include "hw/qdev-properties.h" 17 #include "hw/intc/arm_gicv3_its_common.h" 18 #include "gicv3_internal.h" 19 #include "qom/object.h" 20 #include "qapi/error.h" 21 22 typedef struct GICv3ITSClass GICv3ITSClass; 23 /* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */ 24 DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass, 25 ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS) 26 27 struct GICv3ITSClass { 28 GICv3ITSCommonClass parent_class; 29 void (*parent_reset)(DeviceState *dev); 30 }; 31 32 /* 33 * This is an internal enum used to distinguish between LPI triggered 34 * via command queue and LPI triggered via gits_translater write. 
35 */ 36 typedef enum ItsCmdType { 37 NONE = 0, /* internal indication for GITS_TRANSLATER write */ 38 CLEAR = 1, 39 DISCARD = 2, 40 INTERRUPT = 3, 41 } ItsCmdType; 42 43 typedef struct { 44 uint32_t iteh; 45 uint64_t itel; 46 } IteEntry; 47 48 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz) 49 { 50 uint64_t result = 0; 51 52 switch (page_sz) { 53 case GITS_PAGE_SIZE_4K: 54 case GITS_PAGE_SIZE_16K: 55 result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12; 56 break; 57 58 case GITS_PAGE_SIZE_64K: 59 result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16; 60 result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48; 61 break; 62 63 default: 64 break; 65 } 66 return result; 67 } 68 69 static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte, 70 MemTxResult *res) 71 { 72 AddressSpace *as = &s->gicv3->dma_as; 73 uint64_t l2t_addr; 74 uint64_t value; 75 bool valid_l2t; 76 uint32_t l2t_id; 77 uint32_t max_l2_entries; 78 79 if (s->ct.indirect) { 80 l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE); 81 82 value = address_space_ldq_le(as, 83 s->ct.base_addr + 84 (l2t_id * L1TABLE_ENTRY_SIZE), 85 MEMTXATTRS_UNSPECIFIED, res); 86 87 if (*res == MEMTX_OK) { 88 valid_l2t = (value & L2_TABLE_VALID_MASK) != 0; 89 90 if (valid_l2t) { 91 max_l2_entries = s->ct.page_sz / s->ct.entry_sz; 92 93 l2t_addr = value & ((1ULL << 51) - 1); 94 95 *cte = address_space_ldq_le(as, l2t_addr + 96 ((icid % max_l2_entries) * GITS_CTE_SIZE), 97 MEMTXATTRS_UNSPECIFIED, res); 98 } 99 } 100 } else { 101 /* Flat level table */ 102 *cte = address_space_ldq_le(as, s->ct.base_addr + 103 (icid * GITS_CTE_SIZE), 104 MEMTXATTRS_UNSPECIFIED, res); 105 } 106 107 return (*cte & TABLE_ENTRY_VALID_MASK) != 0; 108 } 109 110 static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte, 111 IteEntry ite) 112 { 113 AddressSpace *as = &s->gicv3->dma_as; 114 uint64_t itt_addr; 115 MemTxResult res = MEMTX_OK; 116 117 itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> 
GITS_DTE_ITTADDR_SHIFT; 118 itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */ 119 120 address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) + 121 sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED, 122 &res); 123 124 if (res == MEMTX_OK) { 125 address_space_stl_le(as, itt_addr + (eventid * (sizeof(uint64_t) + 126 sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh, 127 MEMTXATTRS_UNSPECIFIED, &res); 128 } 129 if (res != MEMTX_OK) { 130 return false; 131 } else { 132 return true; 133 } 134 } 135 136 static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte, 137 uint16_t *icid, uint32_t *pIntid, MemTxResult *res) 138 { 139 AddressSpace *as = &s->gicv3->dma_as; 140 uint64_t itt_addr; 141 bool status = false; 142 IteEntry ite = {}; 143 144 itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> GITS_DTE_ITTADDR_SHIFT; 145 itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */ 146 147 ite.itel = address_space_ldq_le(as, itt_addr + 148 (eventid * (sizeof(uint64_t) + 149 sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED, 150 res); 151 152 if (*res == MEMTX_OK) { 153 ite.iteh = address_space_ldl_le(as, itt_addr + 154 (eventid * (sizeof(uint64_t) + 155 sizeof(uint32_t))) + sizeof(uint32_t), 156 MEMTXATTRS_UNSPECIFIED, res); 157 158 if (*res == MEMTX_OK) { 159 if (FIELD_EX64(ite.itel, ITE_L, VALID)) { 160 int inttype = FIELD_EX64(ite.itel, ITE_L, INTTYPE); 161 if (inttype == ITE_INTTYPE_PHYSICAL) { 162 *pIntid = FIELD_EX64(ite.itel, ITE_L, INTID); 163 *icid = FIELD_EX32(ite.iteh, ITE_H, ICID); 164 status = true; 165 } 166 } 167 } 168 } 169 return status; 170 } 171 172 static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res) 173 { 174 AddressSpace *as = &s->gicv3->dma_as; 175 uint64_t l2t_addr; 176 uint64_t value; 177 bool valid_l2t; 178 uint32_t l2t_id; 179 uint32_t max_l2_entries; 180 181 if (s->dt.indirect) { 182 l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE); 183 184 value = address_space_ldq_le(as, 185 s->dt.base_addr + 186 (l2t_id * 
L1TABLE_ENTRY_SIZE), 187 MEMTXATTRS_UNSPECIFIED, res); 188 189 if (*res == MEMTX_OK) { 190 valid_l2t = (value & L2_TABLE_VALID_MASK) != 0; 191 192 if (valid_l2t) { 193 max_l2_entries = s->dt.page_sz / s->dt.entry_sz; 194 195 l2t_addr = value & ((1ULL << 51) - 1); 196 197 value = address_space_ldq_le(as, l2t_addr + 198 ((devid % max_l2_entries) * GITS_DTE_SIZE), 199 MEMTXATTRS_UNSPECIFIED, res); 200 } 201 } 202 } else { 203 /* Flat level table */ 204 value = address_space_ldq_le(as, s->dt.base_addr + 205 (devid * GITS_DTE_SIZE), 206 MEMTXATTRS_UNSPECIFIED, res); 207 } 208 209 return value; 210 } 211 212 /* 213 * This function handles the processing of following commands based on 214 * the ItsCmdType parameter passed:- 215 * 1. triggering of lpi interrupt translation via ITS INT command 216 * 2. triggering of lpi interrupt translation via gits_translater register 217 * 3. handling of ITS CLEAR command 218 * 4. handling of ITS DISCARD command 219 */ 220 static bool process_its_cmd(GICv3ITSState *s, uint64_t value, uint32_t offset, 221 ItsCmdType cmd) 222 { 223 AddressSpace *as = &s->gicv3->dma_as; 224 uint32_t devid, eventid; 225 MemTxResult res = MEMTX_OK; 226 bool dte_valid; 227 uint64_t dte = 0; 228 uint32_t max_eventid; 229 uint16_t icid = 0; 230 uint32_t pIntid = 0; 231 bool ite_valid = false; 232 uint64_t cte = 0; 233 bool cte_valid = false; 234 bool result = false; 235 uint64_t rdbase; 236 237 if (cmd == NONE) { 238 devid = offset; 239 } else { 240 devid = ((value & DEVID_MASK) >> DEVID_SHIFT); 241 242 offset += NUM_BYTES_IN_DW; 243 value = address_space_ldq_le(as, s->cq.base_addr + offset, 244 MEMTXATTRS_UNSPECIFIED, &res); 245 } 246 247 if (res != MEMTX_OK) { 248 return result; 249 } 250 251 eventid = (value & EVENTID_MASK); 252 253 dte = get_dte(s, devid, &res); 254 255 if (res != MEMTX_OK) { 256 return result; 257 } 258 dte_valid = dte & TABLE_ENTRY_VALID_MASK; 259 260 if (dte_valid) { 261 max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1)); 262 263 
ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res); 264 265 if (res != MEMTX_OK) { 266 return result; 267 } 268 269 if (ite_valid) { 270 cte_valid = get_cte(s, icid, &cte, &res); 271 } 272 273 if (res != MEMTX_OK) { 274 return result; 275 } 276 } else { 277 qemu_log_mask(LOG_GUEST_ERROR, 278 "%s: invalid command attributes: " 279 "invalid dte: %"PRIx64" for %d (MEM_TX: %d)\n", 280 __func__, dte, devid, res); 281 return result; 282 } 283 284 285 /* 286 * In this implementation, in case of guest errors we ignore the 287 * command and move onto the next command in the queue. 288 */ 289 if (devid > s->dt.max_ids) { 290 qemu_log_mask(LOG_GUEST_ERROR, 291 "%s: invalid command attributes: devid %d>%d", 292 __func__, devid, s->dt.max_ids); 293 294 } else if (!dte_valid || !ite_valid || !cte_valid) { 295 qemu_log_mask(LOG_GUEST_ERROR, 296 "%s: invalid command attributes: " 297 "dte: %s, ite: %s, cte: %s\n", 298 __func__, 299 dte_valid ? "valid" : "invalid", 300 ite_valid ? "valid" : "invalid", 301 cte_valid ? 
"valid" : "invalid"); 302 } else if (eventid > max_eventid) { 303 qemu_log_mask(LOG_GUEST_ERROR, 304 "%s: invalid command attributes: eventid %d > %d\n", 305 __func__, eventid, max_eventid); 306 } else { 307 /* 308 * Current implementation only supports rdbase == procnum 309 * Hence rdbase physical address is ignored 310 */ 311 rdbase = (cte & GITS_CTE_RDBASE_PROCNUM_MASK) >> 1U; 312 313 if (rdbase >= s->gicv3->num_cpu) { 314 return result; 315 } 316 317 if ((cmd == CLEAR) || (cmd == DISCARD)) { 318 gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0); 319 } else { 320 gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1); 321 } 322 323 if (cmd == DISCARD) { 324 IteEntry ite = {}; 325 /* remove mapping from interrupt translation table */ 326 result = update_ite(s, eventid, dte, ite); 327 } 328 } 329 330 return result; 331 } 332 333 static bool process_mapti(GICv3ITSState *s, uint64_t value, uint32_t offset, 334 bool ignore_pInt) 335 { 336 AddressSpace *as = &s->gicv3->dma_as; 337 uint32_t devid, eventid; 338 uint32_t pIntid = 0; 339 uint32_t max_eventid, max_Intid; 340 bool dte_valid; 341 MemTxResult res = MEMTX_OK; 342 uint16_t icid = 0; 343 uint64_t dte = 0; 344 bool result = false; 345 346 devid = ((value & DEVID_MASK) >> DEVID_SHIFT); 347 offset += NUM_BYTES_IN_DW; 348 value = address_space_ldq_le(as, s->cq.base_addr + offset, 349 MEMTXATTRS_UNSPECIFIED, &res); 350 351 if (res != MEMTX_OK) { 352 return result; 353 } 354 355 eventid = (value & EVENTID_MASK); 356 357 if (ignore_pInt) { 358 pIntid = eventid; 359 } else { 360 pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT); 361 } 362 363 offset += NUM_BYTES_IN_DW; 364 value = address_space_ldq_le(as, s->cq.base_addr + offset, 365 MEMTXATTRS_UNSPECIFIED, &res); 366 367 if (res != MEMTX_OK) { 368 return result; 369 } 370 371 icid = value & ICID_MASK; 372 373 dte = get_dte(s, devid, &res); 374 375 if (res != MEMTX_OK) { 376 return result; 377 } 378 dte_valid = dte & TABLE_ENTRY_VALID_MASK; 379 380 
max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1)); 381 382 max_Intid = (1ULL << (GICD_TYPER_IDBITS + 1)) - 1; 383 384 if ((devid > s->dt.max_ids) || (icid > s->ct.max_ids) 385 || !dte_valid || (eventid > max_eventid) || 386 (((pIntid < GICV3_LPI_INTID_START) || (pIntid > max_Intid)) && 387 (pIntid != INTID_SPURIOUS))) { 388 qemu_log_mask(LOG_GUEST_ERROR, 389 "%s: invalid command attributes " 390 "devid %d or icid %d or eventid %d or pIntid %d or" 391 "unmapped dte %d\n", __func__, devid, icid, eventid, 392 pIntid, dte_valid); 393 /* 394 * in this implementation, in case of error 395 * we ignore this command and move onto the next 396 * command in the queue 397 */ 398 } else { 399 /* add ite entry to interrupt translation table */ 400 IteEntry ite = {}; 401 ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, dte_valid); 402 ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL); 403 ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, pIntid); 404 ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS); 405 ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, icid); 406 407 result = update_ite(s, eventid, dte, ite); 408 } 409 410 return result; 411 } 412 413 static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid, 414 uint64_t rdbase) 415 { 416 AddressSpace *as = &s->gicv3->dma_as; 417 uint64_t value; 418 uint64_t l2t_addr; 419 bool valid_l2t; 420 uint32_t l2t_id; 421 uint32_t max_l2_entries; 422 uint64_t cte = 0; 423 MemTxResult res = MEMTX_OK; 424 425 if (!s->ct.valid) { 426 return true; 427 } 428 429 if (valid) { 430 /* add mapping entry to collection table */ 431 cte = (valid & TABLE_ENTRY_VALID_MASK) | (rdbase << 1ULL); 432 } 433 434 /* 435 * The specification defines the format of level 1 entries of a 436 * 2-level table, but the format of level 2 entries and the format 437 * of flat-mapped tables is IMPDEF. 
438 */ 439 if (s->ct.indirect) { 440 l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE); 441 442 value = address_space_ldq_le(as, 443 s->ct.base_addr + 444 (l2t_id * L1TABLE_ENTRY_SIZE), 445 MEMTXATTRS_UNSPECIFIED, &res); 446 447 if (res != MEMTX_OK) { 448 return false; 449 } 450 451 valid_l2t = (value & L2_TABLE_VALID_MASK) != 0; 452 453 if (valid_l2t) { 454 max_l2_entries = s->ct.page_sz / s->ct.entry_sz; 455 456 l2t_addr = value & ((1ULL << 51) - 1); 457 458 address_space_stq_le(as, l2t_addr + 459 ((icid % max_l2_entries) * GITS_CTE_SIZE), 460 cte, MEMTXATTRS_UNSPECIFIED, &res); 461 } 462 } else { 463 /* Flat level table */ 464 address_space_stq_le(as, s->ct.base_addr + (icid * GITS_CTE_SIZE), 465 cte, MEMTXATTRS_UNSPECIFIED, &res); 466 } 467 if (res != MEMTX_OK) { 468 return false; 469 } else { 470 return true; 471 } 472 } 473 474 static bool process_mapc(GICv3ITSState *s, uint32_t offset) 475 { 476 AddressSpace *as = &s->gicv3->dma_as; 477 uint16_t icid; 478 uint64_t rdbase; 479 bool valid; 480 MemTxResult res = MEMTX_OK; 481 bool result = false; 482 uint64_t value; 483 484 offset += NUM_BYTES_IN_DW; 485 offset += NUM_BYTES_IN_DW; 486 487 value = address_space_ldq_le(as, s->cq.base_addr + offset, 488 MEMTXATTRS_UNSPECIFIED, &res); 489 490 if (res != MEMTX_OK) { 491 return result; 492 } 493 494 icid = value & ICID_MASK; 495 496 rdbase = (value & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT; 497 rdbase &= RDBASE_PROCNUM_MASK; 498 499 valid = (value & CMD_FIELD_VALID_MASK); 500 501 if ((icid > s->ct.max_ids) || (rdbase >= s->gicv3->num_cpu)) { 502 qemu_log_mask(LOG_GUEST_ERROR, 503 "ITS MAPC: invalid collection table attributes " 504 "icid %d rdbase %" PRIu64 "\n", icid, rdbase); 505 /* 506 * in this implementation, in case of error 507 * we ignore this command and move onto the next 508 * command in the queue 509 */ 510 } else { 511 result = update_cte(s, icid, valid, rdbase); 512 } 513 514 return result; 515 } 516 517 static bool update_dte(GICv3ITSState *s, 
uint32_t devid, bool valid, 518 uint8_t size, uint64_t itt_addr) 519 { 520 AddressSpace *as = &s->gicv3->dma_as; 521 uint64_t value; 522 uint64_t l2t_addr; 523 bool valid_l2t; 524 uint32_t l2t_id; 525 uint32_t max_l2_entries; 526 uint64_t dte = 0; 527 MemTxResult res = MEMTX_OK; 528 529 if (s->dt.valid) { 530 if (valid) { 531 /* add mapping entry to device table */ 532 dte = (valid & TABLE_ENTRY_VALID_MASK) | 533 ((size & SIZE_MASK) << 1U) | 534 (itt_addr << GITS_DTE_ITTADDR_SHIFT); 535 } 536 } else { 537 return true; 538 } 539 540 /* 541 * The specification defines the format of level 1 entries of a 542 * 2-level table, but the format of level 2 entries and the format 543 * of flat-mapped tables is IMPDEF. 544 */ 545 if (s->dt.indirect) { 546 l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE); 547 548 value = address_space_ldq_le(as, 549 s->dt.base_addr + 550 (l2t_id * L1TABLE_ENTRY_SIZE), 551 MEMTXATTRS_UNSPECIFIED, &res); 552 553 if (res != MEMTX_OK) { 554 return false; 555 } 556 557 valid_l2t = (value & L2_TABLE_VALID_MASK) != 0; 558 559 if (valid_l2t) { 560 max_l2_entries = s->dt.page_sz / s->dt.entry_sz; 561 562 l2t_addr = value & ((1ULL << 51) - 1); 563 564 address_space_stq_le(as, l2t_addr + 565 ((devid % max_l2_entries) * GITS_DTE_SIZE), 566 dte, MEMTXATTRS_UNSPECIFIED, &res); 567 } 568 } else { 569 /* Flat level table */ 570 address_space_stq_le(as, s->dt.base_addr + (devid * GITS_DTE_SIZE), 571 dte, MEMTXATTRS_UNSPECIFIED, &res); 572 } 573 if (res != MEMTX_OK) { 574 return false; 575 } else { 576 return true; 577 } 578 } 579 580 static bool process_mapd(GICv3ITSState *s, uint64_t value, uint32_t offset) 581 { 582 AddressSpace *as = &s->gicv3->dma_as; 583 uint32_t devid; 584 uint8_t size; 585 uint64_t itt_addr; 586 bool valid; 587 MemTxResult res = MEMTX_OK; 588 bool result = false; 589 590 devid = ((value & DEVID_MASK) >> DEVID_SHIFT); 591 592 offset += NUM_BYTES_IN_DW; 593 value = address_space_ldq_le(as, s->cq.base_addr + offset, 594 
MEMTXATTRS_UNSPECIFIED, &res); 595 596 if (res != MEMTX_OK) { 597 return result; 598 } 599 600 size = (value & SIZE_MASK); 601 602 offset += NUM_BYTES_IN_DW; 603 value = address_space_ldq_le(as, s->cq.base_addr + offset, 604 MEMTXATTRS_UNSPECIFIED, &res); 605 606 if (res != MEMTX_OK) { 607 return result; 608 } 609 610 itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT; 611 612 valid = (value & CMD_FIELD_VALID_MASK); 613 614 if ((devid > s->dt.max_ids) || 615 (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) { 616 qemu_log_mask(LOG_GUEST_ERROR, 617 "ITS MAPD: invalid device table attributes " 618 "devid %d or size %d\n", devid, size); 619 /* 620 * in this implementation, in case of error 621 * we ignore this command and move onto the next 622 * command in the queue 623 */ 624 } else { 625 result = update_dte(s, devid, valid, size, itt_addr); 626 } 627 628 return result; 629 } 630 631 /* 632 * Current implementation blocks until all 633 * commands are processed 634 */ 635 static void process_cmdq(GICv3ITSState *s) 636 { 637 uint32_t wr_offset = 0; 638 uint32_t rd_offset = 0; 639 uint32_t cq_offset = 0; 640 uint64_t data; 641 AddressSpace *as = &s->gicv3->dma_as; 642 MemTxResult res = MEMTX_OK; 643 bool result = true; 644 uint8_t cmd; 645 int i; 646 647 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) { 648 return; 649 } 650 651 wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET); 652 653 if (wr_offset > s->cq.max_entries) { 654 qemu_log_mask(LOG_GUEST_ERROR, 655 "%s: invalid write offset " 656 "%d\n", __func__, wr_offset); 657 return; 658 } 659 660 rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET); 661 662 if (rd_offset > s->cq.max_entries) { 663 qemu_log_mask(LOG_GUEST_ERROR, 664 "%s: invalid read offset " 665 "%d\n", __func__, rd_offset); 666 return; 667 } 668 669 while (wr_offset != rd_offset) { 670 cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE); 671 data = address_space_ldq_le(as, s->cq.base_addr + cq_offset, 672 MEMTXATTRS_UNSPECIFIED, &res); 673 if (res != 
MEMTX_OK) { 674 result = false; 675 } 676 cmd = (data & CMD_MASK); 677 678 switch (cmd) { 679 case GITS_CMD_INT: 680 res = process_its_cmd(s, data, cq_offset, INTERRUPT); 681 break; 682 case GITS_CMD_CLEAR: 683 res = process_its_cmd(s, data, cq_offset, CLEAR); 684 break; 685 case GITS_CMD_SYNC: 686 /* 687 * Current implementation makes a blocking synchronous call 688 * for every command issued earlier, hence the internal state 689 * is already consistent by the time SYNC command is executed. 690 * Hence no further processing is required for SYNC command. 691 */ 692 break; 693 case GITS_CMD_MAPD: 694 result = process_mapd(s, data, cq_offset); 695 break; 696 case GITS_CMD_MAPC: 697 result = process_mapc(s, cq_offset); 698 break; 699 case GITS_CMD_MAPTI: 700 result = process_mapti(s, data, cq_offset, false); 701 break; 702 case GITS_CMD_MAPI: 703 result = process_mapti(s, data, cq_offset, true); 704 break; 705 case GITS_CMD_DISCARD: 706 result = process_its_cmd(s, data, cq_offset, DISCARD); 707 break; 708 case GITS_CMD_INV: 709 case GITS_CMD_INVALL: 710 /* 711 * Current implementation doesn't cache any ITS tables, 712 * but the calculated lpi priority information. We only 713 * need to trigger lpi priority re-calculation to be in 714 * sync with LPI config table or pending table changes. 
715 */ 716 for (i = 0; i < s->gicv3->num_cpu; i++) { 717 gicv3_redist_update_lpi(&s->gicv3->cpu[i]); 718 } 719 break; 720 default: 721 break; 722 } 723 if (result) { 724 rd_offset++; 725 rd_offset %= s->cq.max_entries; 726 s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset); 727 } else { 728 /* 729 * in this implementation, in case of dma read/write error 730 * we stall the command processing 731 */ 732 s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1); 733 qemu_log_mask(LOG_GUEST_ERROR, 734 "%s: %x cmd processing failed\n", __func__, cmd); 735 break; 736 } 737 } 738 } 739 740 /* 741 * This function extracts the ITS Device and Collection table specific 742 * parameters (like base_addr, size etc) from GITS_BASER register. 743 * It is called during ITS enable and also during post_load migration 744 */ 745 static void extract_table_params(GICv3ITSState *s) 746 { 747 uint16_t num_pages = 0; 748 uint8_t page_sz_type; 749 uint8_t type; 750 uint32_t page_sz = 0; 751 uint64_t value; 752 753 for (int i = 0; i < 8; i++) { 754 TableDesc *td; 755 int idbits; 756 757 value = s->baser[i]; 758 759 if (!value) { 760 continue; 761 } 762 763 page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE); 764 765 switch (page_sz_type) { 766 case 0: 767 page_sz = GITS_PAGE_SIZE_4K; 768 break; 769 770 case 1: 771 page_sz = GITS_PAGE_SIZE_16K; 772 break; 773 774 case 2: 775 case 3: 776 page_sz = GITS_PAGE_SIZE_64K; 777 break; 778 779 default: 780 g_assert_not_reached(); 781 } 782 783 num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1; 784 785 type = FIELD_EX64(value, GITS_BASER, TYPE); 786 787 switch (type) { 788 case GITS_BASER_TYPE_DEVICE: 789 td = &s->dt; 790 idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1; 791 break; 792 case GITS_BASER_TYPE_COLLECTION: 793 td = &s->ct; 794 if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) { 795 idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1; 796 } else { 797 /* 16-bit CollectionId supported when CIL == 0 */ 798 idbits = 16; 
            }
            break;
        default:
            /*
             * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
             * ensures we will only see type values corresponding to
             * the values set up in gicv3_its_reset().
             */
            g_assert_not_reached();
        }

        memset(td, 0, sizeof(*td));
        td->valid = FIELD_EX64(value, GITS_BASER, VALID);
        /*
         * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
         * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
         * do not have a special case where the GITS_BASER<n>.Valid bit is 0
         * for the register corresponding to the Collection table but we
         * still have to process interrupts using non-memory-backed
         * Collection table entries.)
         */
        if (!td->valid) {
            continue;
        }
        td->page_sz = page_sz;
        td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
        td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
        td->base_addr = baser_base_addr(value, page_sz);
        if (!td->indirect) {
            /* Flat table: one level of entries covering all pages */
            td->max_entries = (num_pages * page_sz) / td->entry_sz;
        } else {
            /* Two-level: L1 entries times entries per L2 page */
            td->max_entries = (((num_pages * page_sz) /
                                  L1TABLE_ENTRY_SIZE) *
                                 (page_sz / td->entry_sz));
        }
        td->max_ids = 1ULL << idbits;
    }
}

/*
 * Extract the command queue parameters (validity, entry count, base
 * address) from the GITS_CBASER register value.
 */
static void extract_cmdq_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint64_t value = s->cbaser;

    num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;

    memset(&s->cq, 0 , sizeof(s->cq));
    s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);

    if (s->cq.valid) {
        /* Command queue pages are always 4K regardless of table page size */
        s->cq.max_entries = (num_pages * GITS_PAGE_SIZE_4K) /
                             GITS_CMDQ_ENTRY_SIZE;
        s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
        s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
    }
}

/*
 * Handler for writes to the GITS_TRANSLATER frame: a device writing an
 * EventID here (with its DeviceID in the bus transaction's requester_id)
 * triggers an LPI translation, but only while the ITS is enabled.
 */
static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
                                               uint64_t data, unsigned size,
                                               MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result = true;
    uint32_t devid = 0;

    switch (offset) {
    case GITS_TRANSLATER:
        if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
            /* DeviceID comes from the bus master's requester ID */
            devid = attrs.requester_id;
            result = process_its_cmd(s, data, devid, NONE);
        }
        break;
    default:
        break;
    }

    if (result) {
        return MEMTX_OK;
    } else {
        return MEMTX_ERROR;
    }
}

/*
 * 32-bit writes to the ITS control register frame.
 * Returns false for offsets that are not valid 32-bit registers
 * (the caller logs and treats them as RAZ/WI).
 */
static bool its_writel(GICv3ITSState *s, hwaddr offset,
                       uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        if (value & R_GITS_CTLR_ENABLED_MASK) {
            /* Enabling the ITS latches table/queue params and drains cmds */
            s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
            extract_table_params(s);
            extract_cmdq_params(s);
            s->creadr = 0;
            process_cmdq(s);
        } else {
            s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 0, 32, value);
            s->creadr = 0;
            s->cwriter = s->creadr;
        }
        break;
    case GITS_CBASER + 4:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 32, 32, value);
            s->creadr = 0;
            s->cwriter = s->creadr;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = deposit64(s->cwriter, 0, 32,
                               (value & ~R_GITS_CWRITER_RETRY_MASK));
        /* Advancing the write pointer past the read pointer kicks the queue */
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CWRITER + 4:
        s->cwriter = deposit64(s->cwriter, 32, 32, value);
        break;
    case GITS_CREADR:
        /* CREADR is writable only for secure accesses when GICD_CTLR.DS=1 */
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 0, 32,
                                  (value & ~R_GITS_CREADR_STALLED_MASK));
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_CREADR + 4:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 32, 32, value);
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;

            if (offset & 7) {
                /* Write to the high half of a 64-bit GITS_BASER<n> */
                value <<= 32;
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
                s->baser[index] |= value;
            } else {
                /* Write to the low half */
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
                s->baser[index] |= value;
            }
        }
        break;
    case GITS_IIDR:
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}

/*
 * 32-bit reads from the ITS control register frame.
 * 64-bit registers are exposed as two 32-bit halves.
 * Returns false for invalid offsets (caller treats as RAZ).
 */
static bool its_readl(GICv3ITSState *s, hwaddr offset,
                      uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        *data = s->ctlr;
        break;
    case GITS_IIDR:
        *data = gicv3_iidr();
        break;
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* ID registers */
        *data = gicv3_idreg(offset - GITS_IDREGS);
        break;
    case GITS_TYPER:
        *data = extract64(s->typer, 0, 32);
        break;
    case GITS_TYPER + 4:
        *data = extract64(s->typer, 32, 32);
        break;
    case GITS_CBASER:
        *data = extract64(s->cbaser, 0, 32);
        break;
    case GITS_CBASER + 4:
        *data = extract64(s->cbaser, 32, 32);
        break;
    case GITS_CREADR:
        *data = extract64(s->creadr, 0, 32);
        break;
    case GITS_CREADR + 4:
        *data = extract64(s->creadr, 32, 32);
        break;
    case GITS_CWRITER:
        *data = extract64(s->cwriter, 0, 32);
        break;
    case GITS_CWRITER + 4:
        *data = extract64(s->cwriter, 32, 32);
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        if (offset & 7) {
            *data = extract64(s->baser[index], 32, 32);
        } else {
            *data = extract64(s->baser[index], 0, 32);
        }
        break;
    default:
        result = false;
        break;
    }
    return result;
}

/*
 * 64-bit writes to the ITS control register frame.
 * Returns false for invalid offsets (caller treats as WI).
 */
static bool its_writell(GICv3ITSState *s, hwaddr offset,
                        uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;
            s->baser[index] &= GITS_BASER_RO_MASK;
            s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = value;
            s->creadr = 0;
            s->cwriter = s->creadr;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CREADR:
        /* CREADR is writable only for secure accesses when GICD_CTLR.DS=1 */
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_TYPER:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}

/*
 * 64-bit reads from the ITS control register frame.
 * Returns false for invalid offsets (caller treats as RAZ).
 */
static bool its_readll(GICv3ITSState *s, hwaddr offset,
                       uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_TYPER:
        *data = s->typer;
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        *data = s->baser[index];
        break;
    case GITS_CBASER:
        *data = s->cbaser;
        break;
    case GITS_CREADR:
        *data = s->creadr;
        break;
    case GITS_CWRITER:
        *data = s->cwriter;
        break;
    default:
        result = false;
        break;
    }
    return result;
}

/* Top-level MMIO read dispatcher for the ITS control frame. */
static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_readl(s, offset, data, attrs);
        break;
    case 8:
        result = its_readll(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " TARGET_FMT_plx
                      "size %u\n", __func__, offset, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        *data = 0;
    }
    return MEMTX_OK;
}

/* Top-level MMIO write dispatcher for the ITS control frame. */
static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_writel(s, offset, data, attrs);
        break;
    case 8:
        result = its_writell(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " TARGET_FMT_plx
                      "size %u\n", __func__, offset, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
    }
    return MEMTX_OK;
}

static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps gicv3_its_translation_ops = {
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/*
 * Realize: verify every redistributor supports physical LPIs, map the
 * MMIO frames, set up the DMA address space and advertise the ITS
 * feature set in GITS_TYPER.
 */
static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    int i;

    for (i = 0; i < s->gicv3->num_cpu; i++) {
        if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
            error_setg(errp, "Physical LPI not supported by CPU %d", i);
            return;
        }
    }

    gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);

    address_space_init(&s->gicv3->dma_as, s->gicv3->dma,
                       "gicv3-its-sysmem");

    /* set the ITS default features supported */
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
                          ITS_ITT_ENTRY_SIZE - 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
}

/* Device reset: chain to parent reset, then set the RO register defaults. */
static void gicv3_its_reset(DeviceState *dev)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);

    c->parent_reset(dev);

    /* Quiescent bit reset to 1 */
    s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);

    /*
     * setting GITS_BASER0.Type = 0b001 (Device)
     *         GITS_BASER1.Type = 0b100 (Collection Table)
     *         GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented)
     *         GITS_BASER<0,1>.Page_Size = 64KB
     * and default translation table entry size to 16 bytes
     */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_DEVICE);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
                             GITS_DTE_SIZE - 1);

    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_COLLECTION);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
                             GITS_CTE_SIZE - 1);
}

/*
 * Post-migration hook: re-derive the cached table and command queue
 * parameters from the migrated register state if the ITS was enabled.
 */
static void gicv3_its_post_load(GICv3ITSState *s)
{
    if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
        extract_table_params(s);
        extract_cmdq_params(s);
    }
}

static Property gicv3_its_props[] = {
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};

static void gicv3_its_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
    GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);

    dc->realize = gicv3_arm_its_realize;
    device_class_set_props(dc, gicv3_its_props);
    device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
    icc->post_load = gicv3_its_post_load;
}

static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};

static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)