/*
 * ITS emulation for a GICv3-based system
 *
 * Copyright Linaro.org 2021
 *
 * Authors:
 *  Shashi Mallela <shashi.mallela@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/qdev-properties.h"
#include "hw/intc/arm_gicv3_its_common.h"
#include "gicv3_internal.h"
#include "qom/object.h"
#include "qapi/error.h"

typedef struct GICv3ITSClass GICv3ITSClass;
/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
                     ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)

struct GICv3ITSClass {
    GICv3ITSCommonClass parent_class;
    void (*parent_reset)(DeviceState *dev);
};

/*
 * This is an internal enum used to distinguish between LPI triggered
 * via command queue and LPI triggered via gits_translater write.
 */
typedef enum ItsCmdType {
    NONE = 0, /* internal indication for GITS_TRANSLATER write */
    CLEAR = 1,
    DISCARD = 2,
    INTERRUPT = 3,
} ItsCmdType;

typedef struct {
    uint32_t iteh;
    uint64_t itel;
} IteEntry;

/*
 * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
 * if a command parameter is not correct. These include both "stall
 * processing of the command queue" and "ignore this command, and
 * keep processing the queue". In our implementation we choose that
 * memory transaction errors reading the command packet provoke a
 * stall, but errors in parameters cause us to ignore the command
 * and continue processing.
 * The process_* functions which handle individual ITS commands all
 * return an ItsCmdResult which tells process_cmdq() whether it should
 * stall or keep going.
 */
typedef enum ItsCmdResult {
    CMD_STALL = 0,
    CMD_CONTINUE = 1,
} ItsCmdResult;

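/*
 * Extract the physical base address from a GITS_BASER<n> value.
 * The encoding of the address depends on the programmed page size:
 * for 4KB and 16KB pages the PHYADDR field supplies bits [47:12] of
 * the address, while for 64KB pages bits [47:16] come from
 * PHYADDRL_64K and bits [51:48] from the separate PHYADDRH_64K field.
 */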
static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
{
    uint64_t result = 0;

    switch (page_sz) {
    case GITS_PAGE_SIZE_4K:
    case GITS_PAGE_SIZE_16K:
        result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
        break;

    case GITS_PAGE_SIZE_64K:
        result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
        result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
        break;

    default:
        break;
    }
    return result;
}

static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
                    MemTxResult *res)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t l2t_addr;
    uint64_t value;
    bool valid_l2t;
    uint32_t l2t_id;
    uint32_t num_l2_entries;

    if (s->ct.indirect) {
        l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);

        value = address_space_ldq_le(as,
                                     s->ct.base_addr +
                                     (l2t_id * L1TABLE_ENTRY_SIZE),
                                     MEMTXATTRS_UNSPECIFIED, res);

        if (*res == MEMTX_OK) {
            valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;

            if (valid_l2t) {
                num_l2_entries = s->ct.page_sz / s->ct.entry_sz;

                l2t_addr = value & ((1ULL << 51) - 1);

                *cte = address_space_ldq_le(as, l2t_addr +
                                    ((icid % num_l2_entries) * GITS_CTE_SIZE),
                                    MEMTXATTRS_UNSPECIFIED, res);
            }
        }
    } else {
        /* Flat level table */
        *cte = address_space_ldq_le(as, s->ct.base_addr +
                                    (icid * GITS_CTE_SIZE),
                                    MEMTXATTRS_UNSPECIFIED, res);
    }

    return FIELD_EX64(*cte, CTE, VALID);
}

static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
                       IteEntry ite)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t itt_addr;
    MemTxResult res = MEMTX_OK;

    itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
    itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */

    address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
                         sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED,
                         &res);

    if (res == MEMTX_OK) {
        address_space_stl_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
                             sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh,
                             MEMTXATTRS_UNSPECIFIED, &res);
    }
    if (res != MEMTX_OK) {
        return false;
    } else {
        return true;
    }
}

static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
                    uint16_t *icid, uint32_t *pIntid, MemTxResult *res)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t itt_addr;
    bool status = false;
    IteEntry ite = {};

    itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
    itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */

    ite.itel = address_space_ldq_le(as, itt_addr +
                                    (eventid * (sizeof(uint64_t) +
                                    sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED,
                                    res);

    if (*res == MEMTX_OK) {
        ite.iteh = address_space_ldl_le(as, itt_addr +
                                        (eventid * (sizeof(uint64_t) +
                                        sizeof(uint32_t))) + sizeof(uint32_t),
                                        MEMTXATTRS_UNSPECIFIED, res);

        if (*res == MEMTX_OK) {
            if (FIELD_EX64(ite.itel, ITE_L, VALID)) {
                int inttype = FIELD_EX64(ite.itel, ITE_L, INTTYPE);
                if (inttype == ITE_INTTYPE_PHYSICAL) {
                    *pIntid = FIELD_EX64(ite.itel, ITE_L, INTID);
                    *icid = FIELD_EX32(ite.iteh, ITE_H, ICID);
                    status = true;
                }
            }
        }
    }
    return status;
}

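/*
 * Look up the Device Table Entry for @devid. Both a flat table and a
 * two-level (indirect) table are handled: in the indirect case the
 * level 1 entry is read first to locate the level 2 page that holds
 * the DTE. Any memory transaction failure is reported through *res,
 * in which case the returned value must not be used.
 */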
static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t l2t_addr;
    uint64_t value;
    bool valid_l2t;
    uint32_t l2t_id;
    uint32_t num_l2_entries;

    if (s->dt.indirect) {
        l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);

        value = address_space_ldq_le(as,
                                     s->dt.base_addr +
                                     (l2t_id * L1TABLE_ENTRY_SIZE),
                                     MEMTXATTRS_UNSPECIFIED, res);

        if (*res == MEMTX_OK) {
            valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;

            if (valid_l2t) {
                num_l2_entries = s->dt.page_sz / s->dt.entry_sz;

                l2t_addr = value & ((1ULL << 51) - 1);

                value = address_space_ldq_le(as, l2t_addr +
                                   ((devid % num_l2_entries) * GITS_DTE_SIZE),
                                   MEMTXATTRS_UNSPECIFIED, res);
            }
        }
    } else {
        /* Flat level table */
        value = address_space_ldq_le(as, s->dt.base_addr +
                                     (devid * GITS_DTE_SIZE),
                                     MEMTXATTRS_UNSPECIFIED, res);
    }

    return value;
}

/*
 * This function handles the processing of the following commands based on
 * the ItsCmdType parameter passed:
 * 1. triggering of lpi interrupt translation via ITS INT command
 * 2. triggering of lpi interrupt translation via gits_translater register
 * 3. handling of ITS CLEAR command
 * 4. handling of ITS DISCARD command
 */
static ItsCmdResult process_its_cmd(GICv3ITSState *s, uint64_t value,
                                    uint32_t offset, ItsCmdType cmd)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t devid, eventid;
    MemTxResult res = MEMTX_OK;
    bool dte_valid;
    uint64_t dte = 0;
    uint64_t num_eventids;
    uint16_t icid = 0;
    uint32_t pIntid = 0;
    bool ite_valid = false;
    uint64_t cte = 0;
    bool cte_valid = false;
    uint64_t rdbase;

    if (cmd == NONE) {
        devid = offset;
    } else {
        devid = ((value & DEVID_MASK) >> DEVID_SHIFT);

        offset += NUM_BYTES_IN_DW;
        value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                     MEMTXATTRS_UNSPECIFIED, &res);
    }

    if (res != MEMTX_OK) {
        return CMD_STALL;
    }

    eventid = (value & EVENTID_MASK);

    dte = get_dte(s, devid, &res);

    if (res != MEMTX_OK) {
        return CMD_STALL;
    }
    dte_valid = FIELD_EX64(dte, DTE, VALID);

    if (dte_valid) {
        num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1);

        ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res);

        if (res != MEMTX_OK) {
            return CMD_STALL;
        }

        if (ite_valid) {
            cte_valid = get_cte(s, icid, &cte, &res);
        }

        if (res != MEMTX_OK) {
            return CMD_STALL;
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid dte: %"PRIx64" for %d (MEM_TX: %d)\n",
                      __func__, dte, devid, res);
        return CMD_CONTINUE;
    }

    /*
     * In this implementation, in case of guest errors we ignore the
     * command and move onto the next command in the queue.
     */
    if (devid >= s->dt.num_ids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: devid %d>=%d",
                      __func__, devid, s->dt.num_ids);
        return CMD_CONTINUE;
    } else if (!dte_valid || !ite_valid || !cte_valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "dte: %s, ite: %s, cte: %s\n",
                      __func__,
                      dte_valid ? "valid" : "invalid",
                      ite_valid ? "valid" : "invalid",
                      cte_valid ? "valid" : "invalid");
        return CMD_CONTINUE;
    } else if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: eventid %d >= %"
                      PRId64 "\n",
                      __func__, eventid, num_eventids);
        return CMD_CONTINUE;
    } else {
        /*
         * Current implementation only supports rdbase == procnum
         * Hence rdbase physical address is ignored
         */
        rdbase = FIELD_EX64(cte, CTE, RDBASE);

        if (rdbase >= s->gicv3->num_cpu) {
            return CMD_CONTINUE;
        }

        if ((cmd == CLEAR) || (cmd == DISCARD)) {
            gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0);
        } else {
            gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1);
        }

        if (cmd == DISCARD) {
            IteEntry ite = {};
            /* remove mapping from interrupt translation table */
            return update_ite(s, eventid, dte, ite) ? CMD_CONTINUE : CMD_STALL;
        }
        return CMD_CONTINUE;
    }
}

static ItsCmdResult process_mapti(GICv3ITSState *s, uint64_t value,
                                  uint32_t offset, bool ignore_pInt)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t devid, eventid;
    uint32_t pIntid = 0;
    uint64_t num_eventids;
    uint32_t num_intids;
    bool dte_valid;
    MemTxResult res = MEMTX_OK;
    uint16_t icid = 0;
    uint64_t dte = 0;
    ItsCmdResult result = CMD_STALL;

    devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
    offset += NUM_BYTES_IN_DW;
    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return result;
    }

    eventid = (value & EVENTID_MASK);

    if (ignore_pInt) {
        pIntid = eventid;
    } else {
        pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT);
    }

    offset += NUM_BYTES_IN_DW;
    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return result;
    }

    icid = value & ICID_MASK;

    dte = get_dte(s, devid, &res);

    if (res != MEMTX_OK) {
        return result;
    }
    dte_valid = FIELD_EX64(dte, DTE, VALID);
    num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1);
    num_intids = 1ULL << (GICD_TYPER_IDBITS + 1);

    if ((devid >= s->dt.num_ids) || (icid >= s->ct.num_ids)
            || !dte_valid || (eventid >= num_eventids) ||
            (((pIntid < GICV3_LPI_INTID_START) || (pIntid >= num_intids)) &&
             (pIntid != INTID_SPURIOUS))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes "
                      "devid %d or icid %d or eventid %d or pIntid %d or "
                      "unmapped dte %d\n", __func__, devid, icid, eventid,
                      pIntid, dte_valid);
        /*
         * in this implementation, in case of error
         * we ignore this command and move onto the next
         * command in the queue
         */
    } else {
        /* add ite entry to interrupt translation table */
        IteEntry ite = {};
        ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, dte_valid);
        ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
        ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, pIntid);
        ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS);
        ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, icid);

        result = update_ite(s, eventid, dte, ite) ? CMD_CONTINUE : CMD_STALL;
    }

    return result;
}

static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
                       uint64_t rdbase)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t value;
    uint64_t l2t_addr;
    bool valid_l2t;
    uint32_t l2t_id;
    uint32_t num_l2_entries;
    uint64_t cte = 0;
    MemTxResult res = MEMTX_OK;

    if (!s->ct.valid) {
        return true;
    }

    if (valid) {
        /* add mapping entry to collection table */
        cte = FIELD_DP64(cte, CTE, VALID, 1);
        cte = FIELD_DP64(cte, CTE, RDBASE, rdbase);
    }

    /*
     * The specification defines the format of level 1 entries of a
     * 2-level table, but the format of level 2 entries and the format
     * of flat-mapped tables is IMPDEF.
     */
    if (s->ct.indirect) {
        l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);

        value = address_space_ldq_le(as,
                                     s->ct.base_addr +
                                     (l2t_id * L1TABLE_ENTRY_SIZE),
                                     MEMTXATTRS_UNSPECIFIED, &res);

        if (res != MEMTX_OK) {
            return false;
        }

        valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;

        if (valid_l2t) {
            num_l2_entries = s->ct.page_sz / s->ct.entry_sz;

            l2t_addr = value & ((1ULL << 51) - 1);

            address_space_stq_le(as, l2t_addr +
                                 ((icid % num_l2_entries) * GITS_CTE_SIZE),
                                 cte, MEMTXATTRS_UNSPECIFIED, &res);
        }
    } else {
        /* Flat level table */
        address_space_stq_le(as, s->ct.base_addr + (icid * GITS_CTE_SIZE),
                             cte, MEMTXATTRS_UNSPECIFIED, &res);
    }
    if (res != MEMTX_OK) {
        return false;
    } else {
        return true;
    }
}

static ItsCmdResult process_mapc(GICv3ITSState *s, uint32_t offset)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint16_t icid;
    uint64_t rdbase;
    bool valid;
    MemTxResult res = MEMTX_OK;
    ItsCmdResult result = CMD_STALL;
    uint64_t value;

    offset += NUM_BYTES_IN_DW;
    offset += NUM_BYTES_IN_DW;

    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return result;
    }

    icid = value & ICID_MASK;

    rdbase = (value & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
    rdbase &= RDBASE_PROCNUM_MASK;

    valid = (value & CMD_FIELD_VALID_MASK);

    if ((icid >= s->ct.num_ids) || (rdbase >= s->gicv3->num_cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPC: invalid collection table attributes "
                      "icid %d rdbase %" PRIu64 "\n", icid, rdbase);
        /*
         * in this implementation, in case of error
         * we ignore this command and move onto the next
         * command in the queue
         */
    } else {
        result = update_cte(s, icid, valid, rdbase) ? CMD_CONTINUE : CMD_STALL;
    }

    return result;
}

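/*
 * Write the Device Table Entry for @devid, recording the ITT address
 * and event ID size, or write an all-zeroes entry to unmap the device
 * when @valid is false. Returns false only on a memory transaction
 * error; parameter checking is done by the caller (process_mapd).
 */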
static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
                       uint8_t size, uint64_t itt_addr)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t value;
    uint64_t l2t_addr;
    bool valid_l2t;
    uint32_t l2t_id;
    uint32_t num_l2_entries;
    uint64_t dte = 0;
    MemTxResult res = MEMTX_OK;

    if (s->dt.valid) {
        if (valid) {
            /* add mapping entry to device table */
            dte = FIELD_DP64(dte, DTE, VALID, 1);
            dte = FIELD_DP64(dte, DTE, SIZE, size);
            dte = FIELD_DP64(dte, DTE, ITTADDR, itt_addr);
        }
    } else {
        return true;
    }

    /*
     * The specification defines the format of level 1 entries of a
     * 2-level table, but the format of level 2 entries and the format
     * of flat-mapped tables is IMPDEF.
     */
    if (s->dt.indirect) {
        l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);

        value = address_space_ldq_le(as,
                                     s->dt.base_addr +
                                     (l2t_id * L1TABLE_ENTRY_SIZE),
                                     MEMTXATTRS_UNSPECIFIED, &res);

        if (res != MEMTX_OK) {
            return false;
        }

        valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;

        if (valid_l2t) {
            num_l2_entries = s->dt.page_sz / s->dt.entry_sz;

            l2t_addr = value & ((1ULL << 51) - 1);

            address_space_stq_le(as, l2t_addr +
                                 ((devid % num_l2_entries) * GITS_DTE_SIZE),
                                 dte, MEMTXATTRS_UNSPECIFIED, &res);
        }
    } else {
        /* Flat level table */
        address_space_stq_le(as, s->dt.base_addr + (devid * GITS_DTE_SIZE),
                             dte, MEMTXATTRS_UNSPECIFIED, &res);
    }
    if (res != MEMTX_OK) {
        return false;
    } else {
        return true;
    }
}

static ItsCmdResult process_mapd(GICv3ITSState *s, uint64_t value,
                                 uint32_t offset)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t devid;
    uint8_t size;
    uint64_t itt_addr;
    bool valid;
    MemTxResult res = MEMTX_OK;
    ItsCmdResult result = CMD_STALL;

    devid = ((value & DEVID_MASK) >> DEVID_SHIFT);

    offset += NUM_BYTES_IN_DW;
    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return result;
    }

    size = (value & SIZE_MASK);

    offset += NUM_BYTES_IN_DW;
    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return result;
    }

    itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT;

    valid = (value & CMD_FIELD_VALID_MASK);

    if ((devid >= s->dt.num_ids) ||
        (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPD: invalid device table attributes "
                      "devid %d or size %d\n", devid, size);
        /*
         * in this implementation, in case of error
         * we ignore this command and move onto the next
         * command in the queue
         */
    } else {
        result = update_dte(s, devid, valid, size, itt_addr) ?
                 CMD_CONTINUE : CMD_STALL;
    }

    return result;
}

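/*
 * Command queue handling: each command occupies GITS_CMDQ_ENTRY_SIZE
 * bytes (four 64-bit doublewords). The opcode is taken from the low
 * bits of the first doubleword (CMD_MASK); the process_* helpers above
 * read any further doublewords they need by stepping the queue offset
 * in NUM_BYTES_IN_DW increments.
 */
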
/*
 * Current implementation blocks until all
 * commands are processed
 */
static void process_cmdq(GICv3ITSState *s)
{
    uint32_t wr_offset = 0;
    uint32_t rd_offset = 0;
    uint32_t cq_offset = 0;
    uint64_t data;
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    uint8_t cmd;
    int i;

    if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
        return;
    }

    wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);

    if (wr_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid write offset "
                      "%d\n", __func__, wr_offset);
        return;
    }

    rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);

    if (rd_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid read offset "
                      "%d\n", __func__, rd_offset);
        return;
    }

    while (wr_offset != rd_offset) {
        ItsCmdResult result = CMD_CONTINUE;

        cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
        data = address_space_ldq_le(as, s->cq.base_addr + cq_offset,
                                    MEMTXATTRS_UNSPECIFIED, &res);
        if (res != MEMTX_OK) {
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: could not read command at 0x%" PRIx64 "\n",
                          __func__, s->cq.base_addr + cq_offset);
            break;
        }

        cmd = (data & CMD_MASK);

        switch (cmd) {
        case GITS_CMD_INT:
            result = process_its_cmd(s, data, cq_offset, INTERRUPT);
            break;
        case GITS_CMD_CLEAR:
            result = process_its_cmd(s, data, cq_offset, CLEAR);
            break;
        case GITS_CMD_SYNC:
            /*
             * Current implementation makes a blocking synchronous call
             * for every command issued earlier, hence the internal state
             * is already consistent by the time SYNC command is executed.
             * Hence no further processing is required for SYNC command.
             */
            break;
        case GITS_CMD_MAPD:
            result = process_mapd(s, data, cq_offset);
            break;
        case GITS_CMD_MAPC:
            result = process_mapc(s, cq_offset);
            break;
        case GITS_CMD_MAPTI:
            result = process_mapti(s, data, cq_offset, false);
            break;
        case GITS_CMD_MAPI:
            result = process_mapti(s, data, cq_offset, true);
            break;
        case GITS_CMD_DISCARD:
            result = process_its_cmd(s, data, cq_offset, DISCARD);
            break;
        case GITS_CMD_INV:
        case GITS_CMD_INVALL:
            /*
             * Current implementation doesn't cache any ITS tables
             * other than the calculated LPI priority information. We only
             * need to trigger LPI priority re-calculation to be in
             * sync with LPI config table or pending table changes.
             */
            for (i = 0; i < s->gicv3->num_cpu; i++) {
                gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
            }
            break;
        default:
            break;
        }
        if (result == CMD_CONTINUE) {
            rd_offset++;
            rd_offset %= s->cq.num_entries;
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
        } else {
            /* CMD_STALL */
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: 0x%x cmd processing failed, stalling\n",
                          __func__, cmd);
            break;
        }
    }
}

/*
 * This function extracts the ITS Device and Collection table specific
 * parameters (like base_addr, size etc) from GITS_BASER register.
 * It is called during ITS enable and also during post_load migration
 */
static void extract_table_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint8_t page_sz_type;
    uint8_t type;
    uint32_t page_sz = 0;
    uint64_t value;

    for (int i = 0; i < 8; i++) {
        TableDesc *td;
        int idbits;

        value = s->baser[i];

        if (!value) {
            continue;
        }

        page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);

        switch (page_sz_type) {
        case 0:
            page_sz = GITS_PAGE_SIZE_4K;
            break;

        case 1:
            page_sz = GITS_PAGE_SIZE_16K;
            break;

        case 2:
        case 3:
            page_sz = GITS_PAGE_SIZE_64K;
            break;

        default:
            g_assert_not_reached();
        }

        num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;

        type = FIELD_EX64(value, GITS_BASER, TYPE);

        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
            td = &s->dt;
            idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
            break;
        case GITS_BASER_TYPE_COLLECTION:
            td = &s->ct;
            if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
                idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
            } else {
                /* 16-bit CollectionId supported when CIL == 0 */
                idbits = 16;
            }
            break;
        default:
            /*
             * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
             * ensures we will only see type values corresponding to
             * the values set up in gicv3_its_reset().
             */
            g_assert_not_reached();
        }

        memset(td, 0, sizeof(*td));
        td->valid = FIELD_EX64(value, GITS_BASER, VALID);
        /*
         * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
         * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
         * do not have a special case where the GITS_BASER<n>.Valid bit is 0
         * for the register corresponding to the Collection table but we
         * still have to process interrupts using non-memory-backed
         * Collection table entries.)
         */
        if (!td->valid) {
            continue;
        }
        td->page_sz = page_sz;
        td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
        td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
        td->base_addr = baser_base_addr(value, page_sz);
        if (!td->indirect) {
            td->num_entries = (num_pages * page_sz) / td->entry_sz;
        } else {
            td->num_entries = (((num_pages * page_sz) /
                                L1TABLE_ENTRY_SIZE) *
                               (page_sz / td->entry_sz));
        }
        td->num_ids = 1ULL << idbits;
    }
}

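/*
 * Extract the command queue parameters from GITS_CBASER. The SIZE
 * field holds the number of 4KB pages allocated to the queue minus
 * one, so the entry count is derived from (SIZE + 1) pages.
 */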
static void extract_cmdq_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint64_t value = s->cbaser;

    num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;

    memset(&s->cq, 0, sizeof(s->cq));
    s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);

    if (s->cq.valid) {
        s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
                            GITS_CMDQ_ENTRY_SIZE;
        s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
        s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
    }
}

static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
                                               uint64_t data, unsigned size,
                                               MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result = true;
    uint32_t devid = 0;

    switch (offset) {
    case GITS_TRANSLATER:
        if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
            devid = attrs.requester_id;
            result = process_its_cmd(s, data, devid, NONE);
        }
        break;
    default:
        break;
    }

    if (result) {
        return MEMTX_OK;
    } else {
        return MEMTX_ERROR;
    }
}

static bool its_writel(GICv3ITSState *s, hwaddr offset,
                       uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        if (value & R_GITS_CTLR_ENABLED_MASK) {
            s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
            extract_table_params(s);
            extract_cmdq_params(s);
            s->creadr = 0;
            process_cmdq(s);
        } else {
            s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 0, 32, value);
            s->creadr = 0;
            s->cwriter = s->creadr;
        }
        break;
    case GITS_CBASER + 4:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 32, 32, value);
            s->creadr = 0;
            s->cwriter = s->creadr;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = deposit64(s->cwriter, 0, 32,
                               (value & ~R_GITS_CWRITER_RETRY_MASK));
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CWRITER + 4:
        s->cwriter = deposit64(s->cwriter, 32, 32, value);
        break;
    case GITS_CREADR:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 0, 32,
                                  (value & ~R_GITS_CREADR_STALLED_MASK));
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_CREADR + 4:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 32, 32, value);
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;

            if (offset & 7) {
                value <<= 32;
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
                s->baser[index] |= value;
            } else {
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
                s->baser[index] |= value;
            }
        }
        break;
    case GITS_IIDR:
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static bool its_readl(GICv3ITSState *s, hwaddr offset,
                      uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        *data = s->ctlr;
        break;
    case GITS_IIDR:
        *data = gicv3_iidr();
        break;
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* ID registers */
        *data = gicv3_idreg(offset - GITS_IDREGS);
        break;
    case GITS_TYPER:
        *data = extract64(s->typer, 0, 32);
        break;
    case GITS_TYPER + 4:
        *data = extract64(s->typer, 32, 32);
        break;
    case GITS_CBASER:
        *data = extract64(s->cbaser, 0, 32);
        break;
    case GITS_CBASER + 4:
        *data = extract64(s->cbaser, 32, 32);
        break;
    case GITS_CREADR:
        *data = extract64(s->creadr, 0, 32);
        break;
    case GITS_CREADR + 4:
        *data = extract64(s->creadr, 32, 32);
        break;
    case GITS_CWRITER:
        *data = extract64(s->cwriter, 0, 32);
        break;
    case GITS_CWRITER + 4:
        *data = extract64(s->cwriter, 32, 32);
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        if (offset & 7) {
            *data = extract64(s->baser[index], 32, 32);
        } else {
            *data = extract64(s->baser[index], 0, 32);
        }
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static bool its_writell(GICv3ITSState *s, hwaddr offset,
                        uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;
            s->baser[index] &= GITS_BASER_RO_MASK;
            s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = value;
            s->creadr = 0;
            s->cwriter = s->creadr;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CREADR:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_TYPER:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static bool its_readll(GICv3ITSState *s, hwaddr offset,
                       uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_TYPER:
        *data = s->typer;
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        *data = s->baser[index];
        break;
    case GITS_CBASER:
        *data = s->cbaser;
        break;
    case GITS_CREADR:
        *data = s->creadr;
        break;
    case GITS_CWRITER:
        *data = s->cwriter;
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_readl(s, offset, data, attrs);
        break;
    case 8:
        result = its_readll(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " TARGET_FMT_plx
                      "size %u\n", __func__, offset, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        *data = 0;
    }
    return MEMTX_OK;
}

static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_writel(s, offset, data, attrs);
        break;
    case 8:
        result = its_writell(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " TARGET_FMT_plx
                      "size %u\n", __func__, offset, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
    }
    return MEMTX_OK;
}

static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps gicv3_its_translation_ops = {
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    int i;

    for (i = 0; i < s->gicv3->num_cpu; i++) {
        if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
            error_setg(errp, "Physical LPI not supported by CPU %d", i);
            return;
        }
    }

    gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);

    address_space_init(&s->gicv3->dma_as, s->gicv3->dma,
                       "gicv3-its-sysmem");

    /* set the ITS default features supported */
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
                          ITS_ITT_ENTRY_SIZE - 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
}

static void gicv3_its_reset(DeviceState *dev)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);

    c->parent_reset(dev);

    /* Quiescent bit reset to 1 */
    s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);

    /*
     * setting GITS_BASER0.Type = 0b001 (Device)
     *         GITS_BASER1.Type = 0b100 (Collection Table)
     *         GITS_BASER<n>.Type, where n = 3 to 7, are 0b00 (Unimplemented)
     *         GITS_BASER<0,1>.Page_Size = 64KB
     * and default translation table entry size to 16 bytes
     */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_DEVICE);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
                             GITS_DTE_SIZE - 1);

    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_COLLECTION);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
                             GITS_CTE_SIZE - 1);
}

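/*
 * Invoked by the ITS common code after migration: if the ITS was
 * enabled in the incoming state, re-derive the cached table and
 * command queue parameters from the restored GITS_BASER<n> and
 * GITS_CBASER values.
 */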
static void gicv3_its_post_load(GICv3ITSState *s)
{
    if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
        extract_table_params(s);
        extract_cmdq_params(s);
    }
}

static Property gicv3_its_props[] = {
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};

static void gicv3_its_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
    GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);

    dc->realize = gicv3_arm_its_realize;
    device_class_set_props(dc, gicv3_its_props);
    device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
    icc->post_load = gicv3_its_post_load;
}

static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};

static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)