/*
 * ITS emulation for a GICv3-based system
 *
 * Copyright Linaro.org 2021
 *
 * Authors:
 *  Shashi Mallela <shashi.mallela@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "hw/qdev-properties.h"
#include "hw/intc/arm_gicv3_its_common.h"
#include "gicv3_internal.h"
#include "qom/object.h"
#include "qapi/error.h"

typedef struct GICv3ITSClass GICv3ITSClass;
/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
                     ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)

struct GICv3ITSClass {
    GICv3ITSCommonClass parent_class;
    /* Parent class reset handler, saved so our reset can chain to it */
    void (*parent_reset)(DeviceState *dev);
};

/*
 * This is an internal enum used to distinguish between LPI triggered
 * via command queue and LPI triggered via gits_translater write.
 */
typedef enum ItsCmdType {
    NONE = 0, /* internal indication for GITS_TRANSLATER write */
    CLEAR = 1,
    DISCARD = 2,
    INTERRUPT = 3,
} ItsCmdType;

/*
 * In-guest-memory Interrupt Translation Entry, as read/written by
 * get_ite()/update_ite(): a 64-bit low word and a 32-bit high word.
 */
typedef struct {
    uint32_t iteh; /* high word: ICID field */
    uint64_t itel; /* low word: VALID, INTTYPE, INTID, DOORBELL fields */
} IteEntry;

/*
 * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
 * if a command parameter is not correct. These include both "stall
 * processing of the command queue" and "ignore this command, and
 * keep processing the queue". In our implementation we choose that
 * memory transaction errors reading the command packet provoke a
 * stall, but errors in parameters cause us to ignore the command
 * and continue processing.
 * The process_* functions which handle individual ITS commands all
 * return an ItsCmdResult which tells process_cmdq() whether it should
 * stall or keep going.
 */
typedef enum ItsCmdResult {
    CMD_STALL = 0,
    CMD_CONTINUE = 1,
} ItsCmdResult;

/*
 * Extract the physical base address field from a GITS_BASER value,
 * using the field layout appropriate to the configured page size.
 * Returns 0 for an unrecognized page size.
 */
static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
{
    uint64_t result = 0;

    switch (page_sz) {
    case GITS_PAGE_SIZE_4K:
    case GITS_PAGE_SIZE_16K:
        result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
        break;

    case GITS_PAGE_SIZE_64K:
        /* 64K pages split the address across two fields */
        result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
        result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
        break;

    default:
        break;
    }
    return result;
}

static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
                                 uint32_t idx, MemTxResult *res)
{
    /*
     * Given a TableDesc describing one of the ITS in-guest-memory
     * tables and an index into it, return the guest address
     * corresponding to that table entry.
     * If there was a memory error reading the L1 table of an
     * indirect table, *res is set accordingly, and we return -1.
     * If the L1 table entry is marked not valid, we return -1 with
     * *res set to MEMTX_OK.
     *
     * The specification defines the format of level 1 entries of a
     * 2-level table, but the format of level 2 entries and the format
     * of flat-mapped tables is IMPDEF.
     */
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t l2idx;
    uint64_t l2;
    uint32_t num_l2_entries;

    *res = MEMTX_OK;

    if (!td->indirect) {
        /* Single level table */
        return td->base_addr + idx * td->entry_sz;
    }

    /* Two level table */
    l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);

    l2 = address_space_ldq_le(as,
                              td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
                              MEMTXATTRS_UNSPECIFIED, res);
    if (*res != MEMTX_OK) {
        return -1;
    }
    if (!(l2 & L2_TABLE_VALID_MASK)) {
        return -1;
    }

    num_l2_entries = td->page_sz / td->entry_sz;
    /* L1 entry holds the L2 page address in its low 51 bits */
    return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
}

/*
 * Read the Collection Table Entry for @icid into *cte.
 * Returns the CTE's VALID bit; also returns false if the table
 * lookup failed or the relevant L1 entry was not valid.
 * *res reports any memory transaction error.
 */
static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
                    MemTxResult *res)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, res);

    if (entry_addr == -1) {
        return false; /* not valid */
    }

    *cte = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, res);
    return FIELD_EX64(*cte, CTE, VALID);
}

/*
 * Write the Interrupt Translation Entry @ite for @eventid into the
 * ITT whose base address is given by the device table entry @dte.
 * Returns true on success, false on a memory transaction error.
 */
static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
                       IteEntry ite)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t itt_addr;
    MemTxResult res = MEMTX_OK;

    itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
    itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */

    /* Each ITT entry occupies 12 bytes: 8-byte itel then 4-byte iteh */
    address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
                         sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED,
                         &res);

    if (res == MEMTX_OK) {
        /*
         * NOTE(review): the high word is stored at offset
         * sizeof(uint32_t) (4) rather than sizeof(uint64_t) (8), which
         * overlaps the itel word just written. get_ite() reads it back
         * from the same offset, so the emulation is self-consistent,
         * but the in-memory layout looks suspect — confirm against the
         * intended IMPDEF ITT entry format before changing either side.
         */
        address_space_stl_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
                             sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh,
                             MEMTXATTRS_UNSPECIFIED, &res);
    }
    if (res != MEMTX_OK) {
        return false;
    } else {
        return true;
    }
}

/*
 * Read the Interrupt Translation Entry for @eventid from the ITT of
 * device table entry @dte. On success (entry readable, VALID, and of
 * physical interrupt type) fills in *icid and *pIntid and returns
 * true; otherwise returns false. *res reports memory errors.
 */
static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
                    uint16_t *icid, uint32_t *pIntid, MemTxResult *res)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t itt_addr;
    bool status = false;
    IteEntry ite = {};

    itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
    itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */

    ite.itel = address_space_ldq_le(as, itt_addr +
                                    (eventid * (sizeof(uint64_t) +
                                    sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED,
                                    res);

    if (*res == MEMTX_OK) {
        /* High word read from the same offset update_ite() wrote it to */
        ite.iteh = address_space_ldl_le(as, itt_addr +
                                        (eventid * (sizeof(uint64_t) +
                                        sizeof(uint32_t))) + sizeof(uint32_t),
                                        MEMTXATTRS_UNSPECIFIED, res);

        if (*res == MEMTX_OK) {
            if (FIELD_EX64(ite.itel, ITE_L, VALID)) {
                int inttype = FIELD_EX64(ite.itel, ITE_L, INTTYPE);
                if (inttype == ITE_INTTYPE_PHYSICAL) {
                    *pIntid = FIELD_EX64(ite.itel, ITE_L, INTID);
                    *icid = FIELD_EX32(ite.iteh, ITE_H, ICID);
                    status = true;
                }
            }
        }
    }
    return status;
}

/*
 * Read the Device Table Entry for @devid. Returns 0 (a DTE with the
 * Valid bit clear) when the table lookup fails; *res reports memory
 * transaction errors.
 */
static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, res);

    if (entry_addr == -1) {
        return 0; /* a DTE entry with the Valid bit clear */
    }
    return address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, res);
}

/*
 * This function handles the processing of following commands based on
 * the ItsCmdType parameter passed:-
 * 1. triggering of lpi interrupt translation via ITS INT command
 * 2. triggering of lpi interrupt translation via gits_translater register
 * 3. handling of ITS CLEAR command
 * 4.
handling of ITS DISCARD command 226 */ 227 static ItsCmdResult process_its_cmd(GICv3ITSState *s, uint64_t value, 228 uint32_t offset, ItsCmdType cmd) 229 { 230 AddressSpace *as = &s->gicv3->dma_as; 231 uint32_t devid, eventid; 232 MemTxResult res = MEMTX_OK; 233 bool dte_valid; 234 uint64_t dte = 0; 235 uint64_t num_eventids; 236 uint16_t icid = 0; 237 uint32_t pIntid = 0; 238 bool ite_valid = false; 239 uint64_t cte = 0; 240 bool cte_valid = false; 241 uint64_t rdbase; 242 243 if (cmd == NONE) { 244 devid = offset; 245 } else { 246 devid = ((value & DEVID_MASK) >> DEVID_SHIFT); 247 248 offset += NUM_BYTES_IN_DW; 249 value = address_space_ldq_le(as, s->cq.base_addr + offset, 250 MEMTXATTRS_UNSPECIFIED, &res); 251 } 252 253 if (res != MEMTX_OK) { 254 return CMD_STALL; 255 } 256 257 eventid = (value & EVENTID_MASK); 258 259 if (devid >= s->dt.num_entries) { 260 qemu_log_mask(LOG_GUEST_ERROR, 261 "%s: invalid command attributes: devid %d>=%d", 262 __func__, devid, s->dt.num_entries); 263 return CMD_CONTINUE; 264 } 265 266 dte = get_dte(s, devid, &res); 267 268 if (res != MEMTX_OK) { 269 return CMD_STALL; 270 } 271 dte_valid = FIELD_EX64(dte, DTE, VALID); 272 273 if (!dte_valid) { 274 qemu_log_mask(LOG_GUEST_ERROR, 275 "%s: invalid command attributes: " 276 "invalid dte: %"PRIx64" for %d\n", 277 __func__, dte, devid); 278 return CMD_CONTINUE; 279 } 280 281 num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1); 282 283 if (eventid >= num_eventids) { 284 qemu_log_mask(LOG_GUEST_ERROR, 285 "%s: invalid command attributes: eventid %d >= %" 286 PRId64 "\n", 287 __func__, eventid, num_eventids); 288 return CMD_CONTINUE; 289 } 290 291 ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res); 292 if (res != MEMTX_OK) { 293 return CMD_STALL; 294 } 295 296 if (!ite_valid) { 297 qemu_log_mask(LOG_GUEST_ERROR, 298 "%s: invalid command attributes: invalid ITE\n", 299 __func__); 300 return CMD_CONTINUE; 301 } 302 303 if (icid >= s->ct.num_entries) { 304 
qemu_log_mask(LOG_GUEST_ERROR, 305 "%s: invalid ICID 0x%x in ITE (table corrupted?)\n", 306 __func__, icid); 307 return CMD_CONTINUE; 308 } 309 310 cte_valid = get_cte(s, icid, &cte, &res); 311 if (res != MEMTX_OK) { 312 return CMD_STALL; 313 } 314 if (!cte_valid) { 315 qemu_log_mask(LOG_GUEST_ERROR, 316 "%s: invalid command attributes: " 317 "invalid cte: %"PRIx64"\n", 318 __func__, cte); 319 return CMD_CONTINUE; 320 } 321 322 /* 323 * Current implementation only supports rdbase == procnum 324 * Hence rdbase physical address is ignored 325 */ 326 rdbase = FIELD_EX64(cte, CTE, RDBASE); 327 328 if (rdbase >= s->gicv3->num_cpu) { 329 return CMD_CONTINUE; 330 } 331 332 if ((cmd == CLEAR) || (cmd == DISCARD)) { 333 gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0); 334 } else { 335 gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1); 336 } 337 338 if (cmd == DISCARD) { 339 IteEntry ite = {}; 340 /* remove mapping from interrupt translation table */ 341 return update_ite(s, eventid, dte, ite) ? 
CMD_CONTINUE : CMD_STALL; 342 } 343 return CMD_CONTINUE; 344 } 345 346 static ItsCmdResult process_mapti(GICv3ITSState *s, uint64_t value, 347 uint32_t offset, bool ignore_pInt) 348 { 349 AddressSpace *as = &s->gicv3->dma_as; 350 uint32_t devid, eventid; 351 uint32_t pIntid = 0; 352 uint64_t num_eventids; 353 uint32_t num_intids; 354 bool dte_valid; 355 MemTxResult res = MEMTX_OK; 356 uint16_t icid = 0; 357 uint64_t dte = 0; 358 IteEntry ite = {}; 359 360 devid = ((value & DEVID_MASK) >> DEVID_SHIFT); 361 offset += NUM_BYTES_IN_DW; 362 value = address_space_ldq_le(as, s->cq.base_addr + offset, 363 MEMTXATTRS_UNSPECIFIED, &res); 364 365 if (res != MEMTX_OK) { 366 return CMD_STALL; 367 } 368 369 eventid = (value & EVENTID_MASK); 370 371 if (ignore_pInt) { 372 pIntid = eventid; 373 } else { 374 pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT); 375 } 376 377 offset += NUM_BYTES_IN_DW; 378 value = address_space_ldq_le(as, s->cq.base_addr + offset, 379 MEMTXATTRS_UNSPECIFIED, &res); 380 381 if (res != MEMTX_OK) { 382 return CMD_STALL; 383 } 384 385 icid = value & ICID_MASK; 386 387 if (devid >= s->dt.num_entries) { 388 qemu_log_mask(LOG_GUEST_ERROR, 389 "%s: invalid command attributes: devid %d>=%d", 390 __func__, devid, s->dt.num_entries); 391 return CMD_CONTINUE; 392 } 393 394 dte = get_dte(s, devid, &res); 395 396 if (res != MEMTX_OK) { 397 return CMD_STALL; 398 } 399 dte_valid = FIELD_EX64(dte, DTE, VALID); 400 num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1); 401 num_intids = 1ULL << (GICD_TYPER_IDBITS + 1); 402 403 if ((icid >= s->ct.num_entries) 404 || !dte_valid || (eventid >= num_eventids) || 405 (((pIntid < GICV3_LPI_INTID_START) || (pIntid >= num_intids)) && 406 (pIntid != INTID_SPURIOUS))) { 407 qemu_log_mask(LOG_GUEST_ERROR, 408 "%s: invalid command attributes " 409 "icid %d or eventid %d or pIntid %d or" 410 "unmapped dte %d\n", __func__, icid, eventid, 411 pIntid, dte_valid); 412 /* 413 * in this implementation, in case of error 414 * we ignore this 
 command and move onto the next
         * command in the queue
         */
        return CMD_CONTINUE;
    }

    /* add ite entry to interrupt translation table */
    ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, dte_valid);
    ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
    ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, pIntid);
    ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS);
    ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, icid);

    return update_ite(s, eventid, dte, ite) ? CMD_CONTINUE : CMD_STALL;
}

/*
 * Write (or, when @valid is false, invalidate) the Collection Table
 * Entry for @icid. Returns false only on a memory transaction error
 * (caller stalls); an invalid table or absent L2 page is treated as
 * success so the command queue keeps advancing.
 */
static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
                       uint64_t rdbase)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr;
    uint64_t cte = 0;
    MemTxResult res = MEMTX_OK;

    if (!s->ct.valid) {
        return true;
    }

    if (valid) {
        /* add mapping entry to collection table */
        cte = FIELD_DP64(cte, CTE, VALID, 1);
        cte = FIELD_DP64(cte, CTE, RDBASE, rdbase);
    }

    entry_addr = table_entry_addr(s, &s->ct, icid, &res);
    if (res != MEMTX_OK) {
        /* memory access error: stall */
        return false;
    }
    if (entry_addr == -1) {
        /* No L2 table for this index: discard write and continue */
        return true;
    }

    address_space_stq_le(as, entry_addr, cte, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}

/*
 * Handle the MAPC command: map a collection id to a redistributor
 * (rdbase, interpreted as a processor number in this implementation).
 */
static ItsCmdResult process_mapc(GICv3ITSState *s, uint32_t offset)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint16_t icid;
    uint64_t rdbase;
    bool valid;
    MemTxResult res = MEMTX_OK;
    uint64_t value;

    /* All MAPC fields of interest live in the third doubleword */
    offset += NUM_BYTES_IN_DW;
    offset += NUM_BYTES_IN_DW;

    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return CMD_STALL;
    }

    icid = value & ICID_MASK;

    rdbase = (value & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
    rdbase &= RDBASE_PROCNUM_MASK;

    valid = (value & CMD_FIELD_VALID_MASK);

    if ((icid >= s->ct.num_entries) || (rdbase >= s->gicv3->num_cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPC: invalid collection table attributes "
                      "icid %d rdbase %" PRIu64 "\n", icid, rdbase);
        /*
         * in this implementation, in case of error
         * we ignore this command and move onto the next
         * command in the queue
         */
        return CMD_CONTINUE;
    }

    return update_cte(s, icid, valid, rdbase) ? CMD_CONTINUE : CMD_STALL;
}

/*
 * Write (or, when @valid is false, invalidate) the Device Table Entry
 * for @devid. Same error policy as update_cte(): false only on a
 * memory transaction error.
 */
static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
                       uint8_t size, uint64_t itt_addr)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr;
    uint64_t dte = 0;
    MemTxResult res = MEMTX_OK;

    if (s->dt.valid) {
        if (valid) {
            /* add mapping entry to device table */
            dte = FIELD_DP64(dte, DTE, VALID, 1);
            dte = FIELD_DP64(dte, DTE, SIZE, size);
            dte = FIELD_DP64(dte, DTE, ITTADDR, itt_addr);
        }
    } else {
        return true;
    }

    entry_addr = table_entry_addr(s, &s->dt, devid, &res);
    if (res != MEMTX_OK) {
        /* memory access error: stall */
        return false;
    }
    if (entry_addr == -1) {
        /* No L2 table for this index: discard write and continue */
        return true;
    }
    address_space_stq_le(as, entry_addr, dte, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}

/*
 * Handle the MAPD command: map a device id to an ITT address and
 * eventid size, or unmap it when the valid bit is clear.
 */
static ItsCmdResult process_mapd(GICv3ITSState *s, uint64_t value,
                                 uint32_t offset)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t devid;
    uint8_t size;
    uint64_t itt_addr;
    bool valid;
    MemTxResult res = MEMTX_OK;

    devid = ((value & DEVID_MASK) >> DEVID_SHIFT);

    offset += NUM_BYTES_IN_DW;
    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return CMD_STALL;
    }

    size = (value & SIZE_MASK);

    offset += NUM_BYTES_IN_DW;
    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return CMD_STALL;
    }

    itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT;

    valid = (value & CMD_FIELD_VALID_MASK);

    if ((devid >= s->dt.num_entries) ||
        (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPD: invalid device table attributes "
                      "devid %d or size %d\n", devid, size);
        /*
         * in this implementation, in case of error
         * we ignore this command and move onto the next
         * command in the queue
         */
        return CMD_CONTINUE;
    }

    return update_dte(s, devid, valid, size, itt_addr) ? CMD_CONTINUE : CMD_STALL;
}

/*
 * Handle the MOVALL command: retarget all pending LPIs from one
 * redistributor to another.
 */
static ItsCmdResult process_movall(GICv3ITSState *s, uint64_t value,
                                   uint32_t offset)
{
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    uint64_t rd1, rd2;

    /* No fields in dwords 0 or 1 */
    offset += NUM_BYTES_IN_DW;
    offset += NUM_BYTES_IN_DW;
    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        return CMD_STALL;
    }

    rd1 = FIELD_EX64(value, MOVALL_2, RDBASE1);
    if (rd1 >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: RDBASE1 %" PRId64
                      " out of range (must be less than %d)\n",
                      __func__, rd1, s->gicv3->num_cpu);
        return CMD_CONTINUE;
    }

    offset += NUM_BYTES_IN_DW;
    value = address_space_ldq_le(as, s->cq.base_addr + offset,
                                 MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        return CMD_STALL;
    }

    rd2 = FIELD_EX64(value, MOVALL_3, RDBASE2);
    if (rd2 >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: RDBASE2 %" PRId64
                      " out of range (must be less than %d)\n",
                      __func__, rd2, s->gicv3->num_cpu);
        return CMD_CONTINUE;
    }

    if (rd1 == rd2) {
        /* Move to same target must succeed as a no-op */
        return
CMD_CONTINUE; 629 } 630 631 /* Move all pending LPIs from redistributor 1 to redistributor 2 */ 632 gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]); 633 634 return CMD_CONTINUE; 635 } 636 637 static ItsCmdResult process_movi(GICv3ITSState *s, uint64_t value, 638 uint32_t offset) 639 { 640 AddressSpace *as = &s->gicv3->dma_as; 641 MemTxResult res = MEMTX_OK; 642 uint32_t devid, eventid, intid; 643 uint16_t old_icid, new_icid; 644 uint64_t old_cte, new_cte; 645 uint64_t old_rdbase, new_rdbase; 646 uint64_t dte; 647 bool dte_valid, ite_valid, cte_valid; 648 uint64_t num_eventids; 649 IteEntry ite = {}; 650 651 devid = FIELD_EX64(value, MOVI_0, DEVICEID); 652 653 offset += NUM_BYTES_IN_DW; 654 value = address_space_ldq_le(as, s->cq.base_addr + offset, 655 MEMTXATTRS_UNSPECIFIED, &res); 656 if (res != MEMTX_OK) { 657 return CMD_STALL; 658 } 659 eventid = FIELD_EX64(value, MOVI_1, EVENTID); 660 661 offset += NUM_BYTES_IN_DW; 662 value = address_space_ldq_le(as, s->cq.base_addr + offset, 663 MEMTXATTRS_UNSPECIFIED, &res); 664 if (res != MEMTX_OK) { 665 return CMD_STALL; 666 } 667 new_icid = FIELD_EX64(value, MOVI_2, ICID); 668 669 if (devid >= s->dt.num_entries) { 670 qemu_log_mask(LOG_GUEST_ERROR, 671 "%s: invalid command attributes: devid %d>=%d", 672 __func__, devid, s->dt.num_entries); 673 return CMD_CONTINUE; 674 } 675 dte = get_dte(s, devid, &res); 676 if (res != MEMTX_OK) { 677 return CMD_STALL; 678 } 679 680 dte_valid = FIELD_EX64(dte, DTE, VALID); 681 if (!dte_valid) { 682 qemu_log_mask(LOG_GUEST_ERROR, 683 "%s: invalid command attributes: " 684 "invalid dte: %"PRIx64" for %d\n", 685 __func__, dte, devid); 686 return CMD_CONTINUE; 687 } 688 689 num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1); 690 if (eventid >= num_eventids) { 691 qemu_log_mask(LOG_GUEST_ERROR, 692 "%s: invalid command attributes: eventid %d >= %" 693 PRId64 "\n", 694 __func__, eventid, num_eventids); 695 return CMD_CONTINUE; 696 } 697 698 ite_valid = get_ite(s, 
eventid, dte, &old_icid, &intid, &res); 699 if (res != MEMTX_OK) { 700 return CMD_STALL; 701 } 702 703 if (!ite_valid) { 704 qemu_log_mask(LOG_GUEST_ERROR, 705 "%s: invalid command attributes: invalid ITE\n", 706 __func__); 707 return CMD_CONTINUE; 708 } 709 710 if (old_icid >= s->ct.num_entries) { 711 qemu_log_mask(LOG_GUEST_ERROR, 712 "%s: invalid ICID 0x%x in ITE (table corrupted?)\n", 713 __func__, old_icid); 714 return CMD_CONTINUE; 715 } 716 717 if (new_icid >= s->ct.num_entries) { 718 qemu_log_mask(LOG_GUEST_ERROR, 719 "%s: invalid command attributes: ICID 0x%x\n", 720 __func__, new_icid); 721 return CMD_CONTINUE; 722 } 723 724 cte_valid = get_cte(s, old_icid, &old_cte, &res); 725 if (res != MEMTX_OK) { 726 return CMD_STALL; 727 } 728 if (!cte_valid) { 729 qemu_log_mask(LOG_GUEST_ERROR, 730 "%s: invalid command attributes: " 731 "invalid cte: %"PRIx64"\n", 732 __func__, old_cte); 733 return CMD_CONTINUE; 734 } 735 736 cte_valid = get_cte(s, new_icid, &new_cte, &res); 737 if (res != MEMTX_OK) { 738 return CMD_STALL; 739 } 740 if (!cte_valid) { 741 qemu_log_mask(LOG_GUEST_ERROR, 742 "%s: invalid command attributes: " 743 "invalid cte: %"PRIx64"\n", 744 __func__, new_cte); 745 return CMD_CONTINUE; 746 } 747 748 old_rdbase = FIELD_EX64(old_cte, CTE, RDBASE); 749 if (old_rdbase >= s->gicv3->num_cpu) { 750 qemu_log_mask(LOG_GUEST_ERROR, 751 "%s: CTE has invalid rdbase 0x%"PRIx64"\n", 752 __func__, old_rdbase); 753 return CMD_CONTINUE; 754 } 755 756 new_rdbase = FIELD_EX64(new_cte, CTE, RDBASE); 757 if (new_rdbase >= s->gicv3->num_cpu) { 758 qemu_log_mask(LOG_GUEST_ERROR, 759 "%s: CTE has invalid rdbase 0x%"PRIx64"\n", 760 __func__, new_rdbase); 761 return CMD_CONTINUE; 762 } 763 764 if (old_rdbase != new_rdbase) { 765 /* Move the LPI from the old redistributor to the new one */ 766 gicv3_redist_mov_lpi(&s->gicv3->cpu[old_rdbase], 767 &s->gicv3->cpu[new_rdbase], 768 intid); 769 } 770 771 /* Update the ICID field in the interrupt translation table entry */ 772 
ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, 1); 773 ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL); 774 ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, intid); 775 ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS); 776 ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, new_icid); 777 return update_ite(s, eventid, dte, ite) ? CMD_CONTINUE : CMD_STALL; 778 } 779 780 /* 781 * Current implementation blocks until all 782 * commands are processed 783 */ 784 static void process_cmdq(GICv3ITSState *s) 785 { 786 uint32_t wr_offset = 0; 787 uint32_t rd_offset = 0; 788 uint32_t cq_offset = 0; 789 uint64_t data; 790 AddressSpace *as = &s->gicv3->dma_as; 791 MemTxResult res = MEMTX_OK; 792 uint8_t cmd; 793 int i; 794 795 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) { 796 return; 797 } 798 799 wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET); 800 801 if (wr_offset >= s->cq.num_entries) { 802 qemu_log_mask(LOG_GUEST_ERROR, 803 "%s: invalid write offset " 804 "%d\n", __func__, wr_offset); 805 return; 806 } 807 808 rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET); 809 810 if (rd_offset >= s->cq.num_entries) { 811 qemu_log_mask(LOG_GUEST_ERROR, 812 "%s: invalid read offset " 813 "%d\n", __func__, rd_offset); 814 return; 815 } 816 817 while (wr_offset != rd_offset) { 818 ItsCmdResult result = CMD_CONTINUE; 819 820 cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE); 821 data = address_space_ldq_le(as, s->cq.base_addr + cq_offset, 822 MEMTXATTRS_UNSPECIFIED, &res); 823 if (res != MEMTX_OK) { 824 s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1); 825 qemu_log_mask(LOG_GUEST_ERROR, 826 "%s: could not read command at 0x%" PRIx64 "\n", 827 __func__, s->cq.base_addr + cq_offset); 828 break; 829 } 830 831 cmd = (data & CMD_MASK); 832 833 trace_gicv3_its_process_command(rd_offset, cmd); 834 835 switch (cmd) { 836 case GITS_CMD_INT: 837 result = process_its_cmd(s, data, cq_offset, INTERRUPT); 838 break; 839 case GITS_CMD_CLEAR: 840 result = 
process_its_cmd(s, data, cq_offset, CLEAR);
            break;
        case GITS_CMD_SYNC:
            /*
             * Current implementation makes a blocking synchronous call
             * for every command issued earlier, hence the internal state
             * is already consistent by the time SYNC command is executed.
             * Hence no further processing is required for SYNC command.
             */
            break;
        case GITS_CMD_MAPD:
            result = process_mapd(s, data, cq_offset);
            break;
        case GITS_CMD_MAPC:
            result = process_mapc(s, cq_offset);
            break;
        case GITS_CMD_MAPTI:
            result = process_mapti(s, data, cq_offset, false);
            break;
        case GITS_CMD_MAPI:
            result = process_mapti(s, data, cq_offset, true);
            break;
        case GITS_CMD_DISCARD:
            result = process_its_cmd(s, data, cq_offset, DISCARD);
            break;
        case GITS_CMD_INV:
        case GITS_CMD_INVALL:
            /*
             * Current implementation doesn't cache any ITS tables,
             * but the calculated lpi priority information. We only
             * need to trigger lpi priority re-calculation to be in
             * sync with LPI config table or pending table changes.
             */
            for (i = 0; i < s->gicv3->num_cpu; i++) {
                gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
            }
            break;
        case GITS_CMD_MOVI:
            result = process_movi(s, data, cq_offset);
            break;
        case GITS_CMD_MOVALL:
            result = process_movall(s, data, cq_offset);
            break;
        default:
            /* Unknown command: silently skipped */
            break;
        }
        if (result == CMD_CONTINUE) {
            /* Advance CREADR (with wraparound) past the consumed command */
            rd_offset++;
            rd_offset %= s->cq.num_entries;
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
        } else {
            /* CMD_STALL */
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: 0x%x cmd processing failed, stalling\n",
                          __func__, cmd);
            break;
        }
    }
}

/*
 * This function extracts the ITS Device and Collection table specific
 * parameters (like base_addr, size etc) from GITS_BASER register.
 * It is called during ITS enable and also during post_load migration
 */
static void extract_table_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint8_t page_sz_type;
    uint8_t type;
    uint32_t page_sz = 0;
    uint64_t value;

    for (int i = 0; i < 8; i++) {
        TableDesc *td;
        int idbits;

        value = s->baser[i];

        if (!value) {
            /* Unimplemented GITS_BASERn: nothing to extract */
            continue;
        }

        page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);

        switch (page_sz_type) {
        case 0:
            page_sz = GITS_PAGE_SIZE_4K;
            break;

        case 1:
            page_sz = GITS_PAGE_SIZE_16K;
            break;

        case 2:
        case 3:
            /* Page size encodings 2 and 3 both mean 64K */
            page_sz = GITS_PAGE_SIZE_64K;
            break;

        default:
            g_assert_not_reached();
        }

        /* SIZE field holds (number of pages - 1) */
        num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;

        type = FIELD_EX64(value, GITS_BASER, TYPE);

        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
            td = &s->dt;
            idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
            break;
        case GITS_BASER_TYPE_COLLECTION:
            td = &s->ct;
            if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
                idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
            } else {
                /* 16-bit CollectionId supported when CIL == 0 */
                idbits = 16;
            }
            break;
        default:
            /*
             * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
             * ensures we will only see type values corresponding to
             * the values set up in gicv3_its_reset().
             */
            g_assert_not_reached();
        }

        memset(td, 0, sizeof(*td));
        td->valid = FIELD_EX64(value, GITS_BASER, VALID);
        /*
         * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
         * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
         * do not have a special case where the GITS_BASER<n>.Valid bit is 0
         * for the register corresponding to the Collection table but we
         * still have to process interrupts using non-memory-backed
         * Collection table entries.)
         */
        if (!td->valid) {
            continue;
        }
        td->page_sz = page_sz;
        td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
        td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
        td->base_addr = baser_base_addr(value, page_sz);
        if (!td->indirect) {
            td->num_entries = (num_pages * page_sz) / td->entry_sz;
        } else {
            /* Two-level: L1 entries each cover one L2 page of entries */
            td->num_entries = (((num_pages * page_sz) /
                              L1TABLE_ENTRY_SIZE) *
                              (page_sz / td->entry_sz));
        }
        /* Cap at what the ID width can actually address */
        td->num_entries = MIN(td->num_entries, 1ULL << idbits);
    }
}

/*
 * Extract the command queue parameters (base address, size, validity)
 * from GITS_CBASER. Called when the ITS is enabled.
 */
static void extract_cmdq_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint64_t value = s->cbaser;

    /* SIZE field holds (number of 4K pages - 1) */
    num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;

    memset(&s->cq, 0 , sizeof(s->cq));
    s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);

    if (s->cq.valid) {
        s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
                             GITS_CMDQ_ENTRY_SIZE;
        s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
        s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
    }
}

static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
                                              uint64_t *data, unsigned size,
                                              MemTxAttrs attrs)
{
    /*
     * GITS_TRANSLATER is write-only, and all other addresses
     * in the interrupt translation space frame are RES0.
 */
    *data = 0;
    return MEMTX_OK;
}

/*
 * Handle a write to the interrupt translation space frame. A write to
 * GITS_TRANSLATER while the ITS is enabled triggers an LPI translation
 * (cmd type NONE); writes elsewhere in the frame are ignored.
 */
static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
                                               uint64_t data, unsigned size,
                                               MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result = true;
    uint32_t devid = 0;

    trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);

    switch (offset) {
    case GITS_TRANSLATER:
        if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
            /* The requester id on the bus transaction is the device id */
            devid = attrs.requester_id;
            /*
             * ItsCmdResult narrowed to bool: CMD_STALL (0, i.e. memory
             * error during translation) becomes false -> MEMTX_ERROR;
             * parameter errors (CMD_CONTINUE) report MEMTX_OK.
             */
            result = process_its_cmd(s, data, devid, NONE);
        }
        break;
    default:
        break;
    }

    if (result) {
        return MEMTX_OK;
    } else {
        return MEMTX_ERROR;
    }
}

/*
 * Handle a 32-bit register write to the ITS control frame.
 * Returns false if the offset does not match any known register.
 */
static bool its_writel(GICv3ITSState *s, hwaddr offset,
                       uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        if (value & R_GITS_CTLR_ENABLED_MASK) {
            /* Enabling the ITS: latch table/queue params, then drain queue */
            s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
            extract_table_params(s);
            extract_cmdq_params(s);
            process_cmdq(s);
        } else {
            s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 0, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CBASER + 4:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 32, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = deposit64(s->cwriter, 0, 32,
                               (value & ~R_GITS_CWRITER_RETRY_MASK));
        /* New commands queued: process them now */
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CWRITER + 4:
        s->cwriter = deposit64(s->cwriter, 32, 32, value);
        break;
    case GITS_CREADR:
        /* CREADR is writable only when GICD_CTLR.DS is set */
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 0, 32,
                                  (value & ~R_GITS_CREADR_STALLED_MASK));
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_CREADR + 4:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 32, 32, value);
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;

            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            if (offset & 7) {
                /* Upper half: merge into bits [63:32], preserving RO bits */
                value <<= 32;
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
                s->baser[index] |= value;
            } else {
                /* Lower half: merge into bits [31:0], preserving RO bits */
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
                s->baser[index] |= value;
            }
        }
        break;
    case GITS_IIDR:
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}

/*
 * Handle a 32-bit register read from the ITS control frame.
 * Returns false if the offset does not match any known register.
 */
static bool its_readl(GICv3ITSState *s, hwaddr offset,
                      uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        *data = s->ctlr;
        break;
    case GITS_IIDR:
        *data = gicv3_iidr();
        break;
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* ID registers */
        *data = gicv3_idreg(offset - GITS_IDREGS);
        break;
    case GITS_TYPER:
        *data = extract64(s->typer, 0, 32);
        break;
    case GITS_TYPER + 4:
        *data = extract64(s->typer, 32, 32);
        break;
    case GITS_CBASER:
        *data = extract64(s->cbaser, 0, 32);
        break;
    case GITS_CBASER + 4:
        *data = extract64(s->cbaser, 32, 32);
        break;
    case GITS_CREADR:
        *data = extract64(s->creadr, 0, 32);
        break;
    case GITS_CREADR + 4:
        *data = extract64(s->creadr, 32, 32);
        break;
    case GITS_CWRITER:
        *data = extract64(s->cwriter, 0, 32);
        break;
    case GITS_CWRITER + 4:
        *data = extract64(s->cwriter, 32, 32);
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        if (offset & 7) {
            *data = extract64(s->baser[index], 32, 32);
        } else {
            *data = extract64(s->baser[index], 0, 32);
        }
        break;
    default:
        result = false;
        break;
    }
    return result;
}

/*
 * Handle a 64-bit register write to the ITS control frame.
 * Returns false if the offset does not match any known register.
 */
static bool its_writell(GICv3ITSState *s, hwaddr offset,
                        uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_BASER ...
GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;
            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            /* merge in the writable bits, keeping the RO fields intact */
            s->baser[index] &= GITS_BASER_RO_MASK;
            s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         * already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = value;
            /* a CBASER write also resets the command read pointer */
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        /* Retry bit is write-ignored; moving the write pointer kicks the queue */
        s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CREADR:
        /* GITS_CREADR is writable only when GICD_CTLR.DS is set */
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_TYPER:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}

/*
 * Handle a 64-bit read of an ITS control-frame register.
 * Returns false if the offset does not support 64-bit reads, so the
 * caller can log the access and complete it read-as-zero.
 */
static bool its_readll(GICv3ITSState *s, hwaddr offset,
                       uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_TYPER:
        *data = s->typer;
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        *data = s->baser[(offset - GITS_BASER) / 8];
        break;
    case GITS_CBASER:
        *data = s->cbaser;
        break;
    case GITS_CREADR:
        *data = s->creadr;
        break;
    case GITS_CWRITER:
        *data = s->cwriter;
        break;
    default:
        result = false;
        break;
    }
    return result;
}
 */
        *data = 0;
    } else {
        trace_gicv3_its_read(offset, *data, size);
    }
    return MEMTX_OK;
}

/*
 * Dispatch a guest write to the ITS control frame by access size.
 * Reserved registers are WI: a false return from the leaf function
 * triggers guest-error logging but the write still completes MEMTX_OK.
 */
static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_writel(s, offset, data, attrs);
        break;
    case 8:
        result = its_writell(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " TARGET_FMT_plx
                      "size %u\n", __func__, offset, size);
        trace_gicv3_its_badwrite(offset, data, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
    } else {
        trace_gicv3_its_write(offset, data, size);
    }
    return MEMTX_OK;
}

/* MMIO ops for the ITS control frame (CTLR, BASERn, command queue regs) */
static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* MMIO ops for the translation frame (the GITS_TRANSLATER doorbell) */
static const MemoryRegionOps gicv3_its_translation_ops = {
    .read_with_attrs = gicv3_its_translation_read,
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/*
 * Realize the ITS device: fail unless every redistributor advertises
 * physical LPI support (GICR_TYPER.PLPIS), then map the two register
 * frames and fill in the fixed GITS_TYPER feature fields.
 */
static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    int i;

    for (i = 0; i < s->gicv3->num_cpu; i++) {
        if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
            error_setg(errp, "Physical LPI not supported by CPU %d", i);
            return;
        }
    }

    gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);

    /* set the ITS default features supported */
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
    /* TYPER size fields are encoded as (size - 1) */
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
                          ITS_ITT_ENTRY_SIZE - 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
}

/* Reset: chain to the parent class reset, then apply our register resets */
static void gicv3_its_reset(DeviceState *dev)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);

    c->parent_reset(dev);

    /* Quiescent bit reset to 1 */
    s->ctlr = FIELD_DP32(s->ctlr,
GITS_CTLR, QUIESCENT, 1);

    /*
     * setting GITS_BASER0.Type = 0b001 (Device)
     *         GITS_BASER1.Type = 0b100 (Collection Table)
     *         GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented)
     *         GITS_BASER<0,1>.Page_Size = 64KB
     * and default translation table entry size to 16 bytes
     * (BASER ENTRYSIZE fields are encoded as size - 1)
     */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_DEVICE);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
                             GITS_DTE_SIZE - 1);

    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_COLLECTION);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
                             GITS_CTE_SIZE - 1);
}

/*
 * Post-load hook: if the ITS was migrated in the enabled state,
 * re-derive the cached table and command-queue parameters from the
 * just-restored guest-visible registers.
 */
static void gicv3_its_post_load(GICv3ITSState *s)
{
    if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
        extract_table_params(s);
        extract_cmdq_params(s);
    }
}

static Property gicv3_its_props[] = {
    /* Link to the GICv3 this ITS delivers LPIs through; set by the board */
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};

static void gicv3_its_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
    GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);

    dc->realize = gicv3_arm_its_realize;
    device_class_set_props(dc, gicv3_its_props);
    /* save the parent reset so gicv3_its_reset() can chain to it */
    device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
    icc->post_load = gicv3_its_post_load;
}

static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};

/* Register the emulated ITS device type with QOM at startup */
static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)