1 /* 2 * QEMU PowerPC XIVE interrupt controller model 3 * 4 * Copyright (c) 2017-2019, IBM Corporation. 5 * 6 * This code is licensed under the GPL version 2 or later. See the 7 * COPYING file in the top-level directory. 8 */ 9 10 #include "qemu/osdep.h" 11 #include "qemu/log.h" 12 #include "qemu/module.h" 13 #include "qapi/error.h" 14 #include "target/ppc/cpu.h" 15 #include "sysemu/cpus.h" 16 #include "sysemu/dma.h" 17 #include "monitor/monitor.h" 18 #include "hw/ppc/fdt.h" 19 #include "hw/ppc/pnv.h" 20 #include "hw/ppc/pnv_core.h" 21 #include "hw/ppc/pnv_xscom.h" 22 #include "hw/ppc/pnv_xive.h" 23 #include "hw/ppc/xive_regs.h" 24 #include "hw/ppc/ppc.h" 25 26 #include <libfdt.h> 27 28 #include "pnv_xive_regs.h" 29 30 #define XIVE_DEBUG 31 32 /* 33 * Virtual structures table (VST) 34 */ 35 #define SBE_PER_BYTE 4 36 37 typedef struct XiveVstInfo { 38 const char *name; 39 uint32_t size; 40 uint32_t max_blocks; 41 } XiveVstInfo; 42 43 static const XiveVstInfo vst_infos[] = { 44 [VST_TSEL_IVT] = { "EAT", sizeof(XiveEAS), 16 }, 45 [VST_TSEL_SBE] = { "SBE", 1, 16 }, 46 [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 }, 47 [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 }, 48 49 /* 50 * Interrupt fifo backing store table (not modeled) : 51 * 52 * 0 - IPI, 53 * 1 - HWD, 54 * 2 - First escalate, 55 * 3 - Second escalate, 56 * 4 - Redistribution, 57 * 5 - IPI cascaded queue ? 58 */ 59 [VST_TSEL_IRQ] = { "IRQ", 1, 6 }, 60 }; 61 62 #define xive_error(xive, fmt, ...) \ 63 qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n", \ 64 (xive)->chip->chip_id, ## __VA_ARGS__); 65 66 /* 67 * QEMU version of the GETFIELD/SETFIELD macros 68 * 69 * TODO: It might be better to use the existing extract64() and 70 * deposit64() but this means that all the register definitions will 71 * change and become incompatible with the ones found in skiboot. 72 * 73 * Keep it as it is for now until we find a common ground. 74 */ 75 static inline uint64_t GETFIELD(uint64_t mask, uint64_t word) 76 { 77 return (word & mask) >> ctz64(mask); 78 } 79 80 static inline uint64_t SETFIELD(uint64_t mask, uint64_t word, 81 uint64_t value) 82 { 83 return (word & ~mask) | ((value << ctz64(mask)) & mask); 84 } 85 86 /* 87 * Remote access to controllers. HW uses MMIOs. For now, a simple scan 88 * of the chips is good enough. 89 * 90 * TODO: Block scope support 91 */ 92 static PnvXive *pnv_xive_get_ic(uint8_t blk) 93 { 94 PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine()); 95 int i; 96 97 for (i = 0; i < pnv->num_chips; i++) { 98 Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]); 99 PnvXive *xive = &chip9->xive; 100 101 if (xive->chip->chip_id == blk) { 102 return xive; 103 } 104 } 105 return NULL; 106 } 107 108 /* 109 * VST accessors for SBE, EAT, ENDT, NVT 110 * 111 * Indirect VST tables are arrays of VSDs pointing to a page (of same 112 * size). Each page is a direct VST table. 113 */ 114 115 #define XIVE_VSD_SIZE 8 116 117 /* Indirect page size can be 4K, 64K, 2M, 16M. */ 118 static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift) 119 { 120 return page_shift == 12 || page_shift == 16 || 121 page_shift == 21 || page_shift == 24; 122 } 123 124 static uint64_t pnv_xive_vst_size(uint64_t vsd) 125 { 126 uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12); 127 128 /* 129 * Read the first descriptor to get the page size of the indirect 130 * table. 131 */ 132 if (VSD_INDIRECT & vsd) { 133 uint32_t nr_pages = vst_tsize / XIVE_VSD_SIZE; 134 uint32_t page_shift; 135 136 vsd = ldq_be_dma(&address_space_memory, vsd & VSD_ADDRESS_MASK); 137 page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; 138 139 if (!pnv_xive_vst_page_size_allowed(page_shift)) { 140 return 0; 141 } 142 143 return nr_pages * (1ull << page_shift); 144 } 145 146 return vst_tsize; 147 } 148 149 static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type, 150 uint64_t vsd, uint32_t idx) 151 { 152 const XiveVstInfo *info = &vst_infos[type]; 153 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; 154 155 return vst_addr + idx * info->size; 156 } 157 158 static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type, 159 uint64_t vsd, uint32_t idx) 160 { 161 const XiveVstInfo *info = &vst_infos[type]; 162 uint64_t vsd_addr; 163 uint32_t vsd_idx; 164 uint32_t page_shift; 165 uint32_t vst_per_page; 166 167 /* Get the page size of the indirect table. */ 168 vsd_addr = vsd & VSD_ADDRESS_MASK; 169 vsd = ldq_be_dma(&address_space_memory, vsd_addr); 170 171 if (!(vsd & VSD_ADDRESS_MASK)) { 172 xive_error(xive, "VST: invalid %s entry %x !?", info->name, 0); 173 return 0; 174 } 175 176 page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; 177 178 if (!pnv_xive_vst_page_size_allowed(page_shift)) { 179 xive_error(xive, "VST: invalid %s page shift %d", info->name, 180 page_shift); 181 return 0; 182 } 183 184 vst_per_page = (1ull << page_shift) / info->size; 185 vsd_idx = idx / vst_per_page; 186 187 /* Load the VSD we are looking for, if not already done */ 188 if (vsd_idx) { 189 vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE; 190 vsd = ldq_be_dma(&address_space_memory, vsd_addr); 191 192 if (!(vsd & VSD_ADDRESS_MASK)) { 193 xive_error(xive, "VST: invalid %s entry %x !?", info->name, 0); 194 return 0; 195 } 196 197 /* 198 * Check that the pages have a consistent size across the 199 * indirect table 200 */ 201 if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) { 202 xive_error(xive, "VST: %s entry %x indirect page size differ !?", 203 info->name, idx); 204 return 0; 205 } 206 } 207 208 return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page)); 209 } 210 211 static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk, 212 uint32_t idx) 213 { 214 const XiveVstInfo *info = &vst_infos[type]; 215 uint64_t vsd; 216 uint32_t idx_max; 217 218 if (blk >= info->max_blocks) { 219 xive_error(xive, "VST: invalid block id %d for VST %s %d !?", 220 blk, info->name, idx); 221 return 0; 222 } 223 224 vsd = xive->vsds[type][blk]; 225 226 /* Remote VST access */ 227 if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) { 228 xive = pnv_xive_get_ic(blk); 229 230 return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0; 231 } 232 233 idx_max = pnv_xive_vst_size(vsd) / info->size - 1; 234 if (idx > idx_max) { 235 #ifdef XIVE_DEBUG 236 xive_error(xive, "VST: %s entry %x/%x out of range [ 0 .. %x ] !?", 237 info->name, blk, idx, idx_max); 238 #endif 239 return 0; 240 } 241 242 if (VSD_INDIRECT & vsd) { 243 return pnv_xive_vst_addr_indirect(xive, type, vsd, idx); 244 } 245 246 return pnv_xive_vst_addr_direct(xive, type, vsd, idx); 247 } 248 249 static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk, 250 uint32_t idx, void *data) 251 { 252 const XiveVstInfo *info = &vst_infos[type]; 253 uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx); 254 255 if (!addr) { 256 return -1; 257 } 258 259 cpu_physical_memory_read(addr, data, info->size); 260 return 0; 261 } 262 263 #define XIVE_VST_WORD_ALL -1 264 265 static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk, 266 uint32_t idx, void *data, uint32_t word_number) 267 { 268 const XiveVstInfo *info = &vst_infos[type]; 269 uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx); 270 271 if (!addr) { 272 return -1; 273 } 274 275 if (word_number == XIVE_VST_WORD_ALL) { 276 cpu_physical_memory_write(addr, data, info->size); 277 } else { 278 cpu_physical_memory_write(addr + word_number * 4, 279 data + word_number * 4, 4); 280 } 281 return 0; 282 } 283 284 static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 285 XiveEND *end) 286 { 287 return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end); 288 } 289 290 static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 291 XiveEND *end, uint8_t word_number) 292 { 293 return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end, 294 word_number); 295 } 296 297 static int pnv_xive_end_update(PnvXive *xive, uint8_t blk, uint32_t idx) 298 { 299 int i; 300 uint64_t eqc_watch[4]; 301 302 for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) { 303 eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]); 304 } 305 306 return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch, 307 XIVE_VST_WORD_ALL); 308 } 309 310 static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 311 XiveNVT *nvt) 312 { 313 return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt); 314 } 315 316 static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 317 XiveNVT *nvt, uint8_t word_number) 318 { 319 return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt, 320 word_number); 321 } 322 323 static int pnv_xive_nvt_update(PnvXive *xive, uint8_t blk, uint32_t idx) 324 { 325 int i; 326 uint64_t vpc_watch[8]; 327 328 for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) { 329 vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]); 330 } 331 332 return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch, 333 XIVE_VST_WORD_ALL); 334 } 335 336 static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 337 XiveEAS *eas) 338 { 339 PnvXive *xive = PNV_XIVE(xrtr); 340 341 if (pnv_xive_get_ic(blk) != xive) { 342 xive_error(xive, "VST: EAS %x is remote !?", XIVE_SRCNO(blk, idx)); 343 return -1; 344 } 345 346 return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas); 347 } 348 349 static int pnv_xive_eas_update(PnvXive *xive, uint8_t blk, uint32_t idx) 350 { 351 /* All done. */ 352 return 0; 353 } 354 355 static XiveTCTX *pnv_xive_get_tctx(XiveRouter *xrtr, CPUState *cs) 356 { 357 PowerPCCPU *cpu = POWERPC_CPU(cs); 358 XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); 359 PnvXive *xive = NULL; 360 CPUPPCState *env = &cpu->env; 361 int pir = env->spr_cb[SPR_PIR].default_value; 362 363 /* 364 * Perform an extra check on the HW thread enablement. 365 * 366 * The TIMA is shared among the chips and to identify the chip 367 * from which the access is being done, we extract the chip id 368 * from the PIR. 369 */ 370 xive = pnv_xive_get_ic((pir >> 8) & 0xf); 371 if (!xive) { 372 return NULL; 373 } 374 375 if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) { 376 xive_error(PNV_XIVE(xrtr), "IC: CPU %x is not enabled", pir); 377 } 378 379 return tctx; 380 } 381 382 /* 383 * The internal sources (IPIs) of the interrupt controller have no 384 * knowledge of the XIVE chip on which they reside. Encode the block 385 * id in the source interrupt number before forwarding the source 386 * event notification to the Router. This is required on a multichip 387 * system. 388 */ 389 static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno) 390 { 391 PnvXive *xive = PNV_XIVE(xn); 392 uint8_t blk = xive->chip->chip_id; 393 394 xive_router_notify(xn, XIVE_SRCNO(blk, srcno)); 395 } 396 397 /* 398 * XIVE helpers 399 */ 400 401 static uint64_t pnv_xive_vc_size(PnvXive *xive) 402 { 403 return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK; 404 } 405 406 static uint64_t pnv_xive_edt_shift(PnvXive *xive) 407 { 408 return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX); 409 } 410 411 static uint64_t pnv_xive_pc_size(PnvXive *xive) 412 { 413 return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK; 414 } 415 416 static uint32_t pnv_xive_nr_ipis(PnvXive *xive) 417 { 418 uint8_t blk = xive->chip->chip_id; 419 420 return pnv_xive_vst_size(xive->vsds[VST_TSEL_SBE][blk]) * SBE_PER_BYTE; 421 } 422 423 static uint32_t pnv_xive_nr_ends(PnvXive *xive) 424 { 425 uint8_t blk = xive->chip->chip_id; 426 427 return pnv_xive_vst_size(xive->vsds[VST_TSEL_EQDT][blk]) 428 / vst_infos[VST_TSEL_EQDT].size; 429 } 430 431 /* 432 * EDT Table 433 * 434 * The Virtualization Controller MMIO region containing the IPI ESB 435 * pages and END ESB pages is sub-divided into "sets" which map 436 * portions of the VC region to the different ESB pages. It is 437 * configured at runtime through the EDT "Domain Table" to let the 438 * firmware decide how to split the VC address space between IPI ESB 439 * pages and END ESB pages. 440 */ 441 442 /* 443 * Computes the overall size of the IPI or the END ESB pages 444 */ 445 static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type) 446 { 447 uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive); 448 uint64_t size = 0; 449 int i; 450 451 for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) { 452 uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]); 453 454 if (edt_type == type) { 455 size += edt_size; 456 } 457 } 458 459 return size; 460 } 461 462 /* 463 * Maps an offset of the VC region in the IPI or END region using the 464 * layout defined by the EDT "Domaine Table" 465 */ 466 static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset, 467 uint64_t type) 468 { 469 int i; 470 uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive); 471 uint64_t edt_offset = vc_offset; 472 473 for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) { 474 uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]); 475 476 if (edt_type != type) { 477 edt_offset -= edt_size; 478 } 479 } 480 481 return edt_offset; 482 } 483 484 static void pnv_xive_edt_resize(PnvXive *xive) 485 { 486 uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI); 487 uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ); 488 489 memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size); 490 memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio); 491 492 memory_region_set_size(&xive->end_edt_mmio, end_edt_size); 493 memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio); 494 } 495 496 /* 497 * XIVE Table configuration. Only EDT is supported. 498 */ 499 static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val) 500 { 501 uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL; 502 uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]); 503 uint64_t *xive_table; 504 uint8_t max_index; 505 506 switch (tsel) { 507 case CQ_TAR_TSEL_BLK: 508 max_index = ARRAY_SIZE(xive->blk); 509 xive_table = xive->blk; 510 break; 511 case CQ_TAR_TSEL_MIG: 512 max_index = ARRAY_SIZE(xive->mig); 513 xive_table = xive->mig; 514 break; 515 case CQ_TAR_TSEL_EDT: 516 max_index = ARRAY_SIZE(xive->edt); 517 xive_table = xive->edt; 518 break; 519 case CQ_TAR_TSEL_VDT: 520 max_index = ARRAY_SIZE(xive->vdt); 521 xive_table = xive->vdt; 522 break; 523 default: 524 xive_error(xive, "IC: invalid table %d", (int) tsel); 525 return -1; 526 } 527 528 if (tsel_index >= max_index) { 529 xive_error(xive, "IC: invalid index %d", (int) tsel_index); 530 return -1; 531 } 532 533 xive_table[tsel_index] = val; 534 535 if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) { 536 xive->regs[CQ_TAR >> 3] = 537 SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index); 538 } 539 540 /* 541 * EDT configuration is complete. Resize the MMIO windows exposing 542 * the IPI and the END ESBs in the VC region. 543 */ 544 if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) { 545 pnv_xive_edt_resize(xive); 546 } 547 548 return 0; 549 } 550 551 /* 552 * Virtual Structure Tables (VST) configuration 553 */ 554 static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type, 555 uint8_t blk, uint64_t vsd) 556 { 557 XiveENDSource *end_xsrc = &xive->end_source; 558 XiveSource *xsrc = &xive->ipi_source; 559 const XiveVstInfo *info = &vst_infos[type]; 560 uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; 561 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; 562 563 /* Basic checks */ 564 565 if (VSD_INDIRECT & vsd) { 566 if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) { 567 xive_error(xive, "VST: %s indirect tables are not enabled", 568 info->name); 569 return; 570 } 571 572 if (!pnv_xive_vst_page_size_allowed(page_shift)) { 573 xive_error(xive, "VST: invalid %s page shift %d", info->name, 574 page_shift); 575 return; 576 } 577 } 578 579 if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) { 580 xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with" 581 " page shift %d", info->name, vst_addr, page_shift); 582 return; 583 } 584 585 /* Record the table configuration (in SRAM on HW) */ 586 xive->vsds[type][blk] = vsd; 587 588 /* Now tune the models with the configuration provided by the FW */ 589 590 switch (type) { 591 case VST_TSEL_IVT: /* Nothing to be done */ 592 break; 593 594 case VST_TSEL_EQDT: 595 /* 596 * Backing store pages for the END. Compute the number of ENDs 597 * provisioned by FW and resize the END ESB window accordingly. 598 */ 599 memory_region_set_size(&end_xsrc->esb_mmio, pnv_xive_nr_ends(xive) * 600 (1ull << (end_xsrc->esb_shift + 1))); 601 memory_region_add_subregion(&xive->end_edt_mmio, 0, 602 &end_xsrc->esb_mmio); 603 break; 604 605 case VST_TSEL_SBE: 606 /* 607 * Backing store pages for the source PQ bits. The model does 608 * not use these PQ bits backed in RAM because the XiveSource 609 * model has its own. Compute the number of IRQs provisioned 610 * by FW and resize the IPI ESB window accordingly. 611 */ 612 memory_region_set_size(&xsrc->esb_mmio, pnv_xive_nr_ipis(xive) * 613 (1ull << xsrc->esb_shift)); 614 memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio); 615 break; 616 617 case VST_TSEL_VPDT: /* Not modeled */ 618 case VST_TSEL_IRQ: /* Not modeled */ 619 /* 620 * These tables contains the backing store pages for the 621 * interrupt fifos of the VC sub-engine in case of overflow. 622 */ 623 break; 624 625 default: 626 g_assert_not_reached(); 627 } 628 } 629 630 /* 631 * Both PC and VC sub-engines are configured as each use the Virtual 632 * Structure Tables : SBE, EAS, END and NVT. 633 */ 634 static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine) 635 { 636 uint8_t mode = GETFIELD(VSD_MODE, vsd); 637 uint8_t type = GETFIELD(VST_TABLE_SELECT, 638 xive->regs[VC_VSD_TABLE_ADDR >> 3]); 639 uint8_t blk = GETFIELD(VST_TABLE_BLOCK, 640 xive->regs[VC_VSD_TABLE_ADDR >> 3]); 641 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; 642 643 if (type > VST_TSEL_IRQ) { 644 xive_error(xive, "VST: invalid table type %d", type); 645 return; 646 } 647 648 if (blk >= vst_infos[type].max_blocks) { 649 xive_error(xive, "VST: invalid block id %d for" 650 " %s table", blk, vst_infos[type].name); 651 return; 652 } 653 654 /* 655 * Only take the VC sub-engine configuration into account because 656 * the XiveRouter model combines both VC and PC sub-engines 657 */ 658 if (pc_engine) { 659 return; 660 } 661 662 if (!vst_addr) { 663 xive_error(xive, "VST: invalid %s table address", vst_infos[type].name); 664 return; 665 } 666 667 switch (mode) { 668 case VSD_MODE_FORWARD: 669 xive->vsds[type][blk] = vsd; 670 break; 671 672 case VSD_MODE_EXCLUSIVE: 673 pnv_xive_vst_set_exclusive(xive, type, blk, vsd); 674 break; 675 676 default: 677 xive_error(xive, "VST: unsupported table mode %d", mode); 678 return; 679 } 680 } 681 682 /* 683 * Interrupt controller MMIO region. The layout is compatible between 684 * 4K and 64K pages : 685 * 686 * Page 0 sub-engine BARs 687 * 0x000 - 0x3FF IC registers 688 * 0x400 - 0x7FF PC registers 689 * 0x800 - 0xFFF VC registers 690 * 691 * Page 1 Notify page (writes only) 692 * 0x000 - 0x7FF HW interrupt triggers (PSI, PHB) 693 * 0x800 - 0xFFF forwards and syncs 694 * 695 * Page 2 LSI Trigger page (writes only) (not modeled) 696 * Page 3 LSI SB EOI page (reads only) (not modeled) 697 * 698 * Page 4-7 indirect TIMA 699 */ 700 701 /* 702 * IC - registers MMIO 703 */ 704 static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset, 705 uint64_t val, unsigned size) 706 { 707 PnvXive *xive = PNV_XIVE(opaque); 708 MemoryRegion *sysmem = get_system_memory(); 709 uint32_t reg = offset >> 3; 710 bool is_chip0 = xive->chip->chip_id == 0; 711 712 switch (offset) { 713 714 /* 715 * XIVE CQ (PowerBus bridge) settings 716 */ 717 case CQ_MSGSND: /* msgsnd for doorbells */ 718 case CQ_FIRMASK_OR: /* FIR error reporting */ 719 break; 720 case CQ_PBI_CTL: 721 if (val & CQ_PBI_PC_64K) { 722 xive->pc_shift = 16; 723 } 724 if (val & CQ_PBI_VC_64K) { 725 xive->vc_shift = 16; 726 } 727 break; 728 case CQ_CFG_PB_GEN: /* PowerBus General Configuration */ 729 /* 730 * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode 731 */ 732 break; 733 734 /* 735 * XIVE Virtualization Controller settings 736 */ 737 case VC_GLOBAL_CONFIG: 738 break; 739 740 /* 741 * XIVE Presenter Controller settings 742 */ 743 case PC_GLOBAL_CONFIG: 744 /* 745 * PC_GCONF_CHIPID_OVR 746 * Overrides Int command Chip ID with the Chip ID field (DEBUG) 747 */ 748 break; 749 case PC_TCTXT_CFG: 750 /* 751 * TODO: block group support 752 * 753 * PC_TCTXT_CFG_BLKGRP_EN 754 * PC_TCTXT_CFG_HARD_CHIPID_BLK : 755 * Moves the chipid into block field for hardwired CAM compares. 756 * Block offset value is adjusted to 0b0..01 & ThrdId 757 * 758 * Will require changes in xive_presenter_tctx_match(). I am 759 * not sure how to handle that yet. 760 */ 761 762 /* Overrides hardwired chip ID with the chip ID field */ 763 if (val & PC_TCTXT_CHIPID_OVERRIDE) { 764 xive->tctx_chipid = GETFIELD(PC_TCTXT_CHIPID, val); 765 } 766 break; 767 case PC_TCTXT_TRACK: 768 /* 769 * PC_TCTXT_TRACK_EN: 770 * enable block tracking and exchange of block ownership 771 * information between Interrupt controllers 772 */ 773 break; 774 775 /* 776 * Misc settings 777 */ 778 case VC_SBC_CONFIG: /* Store EOI configuration */ 779 /* 780 * Configure store EOI if required by firwmare (skiboot has removed 781 * support recently though) 782 */ 783 if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) { 784 object_property_set_int(OBJECT(&xive->ipi_source), 785 XIVE_SRC_STORE_EOI, "flags", &error_fatal); 786 } 787 break; 788 789 case VC_EQC_CONFIG: /* TODO: silent escalation */ 790 case VC_AIB_TX_ORDER_TAG2: /* relax ordering */ 791 break; 792 793 /* 794 * XIVE BAR settings (XSCOM only) 795 */ 796 case CQ_RST_CTL: 797 /* bit4: resets all BAR registers */ 798 break; 799 800 case CQ_IC_BAR: /* IC BAR. 8 pages */ 801 xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12; 802 if (!(val & CQ_IC_BAR_VALID)) { 803 xive->ic_base = 0; 804 if (xive->regs[reg] & CQ_IC_BAR_VALID) { 805 memory_region_del_subregion(&xive->ic_mmio, 806 &xive->ic_reg_mmio); 807 memory_region_del_subregion(&xive->ic_mmio, 808 &xive->ic_notify_mmio); 809 memory_region_del_subregion(&xive->ic_mmio, 810 &xive->ic_lsi_mmio); 811 memory_region_del_subregion(&xive->ic_mmio, 812 &xive->tm_indirect_mmio); 813 814 memory_region_del_subregion(sysmem, &xive->ic_mmio); 815 } 816 } else { 817 xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K); 818 if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) { 819 memory_region_add_subregion(sysmem, xive->ic_base, 820 &xive->ic_mmio); 821 822 memory_region_add_subregion(&xive->ic_mmio, 0, 823 &xive->ic_reg_mmio); 824 memory_region_add_subregion(&xive->ic_mmio, 825 1ul << xive->ic_shift, 826 &xive->ic_notify_mmio); 827 memory_region_add_subregion(&xive->ic_mmio, 828 2ul << xive->ic_shift, 829 &xive->ic_lsi_mmio); 830 memory_region_add_subregion(&xive->ic_mmio, 831 4ull << xive->ic_shift, 832 &xive->tm_indirect_mmio); 833 } 834 } 835 break; 836 837 case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */ 838 case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */ 839 xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12; 840 if (!(val & CQ_TM_BAR_VALID)) { 841 xive->tm_base = 0; 842 if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) { 843 memory_region_del_subregion(sysmem, &xive->tm_mmio); 844 } 845 } else { 846 xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K); 847 if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) { 848 memory_region_add_subregion(sysmem, xive->tm_base, 849 &xive->tm_mmio); 850 } 851 } 852 break; 853 854 case CQ_PC_BARM: 855 xive->regs[reg] = val; 856 memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive)); 857 break; 858 case CQ_PC_BAR: /* From 32M to 512G */ 859 if (!(val & CQ_PC_BAR_VALID)) { 860 xive->pc_base = 0; 861 if (xive->regs[reg] & CQ_PC_BAR_VALID) { 862 memory_region_del_subregion(sysmem, &xive->pc_mmio); 863 } 864 } else { 865 xive->pc_base = val & ~(CQ_PC_BAR_VALID); 866 if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) { 867 memory_region_add_subregion(sysmem, xive->pc_base, 868 &xive->pc_mmio); 869 } 870 } 871 break; 872 873 case CQ_VC_BARM: 874 xive->regs[reg] = val; 875 memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive)); 876 break; 877 case CQ_VC_BAR: /* From 64M to 4TB */ 878 if (!(val & CQ_VC_BAR_VALID)) { 879 xive->vc_base = 0; 880 if (xive->regs[reg] & CQ_VC_BAR_VALID) { 881 memory_region_del_subregion(sysmem, &xive->vc_mmio); 882 } 883 } else { 884 xive->vc_base = val & ~(CQ_VC_BAR_VALID); 885 if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) { 886 memory_region_add_subregion(sysmem, xive->vc_base, 887 &xive->vc_mmio); 888 } 889 } 890 break; 891 892 /* 893 * XIVE Table settings. 894 */ 895 case CQ_TAR: /* Table Address */ 896 break; 897 case CQ_TDR: /* Table Data */ 898 pnv_xive_table_set_data(xive, val); 899 break; 900 901 /* 902 * XIVE VC & PC Virtual Structure Table settings 903 */ 904 case VC_VSD_TABLE_ADDR: 905 case PC_VSD_TABLE_ADDR: /* Virtual table selector */ 906 break; 907 case VC_VSD_TABLE_DATA: /* Virtual table setting */ 908 case PC_VSD_TABLE_DATA: 909 pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA); 910 break; 911 912 /* 913 * Interrupt fifo overflow in memory backing store (Not modeled) 914 */ 915 case VC_IRQ_CONFIG_IPI: 916 case VC_IRQ_CONFIG_HW: 917 case VC_IRQ_CONFIG_CASCADE1: 918 case VC_IRQ_CONFIG_CASCADE2: 919 case VC_IRQ_CONFIG_REDIST: 920 case VC_IRQ_CONFIG_IPI_CASC: 921 break; 922 923 /* 924 * XIVE hardware thread enablement 925 */ 926 case PC_THREAD_EN_REG0: /* Physical Thread Enable */ 927 case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */ 928 break; 929 930 case PC_THREAD_EN_REG0_SET: 931 xive->regs[PC_THREAD_EN_REG0 >> 3] |= val; 932 break; 933 case PC_THREAD_EN_REG1_SET: 934 xive->regs[PC_THREAD_EN_REG1 >> 3] |= val; 935 break; 936 case PC_THREAD_EN_REG0_CLR: 937 xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val; 938 break; 939 case PC_THREAD_EN_REG1_CLR: 940 xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val; 941 break; 942 943 /* 944 * Indirect TIMA access set up. Defines the PIR of the HW thread 945 * to use. 946 */ 947 case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3: 948 break; 949 950 /* 951 * XIVE PC & VC cache updates for EAS, NVT and END 952 */ 953 case VC_IVC_SCRUB_MASK: 954 break; 955 case VC_IVC_SCRUB_TRIG: 956 pnv_xive_eas_update(xive, GETFIELD(PC_SCRUB_BLOCK_ID, val), 957 GETFIELD(VC_SCRUB_OFFSET, val)); 958 break; 959 960 case VC_EQC_SCRUB_MASK: 961 case VC_EQC_CWATCH_SPEC: 962 case VC_EQC_CWATCH_DAT0 ... VC_EQC_CWATCH_DAT3: 963 break; 964 case VC_EQC_SCRUB_TRIG: 965 pnv_xive_end_update(xive, GETFIELD(VC_SCRUB_BLOCK_ID, val), 966 GETFIELD(VC_SCRUB_OFFSET, val)); 967 break; 968 969 case PC_VPC_SCRUB_MASK: 970 case PC_VPC_CWATCH_SPEC: 971 case PC_VPC_CWATCH_DAT0 ... PC_VPC_CWATCH_DAT7: 972 break; 973 case PC_VPC_SCRUB_TRIG: 974 pnv_xive_nvt_update(xive, GETFIELD(PC_SCRUB_BLOCK_ID, val), 975 GETFIELD(PC_SCRUB_OFFSET, val)); 976 break; 977 978 979 /* 980 * XIVE PC & VC cache invalidation 981 */ 982 case PC_AT_KILL: 983 break; 984 case VC_AT_MACRO_KILL: 985 break; 986 case PC_AT_KILL_MASK: 987 case VC_AT_MACRO_KILL_MASK: 988 break; 989 990 default: 991 xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset); 992 return; 993 } 994 995 xive->regs[reg] = val; 996 } 997 998 static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size) 999 { 1000 PnvXive *xive = PNV_XIVE(opaque); 1001 uint64_t val = 0; 1002 uint32_t reg = offset >> 3; 1003 1004 switch (offset) { 1005 case CQ_CFG_PB_GEN: 1006 case CQ_IC_BAR: 1007 case CQ_TM1_BAR: 1008 case CQ_TM2_BAR: 1009 case CQ_PC_BAR: 1010 case CQ_PC_BARM: 1011 case CQ_VC_BAR: 1012 case CQ_VC_BARM: 1013 case CQ_TAR: 1014 case CQ_TDR: 1015 case CQ_PBI_CTL: 1016 1017 case PC_TCTXT_CFG: 1018 case PC_TCTXT_TRACK: 1019 case PC_TCTXT_INDIR0: 1020 case PC_TCTXT_INDIR1: 1021 case PC_TCTXT_INDIR2: 1022 case PC_TCTXT_INDIR3: 1023 case PC_GLOBAL_CONFIG: 1024 1025 case PC_VPC_SCRUB_MASK: 1026 case PC_VPC_CWATCH_SPEC: 1027 case PC_VPC_CWATCH_DAT0: 1028 case PC_VPC_CWATCH_DAT1: 1029 case PC_VPC_CWATCH_DAT2: 1030 case PC_VPC_CWATCH_DAT3: 1031 case PC_VPC_CWATCH_DAT4: 1032 case PC_VPC_CWATCH_DAT5: 1033 case PC_VPC_CWATCH_DAT6: 1034 case PC_VPC_CWATCH_DAT7: 1035 1036 case VC_GLOBAL_CONFIG: 1037 case VC_AIB_TX_ORDER_TAG2: 1038 1039 case VC_IRQ_CONFIG_IPI: 1040 case VC_IRQ_CONFIG_HW: 1041 case VC_IRQ_CONFIG_CASCADE1: 1042 case VC_IRQ_CONFIG_CASCADE2: 1043 case VC_IRQ_CONFIG_REDIST: 1044 case VC_IRQ_CONFIG_IPI_CASC: 1045 1046 case VC_EQC_SCRUB_MASK: 1047 case VC_EQC_CWATCH_DAT0: 1048 case VC_EQC_CWATCH_DAT1: 1049 case VC_EQC_CWATCH_DAT2: 1050 case VC_EQC_CWATCH_DAT3: 1051 1052 case VC_EQC_CWATCH_SPEC: 1053 case VC_IVC_SCRUB_MASK: 1054 case VC_SBC_CONFIG: 1055 case VC_AT_MACRO_KILL_MASK: 1056 case VC_VSD_TABLE_ADDR: 1057 case PC_VSD_TABLE_ADDR: 1058 case VC_VSD_TABLE_DATA: 1059 case PC_VSD_TABLE_DATA: 1060 case PC_THREAD_EN_REG0: 1061 case PC_THREAD_EN_REG1: 1062 val = xive->regs[reg]; 1063 break; 1064 1065 /* 1066 * XIVE hardware thread enablement 1067 */ 1068 case PC_THREAD_EN_REG0_SET: 1069 case PC_THREAD_EN_REG0_CLR: 1070 val = xive->regs[PC_THREAD_EN_REG0 >> 3]; 1071 break; 1072 case PC_THREAD_EN_REG1_SET: 1073 case PC_THREAD_EN_REG1_CLR: 1074 val = xive->regs[PC_THREAD_EN_REG1 >> 3]; 1075 break; 1076 1077 case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */ 1078 val = 0xffffff0000000000; 1079 break; 1080 1081 /* 1082 * XIVE PC & VC cache updates for EAS, NVT and END 1083 */ 1084 case PC_VPC_SCRUB_TRIG: 1085 case VC_IVC_SCRUB_TRIG: 1086 case VC_EQC_SCRUB_TRIG: 1087 xive->regs[reg] &= ~VC_SCRUB_VALID; 1088 val = xive->regs[reg]; 1089 break; 1090 1091 /* 1092 * XIVE PC & VC cache invalidation 1093 */ 1094 case PC_AT_KILL: 1095 xive->regs[reg] &= ~PC_AT_KILL_VALID; 1096 val = xive->regs[reg]; 1097 break; 1098 case VC_AT_MACRO_KILL: 1099 xive->regs[reg] &= ~VC_KILL_VALID; 1100 val = xive->regs[reg]; 1101 break; 1102 1103 /* 1104 * XIVE synchronisation 1105 */ 1106 case VC_EQC_CONFIG: 1107 val = VC_EQC_SYNC_MASK; 1108 break; 1109 1110 default: 1111 xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset); 1112 } 1113 1114 return val; 1115 } 1116 1117 static const MemoryRegionOps pnv_xive_ic_reg_ops = { 1118 .read = pnv_xive_ic_reg_read, 1119 .write = pnv_xive_ic_reg_write, 1120 .endianness = DEVICE_BIG_ENDIAN, 1121 .valid = { 1122 .min_access_size = 8, 1123 .max_access_size = 8, 1124 }, 1125 .impl = { 1126 .min_access_size = 8, 1127 .max_access_size = 8, 1128 }, 1129 }; 1130 1131 /* 1132 * IC - Notify MMIO port page (write only) 1133 */ 1134 #define PNV_XIVE_FORWARD_IPI 0x800 /* Forward IPI */ 1135 #define PNV_XIVE_FORWARD_HW 0x880 /* Forward HW */ 1136 #define PNV_XIVE_FORWARD_OS_ESC 0x900 /* Forward OS escalation */ 1137 #define PNV_XIVE_FORWARD_HW_ESC 0x980 /* Forward Hyp escalation */ 1138 #define PNV_XIVE_FORWARD_REDIS 0xa00 /* Forward Redistribution */ 1139 #define PNV_XIVE_RESERVED5 0xa80 /* Cache line 5 PowerBUS operation */ 1140 #define PNV_XIVE_RESERVED6 0xb00 /* Cache line 6 PowerBUS operation */ 1141 #define PNV_XIVE_RESERVED7 0xb80 /* Cache line 7 PowerBUS operation */ 1142 1143 /* VC synchronisation */ 1144 #define PNV_XIVE_SYNC_IPI 0xc00 /* Sync IPI */ 1145 #define PNV_XIVE_SYNC_HW 0xc80 /* Sync HW */ 1146 #define PNV_XIVE_SYNC_OS_ESC 0xd00 /* Sync OS escalation */ 1147 #define PNV_XIVE_SYNC_HW_ESC 0xd80 /* Sync Hyp escalation */ 1148 #define PNV_XIVE_SYNC_REDIS 0xe00 /* Sync Redistribution */ 1149 1150 /* PC synchronisation */ 1151 #define PNV_XIVE_SYNC_PULL 0xe80 /* Sync pull context */ 1152 #define PNV_XIVE_SYNC_PUSH 0xf00 /* Sync push context */ 1153 #define PNV_XIVE_SYNC_VPC 0xf80 /* Sync remove VPC store */ 1154 1155 static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val) 1156 { 1157 /* 1158 * Forward the source event notification directly to the Router. 1159 * The source interrupt number should already be correctly encoded 1160 * with the chip block id by the sending device (PHB, PSI). 1161 */ 1162 xive_router_notify(XIVE_NOTIFIER(xive), val); 1163 } 1164 1165 static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val, 1166 unsigned size) 1167 { 1168 PnvXive *xive = PNV_XIVE(opaque); 1169 1170 /* VC: HW triggers */ 1171 switch (addr) { 1172 case 0x000 ... 0x7FF: 1173 pnv_xive_ic_hw_trigger(opaque, addr, val); 1174 break; 1175 1176 /* VC: Forwarded IRQs */ 1177 case PNV_XIVE_FORWARD_IPI: 1178 case PNV_XIVE_FORWARD_HW: 1179 case PNV_XIVE_FORWARD_OS_ESC: 1180 case PNV_XIVE_FORWARD_HW_ESC: 1181 case PNV_XIVE_FORWARD_REDIS: 1182 /* TODO: forwarded IRQs. Should be like HW triggers */ 1183 xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64, 1184 addr, val); 1185 break; 1186 1187 /* VC syncs */ 1188 case PNV_XIVE_SYNC_IPI: 1189 case PNV_XIVE_SYNC_HW: 1190 case PNV_XIVE_SYNC_OS_ESC: 1191 case PNV_XIVE_SYNC_HW_ESC: 1192 case PNV_XIVE_SYNC_REDIS: 1193 break; 1194 1195 /* PC syncs */ 1196 case PNV_XIVE_SYNC_PULL: 1197 case PNV_XIVE_SYNC_PUSH: 1198 case PNV_XIVE_SYNC_VPC: 1199 break; 1200 1201 default: 1202 xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr); 1203 } 1204 } 1205 1206 static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr, 1207 unsigned size) 1208 { 1209 PnvXive *xive = PNV_XIVE(opaque); 1210 1211 /* loads are invalid */ 1212 xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr); 1213 return -1; 1214 } 1215 1216 static const MemoryRegionOps pnv_xive_ic_notify_ops = { 1217 .read = pnv_xive_ic_notify_read, 1218 .write = pnv_xive_ic_notify_write, 1219 .endianness = DEVICE_BIG_ENDIAN, 1220 .valid = { 1221 .min_access_size = 8, 1222 .max_access_size = 8, 1223 }, 1224 .impl = { 1225 .min_access_size = 8, 1226 .max_access_size = 8, 1227 }, 1228 }; 1229 1230 /* 1231 * IC - LSI MMIO handlers (not modeled) 1232 */ 1233 1234 static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr, 1235 uint64_t val, unsigned size) 1236 { 1237 PnvXive *xive = PNV_XIVE(opaque); 1238 1239 xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr); 1240 } 1241 1242 static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size) 1243 { 1244 PnvXive *xive = PNV_XIVE(opaque); 1245 1246 xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr); 1247 return -1; 1248 } 1249 1250 static const MemoryRegionOps pnv_xive_ic_lsi_ops = { 1251 .read = pnv_xive_ic_lsi_read, 1252 .write = pnv_xive_ic_lsi_write, 1253 .endianness = DEVICE_BIG_ENDIAN, 1254 .valid = { 1255 .min_access_size = 8, 1256 .max_access_size = 8, 1257 }, 1258 .impl = { 1259 .min_access_size = 8, 1260 .max_access_size = 8, 1261 }, 1262 }; 1263 1264 /* 1265 * IC - Indirect TIMA MMIO handlers 1266 */ 1267 1268 /* 1269 * When the TIMA is accessed from the indirect page, the thread id 1270 * (PIR) has to be configured in the IC registers before. This is used 1271 * for resets and for debug purpose also. 1272 */ 1273 static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive) 1274 { 1275 uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3]; 1276 PowerPCCPU *cpu = NULL; 1277 int pir; 1278 1279 if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) { 1280 xive_error(xive, "IC: no indirect TIMA access in progress"); 1281 return NULL; 1282 } 1283 1284 pir = GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir) & 0xff; 1285 cpu = ppc_get_vcpu_by_pir(pir); 1286 if (!cpu) { 1287 xive_error(xive, "IC: invalid PIR %x for indirect access", pir); 1288 return NULL; 1289 } 1290 1291 /* Check that HW thread is XIVE enabled */ 1292 if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) { 1293 xive_error(xive, "IC: CPU %x is not enabled", pir); 1294 } 1295 1296 return XIVE_TCTX(pnv_cpu_state(cpu)->intc); 1297 } 1298 1299 static void xive_tm_indirect_write(void *opaque, hwaddr offset, 1300 uint64_t value, unsigned size) 1301 { 1302 XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque)); 1303 1304 xive_tctx_tm_write(tctx, offset, value, size); 1305 } 1306 1307 static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset, 1308 unsigned size) 1309 { 1310 XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque)); 1311 1312 return xive_tctx_tm_read(tctx, offset, size); 1313 } 1314 1315 static const MemoryRegionOps xive_tm_indirect_ops = { 1316 .read = xive_tm_indirect_read, 1317 .write = xive_tm_indirect_write, 1318 .endianness = DEVICE_BIG_ENDIAN, 1319 .valid = { 1320 .min_access_size = 1, 1321 .max_access_size = 8, 1322 }, 1323 .impl = { 1324 .min_access_size = 1, 1325 .max_access_size = 8, 1326 }, 1327 }; 1328 1329 /* 1330 * Interrupt controller XSCOM region. 1331 */ 1332 static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size) 1333 { 1334 switch (addr >> 3) { 1335 case X_VC_EQC_CONFIG: 1336 /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */ 1337 return VC_EQC_SYNC_MASK; 1338 default: 1339 return pnv_xive_ic_reg_read(opaque, addr, size); 1340 } 1341 } 1342 1343 static void pnv_xive_xscom_write(void *opaque, hwaddr addr, 1344 uint64_t val, unsigned size) 1345 { 1346 pnv_xive_ic_reg_write(opaque, addr, val, size); 1347 } 1348 1349 static const MemoryRegionOps pnv_xive_xscom_ops = { 1350 .read = pnv_xive_xscom_read, 1351 .write = pnv_xive_xscom_write, 1352 .endianness = DEVICE_BIG_ENDIAN, 1353 .valid = { 1354 .min_access_size = 8, 1355 .max_access_size = 8, 1356 }, 1357 .impl = { 1358 .min_access_size = 8, 1359 .max_access_size = 8, 1360 } 1361 }; 1362 1363 /* 1364 * Virtualization Controller MMIO region containing the IPI and END ESB pages 1365 */ 1366 static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset, 1367 unsigned size) 1368 { 1369 PnvXive *xive = PNV_XIVE(opaque); 1370 uint64_t edt_index = offset >> pnv_xive_edt_shift(xive); 1371 uint64_t edt_type = 0; 1372 uint64_t edt_offset; 1373 MemTxResult result; 1374 AddressSpace *edt_as = NULL; 1375 uint64_t ret = -1; 1376 1377 if (edt_index < XIVE_TABLE_EDT_MAX) { 1378 edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]); 1379 } 1380 1381 switch (edt_type) { 1382 case CQ_TDR_EDT_IPI: 1383 edt_as = &xive->ipi_as; 1384 break; 1385 case CQ_TDR_EDT_EQ: 1386 edt_as = &xive->end_as; 1387 break; 1388 default: 1389 xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset); 1390 return -1; 1391 } 1392 1393 /* Remap the offset for the targeted address space */ 1394 edt_offset = pnv_xive_edt_offset(xive, offset, edt_type); 1395 1396 ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED, 1397 &result); 1398 1399 if (result != MEMTX_OK) { 1400 xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%" 1401 HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END", 1402 offset, edt_offset); 1403 return -1; 1404 } 1405 1406 return ret; 1407 } 1408 1409 static void pnv_xive_vc_write(void *opaque, hwaddr offset, 1410 uint64_t val, unsigned size) 1411 { 1412 PnvXive *xive = PNV_XIVE(opaque); 1413 uint64_t edt_index = offset >> pnv_xive_edt_shift(xive); 1414 uint64_t edt_type = 0; 1415 uint64_t edt_offset; 1416 MemTxResult result; 1417 AddressSpace *edt_as = NULL; 1418 1419 if (edt_index < XIVE_TABLE_EDT_MAX) { 1420 edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]); 1421 } 1422 1423 switch (edt_type) { 1424 case CQ_TDR_EDT_IPI: 1425 edt_as = &xive->ipi_as; 1426 break; 1427 case CQ_TDR_EDT_EQ: 1428 edt_as = &xive->end_as; 1429 break; 1430 default: 1431 xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx, 1432 offset); 1433 return; 1434 } 1435 1436 /* Remap the offset for the targeted address space */ 1437 edt_offset = pnv_xive_edt_offset(xive, offset, edt_type); 1438 1439 address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result); 1440 if (result != MEMTX_OK) { 1441 xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset); 1442 } 1443 } 1444 1445 static const MemoryRegionOps pnv_xive_vc_ops = { 1446 .read = pnv_xive_vc_read, 1447 .write = pnv_xive_vc_write, 1448 .endianness = DEVICE_BIG_ENDIAN, 1449 .valid = { 1450 .min_access_size = 8, 1451 .max_access_size = 8, 1452 }, 1453 .impl = { 1454 .min_access_size = 8, 1455 .max_access_size = 8, 1456 }, 1457 }; 1458 1459 /* 1460 * Presenter Controller MMIO region. The Virtualization Controller 1461 * updates the IPB in the NVT table when required. Not modeled. 1462 */ 1463 static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr, 1464 unsigned size) 1465 { 1466 PnvXive *xive = PNV_XIVE(opaque); 1467 1468 xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr); 1469 return -1; 1470 } 1471 1472 static void pnv_xive_pc_write(void *opaque, hwaddr addr, 1473 uint64_t value, unsigned size) 1474 { 1475 PnvXive *xive = PNV_XIVE(opaque); 1476 1477 xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr); 1478 } 1479 1480 static const MemoryRegionOps pnv_xive_pc_ops = { 1481 .read = pnv_xive_pc_read, 1482 .write = pnv_xive_pc_write, 1483 .endianness = DEVICE_BIG_ENDIAN, 1484 .valid = { 1485 .min_access_size = 8, 1486 .max_access_size = 8, 1487 }, 1488 .impl = { 1489 .min_access_size = 8, 1490 .max_access_size = 8, 1491 }, 1492 }; 1493 1494 void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon) 1495 { 1496 XiveRouter *xrtr = XIVE_ROUTER(xive); 1497 uint8_t blk = xive->chip->chip_id; 1498 uint32_t srcno0 = XIVE_SRCNO(blk, 0); 1499 uint32_t nr_ipis = pnv_xive_nr_ipis(xive); 1500 uint32_t nr_ends = pnv_xive_nr_ends(xive); 1501 XiveEAS eas; 1502 XiveEND end; 1503 int i; 1504 1505 monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0, 1506 srcno0 + nr_ipis - 1); 1507 xive_source_pic_print_info(&xive->ipi_source, srcno0, mon); 1508 1509 monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0, 1510 srcno0 + nr_ipis - 1); 1511 for (i = 0; i < nr_ipis; i++) { 1512 if (xive_router_get_eas(xrtr, blk, i, &eas)) { 1513 break; 1514 } 1515 if (!xive_eas_is_masked(&eas)) { 1516 xive_eas_pic_print_info(&eas, i, mon); 1517 } 1518 } 1519 1520 monitor_printf(mon, "XIVE[%x] ENDT %08x .. %08x\n", blk, 0, nr_ends - 1); 1521 for (i = 0; i < nr_ends; i++) { 1522 if (xive_router_get_end(xrtr, blk, i, &end)) { 1523 break; 1524 } 1525 xive_end_pic_print_info(&end, i, mon); 1526 } 1527 } 1528 1529 static void pnv_xive_reset(void *dev) 1530 { 1531 PnvXive *xive = PNV_XIVE(dev); 1532 XiveSource *xsrc = &xive->ipi_source; 1533 XiveENDSource *end_xsrc = &xive->end_source; 1534 1535 /* 1536 * Use the PnvChip id to identify the XIVE interrupt controller. 1537 * It can be overriden by configuration at runtime. 1538 */ 1539 xive->tctx_chipid = xive->chip->chip_id; 1540 1541 /* Default page size (Should be changed at runtime to 64k) */ 1542 xive->ic_shift = xive->vc_shift = xive->pc_shift = 12; 1543 1544 /* Clear subregions */ 1545 if (memory_region_is_mapped(&xsrc->esb_mmio)) { 1546 memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio); 1547 } 1548 1549 if (memory_region_is_mapped(&xive->ipi_edt_mmio)) { 1550 memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio); 1551 } 1552 1553 if (memory_region_is_mapped(&end_xsrc->esb_mmio)) { 1554 memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio); 1555 } 1556 1557 if (memory_region_is_mapped(&xive->end_edt_mmio)) { 1558 memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio); 1559 } 1560 } 1561 1562 static void pnv_xive_init(Object *obj) 1563 { 1564 PnvXive *xive = PNV_XIVE(obj); 1565 1566 object_initialize_child(obj, "ipi_source", &xive->ipi_source, 1567 sizeof(xive->ipi_source), TYPE_XIVE_SOURCE, 1568 &error_abort, NULL); 1569 object_initialize_child(obj, "end_source", &xive->end_source, 1570 sizeof(xive->end_source), TYPE_XIVE_END_SOURCE, 1571 &error_abort, NULL); 1572 } 1573 1574 /* 1575 * Maximum number of IRQs and ENDs supported by HW 1576 */ 1577 #define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE)) 1578 #define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE)) 1579 1580 static void pnv_xive_realize(DeviceState *dev, Error **errp) 1581 { 1582 PnvXive *xive = PNV_XIVE(dev); 1583 XiveSource *xsrc = &xive->ipi_source; 1584 XiveENDSource *end_xsrc = &xive->end_source; 1585 Error *local_err = NULL; 1586 Object *obj; 1587 1588 obj = object_property_get_link(OBJECT(dev), "chip", &local_err); 1589 if (!obj) { 1590 error_propagate(errp, local_err); 1591 error_prepend(errp, "required link 'chip' not found: "); 1592 return; 1593 } 1594 1595 /* The PnvChip id identifies the XIVE interrupt controller. */ 1596 xive->chip = PNV_CHIP(obj); 1597 1598 /* 1599 * The XiveSource and XiveENDSource objects are realized with the 1600 * maximum allowed HW configuration. The ESB MMIO regions will be 1601 * resized dynamically when the controller is configured by the FW 1602 * to limit accesses to resources not provisioned. 1603 */ 1604 object_property_set_int(OBJECT(xsrc), PNV_XIVE_NR_IRQS, "nr-irqs", 1605 &error_fatal); 1606 object_property_add_const_link(OBJECT(xsrc), "xive", OBJECT(xive), 1607 &error_fatal); 1608 object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err); 1609 if (local_err) { 1610 error_propagate(errp, local_err); 1611 return; 1612 } 1613 1614 object_property_set_int(OBJECT(end_xsrc), PNV_XIVE_NR_ENDS, "nr-ends", 1615 &error_fatal); 1616 object_property_add_const_link(OBJECT(end_xsrc), "xive", OBJECT(xive), 1617 &error_fatal); 1618 object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err); 1619 if (local_err) { 1620 error_propagate(errp, local_err); 1621 return; 1622 } 1623 1624 /* Default page size. Generally changed at runtime to 64k */ 1625 xive->ic_shift = xive->vc_shift = xive->pc_shift = 12; 1626 1627 /* XSCOM region, used for initial configuration of the BARs */ 1628 memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops, 1629 xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3); 1630 1631 /* Interrupt controller MMIO regions */ 1632 memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic", 1633 PNV9_XIVE_IC_SIZE); 1634 1635 memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops, 1636 xive, "xive-ic-reg", 1 << xive->ic_shift); 1637 memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev), 1638 &pnv_xive_ic_notify_ops, 1639 xive, "xive-ic-notify", 1 << xive->ic_shift); 1640 1641 /* The Pervasive LSI trigger and EOI pages (not modeled) */ 1642 memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops, 1643 xive, "xive-ic-lsi", 2 << xive->ic_shift); 1644 1645 /* Thread Interrupt Management Area (Indirect) */ 1646 memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev), 1647 &xive_tm_indirect_ops, 1648 xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE); 1649 /* 1650 * Overall Virtualization Controller MMIO region containing the 1651 * IPI ESB pages and END ESB pages. The layout is defined by the 1652 * EDT "Domain table" and the accesses are dispatched using 1653 * address spaces for each. 1654 */ 1655 memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive, 1656 "xive-vc", PNV9_XIVE_VC_SIZE); 1657 1658 memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi", 1659 PNV9_XIVE_VC_SIZE); 1660 address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi"); 1661 memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end", 1662 PNV9_XIVE_VC_SIZE); 1663 address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end"); 1664 1665 /* 1666 * The MMIO windows exposing the IPI ESBs and the END ESBs in the 1667 * VC region. Their size is configured by the FW in the EDT table. 1668 */ 1669 memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0); 1670 memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0); 1671 1672 /* Presenter Controller MMIO region (not modeled) */ 1673 memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive, 1674 "xive-pc", PNV9_XIVE_PC_SIZE); 1675 1676 /* Thread Interrupt Management Area (Direct) */ 1677 memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops, 1678 xive, "xive-tima", PNV9_XIVE_TM_SIZE); 1679 1680 qemu_register_reset(pnv_xive_reset, dev); 1681 } 1682 1683 static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt, 1684 int xscom_offset) 1685 { 1686 const char compat[] = "ibm,power9-xive-x"; 1687 char *name; 1688 int offset; 1689 uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE; 1690 uint32_t reg[] = { 1691 cpu_to_be32(lpc_pcba), 1692 cpu_to_be32(PNV9_XSCOM_XIVE_SIZE) 1693 }; 1694 1695 name = g_strdup_printf("xive@%x", lpc_pcba); 1696 offset = fdt_add_subnode(fdt, xscom_offset, name); 1697 _FDT(offset); 1698 g_free(name); 1699 1700 _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)))); 1701 _FDT((fdt_setprop(fdt, offset, "compatible", compat, 1702 sizeof(compat)))); 1703 return 0; 1704 } 1705 1706 static Property pnv_xive_properties[] = { 1707 DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0), 1708 DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0), 1709 DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0), 1710 DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0), 1711 DEFINE_PROP_END_OF_LIST(), 1712 }; 1713 1714 static void pnv_xive_class_init(ObjectClass *klass, void *data) 1715 { 1716 DeviceClass *dc = DEVICE_CLASS(klass); 1717 PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); 1718 XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass); 1719 XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); 1720 1721 xdc->dt_xscom = pnv_xive_dt_xscom; 1722 1723 dc->desc = "PowerNV XIVE Interrupt Controller"; 1724 dc->realize = pnv_xive_realize; 1725 dc->props = pnv_xive_properties; 1726 1727 xrc->get_eas = pnv_xive_get_eas; 1728 xrc->get_end = pnv_xive_get_end; 1729 xrc->write_end = pnv_xive_write_end; 1730 xrc->get_nvt = pnv_xive_get_nvt; 1731 xrc->write_nvt = pnv_xive_write_nvt; 1732 xrc->get_tctx = pnv_xive_get_tctx; 1733 1734 xnc->notify = pnv_xive_notify; 1735 }; 1736 1737 static const TypeInfo pnv_xive_info = { 1738 .name = TYPE_PNV_XIVE, 1739 .parent = TYPE_XIVE_ROUTER, 1740 .instance_init = pnv_xive_init, 1741 .instance_size = sizeof(PnvXive), 1742 .class_init = pnv_xive_class_init, 1743 .interfaces = (InterfaceInfo[]) { 1744 { TYPE_PNV_XSCOM_INTERFACE }, 1745 { } 1746 } 1747 }; 1748 1749 static void pnv_xive_register_types(void) 1750 { 1751 type_register_static(&pnv_xive_info); 1752 } 1753 1754 type_init(pnv_xive_register_types) 1755