1 /* 2 * QEMU PowerPC XIVE interrupt controller model 3 * 4 * Copyright (c) 2017-2019, IBM Corporation. 5 * 6 * This code is licensed under the GPL version 2 or later. See the 7 * COPYING file in the top-level directory. 8 */ 9 10 #include "qemu/osdep.h" 11 #include "qemu/log.h" 12 #include "qapi/error.h" 13 #include "target/ppc/cpu.h" 14 #include "sysemu/cpus.h" 15 #include "sysemu/dma.h" 16 #include "monitor/monitor.h" 17 #include "hw/ppc/fdt.h" 18 #include "hw/ppc/pnv.h" 19 #include "hw/ppc/pnv_core.h" 20 #include "hw/ppc/pnv_xscom.h" 21 #include "hw/ppc/pnv_xive.h" 22 #include "hw/ppc/xive_regs.h" 23 #include "hw/ppc/ppc.h" 24 25 #include <libfdt.h> 26 27 #include "pnv_xive_regs.h" 28 29 #define XIVE_DEBUG 30 31 /* 32 * Virtual structures table (VST) 33 */ 34 #define SBE_PER_BYTE 4 35 36 typedef struct XiveVstInfo { 37 const char *name; 38 uint32_t size; 39 uint32_t max_blocks; 40 } XiveVstInfo; 41 42 static const XiveVstInfo vst_infos[] = { 43 [VST_TSEL_IVT] = { "EAT", sizeof(XiveEAS), 16 }, 44 [VST_TSEL_SBE] = { "SBE", 1, 16 }, 45 [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 }, 46 [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 }, 47 48 /* 49 * Interrupt fifo backing store table (not modeled) : 50 * 51 * 0 - IPI, 52 * 1 - HWD, 53 * 2 - First escalate, 54 * 3 - Second escalate, 55 * 4 - Redistribution, 56 * 5 - IPI cascaded queue ? 57 */ 58 [VST_TSEL_IRQ] = { "IRQ", 1, 6 }, 59 }; 60 61 #define xive_error(xive, fmt, ...) \ 62 qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n", \ 63 (xive)->chip->chip_id, ## __VA_ARGS__); 64 65 /* 66 * QEMU version of the GETFIELD/SETFIELD macros 67 * 68 * TODO: It might be better to use the existing extract64() and 69 * deposit64() but this means that all the register definitions will 70 * change and become incompatible with the ones found in skiboot. 71 * 72 * Keep it as it is for now until we find a common ground. 73 */ 74 static inline uint64_t GETFIELD(uint64_t mask, uint64_t word) 75 { 76 return (word & mask) >> ctz64(mask); 77 } 78 79 static inline uint64_t SETFIELD(uint64_t mask, uint64_t word, 80 uint64_t value) 81 { 82 return (word & ~mask) | ((value << ctz64(mask)) & mask); 83 } 84 85 /* 86 * Remote access to controllers. HW uses MMIOs. For now, a simple scan 87 * of the chips is good enough. 88 * 89 * TODO: Block scope support 90 */ 91 static PnvXive *pnv_xive_get_ic(uint8_t blk) 92 { 93 PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine()); 94 int i; 95 96 for (i = 0; i < pnv->num_chips; i++) { 97 Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]); 98 PnvXive *xive = &chip9->xive; 99 100 if (xive->chip->chip_id == blk) { 101 return xive; 102 } 103 } 104 return NULL; 105 } 106 107 /* 108 * VST accessors for SBE, EAT, ENDT, NVT 109 * 110 * Indirect VST tables are arrays of VSDs pointing to a page (of same 111 * size). Each page is a direct VST table. 112 */ 113 114 #define XIVE_VSD_SIZE 8 115 116 /* Indirect page size can be 4K, 64K, 2M, 16M. */ 117 static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift) 118 { 119 return page_shift == 12 || page_shift == 16 || 120 page_shift == 21 || page_shift == 24; 121 } 122 123 static uint64_t pnv_xive_vst_size(uint64_t vsd) 124 { 125 uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12); 126 127 /* 128 * Read the first descriptor to get the page size of the indirect 129 * table. 130 */ 131 if (VSD_INDIRECT & vsd) { 132 uint32_t nr_pages = vst_tsize / XIVE_VSD_SIZE; 133 uint32_t page_shift; 134 135 vsd = ldq_be_dma(&address_space_memory, vsd & VSD_ADDRESS_MASK); 136 page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; 137 138 if (!pnv_xive_vst_page_size_allowed(page_shift)) { 139 return 0; 140 } 141 142 return nr_pages * (1ull << page_shift); 143 } 144 145 return vst_tsize; 146 } 147 148 static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type, 149 uint64_t vsd, uint32_t idx) 150 { 151 const XiveVstInfo *info = &vst_infos[type]; 152 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; 153 154 return vst_addr + idx * info->size; 155 } 156 157 static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type, 158 uint64_t vsd, uint32_t idx) 159 { 160 const XiveVstInfo *info = &vst_infos[type]; 161 uint64_t vsd_addr; 162 uint32_t vsd_idx; 163 uint32_t page_shift; 164 uint32_t vst_per_page; 165 166 /* Get the page size of the indirect table. */ 167 vsd_addr = vsd & VSD_ADDRESS_MASK; 168 vsd = ldq_be_dma(&address_space_memory, vsd_addr); 169 170 if (!(vsd & VSD_ADDRESS_MASK)) { 171 xive_error(xive, "VST: invalid %s entry %x !?", info->name, 0); 172 return 0; 173 } 174 175 page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; 176 177 if (!pnv_xive_vst_page_size_allowed(page_shift)) { 178 xive_error(xive, "VST: invalid %s page shift %d", info->name, 179 page_shift); 180 return 0; 181 } 182 183 vst_per_page = (1ull << page_shift) / info->size; 184 vsd_idx = idx / vst_per_page; 185 186 /* Load the VSD we are looking for, if not already done */ 187 if (vsd_idx) { 188 vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE; 189 vsd = ldq_be_dma(&address_space_memory, vsd_addr); 190 191 if (!(vsd & VSD_ADDRESS_MASK)) { 192 xive_error(xive, "VST: invalid %s entry %x !?", info->name, 0); 193 return 0; 194 } 195 196 /* 197 * Check that the pages have a consistent size across the 198 * indirect table 199 */ 200 if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) { 201 xive_error(xive, "VST: %s entry %x indirect page size differ !?", 202 info->name, idx); 203 return 0; 204 } 205 } 206 207 return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page)); 208 } 209 210 static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk, 211 uint32_t idx) 212 { 213 const XiveVstInfo *info = &vst_infos[type]; 214 uint64_t vsd; 215 uint32_t idx_max; 216 217 if (blk >= info->max_blocks) { 218 xive_error(xive, "VST: invalid block id %d for VST %s %d !?", 219 blk, info->name, idx); 220 return 0; 221 } 222 223 vsd = xive->vsds[type][blk]; 224 225 /* Remote VST access */ 226 if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) { 227 xive = pnv_xive_get_ic(blk); 228 229 return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0; 230 } 231 232 idx_max = pnv_xive_vst_size(vsd) / info->size - 1; 233 if (idx > idx_max) { 234 #ifdef XIVE_DEBUG 235 xive_error(xive, "VST: %s entry %x/%x out of range [ 0 .. %x ] !?", 236 info->name, blk, idx, idx_max); 237 #endif 238 return 0; 239 } 240 241 if (VSD_INDIRECT & vsd) { 242 return pnv_xive_vst_addr_indirect(xive, type, vsd, idx); 243 } 244 245 return pnv_xive_vst_addr_direct(xive, type, vsd, idx); 246 } 247 248 static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk, 249 uint32_t idx, void *data) 250 { 251 const XiveVstInfo *info = &vst_infos[type]; 252 uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx); 253 254 if (!addr) { 255 return -1; 256 } 257 258 cpu_physical_memory_read(addr, data, info->size); 259 return 0; 260 } 261 262 #define XIVE_VST_WORD_ALL -1 263 264 static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk, 265 uint32_t idx, void *data, uint32_t word_number) 266 { 267 const XiveVstInfo *info = &vst_infos[type]; 268 uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx); 269 270 if (!addr) { 271 return -1; 272 } 273 274 if (word_number == XIVE_VST_WORD_ALL) { 275 cpu_physical_memory_write(addr, data, info->size); 276 } else { 277 cpu_physical_memory_write(addr + word_number * 4, 278 data + word_number * 4, 4); 279 } 280 return 0; 281 } 282 283 static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 284 XiveEND *end) 285 { 286 return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end); 287 } 288 289 static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 290 XiveEND *end, uint8_t word_number) 291 { 292 return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end, 293 word_number); 294 } 295 296 static int pnv_xive_end_update(PnvXive *xive, uint8_t blk, uint32_t idx) 297 { 298 int i; 299 uint64_t eqc_watch[4]; 300 301 for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) { 302 eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]); 303 } 304 305 return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch, 306 XIVE_VST_WORD_ALL); 307 } 308 309 static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 310 XiveNVT *nvt) 311 { 312 return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt); 313 } 314 315 static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 316 XiveNVT *nvt, uint8_t word_number) 317 { 318 return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt, 319 word_number); 320 } 321 322 static int pnv_xive_nvt_update(PnvXive *xive, uint8_t blk, uint32_t idx) 323 { 324 int i; 325 uint64_t vpc_watch[8]; 326 327 for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) { 328 vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]); 329 } 330 331 return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch, 332 XIVE_VST_WORD_ALL); 333 } 334 335 static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 336 XiveEAS *eas) 337 { 338 PnvXive *xive = PNV_XIVE(xrtr); 339 340 if (pnv_xive_get_ic(blk) != xive) { 341 xive_error(xive, "VST: EAS %x is remote !?", XIVE_SRCNO(blk, idx)); 342 return -1; 343 } 344 345 return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas); 346 } 347 348 static int pnv_xive_eas_update(PnvXive *xive, uint8_t blk, uint32_t idx) 349 { 350 /* All done. */ 351 return 0; 352 } 353 354 static XiveTCTX *pnv_xive_get_tctx(XiveRouter *xrtr, CPUState *cs) 355 { 356 PowerPCCPU *cpu = POWERPC_CPU(cs); 357 XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); 358 PnvXive *xive = NULL; 359 CPUPPCState *env = &cpu->env; 360 int pir = env->spr_cb[SPR_PIR].default_value; 361 362 /* 363 * Perform an extra check on the HW thread enablement. 364 * 365 * The TIMA is shared among the chips and to identify the chip 366 * from which the access is being done, we extract the chip id 367 * from the PIR. 368 */ 369 xive = pnv_xive_get_ic((pir >> 8) & 0xf); 370 if (!xive) { 371 return NULL; 372 } 373 374 if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) { 375 xive_error(PNV_XIVE(xrtr), "IC: CPU %x is not enabled", pir); 376 } 377 378 return tctx; 379 } 380 381 /* 382 * The internal sources (IPIs) of the interrupt controller have no 383 * knowledge of the XIVE chip on which they reside. Encode the block 384 * id in the source interrupt number before forwarding the source 385 * event notification to the Router. This is required on a multichip 386 * system. 387 */ 388 static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno) 389 { 390 PnvXive *xive = PNV_XIVE(xn); 391 uint8_t blk = xive->chip->chip_id; 392 393 xive_router_notify(xn, XIVE_SRCNO(blk, srcno)); 394 } 395 396 /* 397 * XIVE helpers 398 */ 399 400 static uint64_t pnv_xive_vc_size(PnvXive *xive) 401 { 402 return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK; 403 } 404 405 static uint64_t pnv_xive_edt_shift(PnvXive *xive) 406 { 407 return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX); 408 } 409 410 static uint64_t pnv_xive_pc_size(PnvXive *xive) 411 { 412 return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK; 413 } 414 415 static uint32_t pnv_xive_nr_ipis(PnvXive *xive) 416 { 417 uint8_t blk = xive->chip->chip_id; 418 419 return pnv_xive_vst_size(xive->vsds[VST_TSEL_SBE][blk]) * SBE_PER_BYTE; 420 } 421 422 static uint32_t pnv_xive_nr_ends(PnvXive *xive) 423 { 424 uint8_t blk = xive->chip->chip_id; 425 426 return pnv_xive_vst_size(xive->vsds[VST_TSEL_EQDT][blk]) 427 / vst_infos[VST_TSEL_EQDT].size; 428 } 429 430 /* 431 * EDT Table 432 * 433 * The Virtualization Controller MMIO region containing the IPI ESB 434 * pages and END ESB pages is sub-divided into "sets" which map 435 * portions of the VC region to the different ESB pages. It is 436 * configured at runtime through the EDT "Domain Table" to let the 437 * firmware decide how to split the VC address space between IPI ESB 438 * pages and END ESB pages. 439 */ 440 441 /* 442 * Computes the overall size of the IPI or the END ESB pages 443 */ 444 static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type) 445 { 446 uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive); 447 uint64_t size = 0; 448 int i; 449 450 for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) { 451 uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]); 452 453 if (edt_type == type) { 454 size += edt_size; 455 } 456 } 457 458 return size; 459 } 460 461 /* 462 * Maps an offset of the VC region in the IPI or END region using the 463 * layout defined by the EDT "Domaine Table" 464 */ 465 static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset, 466 uint64_t type) 467 { 468 int i; 469 uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive); 470 uint64_t edt_offset = vc_offset; 471 472 for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) { 473 uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]); 474 475 if (edt_type != type) { 476 edt_offset -= edt_size; 477 } 478 } 479 480 return edt_offset; 481 } 482 483 static void pnv_xive_edt_resize(PnvXive *xive) 484 { 485 uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI); 486 uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ); 487 488 memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size); 489 memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio); 490 491 memory_region_set_size(&xive->end_edt_mmio, end_edt_size); 492 memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio); 493 } 494 495 /* 496 * XIVE Table configuration. Only EDT is supported. 497 */ 498 static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val) 499 { 500 uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL; 501 uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]); 502 uint64_t *xive_table; 503 uint8_t max_index; 504 505 switch (tsel) { 506 case CQ_TAR_TSEL_BLK: 507 max_index = ARRAY_SIZE(xive->blk); 508 xive_table = xive->blk; 509 break; 510 case CQ_TAR_TSEL_MIG: 511 max_index = ARRAY_SIZE(xive->mig); 512 xive_table = xive->mig; 513 break; 514 case CQ_TAR_TSEL_EDT: 515 max_index = ARRAY_SIZE(xive->edt); 516 xive_table = xive->edt; 517 break; 518 case CQ_TAR_TSEL_VDT: 519 max_index = ARRAY_SIZE(xive->vdt); 520 xive_table = xive->vdt; 521 break; 522 default: 523 xive_error(xive, "IC: invalid table %d", (int) tsel); 524 return -1; 525 } 526 527 if (tsel_index >= max_index) { 528 xive_error(xive, "IC: invalid index %d", (int) tsel_index); 529 return -1; 530 } 531 532 xive_table[tsel_index] = val; 533 534 if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) { 535 xive->regs[CQ_TAR >> 3] = 536 SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index); 537 } 538 539 /* 540 * EDT configuration is complete. Resize the MMIO windows exposing 541 * the IPI and the END ESBs in the VC region. 542 */ 543 if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) { 544 pnv_xive_edt_resize(xive); 545 } 546 547 return 0; 548 } 549 550 /* 551 * Virtual Structure Tables (VST) configuration 552 */ 553 static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type, 554 uint8_t blk, uint64_t vsd) 555 { 556 XiveENDSource *end_xsrc = &xive->end_source; 557 XiveSource *xsrc = &xive->ipi_source; 558 const XiveVstInfo *info = &vst_infos[type]; 559 uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; 560 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; 561 562 /* Basic checks */ 563 564 if (VSD_INDIRECT & vsd) { 565 if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) { 566 xive_error(xive, "VST: %s indirect tables are not enabled", 567 info->name); 568 return; 569 } 570 571 if (!pnv_xive_vst_page_size_allowed(page_shift)) { 572 xive_error(xive, "VST: invalid %s page shift %d", info->name, 573 page_shift); 574 return; 575 } 576 } 577 578 if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) { 579 xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with" 580 " page shift %d", info->name, vst_addr, page_shift); 581 return; 582 } 583 584 /* Record the table configuration (in SRAM on HW) */ 585 xive->vsds[type][blk] = vsd; 586 587 /* Now tune the models with the configuration provided by the FW */ 588 589 switch (type) { 590 case VST_TSEL_IVT: /* Nothing to be done */ 591 break; 592 593 case VST_TSEL_EQDT: 594 /* 595 * Backing store pages for the END. Compute the number of ENDs 596 * provisioned by FW and resize the END ESB window accordingly. 597 */ 598 memory_region_set_size(&end_xsrc->esb_mmio, pnv_xive_nr_ends(xive) * 599 (1ull << (end_xsrc->esb_shift + 1))); 600 memory_region_add_subregion(&xive->end_edt_mmio, 0, 601 &end_xsrc->esb_mmio); 602 break; 603 604 case VST_TSEL_SBE: 605 /* 606 * Backing store pages for the source PQ bits. The model does 607 * not use these PQ bits backed in RAM because the XiveSource 608 * model has its own. Compute the number of IRQs provisioned 609 * by FW and resize the IPI ESB window accordingly. 610 */ 611 memory_region_set_size(&xsrc->esb_mmio, pnv_xive_nr_ipis(xive) * 612 (1ull << xsrc->esb_shift)); 613 memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio); 614 break; 615 616 case VST_TSEL_VPDT: /* Not modeled */ 617 case VST_TSEL_IRQ: /* Not modeled */ 618 /* 619 * These tables contains the backing store pages for the 620 * interrupt fifos of the VC sub-engine in case of overflow. 621 */ 622 break; 623 624 default: 625 g_assert_not_reached(); 626 } 627 } 628 629 /* 630 * Both PC and VC sub-engines are configured as each use the Virtual 631 * Structure Tables : SBE, EAS, END and NVT. 632 */ 633 static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine) 634 { 635 uint8_t mode = GETFIELD(VSD_MODE, vsd); 636 uint8_t type = GETFIELD(VST_TABLE_SELECT, 637 xive->regs[VC_VSD_TABLE_ADDR >> 3]); 638 uint8_t blk = GETFIELD(VST_TABLE_BLOCK, 639 xive->regs[VC_VSD_TABLE_ADDR >> 3]); 640 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; 641 642 if (type > VST_TSEL_IRQ) { 643 xive_error(xive, "VST: invalid table type %d", type); 644 return; 645 } 646 647 if (blk >= vst_infos[type].max_blocks) { 648 xive_error(xive, "VST: invalid block id %d for" 649 " %s table", blk, vst_infos[type].name); 650 return; 651 } 652 653 /* 654 * Only take the VC sub-engine configuration into account because 655 * the XiveRouter model combines both VC and PC sub-engines 656 */ 657 if (pc_engine) { 658 return; 659 } 660 661 if (!vst_addr) { 662 xive_error(xive, "VST: invalid %s table address", vst_infos[type].name); 663 return; 664 } 665 666 switch (mode) { 667 case VSD_MODE_FORWARD: 668 xive->vsds[type][blk] = vsd; 669 break; 670 671 case VSD_MODE_EXCLUSIVE: 672 pnv_xive_vst_set_exclusive(xive, type, blk, vsd); 673 break; 674 675 default: 676 xive_error(xive, "VST: unsupported table mode %d", mode); 677 return; 678 } 679 } 680 681 /* 682 * Interrupt controller MMIO region. The layout is compatible between 683 * 4K and 64K pages : 684 * 685 * Page 0 sub-engine BARs 686 * 0x000 - 0x3FF IC registers 687 * 0x400 - 0x7FF PC registers 688 * 0x800 - 0xFFF VC registers 689 * 690 * Page 1 Notify page (writes only) 691 * 0x000 - 0x7FF HW interrupt triggers (PSI, PHB) 692 * 0x800 - 0xFFF forwards and syncs 693 * 694 * Page 2 LSI Trigger page (writes only) (not modeled) 695 * Page 3 LSI SB EOI page (reads only) (not modeled) 696 * 697 * Page 4-7 indirect TIMA 698 */ 699 700 /* 701 * IC - registers MMIO 702 */ 703 static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset, 704 uint64_t val, unsigned size) 705 { 706 PnvXive *xive = PNV_XIVE(opaque); 707 MemoryRegion *sysmem = get_system_memory(); 708 uint32_t reg = offset >> 3; 709 bool is_chip0 = xive->chip->chip_id == 0; 710 711 switch (offset) { 712 713 /* 714 * XIVE CQ (PowerBus bridge) settings 715 */ 716 case CQ_MSGSND: /* msgsnd for doorbells */ 717 case CQ_FIRMASK_OR: /* FIR error reporting */ 718 break; 719 case CQ_PBI_CTL: 720 if (val & CQ_PBI_PC_64K) { 721 xive->pc_shift = 16; 722 } 723 if (val & CQ_PBI_VC_64K) { 724 xive->vc_shift = 16; 725 } 726 break; 727 case CQ_CFG_PB_GEN: /* PowerBus General Configuration */ 728 /* 729 * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode 730 */ 731 break; 732 733 /* 734 * XIVE Virtualization Controller settings 735 */ 736 case VC_GLOBAL_CONFIG: 737 break; 738 739 /* 740 * XIVE Presenter Controller settings 741 */ 742 case PC_GLOBAL_CONFIG: 743 /* 744 * PC_GCONF_CHIPID_OVR 745 * Overrides Int command Chip ID with the Chip ID field (DEBUG) 746 */ 747 break; 748 case PC_TCTXT_CFG: 749 /* 750 * TODO: block group support 751 * 752 * PC_TCTXT_CFG_BLKGRP_EN 753 * PC_TCTXT_CFG_HARD_CHIPID_BLK : 754 * Moves the chipid into block field for hardwired CAM compares. 755 * Block offset value is adjusted to 0b0..01 & ThrdId 756 * 757 * Will require changes in xive_presenter_tctx_match(). I am 758 * not sure how to handle that yet. 759 */ 760 761 /* Overrides hardwired chip ID with the chip ID field */ 762 if (val & PC_TCTXT_CHIPID_OVERRIDE) { 763 xive->tctx_chipid = GETFIELD(PC_TCTXT_CHIPID, val); 764 } 765 break; 766 case PC_TCTXT_TRACK: 767 /* 768 * PC_TCTXT_TRACK_EN: 769 * enable block tracking and exchange of block ownership 770 * information between Interrupt controllers 771 */ 772 break; 773 774 /* 775 * Misc settings 776 */ 777 case VC_SBC_CONFIG: /* Store EOI configuration */ 778 /* 779 * Configure store EOI if required by firwmare (skiboot has removed 780 * support recently though) 781 */ 782 if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) { 783 object_property_set_int(OBJECT(&xive->ipi_source), 784 XIVE_SRC_STORE_EOI, "flags", &error_fatal); 785 } 786 break; 787 788 case VC_EQC_CONFIG: /* TODO: silent escalation */ 789 case VC_AIB_TX_ORDER_TAG2: /* relax ordering */ 790 break; 791 792 /* 793 * XIVE BAR settings (XSCOM only) 794 */ 795 case CQ_RST_CTL: 796 /* bit4: resets all BAR registers */ 797 break; 798 799 case CQ_IC_BAR: /* IC BAR. 8 pages */ 800 xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12; 801 if (!(val & CQ_IC_BAR_VALID)) { 802 xive->ic_base = 0; 803 if (xive->regs[reg] & CQ_IC_BAR_VALID) { 804 memory_region_del_subregion(&xive->ic_mmio, 805 &xive->ic_reg_mmio); 806 memory_region_del_subregion(&xive->ic_mmio, 807 &xive->ic_notify_mmio); 808 memory_region_del_subregion(&xive->ic_mmio, 809 &xive->ic_lsi_mmio); 810 memory_region_del_subregion(&xive->ic_mmio, 811 &xive->tm_indirect_mmio); 812 813 memory_region_del_subregion(sysmem, &xive->ic_mmio); 814 } 815 } else { 816 xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K); 817 if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) { 818 memory_region_add_subregion(sysmem, xive->ic_base, 819 &xive->ic_mmio); 820 821 memory_region_add_subregion(&xive->ic_mmio, 0, 822 &xive->ic_reg_mmio); 823 memory_region_add_subregion(&xive->ic_mmio, 824 1ul << xive->ic_shift, 825 &xive->ic_notify_mmio); 826 memory_region_add_subregion(&xive->ic_mmio, 827 2ul << xive->ic_shift, 828 &xive->ic_lsi_mmio); 829 memory_region_add_subregion(&xive->ic_mmio, 830 4ull << xive->ic_shift, 831 &xive->tm_indirect_mmio); 832 } 833 } 834 break; 835 836 case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */ 837 case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */ 838 xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12; 839 if (!(val & CQ_TM_BAR_VALID)) { 840 xive->tm_base = 0; 841 if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) { 842 memory_region_del_subregion(sysmem, &xive->tm_mmio); 843 } 844 } else { 845 xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K); 846 if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) { 847 memory_region_add_subregion(sysmem, xive->tm_base, 848 &xive->tm_mmio); 849 } 850 } 851 break; 852 853 case CQ_PC_BARM: 854 xive->regs[reg] = val; 855 memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive)); 856 break; 857 case CQ_PC_BAR: /* From 32M to 512G */ 858 if (!(val & CQ_PC_BAR_VALID)) { 859 xive->pc_base = 0; 860 if (xive->regs[reg] & CQ_PC_BAR_VALID) { 861 memory_region_del_subregion(sysmem, &xive->pc_mmio); 862 } 863 } else { 864 xive->pc_base = val & ~(CQ_PC_BAR_VALID); 865 if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) { 866 memory_region_add_subregion(sysmem, xive->pc_base, 867 &xive->pc_mmio); 868 } 869 } 870 break; 871 872 case CQ_VC_BARM: 873 xive->regs[reg] = val; 874 memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive)); 875 break; 876 case CQ_VC_BAR: /* From 64M to 4TB */ 877 if (!(val & CQ_VC_BAR_VALID)) { 878 xive->vc_base = 0; 879 if (xive->regs[reg] & CQ_VC_BAR_VALID) { 880 memory_region_del_subregion(sysmem, &xive->vc_mmio); 881 } 882 } else { 883 xive->vc_base = val & ~(CQ_VC_BAR_VALID); 884 if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) { 885 memory_region_add_subregion(sysmem, xive->vc_base, 886 &xive->vc_mmio); 887 } 888 } 889 break; 890 891 /* 892 * XIVE Table settings. 893 */ 894 case CQ_TAR: /* Table Address */ 895 break; 896 case CQ_TDR: /* Table Data */ 897 pnv_xive_table_set_data(xive, val); 898 break; 899 900 /* 901 * XIVE VC & PC Virtual Structure Table settings 902 */ 903 case VC_VSD_TABLE_ADDR: 904 case PC_VSD_TABLE_ADDR: /* Virtual table selector */ 905 break; 906 case VC_VSD_TABLE_DATA: /* Virtual table setting */ 907 case PC_VSD_TABLE_DATA: 908 pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA); 909 break; 910 911 /* 912 * Interrupt fifo overflow in memory backing store (Not modeled) 913 */ 914 case VC_IRQ_CONFIG_IPI: 915 case VC_IRQ_CONFIG_HW: 916 case VC_IRQ_CONFIG_CASCADE1: 917 case VC_IRQ_CONFIG_CASCADE2: 918 case VC_IRQ_CONFIG_REDIST: 919 case VC_IRQ_CONFIG_IPI_CASC: 920 break; 921 922 /* 923 * XIVE hardware thread enablement 924 */ 925 case PC_THREAD_EN_REG0: /* Physical Thread Enable */ 926 case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */ 927 break; 928 929 case PC_THREAD_EN_REG0_SET: 930 xive->regs[PC_THREAD_EN_REG0 >> 3] |= val; 931 break; 932 case PC_THREAD_EN_REG1_SET: 933 xive->regs[PC_THREAD_EN_REG1 >> 3] |= val; 934 break; 935 case PC_THREAD_EN_REG0_CLR: 936 xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val; 937 break; 938 case PC_THREAD_EN_REG1_CLR: 939 xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val; 940 break; 941 942 /* 943 * Indirect TIMA access set up. Defines the PIR of the HW thread 944 * to use. 945 */ 946 case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3: 947 break; 948 949 /* 950 * XIVE PC & VC cache updates for EAS, NVT and END 951 */ 952 case VC_IVC_SCRUB_MASK: 953 break; 954 case VC_IVC_SCRUB_TRIG: 955 pnv_xive_eas_update(xive, GETFIELD(PC_SCRUB_BLOCK_ID, val), 956 GETFIELD(VC_SCRUB_OFFSET, val)); 957 break; 958 959 case VC_EQC_SCRUB_MASK: 960 case VC_EQC_CWATCH_SPEC: 961 case VC_EQC_CWATCH_DAT0 ... VC_EQC_CWATCH_DAT3: 962 break; 963 case VC_EQC_SCRUB_TRIG: 964 pnv_xive_end_update(xive, GETFIELD(VC_SCRUB_BLOCK_ID, val), 965 GETFIELD(VC_SCRUB_OFFSET, val)); 966 break; 967 968 case PC_VPC_SCRUB_MASK: 969 case PC_VPC_CWATCH_SPEC: 970 case PC_VPC_CWATCH_DAT0 ... PC_VPC_CWATCH_DAT7: 971 break; 972 case PC_VPC_SCRUB_TRIG: 973 pnv_xive_nvt_update(xive, GETFIELD(PC_SCRUB_BLOCK_ID, val), 974 GETFIELD(PC_SCRUB_OFFSET, val)); 975 break; 976 977 978 /* 979 * XIVE PC & VC cache invalidation 980 */ 981 case PC_AT_KILL: 982 break; 983 case VC_AT_MACRO_KILL: 984 break; 985 case PC_AT_KILL_MASK: 986 case VC_AT_MACRO_KILL_MASK: 987 break; 988 989 default: 990 xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset); 991 return; 992 } 993 994 xive->regs[reg] = val; 995 } 996 997 static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size) 998 { 999 PnvXive *xive = PNV_XIVE(opaque); 1000 uint64_t val = 0; 1001 uint32_t reg = offset >> 3; 1002 1003 switch (offset) { 1004 case CQ_CFG_PB_GEN: 1005 case CQ_IC_BAR: 1006 case CQ_TM1_BAR: 1007 case CQ_TM2_BAR: 1008 case CQ_PC_BAR: 1009 case CQ_PC_BARM: 1010 case CQ_VC_BAR: 1011 case CQ_VC_BARM: 1012 case CQ_TAR: 1013 case CQ_TDR: 1014 case CQ_PBI_CTL: 1015 1016 case PC_TCTXT_CFG: 1017 case PC_TCTXT_TRACK: 1018 case PC_TCTXT_INDIR0: 1019 case PC_TCTXT_INDIR1: 1020 case PC_TCTXT_INDIR2: 1021 case PC_TCTXT_INDIR3: 1022 case PC_GLOBAL_CONFIG: 1023 1024 case PC_VPC_SCRUB_MASK: 1025 case PC_VPC_CWATCH_SPEC: 1026 case PC_VPC_CWATCH_DAT0: 1027 case PC_VPC_CWATCH_DAT1: 1028 case PC_VPC_CWATCH_DAT2: 1029 case PC_VPC_CWATCH_DAT3: 1030 case PC_VPC_CWATCH_DAT4: 1031 case PC_VPC_CWATCH_DAT5: 1032 case PC_VPC_CWATCH_DAT6: 1033 case PC_VPC_CWATCH_DAT7: 1034 1035 case VC_GLOBAL_CONFIG: 1036 case VC_AIB_TX_ORDER_TAG2: 1037 1038 case VC_IRQ_CONFIG_IPI: 1039 case VC_IRQ_CONFIG_HW: 1040 case VC_IRQ_CONFIG_CASCADE1: 1041 case VC_IRQ_CONFIG_CASCADE2: 1042 case VC_IRQ_CONFIG_REDIST: 1043 case VC_IRQ_CONFIG_IPI_CASC: 1044 1045 case VC_EQC_SCRUB_MASK: 1046 case VC_EQC_CWATCH_DAT0: 1047 case VC_EQC_CWATCH_DAT1: 1048 case VC_EQC_CWATCH_DAT2: 1049 case VC_EQC_CWATCH_DAT3: 1050 1051 case VC_EQC_CWATCH_SPEC: 1052 case VC_IVC_SCRUB_MASK: 1053 case VC_SBC_CONFIG: 1054 case VC_AT_MACRO_KILL_MASK: 1055 case VC_VSD_TABLE_ADDR: 1056 case PC_VSD_TABLE_ADDR: 1057 case VC_VSD_TABLE_DATA: 1058 case PC_VSD_TABLE_DATA: 1059 case PC_THREAD_EN_REG0: 1060 case PC_THREAD_EN_REG1: 1061 val = xive->regs[reg]; 1062 break; 1063 1064 /* 1065 * XIVE hardware thread enablement 1066 */ 1067 case PC_THREAD_EN_REG0_SET: 1068 case PC_THREAD_EN_REG0_CLR: 1069 val = xive->regs[PC_THREAD_EN_REG0 >> 3]; 1070 break; 1071 case PC_THREAD_EN_REG1_SET: 1072 case PC_THREAD_EN_REG1_CLR: 1073 val = xive->regs[PC_THREAD_EN_REG1 >> 3]; 1074 break; 1075 1076 case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */ 1077 val = 0xffffff0000000000; 1078 break; 1079 1080 /* 1081 * XIVE PC & VC cache updates for EAS, NVT and END 1082 */ 1083 case PC_VPC_SCRUB_TRIG: 1084 case VC_IVC_SCRUB_TRIG: 1085 case VC_EQC_SCRUB_TRIG: 1086 xive->regs[reg] &= ~VC_SCRUB_VALID; 1087 val = xive->regs[reg]; 1088 break; 1089 1090 /* 1091 * XIVE PC & VC cache invalidation 1092 */ 1093 case PC_AT_KILL: 1094 xive->regs[reg] &= ~PC_AT_KILL_VALID; 1095 val = xive->regs[reg]; 1096 break; 1097 case VC_AT_MACRO_KILL: 1098 xive->regs[reg] &= ~VC_KILL_VALID; 1099 val = xive->regs[reg]; 1100 break; 1101 1102 /* 1103 * XIVE synchronisation 1104 */ 1105 case VC_EQC_CONFIG: 1106 val = VC_EQC_SYNC_MASK; 1107 break; 1108 1109 default: 1110 xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset); 1111 } 1112 1113 return val; 1114 } 1115 1116 static const MemoryRegionOps pnv_xive_ic_reg_ops = { 1117 .read = pnv_xive_ic_reg_read, 1118 .write = pnv_xive_ic_reg_write, 1119 .endianness = DEVICE_BIG_ENDIAN, 1120 .valid = { 1121 .min_access_size = 8, 1122 .max_access_size = 8, 1123 }, 1124 .impl = { 1125 .min_access_size = 8, 1126 .max_access_size = 8, 1127 }, 1128 }; 1129 1130 /* 1131 * IC - Notify MMIO port page (write only) 1132 */ 1133 #define PNV_XIVE_FORWARD_IPI 0x800 /* Forward IPI */ 1134 #define PNV_XIVE_FORWARD_HW 0x880 /* Forward HW */ 1135 #define PNV_XIVE_FORWARD_OS_ESC 0x900 /* Forward OS escalation */ 1136 #define PNV_XIVE_FORWARD_HW_ESC 0x980 /* Forward Hyp escalation */ 1137 #define PNV_XIVE_FORWARD_REDIS 0xa00 /* Forward Redistribution */ 1138 #define PNV_XIVE_RESERVED5 0xa80 /* Cache line 5 PowerBUS operation */ 1139 #define PNV_XIVE_RESERVED6 0xb00 /* Cache line 6 PowerBUS operation */ 1140 #define PNV_XIVE_RESERVED7 0xb80 /* Cache line 7 PowerBUS operation */ 1141 1142 /* VC synchronisation */ 1143 #define PNV_XIVE_SYNC_IPI 0xc00 /* Sync IPI */ 1144 #define PNV_XIVE_SYNC_HW 0xc80 /* Sync HW */ 1145 #define PNV_XIVE_SYNC_OS_ESC 0xd00 /* Sync OS escalation */ 1146 #define PNV_XIVE_SYNC_HW_ESC 0xd80 /* Sync Hyp escalation */ 1147 #define PNV_XIVE_SYNC_REDIS 0xe00 /* Sync Redistribution */ 1148 1149 /* PC synchronisation */ 1150 #define PNV_XIVE_SYNC_PULL 0xe80 /* Sync pull context */ 1151 #define PNV_XIVE_SYNC_PUSH 0xf00 /* Sync push context */ 1152 #define PNV_XIVE_SYNC_VPC 0xf80 /* Sync remove VPC store */ 1153 1154 static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val) 1155 { 1156 /* 1157 * Forward the source event notification directly to the Router. 1158 * The source interrupt number should already be correctly encoded 1159 * with the chip block id by the sending device (PHB, PSI). 1160 */ 1161 xive_router_notify(XIVE_NOTIFIER(xive), val); 1162 } 1163 1164 static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val, 1165 unsigned size) 1166 { 1167 PnvXive *xive = PNV_XIVE(opaque); 1168 1169 /* VC: HW triggers */ 1170 switch (addr) { 1171 case 0x000 ... 0x7FF: 1172 pnv_xive_ic_hw_trigger(opaque, addr, val); 1173 break; 1174 1175 /* VC: Forwarded IRQs */ 1176 case PNV_XIVE_FORWARD_IPI: 1177 case PNV_XIVE_FORWARD_HW: 1178 case PNV_XIVE_FORWARD_OS_ESC: 1179 case PNV_XIVE_FORWARD_HW_ESC: 1180 case PNV_XIVE_FORWARD_REDIS: 1181 /* TODO: forwarded IRQs. Should be like HW triggers */ 1182 xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64, 1183 addr, val); 1184 break; 1185 1186 /* VC syncs */ 1187 case PNV_XIVE_SYNC_IPI: 1188 case PNV_XIVE_SYNC_HW: 1189 case PNV_XIVE_SYNC_OS_ESC: 1190 case PNV_XIVE_SYNC_HW_ESC: 1191 case PNV_XIVE_SYNC_REDIS: 1192 break; 1193 1194 /* PC syncs */ 1195 case PNV_XIVE_SYNC_PULL: 1196 case PNV_XIVE_SYNC_PUSH: 1197 case PNV_XIVE_SYNC_VPC: 1198 break; 1199 1200 default: 1201 xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr); 1202 } 1203 } 1204 1205 static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr, 1206 unsigned size) 1207 { 1208 PnvXive *xive = PNV_XIVE(opaque); 1209 1210 /* loads are invalid */ 1211 xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr); 1212 return -1; 1213 } 1214 1215 static const MemoryRegionOps pnv_xive_ic_notify_ops = { 1216 .read = pnv_xive_ic_notify_read, 1217 .write = pnv_xive_ic_notify_write, 1218 .endianness = DEVICE_BIG_ENDIAN, 1219 .valid = { 1220 .min_access_size = 8, 1221 .max_access_size = 8, 1222 }, 1223 .impl = { 1224 .min_access_size = 8, 1225 .max_access_size = 8, 1226 }, 1227 }; 1228 1229 /* 1230 * IC - LSI MMIO handlers (not modeled) 1231 */ 1232 1233 static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr, 1234 uint64_t val, unsigned size) 1235 { 1236 PnvXive *xive = PNV_XIVE(opaque); 1237 1238 xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr); 1239 } 1240 1241 static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size) 1242 { 1243 PnvXive *xive = PNV_XIVE(opaque); 1244 1245 xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr); 1246 return -1; 1247 } 1248 1249 static const MemoryRegionOps pnv_xive_ic_lsi_ops = { 1250 .read = pnv_xive_ic_lsi_read, 1251 .write = pnv_xive_ic_lsi_write, 1252 .endianness = DEVICE_BIG_ENDIAN, 1253 .valid = { 1254 .min_access_size = 8, 1255 .max_access_size = 8, 1256 }, 1257 .impl = { 1258 .min_access_size = 8, 1259 .max_access_size = 8, 1260 }, 1261 }; 1262 1263 /* 1264 * IC - Indirect TIMA MMIO handlers 1265 */ 1266 1267 /* 1268 * When the TIMA is accessed from the indirect page, the thread id 1269 * (PIR) has to be configured in the IC registers before. This is used 1270 * for resets and for debug purpose also. 1271 */ 1272 static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive) 1273 { 1274 uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3]; 1275 PowerPCCPU *cpu = NULL; 1276 int pir; 1277 1278 if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) { 1279 xive_error(xive, "IC: no indirect TIMA access in progress"); 1280 return NULL; 1281 } 1282 1283 pir = GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir) & 0xff; 1284 cpu = ppc_get_vcpu_by_pir(pir); 1285 if (!cpu) { 1286 xive_error(xive, "IC: invalid PIR %x for indirect access", pir); 1287 return NULL; 1288 } 1289 1290 /* Check that HW thread is XIVE enabled */ 1291 if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) { 1292 xive_error(xive, "IC: CPU %x is not enabled", pir); 1293 } 1294 1295 return XIVE_TCTX(pnv_cpu_state(cpu)->intc); 1296 } 1297 1298 static void xive_tm_indirect_write(void *opaque, hwaddr offset, 1299 uint64_t value, unsigned size) 1300 { 1301 XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque)); 1302 1303 xive_tctx_tm_write(tctx, offset, value, size); 1304 } 1305 1306 static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset, 1307 unsigned size) 1308 { 1309 XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque)); 1310 1311 return xive_tctx_tm_read(tctx, offset, size); 1312 } 1313 1314 static const MemoryRegionOps xive_tm_indirect_ops = { 1315 .read = xive_tm_indirect_read, 1316 .write = xive_tm_indirect_write, 1317 .endianness = DEVICE_BIG_ENDIAN, 1318 .valid = { 1319 .min_access_size = 1, 1320 .max_access_size = 8, 1321 }, 1322 .impl = { 1323 .min_access_size = 1, 1324 .max_access_size = 8, 1325 }, 1326 }; 1327 1328 /* 1329 * Interrupt controller XSCOM region. 1330 */ 1331 static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size) 1332 { 1333 switch (addr >> 3) { 1334 case X_VC_EQC_CONFIG: 1335 /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */ 1336 return VC_EQC_SYNC_MASK; 1337 default: 1338 return pnv_xive_ic_reg_read(opaque, addr, size); 1339 } 1340 } 1341 1342 static void pnv_xive_xscom_write(void *opaque, hwaddr addr, 1343 uint64_t val, unsigned size) 1344 { 1345 pnv_xive_ic_reg_write(opaque, addr, val, size); 1346 } 1347 1348 static const MemoryRegionOps pnv_xive_xscom_ops = { 1349 .read = pnv_xive_xscom_read, 1350 .write = pnv_xive_xscom_write, 1351 .endianness = DEVICE_BIG_ENDIAN, 1352 .valid = { 1353 .min_access_size = 8, 1354 .max_access_size = 8, 1355 }, 1356 .impl = { 1357 .min_access_size = 8, 1358 .max_access_size = 8, 1359 } 1360 }; 1361 1362 /* 1363 * Virtualization Controller MMIO region containing the IPI and END ESB pages 1364 */ 1365 static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset, 1366 unsigned size) 1367 { 1368 PnvXive *xive = PNV_XIVE(opaque); 1369 uint64_t edt_index = offset >> pnv_xive_edt_shift(xive); 1370 uint64_t edt_type = 0; 1371 uint64_t edt_offset; 1372 MemTxResult result; 1373 AddressSpace *edt_as = NULL; 1374 uint64_t ret = -1; 1375 1376 if (edt_index < XIVE_TABLE_EDT_MAX) { 1377 edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]); 1378 } 1379 1380 switch (edt_type) { 1381 case CQ_TDR_EDT_IPI: 1382 edt_as = &xive->ipi_as; 1383 break; 1384 case CQ_TDR_EDT_EQ: 1385 edt_as = &xive->end_as; 1386 break; 1387 default: 1388 xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset); 1389 return -1; 1390 } 1391 1392 /* Remap the offset for the targeted address space */ 1393 edt_offset = pnv_xive_edt_offset(xive, offset, edt_type); 1394 1395 ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED, 1396 &result); 1397 1398 if (result != MEMTX_OK) { 1399 xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%" 1400 HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END", 1401 offset, edt_offset); 1402 return -1; 1403 } 1404 1405 return ret; 1406 } 1407 1408 static void pnv_xive_vc_write(void *opaque, hwaddr offset, 1409 uint64_t val, unsigned size) 1410 { 1411 PnvXive *xive = PNV_XIVE(opaque); 1412 uint64_t edt_index = offset >> pnv_xive_edt_shift(xive); 1413 uint64_t edt_type = 0; 1414 uint64_t edt_offset; 1415 MemTxResult result; 1416 AddressSpace *edt_as = NULL; 1417 1418 if (edt_index < XIVE_TABLE_EDT_MAX) { 1419 edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]); 1420 } 1421 1422 switch (edt_type) { 1423 case CQ_TDR_EDT_IPI: 1424 edt_as = &xive->ipi_as; 1425 break; 1426 case CQ_TDR_EDT_EQ: 1427 edt_as = &xive->end_as; 1428 break; 1429 default: 1430 xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx, 1431 offset); 1432 return; 1433 } 1434 1435 /* Remap the offset for the targeted address space */ 1436 edt_offset = pnv_xive_edt_offset(xive, offset, edt_type); 1437 1438 address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result); 1439 if (result != MEMTX_OK) { 1440 xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset); 1441 } 1442 } 1443 1444 static const MemoryRegionOps pnv_xive_vc_ops = { 1445 .read = pnv_xive_vc_read, 1446 .write = pnv_xive_vc_write, 1447 .endianness = DEVICE_BIG_ENDIAN, 1448 .valid = { 1449 .min_access_size = 8, 1450 .max_access_size = 8, 1451 }, 1452 .impl = { 1453 .min_access_size = 8, 1454 .max_access_size = 8, 1455 }, 1456 }; 1457 1458 /* 1459 * Presenter Controller MMIO region. The Virtualization Controller 1460 * updates the IPB in the NVT table when required. Not modeled. 1461 */ 1462 static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr, 1463 unsigned size) 1464 { 1465 PnvXive *xive = PNV_XIVE(opaque); 1466 1467 xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr); 1468 return -1; 1469 } 1470 1471 static void pnv_xive_pc_write(void *opaque, hwaddr addr, 1472 uint64_t value, unsigned size) 1473 { 1474 PnvXive *xive = PNV_XIVE(opaque); 1475 1476 xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr); 1477 } 1478 1479 static const MemoryRegionOps pnv_xive_pc_ops = { 1480 .read = pnv_xive_pc_read, 1481 .write = pnv_xive_pc_write, 1482 .endianness = DEVICE_BIG_ENDIAN, 1483 .valid = { 1484 .min_access_size = 8, 1485 .max_access_size = 8, 1486 }, 1487 .impl = { 1488 .min_access_size = 8, 1489 .max_access_size = 8, 1490 }, 1491 }; 1492 1493 void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon) 1494 { 1495 XiveRouter *xrtr = XIVE_ROUTER(xive); 1496 uint8_t blk = xive->chip->chip_id; 1497 uint32_t srcno0 = XIVE_SRCNO(blk, 0); 1498 uint32_t nr_ipis = pnv_xive_nr_ipis(xive); 1499 uint32_t nr_ends = pnv_xive_nr_ends(xive); 1500 XiveEAS eas; 1501 XiveEND end; 1502 int i; 1503 1504 monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0, 1505 srcno0 + nr_ipis - 1); 1506 xive_source_pic_print_info(&xive->ipi_source, srcno0, mon); 1507 1508 monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0, 1509 srcno0 + nr_ipis - 1); 1510 for (i = 0; i < nr_ipis; i++) { 1511 if (xive_router_get_eas(xrtr, blk, i, &eas)) { 1512 break; 1513 } 1514 if (!xive_eas_is_masked(&eas)) { 1515 xive_eas_pic_print_info(&eas, i, mon); 1516 } 1517 } 1518 1519 monitor_printf(mon, "XIVE[%x] ENDT %08x .. %08x\n", blk, 0, nr_ends - 1); 1520 for (i = 0; i < nr_ends; i++) { 1521 if (xive_router_get_end(xrtr, blk, i, &end)) { 1522 break; 1523 } 1524 xive_end_pic_print_info(&end, i, mon); 1525 } 1526 } 1527 1528 static void pnv_xive_reset(void *dev) 1529 { 1530 PnvXive *xive = PNV_XIVE(dev); 1531 XiveSource *xsrc = &xive->ipi_source; 1532 XiveENDSource *end_xsrc = &xive->end_source; 1533 1534 /* 1535 * Use the PnvChip id to identify the XIVE interrupt controller. 1536 * It can be overriden by configuration at runtime. 1537 */ 1538 xive->tctx_chipid = xive->chip->chip_id; 1539 1540 /* Default page size (Should be changed at runtime to 64k) */ 1541 xive->ic_shift = xive->vc_shift = xive->pc_shift = 12; 1542 1543 /* Clear subregions */ 1544 if (memory_region_is_mapped(&xsrc->esb_mmio)) { 1545 memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio); 1546 } 1547 1548 if (memory_region_is_mapped(&xive->ipi_edt_mmio)) { 1549 memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio); 1550 } 1551 1552 if (memory_region_is_mapped(&end_xsrc->esb_mmio)) { 1553 memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio); 1554 } 1555 1556 if (memory_region_is_mapped(&xive->end_edt_mmio)) { 1557 memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio); 1558 } 1559 } 1560 1561 static void pnv_xive_init(Object *obj) 1562 { 1563 PnvXive *xive = PNV_XIVE(obj); 1564 1565 object_initialize_child(obj, "ipi_source", &xive->ipi_source, 1566 sizeof(xive->ipi_source), TYPE_XIVE_SOURCE, 1567 &error_abort, NULL); 1568 object_initialize_child(obj, "end_source", &xive->end_source, 1569 sizeof(xive->end_source), TYPE_XIVE_END_SOURCE, 1570 &error_abort, NULL); 1571 } 1572 1573 /* 1574 * Maximum number of IRQs and ENDs supported by HW 1575 */ 1576 #define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE)) 1577 #define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE)) 1578 1579 static void pnv_xive_realize(DeviceState *dev, Error **errp) 1580 { 1581 PnvXive *xive = PNV_XIVE(dev); 1582 XiveSource *xsrc = &xive->ipi_source; 1583 XiveENDSource *end_xsrc = &xive->end_source; 1584 Error *local_err = NULL; 1585 Object *obj; 1586 1587 obj = object_property_get_link(OBJECT(dev), "chip", &local_err); 1588 if (!obj) { 1589 error_propagate(errp, local_err); 1590 error_prepend(errp, "required link 'chip' not found: "); 1591 return; 1592 } 1593 1594 /* The PnvChip id identifies the XIVE interrupt controller. */ 1595 xive->chip = PNV_CHIP(obj); 1596 1597 /* 1598 * The XiveSource and XiveENDSource objects are realized with the 1599 * maximum allowed HW configuration. The ESB MMIO regions will be 1600 * resized dynamically when the controller is configured by the FW 1601 * to limit accesses to resources not provisioned. 1602 */ 1603 object_property_set_int(OBJECT(xsrc), PNV_XIVE_NR_IRQS, "nr-irqs", 1604 &error_fatal); 1605 object_property_add_const_link(OBJECT(xsrc), "xive", OBJECT(xive), 1606 &error_fatal); 1607 object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err); 1608 if (local_err) { 1609 error_propagate(errp, local_err); 1610 return; 1611 } 1612 1613 object_property_set_int(OBJECT(end_xsrc), PNV_XIVE_NR_ENDS, "nr-ends", 1614 &error_fatal); 1615 object_property_add_const_link(OBJECT(end_xsrc), "xive", OBJECT(xive), 1616 &error_fatal); 1617 object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err); 1618 if (local_err) { 1619 error_propagate(errp, local_err); 1620 return; 1621 } 1622 1623 /* Default page size. Generally changed at runtime to 64k */ 1624 xive->ic_shift = xive->vc_shift = xive->pc_shift = 12; 1625 1626 /* XSCOM region, used for initial configuration of the BARs */ 1627 memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops, 1628 xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3); 1629 1630 /* Interrupt controller MMIO regions */ 1631 memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic", 1632 PNV9_XIVE_IC_SIZE); 1633 1634 memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops, 1635 xive, "xive-ic-reg", 1 << xive->ic_shift); 1636 memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev), 1637 &pnv_xive_ic_notify_ops, 1638 xive, "xive-ic-notify", 1 << xive->ic_shift); 1639 1640 /* The Pervasive LSI trigger and EOI pages (not modeled) */ 1641 memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops, 1642 xive, "xive-ic-lsi", 2 << xive->ic_shift); 1643 1644 /* Thread Interrupt Management Area (Indirect) */ 1645 memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev), 1646 &xive_tm_indirect_ops, 1647 xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE); 1648 /* 1649 * Overall Virtualization Controller MMIO region containing the 1650 * IPI ESB pages and END ESB pages. The layout is defined by the 1651 * EDT "Domain table" and the accesses are dispatched using 1652 * address spaces for each. 1653 */ 1654 memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive, 1655 "xive-vc", PNV9_XIVE_VC_SIZE); 1656 1657 memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi", 1658 PNV9_XIVE_VC_SIZE); 1659 address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi"); 1660 memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end", 1661 PNV9_XIVE_VC_SIZE); 1662 address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end"); 1663 1664 /* 1665 * The MMIO windows exposing the IPI ESBs and the END ESBs in the 1666 * VC region. Their size is configured by the FW in the EDT table. 1667 */ 1668 memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0); 1669 memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0); 1670 1671 /* Presenter Controller MMIO region (not modeled) */ 1672 memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive, 1673 "xive-pc", PNV9_XIVE_PC_SIZE); 1674 1675 /* Thread Interrupt Management Area (Direct) */ 1676 memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops, 1677 xive, "xive-tima", PNV9_XIVE_TM_SIZE); 1678 1679 qemu_register_reset(pnv_xive_reset, dev); 1680 } 1681 1682 static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt, 1683 int xscom_offset) 1684 { 1685 const char compat[] = "ibm,power9-xive-x"; 1686 char *name; 1687 int offset; 1688 uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE; 1689 uint32_t reg[] = { 1690 cpu_to_be32(lpc_pcba), 1691 cpu_to_be32(PNV9_XSCOM_XIVE_SIZE) 1692 }; 1693 1694 name = g_strdup_printf("xive@%x", lpc_pcba); 1695 offset = fdt_add_subnode(fdt, xscom_offset, name); 1696 _FDT(offset); 1697 g_free(name); 1698 1699 _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)))); 1700 _FDT((fdt_setprop(fdt, offset, "compatible", compat, 1701 sizeof(compat)))); 1702 return 0; 1703 } 1704 1705 static Property pnv_xive_properties[] = { 1706 DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0), 1707 DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0), 1708 DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0), 1709 DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0), 1710 DEFINE_PROP_END_OF_LIST(), 1711 }; 1712 1713 static void pnv_xive_class_init(ObjectClass *klass, void *data) 1714 { 1715 DeviceClass *dc = DEVICE_CLASS(klass); 1716 PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); 1717 XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass); 1718 XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); 1719 1720 xdc->dt_xscom = pnv_xive_dt_xscom; 1721 1722 dc->desc = "PowerNV XIVE Interrupt Controller"; 1723 dc->realize = pnv_xive_realize; 1724 dc->props = pnv_xive_properties; 1725 1726 xrc->get_eas = pnv_xive_get_eas; 1727 xrc->get_end = pnv_xive_get_end; 1728 xrc->write_end = pnv_xive_write_end; 1729 xrc->get_nvt = pnv_xive_get_nvt; 1730 xrc->write_nvt = pnv_xive_write_nvt; 1731 xrc->get_tctx = pnv_xive_get_tctx; 1732 1733 xnc->notify = pnv_xive_notify; 1734 }; 1735 1736 static const TypeInfo pnv_xive_info = { 1737 .name = TYPE_PNV_XIVE, 1738 .parent = TYPE_XIVE_ROUTER, 1739 .instance_init = pnv_xive_init, 1740 .instance_size = sizeof(PnvXive), 1741 .class_init = pnv_xive_class_init, 1742 .interfaces = (InterfaceInfo[]) { 1743 { TYPE_PNV_XSCOM_INTERFACE }, 1744 { } 1745 } 1746 }; 1747 1748 static void pnv_xive_register_types(void) 1749 { 1750 type_register_static(&pnv_xive_info); 1751 } 1752 1753 type_init(pnv_xive_register_types) 1754