/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/ppc/ppc.h"

#include <libfdt.h>

#include "pnv_xive_regs.h"

#define XIVE_DEBUG

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * Interrupt fifo backing store table (not modeled) :
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - First escalate,
     * 3 - Second escalate,
     * 4 - Redistribution,
     * 5 - IPI cascaded queue ?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};

#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);

/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * TODO: It might be better to use the existing extract64() and
 * deposit64() but this means that all the register definitions will
 * change and become incompatible with the ones found in skiboot.
 *
 * Keep it as it is for now until we find a common ground.
 */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}

static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive *pnv_xive_get_ic(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
        PnvXive *xive = &chip9->xive;

        if (xive->chip->chip_id == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of same
 * size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}
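
/*
 * Worked example (illustrative values only): VSD_TSIZE encodes a
 * table size as a power of two starting at 4K. With a VSD whose
 * VSD_TSIZE field holds 4, GETFIELD(VSD_TSIZE, vsd) returns 4 and
 * the table spans 1ull << (4 + 12) = 64K. SETFIELD is the inverse
 * operation: SETFIELD(VSD_TSIZE, vsd, 4) deposits the value back
 * under the mask without disturbing the other fields.
 */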

static uint64_t pnv_xive_vst_size(uint64_t vsd)
{
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    /*
     * Read the first descriptor to get the page size of the indirect
     * table.
     */
    if (VSD_INDIRECT & vsd) {
        uint32_t nr_pages = vst_tsize / XIVE_VSD_SIZE;
        uint32_t page_shift;

        vsd = ldq_be_dma(&address_space_memory, vsd & VSD_ADDRESS_MASK);
        page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            return 0;
        }

        return nr_pages * (1ull << page_shift);
    }

    return vst_tsize;
}

static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    vsd = ldq_be_dma(&address_space_memory, vsd_addr);

    if (!(vsd & VSD_ADDRESS_MASK)) {
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        vsd = ldq_be_dma(&address_space_memory, vsd_addr);

        if (!(vsd & VSD_ADDRESS_MASK)) {
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differs !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
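
/*
 * Indirect lookup arithmetic, as a sketch with assumed values: with
 * 64K indirect pages (page_shift = 16) holding 32-byte XiveEND
 * entries, vst_per_page = 65536 / 32 = 2048. An END index of 5000
 * then selects VSD #2 (5000 / 2048) of the indirect table and entry
 * #904 (5000 % 2048) of the direct page that this VSD points to.
 */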
%x ] !?", 237 info->name, blk, idx, idx_max); 238 #endif 239 return 0; 240 } 241 242 if (VSD_INDIRECT & vsd) { 243 return pnv_xive_vst_addr_indirect(xive, type, vsd, idx); 244 } 245 246 return pnv_xive_vst_addr_direct(xive, type, vsd, idx); 247 } 248 249 static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk, 250 uint32_t idx, void *data) 251 { 252 const XiveVstInfo *info = &vst_infos[type]; 253 uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx); 254 255 if (!addr) { 256 return -1; 257 } 258 259 cpu_physical_memory_read(addr, data, info->size); 260 return 0; 261 } 262 263 #define XIVE_VST_WORD_ALL -1 264 265 static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk, 266 uint32_t idx, void *data, uint32_t word_number) 267 { 268 const XiveVstInfo *info = &vst_infos[type]; 269 uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx); 270 271 if (!addr) { 272 return -1; 273 } 274 275 if (word_number == XIVE_VST_WORD_ALL) { 276 cpu_physical_memory_write(addr, data, info->size); 277 } else { 278 cpu_physical_memory_write(addr + word_number * 4, 279 data + word_number * 4, 4); 280 } 281 return 0; 282 } 283 284 static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 285 XiveEND *end) 286 { 287 return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end); 288 } 289 290 static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 291 XiveEND *end, uint8_t word_number) 292 { 293 return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end, 294 word_number); 295 } 296 297 static int pnv_xive_end_update(PnvXive *xive, uint8_t blk, uint32_t idx) 298 { 299 int i; 300 uint64_t eqc_watch[4]; 301 302 for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) { 303 eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]); 304 } 305 306 return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch, 307 XIVE_VST_WORD_ALL); 308 } 309 310 static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 311 XiveNVT *nvt) 312 { 313 return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt); 314 } 315 316 static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 317 XiveNVT *nvt, uint8_t word_number) 318 { 319 return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt, 320 word_number); 321 } 322 323 static int pnv_xive_nvt_update(PnvXive *xive, uint8_t blk, uint32_t idx) 324 { 325 int i; 326 uint64_t vpc_watch[8]; 327 328 for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) { 329 vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]); 330 } 331 332 return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch, 333 XIVE_VST_WORD_ALL); 334 } 335 336 static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 337 XiveEAS *eas) 338 { 339 PnvXive *xive = PNV_XIVE(xrtr); 340 341 if (pnv_xive_get_ic(blk) != xive) { 342 xive_error(xive, "VST: EAS %x is remote !?", XIVE_SRCNO(blk, idx)); 343 return -1; 344 } 345 346 return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas); 347 } 348 349 static int pnv_xive_eas_update(PnvXive *xive, uint8_t blk, uint32_t idx) 350 { 351 /* All done. */ 352 return 0; 353 } 354 355 static XiveTCTX *pnv_xive_get_tctx(XiveRouter *xrtr, CPUState *cs) 356 { 357 PowerPCCPU *cpu = POWERPC_CPU(cs); 358 XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); 359 PnvXive *xive = NULL; 360 CPUPPCState *env = &cpu->env; 361 int pir = env->spr_cb[SPR_PIR].default_value; 362 363 /* 364 * Perform an extra check on the HW thread enablement. 

static XiveTCTX *pnv_xive_get_tctx(XiveRouter *xrtr, CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    PnvXive *xive = NULL;
    CPUPPCState *env = &cpu->env;
    int pir = env->spr_cb[SPR_PIR].default_value;

    /*
     * Perform an extra check on the HW thread enablement.
     *
     * The TIMA is shared among the chips and to identify the chip
     * from which the access is being done, we extract the chip id
     * from the PIR.
     */
    xive = pnv_xive_get_ic((pir >> 8) & 0xf);
    if (!xive) {
        return NULL;
    }

    if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) {
        xive_error(PNV_XIVE(xrtr), "IC: CPU %x is not enabled", pir);
    }

    return tctx;
}

/*
 * The internal sources (IPIs) of the interrupt controller have no
 * knowledge of the XIVE chip on which they reside. Encode the block
 * id in the source interrupt number before forwarding the source
 * event notification to the Router. This is required on a multichip
 * system.
 */
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno)
{
    PnvXive *xive = PNV_XIVE(xn);
    uint8_t blk = xive->chip->chip_id;

    xive_router_notify(xn, XIVE_SRCNO(blk, srcno));
}

/*
 * XIVE helpers
 */

static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}

static uint64_t pnv_xive_edt_shift(PnvXive *xive)
{
    return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
}

static uint64_t pnv_xive_pc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
}

static uint32_t pnv_xive_nr_ipis(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;

    return pnv_xive_vst_size(xive->vsds[VST_TSEL_SBE][blk]) * SBE_PER_BYTE;
}

static uint32_t pnv_xive_nr_ends(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;

    return pnv_xive_vst_size(xive->vsds[VST_TSEL_EQDT][blk])
        / vst_infos[VST_TSEL_EQDT].size;
}
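
/*
 * Sizing examples (assumed values): each SBE byte backs
 * SBE_PER_BYTE = 4 interrupts (2 PQ bits per interrupt), so a 64K
 * SBE table provisions 64K * 4 = 256K IPIs. The BARM registers
 * encode a window size by negation: a CQ_VC_BARM value of
 * 0xffffffc000000000 yields ~val + 1 = 0x4000000000, i.e. a 256GB
 * VC window.
 */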

/*
 * EDT Table
 *
 * The Virtualization Controller MMIO region containing the IPI ESB
 * pages and END ESB pages is sub-divided into "sets" which map
 * portions of the VC region to the different ESB pages. It is
 * configured at runtime through the EDT "Domain Table" to let the
 * firmware decide how to split the VC address space between IPI ESB
 * pages and END ESB pages.
 */

/*
 * Computes the overall size of the IPI or the END ESB pages
 */
static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t size = 0;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type == type) {
            size += edt_size;
        }
    }

    return size;
}

/*
 * Maps an offset of the VC region in the IPI or END region using the
 * layout defined by the EDT "Domain Table"
 */
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    int i;
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;

    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}

static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}
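
/*
 * Remapping example (assumed EDT layout): take an EDT set size S
 * with sets #0 and #1 of type IPI followed by sets #2 and #3 of
 * type EQ. A VC access at offset 2 * S falls in set #2; the loop in
 * pnv_xive_edt_offset() subtracts S for each preceding non-EQ set,
 * so the access lands at offset 0 of the END address space.
 */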

/*
 * XIVE Table configuration. Only EDT is supported.
 */
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_TSEL_IVT:  /* Nothing to be done */
        break;

    case VST_TSEL_EQDT:
        /*
         * Backing store pages for the END. Compute the number of ENDs
         * provisioned by FW and resize the END ESB window accordingly.
         */
        memory_region_set_size(&end_xsrc->esb_mmio, pnv_xive_nr_ends(xive) *
                               (1ull << (end_xsrc->esb_shift + 1)));
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own. Compute the number of IRQs provisioned
         * by FW and resize the IPI ESB window accordingly.
         */
        memory_region_set_size(&xsrc->esb_mmio, pnv_xive_nr_ipis(xive) *
                               (1ull << xsrc->esb_shift));
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT: /* Not modeled */
    case VST_TSEL_IRQ:  /* Not modeled */
        /*
         * These tables contain the backing store pages for the
         * interrupt fifos of the VC sub-engine in case of overflow.
         */
        break;

    default:
        g_assert_not_reached();
    }
}
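
/*
 * Typical firmware programming sequence, as a sketch (register
 * values are illustrative): to install the END table of block 0,
 * the FW first selects the table with a store to VC_VSD_TABLE_ADDR
 * (VST_TABLE_SELECT = VST_TSEL_EQDT, VST_TABLE_BLOCK = 0), then
 * stores the VSD itself to VC_VSD_TABLE_DATA with VSD_MODE_EXCLUSIVE,
 * the table address and its VSD_TSIZE. pnv_xive_vst_set_data() below
 * decodes this pair of stores.
 */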

/*
 * Both PC and VC sub-engines are configured the same way as each
 * uses the Virtual Structure Tables : SBE, EAS, END and NVT.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                   " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

/*
 * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages :
 *
 * Page 0           sub-engine BARs
 *  0x000 - 0x3FF   IC registers
 *  0x400 - 0x7FF   PC registers
 *  0x800 - 0xFFF   VC registers
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
 *  0x800 - 0xFFF   forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */
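
/*
 * Page offsets for the layout above, assuming 64K pages
 * (ic_shift = 16): the notify page is mapped at 1 << 16 = 0x10000,
 * the LSI pages at 0x20000 and the indirect TIMA at 0x40000, which
 * matches the subregion offsets programmed in the CQ_IC_BAR case
 * below.
 */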

/*
 * IC - registers MMIO
 */
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:     /* msgsnd for doorbells */
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
        /*
         * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
         */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /*
         * PC_GCONF_CHIPID_OVR
         *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
         */
        break;
    case PC_TCTXT_CFG:
        /*
         * TODO: block group support
         *
         * PC_TCTXT_CFG_BLKGRP_EN
         * PC_TCTXT_CFG_HARD_CHIPID_BLK :
         *   Moves the chipid into block field for hardwired CAM compares.
         *   Block offset value is adjusted to 0b0..01 & ThrdId
         *
         *   Will require changes in xive_presenter_tctx_match(). I am
         *   not sure how to handle that yet.
         */

        /* Overrides hardwired chip ID with the chip ID field */
        if (val & PC_TCTXT_CHIPID_OVERRIDE) {
            xive->tctx_chipid = GETFIELD(PC_TCTXT_CHIPID, val);
        }
        break;
    case PC_TCTXT_TRACK:
        /*
         * PC_TCTXT_TRACK_EN:
         *   enable block tracking and exchange of block ownership
         *   information between Interrupt controllers
         */
        break;

    /*
     * Misc settings
     */
    case VC_SBC_CONFIG: /* Store EOI configuration */
        /*
         * Configure store EOI if required by firmware (skiboot has
         * removed support recently though)
         */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG:        /* TODO: silent escalation */
    case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* bit4: resets all BAR registers */
        break;

    case CQ_IC_BAR: /* IC BAR. 8 pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio, 0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
    case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR: /* From 32M to 512G */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR: /* From 64M to 4TB */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings.
     */
    case CQ_TAR: /* Table Address */
        break;
    case CQ_TDR: /* Table Data */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR: /* Virtual table selector */
        break;
    case VC_VSD_TABLE_DATA: /* Virtual table setting */
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0: /* Physical Thread Enable */
    case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up. Defines the PIR of the HW thread
     * to use.
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;
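
    /*
     * Cache watch protocol, as modeled here: firmware fills the
     * VC_EQC_CWATCH_DAT0-3 (or PC_VPC_CWATCH_DAT0-7) registers with
     * the new entry, then hits the matching SCRUB_TRIG register.
     * The update helpers above (pnv_xive_end_update(),
     * pnv_xive_nvt_update()) then flush the watch registers to the
     * VST backing store in one shot.
     */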

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
        break;
    case VC_IVC_SCRUB_TRIG:
        pnv_xive_eas_update(xive, GETFIELD(PC_SCRUB_BLOCK_ID, val),
                            GETFIELD(VC_SCRUB_OFFSET, val));
        break;

    case VC_EQC_SCRUB_MASK:
    case VC_EQC_CWATCH_SPEC:
    case VC_EQC_CWATCH_DAT0 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_SCRUB_TRIG:
        pnv_xive_end_update(xive, GETFIELD(VC_SCRUB_BLOCK_ID, val),
                            GETFIELD(VC_SCRUB_OFFSET, val));
        break;

    case PC_VPC_SCRUB_MASK:
    case PC_VPC_CWATCH_SPEC:
    case PC_VPC_CWATCH_DAT0 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_SCRUB_TRIG:
        pnv_xive_nvt_update(xive, GETFIELD(PC_SCRUB_BLOCK_ID, val),
                            GETFIELD(PC_SCRUB_OFFSET, val));
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}

static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BAR:
    case CQ_PC_BARM:
    case CQ_VC_BAR:
    case CQ_VC_BARM:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:
    case PC_VPC_CWATCH_SPEC:
    case PC_VPC_CWATCH_DAT0:
    case PC_VPC_CWATCH_DAT1:
    case PC_VPC_CWATCH_DAT2:
    case PC_VPC_CWATCH_DAT3:
    case PC_VPC_CWATCH_DAT4:
    case PC_VPC_CWATCH_DAT5:
    case PC_VPC_CWATCH_DAT6:
    case PC_VPC_CWATCH_DAT7:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_EQC_CWATCH_DAT0:
    case VC_EQC_CWATCH_DAT1:
    case VC_EQC_CWATCH_DAT2:
    case VC_EQC_CWATCH_DAT3:

    case VC_EQC_CWATCH_SPEC:
    case VC_IVC_SCRUB_MASK:
    case VC_SBC_CONFIG:
    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}

static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Notify MMIO port page (write only)
 */
#define PNV_XIVE_FORWARD_IPI     0x800 /* Forward IPI */
#define PNV_XIVE_FORWARD_HW      0x880 /* Forward HW */
#define PNV_XIVE_FORWARD_OS_ESC  0x900 /* Forward OS escalation */
#define PNV_XIVE_FORWARD_HW_ESC  0x980 /* Forward Hyp escalation */
#define PNV_XIVE_FORWARD_REDIS   0xa00 /* Forward Redistribution */
#define PNV_XIVE_RESERVED5       0xa80 /* Cache line 5 PowerBUS operation */
#define PNV_XIVE_RESERVED6       0xb00 /* Cache line 6 PowerBUS operation */
#define PNV_XIVE_RESERVED7       0xb80 /* Cache line 7 PowerBUS operation */

/* VC synchronisation */
#define PNV_XIVE_SYNC_IPI        0xc00 /* Sync IPI */
#define PNV_XIVE_SYNC_HW         0xc80 /* Sync HW */
#define PNV_XIVE_SYNC_OS_ESC     0xd00 /* Sync OS escalation */
#define PNV_XIVE_SYNC_HW_ESC     0xd80 /* Sync Hyp escalation */
#define PNV_XIVE_SYNC_REDIS      0xe00 /* Sync Redistribution */

/* PC synchronisation */
#define PNV_XIVE_SYNC_PULL       0xe80 /* Sync pull context */
#define PNV_XIVE_SYNC_PUSH       0xf00 /* Sync push context */
#define PNV_XIVE_SYNC_VPC        0xf80 /* Sync remove VPC store */

static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    xive_router_notify(XIVE_NOTIFIER(xive), val);
}
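
/*
 * A device trigger is a simple 8-byte store to the notify page. As
 * an illustrative sketch, a PHB raising interrupt 0x20 of block 1
 * would store XIVE_SRCNO(1, 0x20) at offset 0 of the page, which
 * pnv_xive_ic_notify_write() below routes through
 * pnv_xive_ic_hw_trigger().
 */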

static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* VC: HW triggers */
    switch (addr) {
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    /* VC: Forwarded IRQs */
    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        /* TODO: forwarded IRQs. Should be like HW triggers */
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}

static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* loads are invalid */
    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - LSI MMIO handlers (not modeled)
 */

static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Indirect TIMA MMIO handlers
 */

/*
 * When the TIMA is accessed from the indirect page, the thread id
 * (PIR) has to be configured in the IC registers beforehand. This is
 * used for resets and for debug purposes.
 */
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir) & 0xff;
    cpu = ppc_get_vcpu_by_pir(pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    /* Check that HW thread is XIVE enabled */
    if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static void xive_tm_indirect_write(void *opaque, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    /* The error was already logged. Discard the access. */
    if (!tctx) {
        return;
    }

    xive_tctx_tm_write(tctx, offset, value, size);
}

static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    /* The error was already logged. Discard the access. */
    if (!tctx) {
        return -1;
    }

    return xive_tctx_tm_read(tctx, offset, size);
}

static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
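
/*
 * Indirect TIMA access sequence, as a sketch: firmware first stores
 * a value with PC_TCTXT_INDIR_VALID set and the target PIR in the
 * PC_TCTXT_INDIR_THRDID field to PC_TCTXT_INDIR0, then accesses the
 * indirect TIMA pages of the IC BAR. Each access resolves the target
 * thread context with ppc_get_vcpu_by_pir(), as done in
 * pnv_xive_get_indirect_tctx() above.
 */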

/*
 * Interrupt controller XSCOM region.
 */
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
        return VC_EQC_SYNC_MASK;
    default:
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    }
};

/*
 * Virtualization Controller MMIO region containing the IPI and END ESB pages
 */
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret = -1;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}

static void pnv_xive_vc_write(void *opaque, hwaddr offset,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
                   offset);
        return;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
    }
}

static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Presenter Controller MMIO region. The Virtualization Controller
 * updates the IPB in the NVT table when required. Not modeled.
 */
static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static void pnv_xive_pc_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid write to PC @%"HWADDR_PRIx, addr);
}

static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
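
/*
 * The monitor helper below is typically reached through the HMP
 * "info pic" command on the powernv machine, which dumps, per
 * block, the IPI source PQ bits, the EAT and the ENDT.
 */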

void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
{
    XiveRouter *xrtr = XIVE_ROUTER(xive);
    uint8_t blk = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_SRCNO(blk, 0);
    uint32_t nr_ipis = pnv_xive_nr_ipis(xive);
    uint32_t nr_ends = pnv_xive_nr_ends(xive);
    XiveEAS eas;
    XiveEND end;
    int i;

    monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_ipis - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_ipis - 1);
    for (i = 0; i < nr_ipis; i++) {
        if (xive_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive_eas_is_masked(&eas)) {
            xive_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] ENDT %08x .. %08x\n", blk, 0, nr_ends - 1);
    for (i = 0; i < nr_ends; i++) {
        if (xive_router_get_end(xrtr, blk, i, &end)) {
            break;
        }
        xive_end_pic_print_info(&end, i, mon);
    }
}

static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /*
     * Use the PnvChip id to identify the XIVE interrupt controller.
     * It can be overridden by configuration at runtime.
     */
    xive->tctx_chipid = xive->chip->chip_id;

    /* Default page size (Should be changed at runtime to 64k) */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* Clear subregions */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
        memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->end_edt_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
    }
}

static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            sizeof(xive->ipi_source), TYPE_XIVE_SOURCE,
                            &error_abort, NULL);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            sizeof(xive->end_source), TYPE_XIVE_END_SOURCE,
                            &error_abort, NULL);
}
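
/*
 * A sketch of the expected QOM wiring (done by the Pnv9Chip code,
 * shown here for illustration): the "chip" link consumed by
 * pnv_xive_realize() below is set by the chip before realizing the
 * device, along the lines of:
 *
 *   object_property_add_const_link(OBJECT(&chip9->xive), "chip",
 *                                  OBJECT(chip), &error_abort);
 */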

/*
 * Maximum number of IRQs and ENDs supported by HW
 */
#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))

static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;
    Object *obj;

    obj = object_property_get_link(OBJECT(dev), "chip", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'chip' not found: ");
        return;
    }

    /* The PnvChip id identifies the XIVE interrupt controller. */
    xive->chip = PNV_CHIP(obj);

    /*
     * The XiveSource and XiveENDSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), PNV_XIVE_NR_IRQS, "nr-irqs",
                            &error_fatal);
    object_property_add_const_link(OBJECT(xsrc), "xive", OBJECT(xive),
                                   &error_fatal);
    object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), PNV_XIVE_NR_ENDS, "nr-ends",
                            &error_fatal);
    object_property_add_const_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                                   &error_fatal);
    object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Default page size. Generally changed at runtime to 64k */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);

    /* Interrupt controller MMIO regions */
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);

    /* The Pervasive LSI trigger and EOI pages (not modeled) */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Thread Interrupt Management Area (Indirect) */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);

    /*
     * Overall Virtualization Controller MMIO region containing the
     * IPI ESB pages and END ESB pages. The layout is defined by the
     * EDT "Domain table" and the accesses are dispatched using
     * address spaces for each.
     */
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
     */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);

    /* Presenter Controller MMIO region (not modeled) */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}

static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
                             int xscom_offset)
{
    const char compat[] = "ibm,power9-xive-x";
    char *name;
    int offset;
    uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
    uint32_t reg[] = {
        cpu_to_be32(lpc_pcba),
        cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
    };

    name = g_strdup_printf("xive@%x", lpc_pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat,
                      sizeof(compat))));
    return 0;
}
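
/*
 * The resulting device tree node, assuming the usual POWER9 XSCOM
 * base and size values for illustration:
 *
 *   xive@5013000 {
 *           reg = <0x5013000 0x300>;
 *           compatible = "ibm,power9-xive-x";
 *   };
 */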
1667 */ 1668 memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0); 1669 memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0); 1670 1671 /* Presenter Controller MMIO region (not modeled) */ 1672 memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive, 1673 "xive-pc", PNV9_XIVE_PC_SIZE); 1674 1675 /* Thread Interrupt Management Area (Direct) */ 1676 memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops, 1677 xive, "xive-tima", PNV9_XIVE_TM_SIZE); 1678 1679 qemu_register_reset(pnv_xive_reset, dev); 1680 } 1681 1682 static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt, 1683 int xscom_offset) 1684 { 1685 const char compat[] = "ibm,power9-xive-x"; 1686 char *name; 1687 int offset; 1688 uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE; 1689 uint32_t reg[] = { 1690 cpu_to_be32(lpc_pcba), 1691 cpu_to_be32(PNV9_XSCOM_XIVE_SIZE) 1692 }; 1693 1694 name = g_strdup_printf("xive@%x", lpc_pcba); 1695 offset = fdt_add_subnode(fdt, xscom_offset, name); 1696 _FDT(offset); 1697 g_free(name); 1698 1699 _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)))); 1700 _FDT((fdt_setprop(fdt, offset, "compatible", compat, 1701 sizeof(compat)))); 1702 return 0; 1703 } 1704 1705 static Property pnv_xive_properties[] = { 1706 DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0), 1707 DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0), 1708 DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0), 1709 DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0), 1710 DEFINE_PROP_END_OF_LIST(), 1711 }; 1712 1713 static void pnv_xive_class_init(ObjectClass *klass, void *data) 1714 { 1715 DeviceClass *dc = DEVICE_CLASS(klass); 1716 PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); 1717 XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass); 1718 XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); 1719 1720 xdc->dt_xscom = pnv_xive_dt_xscom; 1721 1722 dc->desc = "PowerNV XIVE Interrupt Controller"; 1723 dc->realize = pnv_xive_realize; 1724 dc->props = pnv_xive_properties; 1725 1726 xrc->get_eas = pnv_xive_get_eas; 1727 xrc->get_end = pnv_xive_get_end; 1728 xrc->write_end = pnv_xive_write_end; 1729 xrc->get_nvt = pnv_xive_get_nvt; 1730 xrc->write_nvt = pnv_xive_write_nvt; 1731 xrc->get_tctx = pnv_xive_get_tctx; 1732 1733 xnc->notify = pnv_xive_notify; 1734 }; 1735 1736 static const TypeInfo pnv_xive_info = { 1737 .name = TYPE_PNV_XIVE, 1738 .parent = TYPE_XIVE_ROUTER, 1739 .instance_init = pnv_xive_init, 1740 .instance_size = sizeof(PnvXive), 1741 .class_init = pnv_xive_class_init, 1742 .interfaces = (InterfaceInfo[]) { 1743 { TYPE_PNV_XSCOM_INTERFACE }, 1744 { } 1745 } 1746 }; 1747 1748 static void pnv_xive_register_types(void) 1749 { 1750 type_register_static(&pnv_xive_info); 1751 } 1752 1753 type_init(pnv_xive_register_types) 1754