1 /* 2 * QEMU PowerPC XIVE interrupt controller model 3 * 4 * Copyright (c) 2017-2019, IBM Corporation. 5 * 6 * This code is licensed under the GPL version 2 or later. See the 7 * COPYING file in the top-level directory. 8 */ 9 10 #include "qemu/osdep.h" 11 #include "qemu/log.h" 12 #include "qemu/module.h" 13 #include "qapi/error.h" 14 #include "target/ppc/cpu.h" 15 #include "sysemu/cpus.h" 16 #include "sysemu/dma.h" 17 #include "sysemu/reset.h" 18 #include "monitor/monitor.h" 19 #include "hw/ppc/fdt.h" 20 #include "hw/ppc/pnv.h" 21 #include "hw/ppc/pnv_core.h" 22 #include "hw/ppc/pnv_xscom.h" 23 #include "hw/ppc/pnv_xive.h" 24 #include "hw/ppc/xive_regs.h" 25 #include "hw/qdev-properties.h" 26 #include "hw/ppc/ppc.h" 27 28 #include <libfdt.h> 29 30 #include "pnv_xive_regs.h" 31 32 #define XIVE_DEBUG 33 34 /* 35 * Virtual structures table (VST) 36 */ 37 #define SBE_PER_BYTE 4 38 39 typedef struct XiveVstInfo { 40 const char *name; 41 uint32_t size; 42 uint32_t max_blocks; 43 } XiveVstInfo; 44 45 static const XiveVstInfo vst_infos[] = { 46 [VST_TSEL_IVT] = { "EAT", sizeof(XiveEAS), 16 }, 47 [VST_TSEL_SBE] = { "SBE", 1, 16 }, 48 [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 }, 49 [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 }, 50 51 /* 52 * Interrupt fifo backing store table (not modeled) : 53 * 54 * 0 - IPI, 55 * 1 - HWD, 56 * 2 - First escalate, 57 * 3 - Second escalate, 58 * 4 - Redistribution, 59 * 5 - IPI cascaded queue ? 60 */ 61 [VST_TSEL_IRQ] = { "IRQ", 1, 6 }, 62 }; 63 64 #define xive_error(xive, fmt, ...) \ 65 qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n", \ 66 (xive)->chip->chip_id, ## __VA_ARGS__); 67 68 /* 69 * QEMU version of the GETFIELD/SETFIELD macros 70 * 71 * TODO: It might be better to use the existing extract64() and 72 * deposit64() but this means that all the register definitions will 73 * change and become incompatible with the ones found in skiboot. 74 * 75 * Keep it as it is for now until we find a common ground. 76 */ 77 static inline uint64_t GETFIELD(uint64_t mask, uint64_t word) 78 { 79 return (word & mask) >> ctz64(mask); 80 } 81 82 static inline uint64_t SETFIELD(uint64_t mask, uint64_t word, 83 uint64_t value) 84 { 85 return (word & ~mask) | ((value << ctz64(mask)) & mask); 86 } 87 88 /* 89 * Remote access to controllers. HW uses MMIOs. For now, a simple scan 90 * of the chips is good enough. 91 * 92 * TODO: Block scope support 93 */ 94 static PnvXive *pnv_xive_get_ic(uint8_t blk) 95 { 96 PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine()); 97 int i; 98 99 for (i = 0; i < pnv->num_chips; i++) { 100 Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]); 101 PnvXive *xive = &chip9->xive; 102 103 if (xive->chip->chip_id == blk) { 104 return xive; 105 } 106 } 107 return NULL; 108 } 109 110 /* 111 * VST accessors for SBE, EAT, ENDT, NVT 112 * 113 * Indirect VST tables are arrays of VSDs pointing to a page (of same 114 * size). Each page is a direct VST table. 115 */ 116 117 #define XIVE_VSD_SIZE 8 118 119 /* Indirect page size can be 4K, 64K, 2M, 16M. 
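 * i.e. page shifts 12, 16, 21 and 24, which correspond to VSD_TSIZE
 * values 0, 4, 9 and 12 since page_shift = VSD_TSIZE + 12 (see the
 * callers below).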
*/ 120 static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift) 121 { 122 return page_shift == 12 || page_shift == 16 || 123 page_shift == 21 || page_shift == 24; 124 } 125 126 static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type, 127 uint64_t vsd, uint32_t idx) 128 { 129 const XiveVstInfo *info = &vst_infos[type]; 130 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; 131 uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12); 132 uint32_t idx_max; 133 134 idx_max = vst_tsize / info->size - 1; 135 if (idx > idx_max) { 136 #ifdef XIVE_DEBUG 137 xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?", 138 info->name, idx, idx_max); 139 #endif 140 return 0; 141 } 142 143 return vst_addr + idx * info->size; 144 } 145 146 static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type, 147 uint64_t vsd, uint32_t idx) 148 { 149 const XiveVstInfo *info = &vst_infos[type]; 150 uint64_t vsd_addr; 151 uint32_t vsd_idx; 152 uint32_t page_shift; 153 uint32_t vst_per_page; 154 155 /* Get the page size of the indirect table. */ 156 vsd_addr = vsd & VSD_ADDRESS_MASK; 157 vsd = ldq_be_dma(&address_space_memory, vsd_addr); 158 159 if (!(vsd & VSD_ADDRESS_MASK)) { 160 xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx); 161 return 0; 162 } 163 164 page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; 165 166 if (!pnv_xive_vst_page_size_allowed(page_shift)) { 167 xive_error(xive, "VST: invalid %s page shift %d", info->name, 168 page_shift); 169 return 0; 170 } 171 172 vst_per_page = (1ull << page_shift) / info->size; 173 vsd_idx = idx / vst_per_page; 174 175 /* Load the VSD we are looking for, if not already done */ 176 if (vsd_idx) { 177 vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE; 178 vsd = ldq_be_dma(&address_space_memory, vsd_addr); 179 180 if (!(vsd & VSD_ADDRESS_MASK)) { 181 xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx); 182 return 0; 183 } 184 185 /* 186 * Check that the pages have a consistent size across the 187 * indirect table 188 */ 189 if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) { 190 xive_error(xive, "VST: %s entry %x indirect page size differ !?", 191 info->name, idx); 192 return 0; 193 } 194 } 195 196 return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page)); 197 } 198 199 static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk, 200 uint32_t idx) 201 { 202 const XiveVstInfo *info = &vst_infos[type]; 203 uint64_t vsd; 204 205 if (blk >= info->max_blocks) { 206 xive_error(xive, "VST: invalid block id %d for VST %s %d !?", 207 blk, info->name, idx); 208 return 0; 209 } 210 211 vsd = xive->vsds[type][blk]; 212 213 /* Remote VST access */ 214 if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) { 215 xive = pnv_xive_get_ic(blk); 216 217 return xive ? 
pnv_xive_vst_addr(xive, type, blk, idx) : 0; 218 } 219 220 if (VSD_INDIRECT & vsd) { 221 return pnv_xive_vst_addr_indirect(xive, type, vsd, idx); 222 } 223 224 return pnv_xive_vst_addr_direct(xive, type, vsd, idx); 225 } 226 227 static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk, 228 uint32_t idx, void *data) 229 { 230 const XiveVstInfo *info = &vst_infos[type]; 231 uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx); 232 233 if (!addr) { 234 return -1; 235 } 236 237 cpu_physical_memory_read(addr, data, info->size); 238 return 0; 239 } 240 241 #define XIVE_VST_WORD_ALL -1 242 243 static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk, 244 uint32_t idx, void *data, uint32_t word_number) 245 { 246 const XiveVstInfo *info = &vst_infos[type]; 247 uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx); 248 249 if (!addr) { 250 return -1; 251 } 252 253 if (word_number == XIVE_VST_WORD_ALL) { 254 cpu_physical_memory_write(addr, data, info->size); 255 } else { 256 cpu_physical_memory_write(addr + word_number * 4, 257 data + word_number * 4, 4); 258 } 259 return 0; 260 } 261 262 static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 263 XiveEND *end) 264 { 265 return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end); 266 } 267 268 static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 269 XiveEND *end, uint8_t word_number) 270 { 271 return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end, 272 word_number); 273 } 274 275 static int pnv_xive_end_update(PnvXive *xive) 276 { 277 uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID, 278 xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); 279 uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET, 280 xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); 281 int i; 282 uint64_t eqc_watch[4]; 283 284 for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) { 285 eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]); 286 } 287 288 return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch, 289 XIVE_VST_WORD_ALL); 290 } 291 292 static void pnv_xive_end_cache_load(PnvXive *xive) 293 { 294 uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID, 295 xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); 296 uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET, 297 xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); 298 uint64_t eqc_watch[4] = { 0 }; 299 int i; 300 301 if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) { 302 xive_error(xive, "VST: no END entry %x/%x !?", blk, idx); 303 } 304 305 for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) { 306 xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]); 307 } 308 } 309 310 static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 311 XiveNVT *nvt) 312 { 313 return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt); 314 } 315 316 static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 317 XiveNVT *nvt, uint8_t word_number) 318 { 319 return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt, 320 word_number); 321 } 322 323 static int pnv_xive_nvt_update(PnvXive *xive) 324 { 325 uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID, 326 xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); 327 uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET, 328 xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); 329 int i; 330 uint64_t vpc_watch[8]; 331 332 for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) { 333 vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]); 334 } 335 336 return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, 
vpc_watch, 337 XIVE_VST_WORD_ALL); 338 } 339 340 static void pnv_xive_nvt_cache_load(PnvXive *xive) 341 { 342 uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID, 343 xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); 344 uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET, 345 xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); 346 uint64_t vpc_watch[8] = { 0 }; 347 int i; 348 349 if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) { 350 xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx); 351 } 352 353 for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) { 354 xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]); 355 } 356 } 357 358 static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 359 XiveEAS *eas) 360 { 361 PnvXive *xive = PNV_XIVE(xrtr); 362 363 if (pnv_xive_get_ic(blk) != xive) { 364 xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx)); 365 return -1; 366 } 367 368 return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas); 369 } 370 371 static XiveTCTX *pnv_xive_get_tctx(XiveRouter *xrtr, CPUState *cs) 372 { 373 PowerPCCPU *cpu = POWERPC_CPU(cs); 374 XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); 375 PnvXive *xive = NULL; 376 CPUPPCState *env = &cpu->env; 377 int pir = env->spr_cb[SPR_PIR].default_value; 378 379 /* 380 * Perform an extra check on the HW thread enablement. 381 * 382 * The TIMA is shared among the chips and to identify the chip 383 * from which the access is being done, we extract the chip id 384 * from the PIR. 385 */ 386 xive = pnv_xive_get_ic((pir >> 8) & 0xf); 387 if (!xive) { 388 return NULL; 389 } 390 391 if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) { 392 xive_error(PNV_XIVE(xrtr), "IC: CPU %x is not enabled", pir); 393 } 394 395 return tctx; 396 } 397 398 /* 399 * The internal sources (IPIs) of the interrupt controller have no 400 * knowledge of the XIVE chip on which they reside. Encode the block 401 * id in the source interrupt number before forwarding the source 402 * event notification to the Router. This is required on a multichip 403 * system. 404 */ 405 static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno) 406 { 407 PnvXive *xive = PNV_XIVE(xn); 408 uint8_t blk = xive->chip->chip_id; 409 410 xive_router_notify(xn, XIVE_EAS(blk, srcno)); 411 } 412 413 /* 414 * XIVE helpers 415 */ 416 417 static uint64_t pnv_xive_vc_size(PnvXive *xive) 418 { 419 return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK; 420 } 421 422 static uint64_t pnv_xive_edt_shift(PnvXive *xive) 423 { 424 return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX); 425 } 426 427 static uint64_t pnv_xive_pc_size(PnvXive *xive) 428 { 429 return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK; 430 } 431 432 static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk) 433 { 434 uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk]; 435 uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12); 436 437 return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE; 438 } 439 440 /* 441 * EDT Table 442 * 443 * The Virtualization Controller MMIO region containing the IPI ESB 444 * pages and END ESB pages is sub-divided into "sets" which map 445 * portions of the VC region to the different ESB pages. It is 446 * configured at runtime through the EDT "Domain Table" to let the 447 * firmware decide how to split the VC address space between IPI ESB 448 * pages and END ESB pages. 
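 *
 * Each of the XIVE_TABLE_EDT_MAX entries covers an equal share of the
 * VC BAR, so a single set spans pnv_xive_vc_size() / XIVE_TABLE_EDT_MAX
 * bytes (see pnv_xive_edt_shift() below).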
449 */ 450 451 /* 452 * Computes the overall size of the IPI or the END ESB pages 453 */ 454 static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type) 455 { 456 uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive); 457 uint64_t size = 0; 458 int i; 459 460 for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) { 461 uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]); 462 463 if (edt_type == type) { 464 size += edt_size; 465 } 466 } 467 468 return size; 469 } 470 471 /* 472 * Maps an offset of the VC region in the IPI or END region using the 473 * layout defined by the EDT "Domaine Table" 474 */ 475 static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset, 476 uint64_t type) 477 { 478 int i; 479 uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive); 480 uint64_t edt_offset = vc_offset; 481 482 for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) { 483 uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]); 484 485 if (edt_type != type) { 486 edt_offset -= edt_size; 487 } 488 } 489 490 return edt_offset; 491 } 492 493 static void pnv_xive_edt_resize(PnvXive *xive) 494 { 495 uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI); 496 uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ); 497 498 memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size); 499 memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio); 500 501 memory_region_set_size(&xive->end_edt_mmio, end_edt_size); 502 memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio); 503 } 504 505 /* 506 * XIVE Table configuration. Only EDT is supported. 507 */ 508 static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val) 509 { 510 uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL; 511 uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]); 512 uint64_t *xive_table; 513 uint8_t max_index; 514 515 switch (tsel) { 516 case CQ_TAR_TSEL_BLK: 517 max_index = ARRAY_SIZE(xive->blk); 518 xive_table = xive->blk; 519 break; 520 case CQ_TAR_TSEL_MIG: 521 max_index = ARRAY_SIZE(xive->mig); 522 xive_table = xive->mig; 523 break; 524 case CQ_TAR_TSEL_EDT: 525 max_index = ARRAY_SIZE(xive->edt); 526 xive_table = xive->edt; 527 break; 528 case CQ_TAR_TSEL_VDT: 529 max_index = ARRAY_SIZE(xive->vdt); 530 xive_table = xive->vdt; 531 break; 532 default: 533 xive_error(xive, "IC: invalid table %d", (int) tsel); 534 return -1; 535 } 536 537 if (tsel_index >= max_index) { 538 xive_error(xive, "IC: invalid index %d", (int) tsel_index); 539 return -1; 540 } 541 542 xive_table[tsel_index] = val; 543 544 if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) { 545 xive->regs[CQ_TAR >> 3] = 546 SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index); 547 } 548 549 /* 550 * EDT configuration is complete. Resize the MMIO windows exposing 551 * the IPI and the END ESBs in the VC region. 
552 */ 553 if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) { 554 pnv_xive_edt_resize(xive); 555 } 556 557 return 0; 558 } 559 560 /* 561 * Virtual Structure Tables (VST) configuration 562 */ 563 static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type, 564 uint8_t blk, uint64_t vsd) 565 { 566 XiveENDSource *end_xsrc = &xive->end_source; 567 XiveSource *xsrc = &xive->ipi_source; 568 const XiveVstInfo *info = &vst_infos[type]; 569 uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; 570 uint64_t vst_tsize = 1ull << page_shift; 571 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; 572 573 /* Basic checks */ 574 575 if (VSD_INDIRECT & vsd) { 576 if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) { 577 xive_error(xive, "VST: %s indirect tables are not enabled", 578 info->name); 579 return; 580 } 581 582 if (!pnv_xive_vst_page_size_allowed(page_shift)) { 583 xive_error(xive, "VST: invalid %s page shift %d", info->name, 584 page_shift); 585 return; 586 } 587 } 588 589 if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) { 590 xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with" 591 " page shift %d", info->name, vst_addr, page_shift); 592 return; 593 } 594 595 /* Record the table configuration (in SRAM on HW) */ 596 xive->vsds[type][blk] = vsd; 597 598 /* Now tune the models with the configuration provided by the FW */ 599 600 switch (type) { 601 case VST_TSEL_IVT: /* Nothing to be done */ 602 break; 603 604 case VST_TSEL_EQDT: 605 /* 606 * Backing store pages for the END. 607 * 608 * If the table is direct, we can compute the number of PQ 609 * entries provisioned by FW (such as skiboot) and resize the 610 * END ESB window accordingly. 611 */ 612 if (!(VSD_INDIRECT & vsd)) { 613 memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size) 614 * (1ull << xsrc->esb_shift)); 615 } 616 memory_region_add_subregion(&xive->end_edt_mmio, 0, 617 &end_xsrc->esb_mmio); 618 break; 619 620 case VST_TSEL_SBE: 621 /* 622 * Backing store pages for the source PQ bits. The model does 623 * not use these PQ bits backed in RAM because the XiveSource 624 * model has its own. 625 * 626 * If the table is direct, we can compute the number of PQ 627 * entries provisioned by FW (such as skiboot) and resize the 628 * ESB window accordingly. 629 */ 630 if (!(VSD_INDIRECT & vsd)) { 631 memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE 632 * (1ull << xsrc->esb_shift)); 633 } 634 memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio); 635 break; 636 637 case VST_TSEL_VPDT: /* Not modeled */ 638 case VST_TSEL_IRQ: /* Not modeled */ 639 /* 640 * These tables contains the backing store pages for the 641 * interrupt fifos of the VC sub-engine in case of overflow. 642 */ 643 break; 644 645 default: 646 g_assert_not_reached(); 647 } 648 } 649 650 /* 651 * Both PC and VC sub-engines are configured as each use the Virtual 652 * Structure Tables : SBE, EAS, END and NVT. 
653 */ 654 static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine) 655 { 656 uint8_t mode = GETFIELD(VSD_MODE, vsd); 657 uint8_t type = GETFIELD(VST_TABLE_SELECT, 658 xive->regs[VC_VSD_TABLE_ADDR >> 3]); 659 uint8_t blk = GETFIELD(VST_TABLE_BLOCK, 660 xive->regs[VC_VSD_TABLE_ADDR >> 3]); 661 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; 662 663 if (type > VST_TSEL_IRQ) { 664 xive_error(xive, "VST: invalid table type %d", type); 665 return; 666 } 667 668 if (blk >= vst_infos[type].max_blocks) { 669 xive_error(xive, "VST: invalid block id %d for" 670 " %s table", blk, vst_infos[type].name); 671 return; 672 } 673 674 /* 675 * Only take the VC sub-engine configuration into account because 676 * the XiveRouter model combines both VC and PC sub-engines 677 */ 678 if (pc_engine) { 679 return; 680 } 681 682 if (!vst_addr) { 683 xive_error(xive, "VST: invalid %s table address", vst_infos[type].name); 684 return; 685 } 686 687 switch (mode) { 688 case VSD_MODE_FORWARD: 689 xive->vsds[type][blk] = vsd; 690 break; 691 692 case VSD_MODE_EXCLUSIVE: 693 pnv_xive_vst_set_exclusive(xive, type, blk, vsd); 694 break; 695 696 default: 697 xive_error(xive, "VST: unsupported table mode %d", mode); 698 return; 699 } 700 } 701 702 /* 703 * Interrupt controller MMIO region. The layout is compatible between 704 * 4K and 64K pages : 705 * 706 * Page 0 sub-engine BARs 707 * 0x000 - 0x3FF IC registers 708 * 0x400 - 0x7FF PC registers 709 * 0x800 - 0xFFF VC registers 710 * 711 * Page 1 Notify page (writes only) 712 * 0x000 - 0x7FF HW interrupt triggers (PSI, PHB) 713 * 0x800 - 0xFFF forwards and syncs 714 * 715 * Page 2 LSI Trigger page (writes only) (not modeled) 716 * Page 3 LSI SB EOI page (reads only) (not modeled) 717 * 718 * Page 4-7 indirect TIMA 719 */ 720 721 /* 722 * IC - registers MMIO 723 */ 724 static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset, 725 uint64_t val, unsigned size) 726 { 727 PnvXive *xive = PNV_XIVE(opaque); 728 MemoryRegion *sysmem = get_system_memory(); 729 uint32_t reg = offset >> 3; 730 bool is_chip0 = xive->chip->chip_id == 0; 731 732 switch (offset) { 733 734 /* 735 * XIVE CQ (PowerBus bridge) settings 736 */ 737 case CQ_MSGSND: /* msgsnd for doorbells */ 738 case CQ_FIRMASK_OR: /* FIR error reporting */ 739 break; 740 case CQ_PBI_CTL: 741 if (val & CQ_PBI_PC_64K) { 742 xive->pc_shift = 16; 743 } 744 if (val & CQ_PBI_VC_64K) { 745 xive->vc_shift = 16; 746 } 747 break; 748 case CQ_CFG_PB_GEN: /* PowerBus General Configuration */ 749 /* 750 * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode 751 */ 752 break; 753 754 /* 755 * XIVE Virtualization Controller settings 756 */ 757 case VC_GLOBAL_CONFIG: 758 break; 759 760 /* 761 * XIVE Presenter Controller settings 762 */ 763 case PC_GLOBAL_CONFIG: 764 /* 765 * PC_GCONF_CHIPID_OVR 766 * Overrides Int command Chip ID with the Chip ID field (DEBUG) 767 */ 768 break; 769 case PC_TCTXT_CFG: 770 /* 771 * TODO: block group support 772 * 773 * PC_TCTXT_CFG_BLKGRP_EN 774 * PC_TCTXT_CFG_HARD_CHIPID_BLK : 775 * Moves the chipid into block field for hardwired CAM compares. 776 * Block offset value is adjusted to 0b0..01 & ThrdId 777 * 778 * Will require changes in xive_presenter_tctx_match(). I am 779 * not sure how to handle that yet. 
780 */ 781 782 /* Overrides hardwired chip ID with the chip ID field */ 783 if (val & PC_TCTXT_CHIPID_OVERRIDE) { 784 xive->tctx_chipid = GETFIELD(PC_TCTXT_CHIPID, val); 785 } 786 break; 787 case PC_TCTXT_TRACK: 788 /* 789 * PC_TCTXT_TRACK_EN: 790 * enable block tracking and exchange of block ownership 791 * information between Interrupt controllers 792 */ 793 break; 794 795 /* 796 * Misc settings 797 */ 798 case VC_SBC_CONFIG: /* Store EOI configuration */ 799 /* 800 * Configure store EOI if required by firwmare (skiboot has removed 801 * support recently though) 802 */ 803 if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) { 804 xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI; 805 } 806 break; 807 808 case VC_EQC_CONFIG: /* TODO: silent escalation */ 809 case VC_AIB_TX_ORDER_TAG2: /* relax ordering */ 810 break; 811 812 /* 813 * XIVE BAR settings (XSCOM only) 814 */ 815 case CQ_RST_CTL: 816 /* bit4: resets all BAR registers */ 817 break; 818 819 case CQ_IC_BAR: /* IC BAR. 8 pages */ 820 xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12; 821 if (!(val & CQ_IC_BAR_VALID)) { 822 xive->ic_base = 0; 823 if (xive->regs[reg] & CQ_IC_BAR_VALID) { 824 memory_region_del_subregion(&xive->ic_mmio, 825 &xive->ic_reg_mmio); 826 memory_region_del_subregion(&xive->ic_mmio, 827 &xive->ic_notify_mmio); 828 memory_region_del_subregion(&xive->ic_mmio, 829 &xive->ic_lsi_mmio); 830 memory_region_del_subregion(&xive->ic_mmio, 831 &xive->tm_indirect_mmio); 832 833 memory_region_del_subregion(sysmem, &xive->ic_mmio); 834 } 835 } else { 836 xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K); 837 if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) { 838 memory_region_add_subregion(sysmem, xive->ic_base, 839 &xive->ic_mmio); 840 841 memory_region_add_subregion(&xive->ic_mmio, 0, 842 &xive->ic_reg_mmio); 843 memory_region_add_subregion(&xive->ic_mmio, 844 1ul << xive->ic_shift, 845 &xive->ic_notify_mmio); 846 memory_region_add_subregion(&xive->ic_mmio, 847 2ul << xive->ic_shift, 848 &xive->ic_lsi_mmio); 849 memory_region_add_subregion(&xive->ic_mmio, 850 4ull << xive->ic_shift, 851 &xive->tm_indirect_mmio); 852 } 853 } 854 break; 855 856 case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */ 857 case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */ 858 xive->tm_shift = val & CQ_TM_BAR_64K ? 
16 : 12; 859 if (!(val & CQ_TM_BAR_VALID)) { 860 xive->tm_base = 0; 861 if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) { 862 memory_region_del_subregion(sysmem, &xive->tm_mmio); 863 } 864 } else { 865 xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K); 866 if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) { 867 memory_region_add_subregion(sysmem, xive->tm_base, 868 &xive->tm_mmio); 869 } 870 } 871 break; 872 873 case CQ_PC_BARM: 874 xive->regs[reg] = val; 875 memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive)); 876 break; 877 case CQ_PC_BAR: /* From 32M to 512G */ 878 if (!(val & CQ_PC_BAR_VALID)) { 879 xive->pc_base = 0; 880 if (xive->regs[reg] & CQ_PC_BAR_VALID) { 881 memory_region_del_subregion(sysmem, &xive->pc_mmio); 882 } 883 } else { 884 xive->pc_base = val & ~(CQ_PC_BAR_VALID); 885 if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) { 886 memory_region_add_subregion(sysmem, xive->pc_base, 887 &xive->pc_mmio); 888 } 889 } 890 break; 891 892 case CQ_VC_BARM: 893 xive->regs[reg] = val; 894 memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive)); 895 break; 896 case CQ_VC_BAR: /* From 64M to 4TB */ 897 if (!(val & CQ_VC_BAR_VALID)) { 898 xive->vc_base = 0; 899 if (xive->regs[reg] & CQ_VC_BAR_VALID) { 900 memory_region_del_subregion(sysmem, &xive->vc_mmio); 901 } 902 } else { 903 xive->vc_base = val & ~(CQ_VC_BAR_VALID); 904 if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) { 905 memory_region_add_subregion(sysmem, xive->vc_base, 906 &xive->vc_mmio); 907 } 908 } 909 break; 910 911 /* 912 * XIVE Table settings. 913 */ 914 case CQ_TAR: /* Table Address */ 915 break; 916 case CQ_TDR: /* Table Data */ 917 pnv_xive_table_set_data(xive, val); 918 break; 919 920 /* 921 * XIVE VC & PC Virtual Structure Table settings 922 */ 923 case VC_VSD_TABLE_ADDR: 924 case PC_VSD_TABLE_ADDR: /* Virtual table selector */ 925 break; 926 case VC_VSD_TABLE_DATA: /* Virtual table setting */ 927 case PC_VSD_TABLE_DATA: 928 pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA); 929 break; 930 931 /* 932 * Interrupt fifo overflow in memory backing store (Not modeled) 933 */ 934 case VC_IRQ_CONFIG_IPI: 935 case VC_IRQ_CONFIG_HW: 936 case VC_IRQ_CONFIG_CASCADE1: 937 case VC_IRQ_CONFIG_CASCADE2: 938 case VC_IRQ_CONFIG_REDIST: 939 case VC_IRQ_CONFIG_IPI_CASC: 940 break; 941 942 /* 943 * XIVE hardware thread enablement 944 */ 945 case PC_THREAD_EN_REG0: /* Physical Thread Enable */ 946 case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */ 947 break; 948 949 case PC_THREAD_EN_REG0_SET: 950 xive->regs[PC_THREAD_EN_REG0 >> 3] |= val; 951 break; 952 case PC_THREAD_EN_REG1_SET: 953 xive->regs[PC_THREAD_EN_REG1 >> 3] |= val; 954 break; 955 case PC_THREAD_EN_REG0_CLR: 956 xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val; 957 break; 958 case PC_THREAD_EN_REG1_CLR: 959 xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val; 960 break; 961 962 /* 963 * Indirect TIMA access set up. Defines the PIR of the HW thread 964 * to use. 965 */ 966 case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3: 967 break; 968 969 /* 970 * XIVE PC & VC cache updates for EAS, NVT and END 971 */ 972 case VC_IVC_SCRUB_MASK: 973 case VC_IVC_SCRUB_TRIG: 974 break; 975 976 case VC_EQC_CWATCH_SPEC: 977 val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */ 978 break; 979 case VC_EQC_CWATCH_DAT1 ... 
VC_EQC_CWATCH_DAT3: 980 break; 981 case VC_EQC_CWATCH_DAT0: 982 /* writing to DATA0 triggers the cache write */ 983 xive->regs[reg] = val; 984 pnv_xive_end_update(xive); 985 break; 986 case VC_EQC_SCRUB_MASK: 987 case VC_EQC_SCRUB_TRIG: 988 /* 989 * The scrubbing registers flush the cache in RAM and can also 990 * invalidate. 991 */ 992 break; 993 994 case PC_VPC_CWATCH_SPEC: 995 val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */ 996 break; 997 case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7: 998 break; 999 case PC_VPC_CWATCH_DAT0: 1000 /* writing to DATA0 triggers the cache write */ 1001 xive->regs[reg] = val; 1002 pnv_xive_nvt_update(xive); 1003 break; 1004 case PC_VPC_SCRUB_MASK: 1005 case PC_VPC_SCRUB_TRIG: 1006 /* 1007 * The scrubbing registers flush the cache in RAM and can also 1008 * invalidate. 1009 */ 1010 break; 1011 1012 1013 /* 1014 * XIVE PC & VC cache invalidation 1015 */ 1016 case PC_AT_KILL: 1017 break; 1018 case VC_AT_MACRO_KILL: 1019 break; 1020 case PC_AT_KILL_MASK: 1021 case VC_AT_MACRO_KILL_MASK: 1022 break; 1023 1024 default: 1025 xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset); 1026 return; 1027 } 1028 1029 xive->regs[reg] = val; 1030 } 1031 1032 static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size) 1033 { 1034 PnvXive *xive = PNV_XIVE(opaque); 1035 uint64_t val = 0; 1036 uint32_t reg = offset >> 3; 1037 1038 switch (offset) { 1039 case CQ_CFG_PB_GEN: 1040 case CQ_IC_BAR: 1041 case CQ_TM1_BAR: 1042 case CQ_TM2_BAR: 1043 case CQ_PC_BAR: 1044 case CQ_PC_BARM: 1045 case CQ_VC_BAR: 1046 case CQ_VC_BARM: 1047 case CQ_TAR: 1048 case CQ_TDR: 1049 case CQ_PBI_CTL: 1050 1051 case PC_TCTXT_CFG: 1052 case PC_TCTXT_TRACK: 1053 case PC_TCTXT_INDIR0: 1054 case PC_TCTXT_INDIR1: 1055 case PC_TCTXT_INDIR2: 1056 case PC_TCTXT_INDIR3: 1057 case PC_GLOBAL_CONFIG: 1058 1059 case PC_VPC_SCRUB_MASK: 1060 1061 case VC_GLOBAL_CONFIG: 1062 case VC_AIB_TX_ORDER_TAG2: 1063 1064 case VC_IRQ_CONFIG_IPI: 1065 case VC_IRQ_CONFIG_HW: 1066 case VC_IRQ_CONFIG_CASCADE1: 1067 case VC_IRQ_CONFIG_CASCADE2: 1068 case VC_IRQ_CONFIG_REDIST: 1069 case VC_IRQ_CONFIG_IPI_CASC: 1070 1071 case VC_EQC_SCRUB_MASK: 1072 case VC_IVC_SCRUB_MASK: 1073 case VC_SBC_CONFIG: 1074 case VC_AT_MACRO_KILL_MASK: 1075 case VC_VSD_TABLE_ADDR: 1076 case PC_VSD_TABLE_ADDR: 1077 case VC_VSD_TABLE_DATA: 1078 case PC_VSD_TABLE_DATA: 1079 case PC_THREAD_EN_REG0: 1080 case PC_THREAD_EN_REG1: 1081 val = xive->regs[reg]; 1082 break; 1083 1084 /* 1085 * XIVE hardware thread enablement 1086 */ 1087 case PC_THREAD_EN_REG0_SET: 1088 case PC_THREAD_EN_REG0_CLR: 1089 val = xive->regs[PC_THREAD_EN_REG0 >> 3]; 1090 break; 1091 case PC_THREAD_EN_REG1_SET: 1092 case PC_THREAD_EN_REG1_CLR: 1093 val = xive->regs[PC_THREAD_EN_REG1 >> 3]; 1094 break; 1095 1096 case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */ 1097 val = 0xffffff0000000000; 1098 break; 1099 1100 /* 1101 * XIVE PC & VC cache updates for EAS, NVT and END 1102 */ 1103 case VC_EQC_CWATCH_SPEC: 1104 xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT); 1105 val = xive->regs[reg]; 1106 break; 1107 case VC_EQC_CWATCH_DAT0: 1108 /* 1109 * Load DATA registers from cache with data requested by the 1110 * SPEC register 1111 */ 1112 pnv_xive_end_cache_load(xive); 1113 val = xive->regs[reg]; 1114 break; 1115 case VC_EQC_CWATCH_DAT1 ... 
VC_EQC_CWATCH_DAT3: 1116 val = xive->regs[reg]; 1117 break; 1118 1119 case PC_VPC_CWATCH_SPEC: 1120 xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT); 1121 val = xive->regs[reg]; 1122 break; 1123 case PC_VPC_CWATCH_DAT0: 1124 /* 1125 * Load DATA registers from cache with data requested by the 1126 * SPEC register 1127 */ 1128 pnv_xive_nvt_cache_load(xive); 1129 val = xive->regs[reg]; 1130 break; 1131 case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7: 1132 val = xive->regs[reg]; 1133 break; 1134 1135 case PC_VPC_SCRUB_TRIG: 1136 case VC_IVC_SCRUB_TRIG: 1137 case VC_EQC_SCRUB_TRIG: 1138 xive->regs[reg] &= ~VC_SCRUB_VALID; 1139 val = xive->regs[reg]; 1140 break; 1141 1142 /* 1143 * XIVE PC & VC cache invalidation 1144 */ 1145 case PC_AT_KILL: 1146 xive->regs[reg] &= ~PC_AT_KILL_VALID; 1147 val = xive->regs[reg]; 1148 break; 1149 case VC_AT_MACRO_KILL: 1150 xive->regs[reg] &= ~VC_KILL_VALID; 1151 val = xive->regs[reg]; 1152 break; 1153 1154 /* 1155 * XIVE synchronisation 1156 */ 1157 case VC_EQC_CONFIG: 1158 val = VC_EQC_SYNC_MASK; 1159 break; 1160 1161 default: 1162 xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset); 1163 } 1164 1165 return val; 1166 } 1167 1168 static const MemoryRegionOps pnv_xive_ic_reg_ops = { 1169 .read = pnv_xive_ic_reg_read, 1170 .write = pnv_xive_ic_reg_write, 1171 .endianness = DEVICE_BIG_ENDIAN, 1172 .valid = { 1173 .min_access_size = 8, 1174 .max_access_size = 8, 1175 }, 1176 .impl = { 1177 .min_access_size = 8, 1178 .max_access_size = 8, 1179 }, 1180 }; 1181 1182 /* 1183 * IC - Notify MMIO port page (write only) 1184 */ 1185 #define PNV_XIVE_FORWARD_IPI 0x800 /* Forward IPI */ 1186 #define PNV_XIVE_FORWARD_HW 0x880 /* Forward HW */ 1187 #define PNV_XIVE_FORWARD_OS_ESC 0x900 /* Forward OS escalation */ 1188 #define PNV_XIVE_FORWARD_HW_ESC 0x980 /* Forward Hyp escalation */ 1189 #define PNV_XIVE_FORWARD_REDIS 0xa00 /* Forward Redistribution */ 1190 #define PNV_XIVE_RESERVED5 0xa80 /* Cache line 5 PowerBUS operation */ 1191 #define PNV_XIVE_RESERVED6 0xb00 /* Cache line 6 PowerBUS operation */ 1192 #define PNV_XIVE_RESERVED7 0xb80 /* Cache line 7 PowerBUS operation */ 1193 1194 /* VC synchronisation */ 1195 #define PNV_XIVE_SYNC_IPI 0xc00 /* Sync IPI */ 1196 #define PNV_XIVE_SYNC_HW 0xc80 /* Sync HW */ 1197 #define PNV_XIVE_SYNC_OS_ESC 0xd00 /* Sync OS escalation */ 1198 #define PNV_XIVE_SYNC_HW_ESC 0xd80 /* Sync Hyp escalation */ 1199 #define PNV_XIVE_SYNC_REDIS 0xe00 /* Sync Redistribution */ 1200 1201 /* PC synchronisation */ 1202 #define PNV_XIVE_SYNC_PULL 0xe80 /* Sync pull context */ 1203 #define PNV_XIVE_SYNC_PUSH 0xf00 /* Sync push context */ 1204 #define PNV_XIVE_SYNC_VPC 0xf80 /* Sync remove VPC store */ 1205 1206 static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val) 1207 { 1208 uint8_t blk; 1209 uint32_t idx; 1210 1211 if (val & XIVE_TRIGGER_END) { 1212 xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64, 1213 addr, val); 1214 return; 1215 } 1216 1217 /* 1218 * Forward the source event notification directly to the Router. 1219 * The source interrupt number should already be correctly encoded 1220 * with the chip block id by the sending device (PHB, PSI). 
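     * It is presumably composed with the same XIVE_EAS(blk, idx)
     * encoding that pnv_xive_notify() uses for the internal IPI
     * sources, since it is decoded below with XIVE_EAS_BLOCK() and
     * XIVE_EAS_INDEX().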
1221 */ 1222 blk = XIVE_EAS_BLOCK(val); 1223 idx = XIVE_EAS_INDEX(val); 1224 1225 xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx)); 1226 } 1227 1228 static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val, 1229 unsigned size) 1230 { 1231 PnvXive *xive = PNV_XIVE(opaque); 1232 1233 /* VC: HW triggers */ 1234 switch (addr) { 1235 case 0x000 ... 0x7FF: 1236 pnv_xive_ic_hw_trigger(opaque, addr, val); 1237 break; 1238 1239 /* VC: Forwarded IRQs */ 1240 case PNV_XIVE_FORWARD_IPI: 1241 case PNV_XIVE_FORWARD_HW: 1242 case PNV_XIVE_FORWARD_OS_ESC: 1243 case PNV_XIVE_FORWARD_HW_ESC: 1244 case PNV_XIVE_FORWARD_REDIS: 1245 /* TODO: forwarded IRQs. Should be like HW triggers */ 1246 xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64, 1247 addr, val); 1248 break; 1249 1250 /* VC syncs */ 1251 case PNV_XIVE_SYNC_IPI: 1252 case PNV_XIVE_SYNC_HW: 1253 case PNV_XIVE_SYNC_OS_ESC: 1254 case PNV_XIVE_SYNC_HW_ESC: 1255 case PNV_XIVE_SYNC_REDIS: 1256 break; 1257 1258 /* PC syncs */ 1259 case PNV_XIVE_SYNC_PULL: 1260 case PNV_XIVE_SYNC_PUSH: 1261 case PNV_XIVE_SYNC_VPC: 1262 break; 1263 1264 default: 1265 xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr); 1266 } 1267 } 1268 1269 static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr, 1270 unsigned size) 1271 { 1272 PnvXive *xive = PNV_XIVE(opaque); 1273 1274 /* loads are invalid */ 1275 xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr); 1276 return -1; 1277 } 1278 1279 static const MemoryRegionOps pnv_xive_ic_notify_ops = { 1280 .read = pnv_xive_ic_notify_read, 1281 .write = pnv_xive_ic_notify_write, 1282 .endianness = DEVICE_BIG_ENDIAN, 1283 .valid = { 1284 .min_access_size = 8, 1285 .max_access_size = 8, 1286 }, 1287 .impl = { 1288 .min_access_size = 8, 1289 .max_access_size = 8, 1290 }, 1291 }; 1292 1293 /* 1294 * IC - LSI MMIO handlers (not modeled) 1295 */ 1296 1297 static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr, 1298 uint64_t val, unsigned size) 1299 { 1300 PnvXive *xive = PNV_XIVE(opaque); 1301 1302 xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr); 1303 } 1304 1305 static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size) 1306 { 1307 PnvXive *xive = PNV_XIVE(opaque); 1308 1309 xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr); 1310 return -1; 1311 } 1312 1313 static const MemoryRegionOps pnv_xive_ic_lsi_ops = { 1314 .read = pnv_xive_ic_lsi_read, 1315 .write = pnv_xive_ic_lsi_write, 1316 .endianness = DEVICE_BIG_ENDIAN, 1317 .valid = { 1318 .min_access_size = 8, 1319 .max_access_size = 8, 1320 }, 1321 .impl = { 1322 .min_access_size = 8, 1323 .max_access_size = 8, 1324 }, 1325 }; 1326 1327 /* 1328 * IC - Indirect TIMA MMIO handlers 1329 */ 1330 1331 /* 1332 * When the TIMA is accessed from the indirect page, the thread id 1333 * (PIR) has to be configured in the IC registers before. This is used 1334 * for resets and for debug purpose also. 
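 *
 * A sketch of the expected sequence, using the register and field
 * names of this file: firmware first writes PC_TCTXT_INDIR0 with
 * PC_TCTXT_INDIR_VALID set and the target PIR in PC_TCTXT_INDIR_THRDID,
 * then issues its loads and stores through pages 4-7 of the IC BAR
 * (the "xive-tima-indirect" region below).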
1335 */ 1336 static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive) 1337 { 1338 uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3]; 1339 PowerPCCPU *cpu = NULL; 1340 int pir; 1341 1342 if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) { 1343 xive_error(xive, "IC: no indirect TIMA access in progress"); 1344 return NULL; 1345 } 1346 1347 pir = GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir) & 0xff; 1348 cpu = ppc_get_vcpu_by_pir(pir); 1349 if (!cpu) { 1350 xive_error(xive, "IC: invalid PIR %x for indirect access", pir); 1351 return NULL; 1352 } 1353 1354 /* Check that HW thread is XIVE enabled */ 1355 if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) { 1356 xive_error(xive, "IC: CPU %x is not enabled", pir); 1357 } 1358 1359 return XIVE_TCTX(pnv_cpu_state(cpu)->intc); 1360 } 1361 1362 static void xive_tm_indirect_write(void *opaque, hwaddr offset, 1363 uint64_t value, unsigned size) 1364 { 1365 XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque)); 1366 1367 xive_tctx_tm_write(tctx, offset, value, size); 1368 } 1369 1370 static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset, 1371 unsigned size) 1372 { 1373 XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque)); 1374 1375 return xive_tctx_tm_read(tctx, offset, size); 1376 } 1377 1378 static const MemoryRegionOps xive_tm_indirect_ops = { 1379 .read = xive_tm_indirect_read, 1380 .write = xive_tm_indirect_write, 1381 .endianness = DEVICE_BIG_ENDIAN, 1382 .valid = { 1383 .min_access_size = 1, 1384 .max_access_size = 8, 1385 }, 1386 .impl = { 1387 .min_access_size = 1, 1388 .max_access_size = 8, 1389 }, 1390 }; 1391 1392 /* 1393 * Interrupt controller XSCOM region. 1394 */ 1395 static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size) 1396 { 1397 switch (addr >> 3) { 1398 case X_VC_EQC_CONFIG: 1399 /* FIXME (skiboot): This is the only XSCOM load. Bizarre. 
*/ 1400 return VC_EQC_SYNC_MASK; 1401 default: 1402 return pnv_xive_ic_reg_read(opaque, addr, size); 1403 } 1404 } 1405 1406 static void pnv_xive_xscom_write(void *opaque, hwaddr addr, 1407 uint64_t val, unsigned size) 1408 { 1409 pnv_xive_ic_reg_write(opaque, addr, val, size); 1410 } 1411 1412 static const MemoryRegionOps pnv_xive_xscom_ops = { 1413 .read = pnv_xive_xscom_read, 1414 .write = pnv_xive_xscom_write, 1415 .endianness = DEVICE_BIG_ENDIAN, 1416 .valid = { 1417 .min_access_size = 8, 1418 .max_access_size = 8, 1419 }, 1420 .impl = { 1421 .min_access_size = 8, 1422 .max_access_size = 8, 1423 } 1424 }; 1425 1426 /* 1427 * Virtualization Controller MMIO region containing the IPI and END ESB pages 1428 */ 1429 static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset, 1430 unsigned size) 1431 { 1432 PnvXive *xive = PNV_XIVE(opaque); 1433 uint64_t edt_index = offset >> pnv_xive_edt_shift(xive); 1434 uint64_t edt_type = 0; 1435 uint64_t edt_offset; 1436 MemTxResult result; 1437 AddressSpace *edt_as = NULL; 1438 uint64_t ret = -1; 1439 1440 if (edt_index < XIVE_TABLE_EDT_MAX) { 1441 edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]); 1442 } 1443 1444 switch (edt_type) { 1445 case CQ_TDR_EDT_IPI: 1446 edt_as = &xive->ipi_as; 1447 break; 1448 case CQ_TDR_EDT_EQ: 1449 edt_as = &xive->end_as; 1450 break; 1451 default: 1452 xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset); 1453 return -1; 1454 } 1455 1456 /* Remap the offset for the targeted address space */ 1457 edt_offset = pnv_xive_edt_offset(xive, offset, edt_type); 1458 1459 ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED, 1460 &result); 1461 1462 if (result != MEMTX_OK) { 1463 xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%" 1464 HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END", 1465 offset, edt_offset); 1466 return -1; 1467 } 1468 1469 return ret; 1470 } 1471 1472 static void pnv_xive_vc_write(void *opaque, hwaddr offset, 1473 uint64_t val, unsigned size) 1474 { 1475 PnvXive *xive = PNV_XIVE(opaque); 1476 uint64_t edt_index = offset >> pnv_xive_edt_shift(xive); 1477 uint64_t edt_type = 0; 1478 uint64_t edt_offset; 1479 MemTxResult result; 1480 AddressSpace *edt_as = NULL; 1481 1482 if (edt_index < XIVE_TABLE_EDT_MAX) { 1483 edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]); 1484 } 1485 1486 switch (edt_type) { 1487 case CQ_TDR_EDT_IPI: 1488 edt_as = &xive->ipi_as; 1489 break; 1490 case CQ_TDR_EDT_EQ: 1491 edt_as = &xive->end_as; 1492 break; 1493 default: 1494 xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx, 1495 offset); 1496 return; 1497 } 1498 1499 /* Remap the offset for the targeted address space */ 1500 edt_offset = pnv_xive_edt_offset(xive, offset, edt_type); 1501 1502 address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result); 1503 if (result != MEMTX_OK) { 1504 xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset); 1505 } 1506 } 1507 1508 static const MemoryRegionOps pnv_xive_vc_ops = { 1509 .read = pnv_xive_vc_read, 1510 .write = pnv_xive_vc_write, 1511 .endianness = DEVICE_BIG_ENDIAN, 1512 .valid = { 1513 .min_access_size = 8, 1514 .max_access_size = 8, 1515 }, 1516 .impl = { 1517 .min_access_size = 8, 1518 .max_access_size = 8, 1519 }, 1520 }; 1521 1522 /* 1523 * Presenter Controller MMIO region. The Virtualization Controller 1524 * updates the IPB in the NVT table when required. Not modeled. 
 */
static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static void pnv_xive_pc_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid write @%"HWADDR_PRIx, addr);
}

static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
{
    XiveRouter *xrtr = XIVE_ROUTER(xive);
    uint8_t blk = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
    XiveEAS eas;
    XiveEND end;
    int i;

    monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_ipis - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_ipis - 1);
    for (i = 0; i < nr_ipis; i++) {
        if (xive_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive_eas_is_masked(&eas)) {
            xive_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] ENDT\n", blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] END Escalation EAT\n", blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_eas_pic_print_info(&end, i++, mon);
    }
}

static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /*
     * Use the PnvChip id to identify the XIVE interrupt controller.
     * It can be overridden by configuration at runtime.
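     * (see the PC_TCTXT_CHIPID_OVERRIDE handling in
     * pnv_xive_ic_reg_write()).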
1604 */ 1605 xive->tctx_chipid = xive->chip->chip_id; 1606 1607 /* Default page size (Should be changed at runtime to 64k) */ 1608 xive->ic_shift = xive->vc_shift = xive->pc_shift = 12; 1609 1610 /* Clear subregions */ 1611 if (memory_region_is_mapped(&xsrc->esb_mmio)) { 1612 memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio); 1613 } 1614 1615 if (memory_region_is_mapped(&xive->ipi_edt_mmio)) { 1616 memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio); 1617 } 1618 1619 if (memory_region_is_mapped(&end_xsrc->esb_mmio)) { 1620 memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio); 1621 } 1622 1623 if (memory_region_is_mapped(&xive->end_edt_mmio)) { 1624 memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio); 1625 } 1626 } 1627 1628 static void pnv_xive_init(Object *obj) 1629 { 1630 PnvXive *xive = PNV_XIVE(obj); 1631 1632 object_initialize_child(obj, "ipi_source", &xive->ipi_source, 1633 sizeof(xive->ipi_source), TYPE_XIVE_SOURCE, 1634 &error_abort, NULL); 1635 object_initialize_child(obj, "end_source", &xive->end_source, 1636 sizeof(xive->end_source), TYPE_XIVE_END_SOURCE, 1637 &error_abort, NULL); 1638 } 1639 1640 /* 1641 * Maximum number of IRQs and ENDs supported by HW 1642 */ 1643 #define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE)) 1644 #define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE)) 1645 1646 static void pnv_xive_realize(DeviceState *dev, Error **errp) 1647 { 1648 PnvXive *xive = PNV_XIVE(dev); 1649 XiveSource *xsrc = &xive->ipi_source; 1650 XiveENDSource *end_xsrc = &xive->end_source; 1651 Error *local_err = NULL; 1652 1653 assert(xive->chip); 1654 1655 /* 1656 * The XiveSource and XiveENDSource objects are realized with the 1657 * maximum allowed HW configuration. The ESB MMIO regions will be 1658 * resized dynamically when the controller is configured by the FW 1659 * to limit accesses to resources not provisioned. 1660 */ 1661 object_property_set_int(OBJECT(xsrc), PNV_XIVE_NR_IRQS, "nr-irqs", 1662 &error_fatal); 1663 object_property_set_link(OBJECT(xsrc), OBJECT(xive), "xive", 1664 &error_abort); 1665 object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err); 1666 if (local_err) { 1667 error_propagate(errp, local_err); 1668 return; 1669 } 1670 1671 object_property_set_int(OBJECT(end_xsrc), PNV_XIVE_NR_ENDS, "nr-ends", 1672 &error_fatal); 1673 object_property_set_link(OBJECT(end_xsrc), OBJECT(xive), "xive", 1674 &error_abort); 1675 object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err); 1676 if (local_err) { 1677 error_propagate(errp, local_err); 1678 return; 1679 } 1680 1681 /* Default page size. 
Generally changed at runtime to 64k */ 1682 xive->ic_shift = xive->vc_shift = xive->pc_shift = 12; 1683 1684 /* XSCOM region, used for initial configuration of the BARs */ 1685 memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops, 1686 xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3); 1687 1688 /* Interrupt controller MMIO regions */ 1689 memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic", 1690 PNV9_XIVE_IC_SIZE); 1691 1692 memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops, 1693 xive, "xive-ic-reg", 1 << xive->ic_shift); 1694 memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev), 1695 &pnv_xive_ic_notify_ops, 1696 xive, "xive-ic-notify", 1 << xive->ic_shift); 1697 1698 /* The Pervasive LSI trigger and EOI pages (not modeled) */ 1699 memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops, 1700 xive, "xive-ic-lsi", 2 << xive->ic_shift); 1701 1702 /* Thread Interrupt Management Area (Indirect) */ 1703 memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev), 1704 &xive_tm_indirect_ops, 1705 xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE); 1706 /* 1707 * Overall Virtualization Controller MMIO region containing the 1708 * IPI ESB pages and END ESB pages. The layout is defined by the 1709 * EDT "Domain table" and the accesses are dispatched using 1710 * address spaces for each. 1711 */ 1712 memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive, 1713 "xive-vc", PNV9_XIVE_VC_SIZE); 1714 1715 memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi", 1716 PNV9_XIVE_VC_SIZE); 1717 address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi"); 1718 memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end", 1719 PNV9_XIVE_VC_SIZE); 1720 address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end"); 1721 1722 /* 1723 * The MMIO windows exposing the IPI ESBs and the END ESBs in the 1724 * VC region. Their size is configured by the FW in the EDT table. 
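     * They are created empty here and are grown by pnv_xive_edt_resize()
     * once the EDT has been fully programmed through the CQ_TDR
     * register.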
1725 */ 1726 memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0); 1727 memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0); 1728 1729 /* Presenter Controller MMIO region (not modeled) */ 1730 memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive, 1731 "xive-pc", PNV9_XIVE_PC_SIZE); 1732 1733 /* Thread Interrupt Management Area (Direct) */ 1734 memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops, 1735 xive, "xive-tima", PNV9_XIVE_TM_SIZE); 1736 1737 qemu_register_reset(pnv_xive_reset, dev); 1738 } 1739 1740 static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt, 1741 int xscom_offset) 1742 { 1743 const char compat[] = "ibm,power9-xive-x"; 1744 char *name; 1745 int offset; 1746 uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE; 1747 uint32_t reg[] = { 1748 cpu_to_be32(lpc_pcba), 1749 cpu_to_be32(PNV9_XSCOM_XIVE_SIZE) 1750 }; 1751 1752 name = g_strdup_printf("xive@%x", lpc_pcba); 1753 offset = fdt_add_subnode(fdt, xscom_offset, name); 1754 _FDT(offset); 1755 g_free(name); 1756 1757 _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)))); 1758 _FDT((fdt_setprop(fdt, offset, "compatible", compat, 1759 sizeof(compat)))); 1760 return 0; 1761 } 1762 1763 static Property pnv_xive_properties[] = { 1764 DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0), 1765 DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0), 1766 DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0), 1767 DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0), 1768 /* The PnvChip id identifies the XIVE interrupt controller. */ 1769 DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *), 1770 DEFINE_PROP_END_OF_LIST(), 1771 }; 1772 1773 static void pnv_xive_class_init(ObjectClass *klass, void *data) 1774 { 1775 DeviceClass *dc = DEVICE_CLASS(klass); 1776 PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); 1777 XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass); 1778 XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); 1779 1780 xdc->dt_xscom = pnv_xive_dt_xscom; 1781 1782 dc->desc = "PowerNV XIVE Interrupt Controller"; 1783 dc->realize = pnv_xive_realize; 1784 dc->props = pnv_xive_properties; 1785 1786 xrc->get_eas = pnv_xive_get_eas; 1787 xrc->get_end = pnv_xive_get_end; 1788 xrc->write_end = pnv_xive_write_end; 1789 xrc->get_nvt = pnv_xive_get_nvt; 1790 xrc->write_nvt = pnv_xive_write_nvt; 1791 xrc->get_tctx = pnv_xive_get_tctx; 1792 1793 xnc->notify = pnv_xive_notify; 1794 }; 1795 1796 static const TypeInfo pnv_xive_info = { 1797 .name = TYPE_PNV_XIVE, 1798 .parent = TYPE_XIVE_ROUTER, 1799 .instance_init = pnv_xive_init, 1800 .instance_size = sizeof(PnvXive), 1801 .class_init = pnv_xive_class_init, 1802 .interfaces = (InterfaceInfo[]) { 1803 { TYPE_PNV_XSCOM_INTERFACE }, 1804 { } 1805 } 1806 }; 1807 1808 static void pnv_xive_register_types(void) 1809 { 1810 type_register_static(&pnv_xive_info); 1811 } 1812 1813 type_init(pnv_xive_register_types) 1814
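
/*
 * Usage note (a sketch, not part of the model): the PnvXive instance is
 * expected to be created by the POWER9 chip model, which sets the "chip"
 * link and the BAR properties before realizing the device and exposing
 * the XSCOM region. Assuming the PNV9_XIVE_*_BASE macros and the
 * pnv_xscom_add_subregion() helper of the PowerNV machine code, and with
 * 'xive' and 'chip' standing for the controller and its owning chip, the
 * wiring would look like:
 *
 *   object_property_set_int(OBJECT(xive), PNV9_XIVE_IC_BASE(chip),
 *                           "ic-bar", &error_fatal);
 *   ... likewise for "vc-bar", "pc-bar" and "tm-bar" ...
 *   object_property_set_bool(OBJECT(xive), true, "realized", &error_fatal);
 *   pnv_xscom_add_subregion(chip, PNV9_XSCOM_XIVE_BASE,
 *                           &xive->xscom_regs);
 */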