1 /* 2 * QEMU PowerPC XIVE interrupt controller model 3 * 4 * Copyright (c) 2017-2019, IBM Corporation. 5 * 6 * This code is licensed under the GPL version 2 or later. See the 7 * COPYING file in the top-level directory. 8 */ 9 10 #include "qemu/osdep.h" 11 #include "qemu/log.h" 12 #include "qemu/module.h" 13 #include "qapi/error.h" 14 #include "target/ppc/cpu.h" 15 #include "sysemu/cpus.h" 16 #include "sysemu/dma.h" 17 #include "sysemu/reset.h" 18 #include "monitor/monitor.h" 19 #include "hw/ppc/fdt.h" 20 #include "hw/ppc/pnv.h" 21 #include "hw/ppc/pnv_chip.h" 22 #include "hw/ppc/pnv_core.h" 23 #include "hw/ppc/pnv_xscom.h" 24 #include "hw/ppc/pnv_xive.h" 25 #include "hw/ppc/xive_regs.h" 26 #include "hw/qdev-properties.h" 27 #include "hw/ppc/ppc.h" 28 #include "trace.h" 29 30 #include <libfdt.h> 31 32 #include "pnv_xive_regs.h" 33 34 #undef XIVE_DEBUG 35 36 /* 37 * Virtual structures table (VST) 38 */ 39 #define SBE_PER_BYTE 4 40 41 typedef struct XiveVstInfo { 42 const char *name; 43 uint32_t size; 44 uint32_t max_blocks; 45 } XiveVstInfo; 46 47 static const XiveVstInfo vst_infos[] = { 48 [VST_TSEL_IVT] = { "EAT", sizeof(XiveEAS), 16 }, 49 [VST_TSEL_SBE] = { "SBE", 1, 16 }, 50 [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 }, 51 [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 }, 52 53 /* 54 * Interrupt fifo backing store table (not modeled) : 55 * 56 * 0 - IPI, 57 * 1 - HWD, 58 * 2 - First escalate, 59 * 3 - Second escalate, 60 * 4 - Redistribution, 61 * 5 - IPI cascaded queue ? 62 */ 63 [VST_TSEL_IRQ] = { "IRQ", 1, 6 }, 64 }; 65 66 #define xive_error(xive, fmt, ...) \ 67 qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n", \ 68 (xive)->chip->chip_id, ## __VA_ARGS__); 69 70 /* 71 * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID 72 * field overrides the hardwired chip ID in the Powerbus operations 73 * and for CAM compares 74 */ 75 static uint8_t pnv_xive_block_id(PnvXive *xive) 76 { 77 uint8_t blk = xive->chip->chip_id; 78 uint64_t cfg_val = xive->regs[PC_TCTXT_CFG >> 3]; 79 80 if (cfg_val & PC_TCTXT_CHIPID_OVERRIDE) { 81 blk = GETFIELD(PC_TCTXT_CHIPID, cfg_val); 82 } 83 84 return blk; 85 } 86 87 /* 88 * Remote access to controllers. HW uses MMIOs. For now, a simple scan 89 * of the chips is good enough. 90 * 91 * TODO: Block scope support 92 */ 93 static PnvXive *pnv_xive_get_remote(uint8_t blk) 94 { 95 PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine()); 96 int i; 97 98 for (i = 0; i < pnv->num_chips; i++) { 99 Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]); 100 PnvXive *xive = &chip9->xive; 101 102 if (pnv_xive_block_id(xive) == blk) { 103 return xive; 104 } 105 } 106 return NULL; 107 } 108 109 /* 110 * VST accessors for SBE, EAT, ENDT, NVT 111 * 112 * Indirect VST tables are arrays of VSDs pointing to a page (of same 113 * size). Each page is a direct VST table. 114 */ 115 116 #define XIVE_VSD_SIZE 8 117 118 /* Indirect page size can be 4K, 64K, 2M, 16M. 
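 *
 * The shift is encoded as VSD_TSIZE + 12, so, for instance, a VSD with
 * VSD_TSIZE = 4 describes 64K pages and passes the check below, while
 * VSD_TSIZE = 5 (128K) is rejected.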
*/ 119 static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift) 120 { 121 return page_shift == 12 || page_shift == 16 || 122 page_shift == 21 || page_shift == 24; 123 } 124 125 static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type, 126 uint64_t vsd, uint32_t idx) 127 { 128 const XiveVstInfo *info = &vst_infos[type]; 129 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; 130 uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12); 131 uint32_t idx_max; 132 133 idx_max = vst_tsize / info->size - 1; 134 if (idx > idx_max) { 135 #ifdef XIVE_DEBUG 136 xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?", 137 info->name, idx, idx_max); 138 #endif 139 return 0; 140 } 141 142 return vst_addr + idx * info->size; 143 } 144 145 static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type, 146 uint64_t vsd, uint32_t idx) 147 { 148 const XiveVstInfo *info = &vst_infos[type]; 149 uint64_t vsd_addr; 150 uint32_t vsd_idx; 151 uint32_t page_shift; 152 uint32_t vst_per_page; 153 154 /* Get the page size of the indirect table. */ 155 vsd_addr = vsd & VSD_ADDRESS_MASK; 156 if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd, 157 MEMTXATTRS_UNSPECIFIED)) { 158 xive_error(xive, "VST: failed to access %s entry %x @0x%" PRIx64, 159 info->name, idx, vsd_addr); 160 return 0; 161 } 162 163 if (!(vsd & VSD_ADDRESS_MASK)) { 164 #ifdef XIVE_DEBUG 165 xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx); 166 #endif 167 return 0; 168 } 169 170 page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; 171 172 if (!pnv_xive_vst_page_size_allowed(page_shift)) { 173 xive_error(xive, "VST: invalid %s page shift %d", info->name, 174 page_shift); 175 return 0; 176 } 177 178 vst_per_page = (1ull << page_shift) / info->size; 179 vsd_idx = idx / vst_per_page; 180 181 /* Load the VSD we are looking for, if not already done */ 182 if (vsd_idx) { 183 vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE; 184 if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd, 185 MEMTXATTRS_UNSPECIFIED)) { 186 xive_error(xive, "VST: failed to access %s entry %x @0x%" 187 PRIx64, info->name, vsd_idx, vsd_addr); 188 return 0; 189 } 190 191 if (!(vsd & VSD_ADDRESS_MASK)) { 192 #ifdef XIVE_DEBUG 193 xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx); 194 #endif 195 return 0; 196 } 197 198 /* 199 * Check that the pages have a consistent size across the 200 * indirect table 201 */ 202 if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) { 203 xive_error(xive, "VST: %s entry %x indirect page size differ !?", 204 info->name, idx); 205 return 0; 206 } 207 } 208 209 return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page)); 210 } 211 212 static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk, 213 uint32_t idx) 214 { 215 const XiveVstInfo *info = &vst_infos[type]; 216 uint64_t vsd; 217 218 if (blk >= info->max_blocks) { 219 xive_error(xive, "VST: invalid block id %d for VST %s %d !?", 220 blk, info->name, idx); 221 return 0; 222 } 223 224 vsd = xive->vsds[type][blk]; 225 226 /* Remote VST access */ 227 if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) { 228 xive = pnv_xive_get_remote(blk); 229 230 return xive ? 
pnv_xive_vst_addr(xive, type, blk, idx) : 0; 231 } 232 233 if (VSD_INDIRECT & vsd) { 234 return pnv_xive_vst_addr_indirect(xive, type, vsd, idx); 235 } 236 237 return pnv_xive_vst_addr_direct(xive, type, vsd, idx); 238 } 239 240 static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk, 241 uint32_t idx, void *data) 242 { 243 const XiveVstInfo *info = &vst_infos[type]; 244 uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx); 245 246 if (!addr) { 247 return -1; 248 } 249 250 cpu_physical_memory_read(addr, data, info->size); 251 return 0; 252 } 253 254 #define XIVE_VST_WORD_ALL -1 255 256 static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk, 257 uint32_t idx, void *data, uint32_t word_number) 258 { 259 const XiveVstInfo *info = &vst_infos[type]; 260 uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx); 261 262 if (!addr) { 263 return -1; 264 } 265 266 if (word_number == XIVE_VST_WORD_ALL) { 267 cpu_physical_memory_write(addr, data, info->size); 268 } else { 269 cpu_physical_memory_write(addr + word_number * 4, 270 data + word_number * 4, 4); 271 } 272 return 0; 273 } 274 275 static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 276 XiveEND *end) 277 { 278 return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end); 279 } 280 281 static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 282 XiveEND *end, uint8_t word_number) 283 { 284 return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end, 285 word_number); 286 } 287 288 static int pnv_xive_end_update(PnvXive *xive) 289 { 290 uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID, 291 xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); 292 uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET, 293 xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); 294 int i; 295 uint64_t eqc_watch[4]; 296 297 for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) { 298 eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]); 299 } 300 301 return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch, 302 XIVE_VST_WORD_ALL); 303 } 304 305 static void pnv_xive_end_cache_load(PnvXive *xive) 306 { 307 uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID, 308 xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); 309 uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET, 310 xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); 311 uint64_t eqc_watch[4] = { 0 }; 312 int i; 313 314 if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) { 315 xive_error(xive, "VST: no END entry %x/%x !?", blk, idx); 316 } 317 318 for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) { 319 xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]); 320 } 321 } 322 323 static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 324 XiveNVT *nvt) 325 { 326 return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt); 327 } 328 329 static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 330 XiveNVT *nvt, uint8_t word_number) 331 { 332 return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt, 333 word_number); 334 } 335 336 static int pnv_xive_nvt_update(PnvXive *xive) 337 { 338 uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID, 339 xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); 340 uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET, 341 xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); 342 int i; 343 uint64_t vpc_watch[8]; 344 345 for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) { 346 vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]); 347 } 348 349 return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, 
vpc_watch, 350 XIVE_VST_WORD_ALL); 351 } 352 353 static void pnv_xive_nvt_cache_load(PnvXive *xive) 354 { 355 uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID, 356 xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); 357 uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET, 358 xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); 359 uint64_t vpc_watch[8] = { 0 }; 360 int i; 361 362 if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) { 363 xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx); 364 } 365 366 for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) { 367 xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]); 368 } 369 } 370 371 static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 372 XiveEAS *eas) 373 { 374 PnvXive *xive = PNV_XIVE(xrtr); 375 376 /* 377 * EAT lookups should be local to the IC 378 */ 379 if (pnv_xive_block_id(xive) != blk) { 380 xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx)); 381 return -1; 382 } 383 384 return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas); 385 } 386 387 static int pnv_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 388 uint8_t *pq) 389 { 390 PnvXive *xive = PNV_XIVE(xrtr); 391 392 if (pnv_xive_block_id(xive) != blk) { 393 xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx)); 394 return -1; 395 } 396 397 *pq = xive_source_esb_get(&xive->ipi_source, idx); 398 return 0; 399 } 400 401 static int pnv_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 402 uint8_t *pq) 403 { 404 PnvXive *xive = PNV_XIVE(xrtr); 405 406 if (pnv_xive_block_id(xive) != blk) { 407 xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx)); 408 return -1; 409 } 410 411 *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq); 412 return 0; 413 } 414 415 /* 416 * One bit per thread id. The first register PC_THREAD_EN_REG0 covers 417 * the first cores 0-15 (normal) of the chip or 0-7 (fused). The 418 * second register covers cores 16-23 (normal) or 8-11 (fused). 419 */ 420 static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu) 421 { 422 int pir = ppc_cpu_pir(cpu); 423 uint32_t fc = PNV9_PIR2FUSEDCORE(pir); 424 uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1; 425 uint32_t bit = pir & 0x3f; 426 427 return xive->regs[reg >> 3] & PPC_BIT(bit); 428 } 429 430 static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format, 431 uint8_t nvt_blk, uint32_t nvt_idx, 432 bool cam_ignore, uint8_t priority, 433 uint32_t logic_serv, XiveTCTXMatch *match) 434 { 435 PnvXive *xive = PNV_XIVE(xptr); 436 PnvChip *chip = xive->chip; 437 int count = 0; 438 int i, j; 439 440 for (i = 0; i < chip->nr_cores; i++) { 441 PnvCore *pc = chip->cores[i]; 442 CPUCore *cc = CPU_CORE(pc); 443 444 for (j = 0; j < cc->nr_threads; j++) { 445 PowerPCCPU *cpu = pc->threads[j]; 446 XiveTCTX *tctx; 447 int ring; 448 449 if (!pnv_xive_is_cpu_enabled(xive, cpu)) { 450 continue; 451 } 452 453 tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); 454 455 /* 456 * Check the thread context CAM lines and record matches. 457 */ 458 ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, 459 nvt_idx, cam_ignore, logic_serv); 460 /* 461 * Save the context and follow on to catch duplicates, that we 462 * don't support yet. 
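             * A second match is reported below as a guest error and the
             * scan is aborted by returning -1, discarding the count.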
463 */ 464 if (ring != -1) { 465 if (match->tctx) { 466 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a " 467 "thread context NVT %x/%x\n", 468 nvt_blk, nvt_idx); 469 return -1; 470 } 471 472 match->ring = ring; 473 match->tctx = tctx; 474 count++; 475 } 476 } 477 } 478 479 return count; 480 } 481 482 static uint32_t pnv_xive_presenter_get_config(XivePresenter *xptr) 483 { 484 uint32_t cfg = 0; 485 486 /* TIMA GEN1 is all P9 knows */ 487 cfg |= XIVE_PRESENTER_GEN1_TIMA_OS; 488 489 return cfg; 490 } 491 492 static uint8_t pnv_xive_get_block_id(XiveRouter *xrtr) 493 { 494 return pnv_xive_block_id(PNV_XIVE(xrtr)); 495 } 496 497 /* 498 * The TIMA MMIO space is shared among the chips and to identify the 499 * chip from which the access is being done, we extract the chip id 500 * from the PIR. 501 */ 502 static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu) 503 { 504 int pir = ppc_cpu_pir(cpu); 505 XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr; 506 PnvXive *xive = PNV_XIVE(xptr); 507 508 if (!pnv_xive_is_cpu_enabled(xive, cpu)) { 509 xive_error(xive, "IC: CPU %x is not enabled", pir); 510 } 511 return xive; 512 } 513 514 /* 515 * The internal sources (IPIs) of the interrupt controller have no 516 * knowledge of the XIVE chip on which they reside. Encode the block 517 * id in the source interrupt number before forwarding the source 518 * event notification to the Router. This is required on a multichip 519 * system. 520 */ 521 static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked) 522 { 523 PnvXive *xive = PNV_XIVE(xn); 524 uint8_t blk = pnv_xive_block_id(xive); 525 526 xive_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked); 527 } 528 529 /* 530 * XIVE helpers 531 */ 532 533 static uint64_t pnv_xive_vc_size(PnvXive *xive) 534 { 535 return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK; 536 } 537 538 static uint64_t pnv_xive_edt_shift(PnvXive *xive) 539 { 540 return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX); 541 } 542 543 static uint64_t pnv_xive_pc_size(PnvXive *xive) 544 { 545 return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK; 546 } 547 548 static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk) 549 { 550 uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk]; 551 uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12); 552 553 return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE; 554 } 555 556 /* 557 * Compute the number of entries per indirect subpage. 558 */ 559 static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type) 560 { 561 uint8_t blk = pnv_xive_block_id(xive); 562 uint64_t vsd = xive->vsds[type][blk]; 563 const XiveVstInfo *info = &vst_infos[type]; 564 uint64_t vsd_addr; 565 uint32_t page_shift; 566 567 /* For direct tables, fake a valid value */ 568 if (!(VSD_INDIRECT & vsd)) { 569 return 1; 570 } 571 572 /* Get the page size of the indirect table. 
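     * The number of entries per subpage is then (1 << page_shift) /
     * info->size, e.g. 64K / sizeof(XiveNVT) for a 64K VPDT subpage.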
*/ 573 vsd_addr = vsd & VSD_ADDRESS_MASK; 574 if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd, 575 MEMTXATTRS_UNSPECIFIED)) { 576 xive_error(xive, "VST: failed to access %s entry @0x%" PRIx64, 577 info->name, vsd_addr); 578 return 0; 579 } 580 581 if (!(vsd & VSD_ADDRESS_MASK)) { 582 #ifdef XIVE_DEBUG 583 xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx); 584 #endif 585 return 0; 586 } 587 588 page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; 589 590 if (!pnv_xive_vst_page_size_allowed(page_shift)) { 591 xive_error(xive, "VST: invalid %s page shift %d", info->name, 592 page_shift); 593 return 0; 594 } 595 596 return (1ull << page_shift) / info->size; 597 } 598 599 /* 600 * EDT Table 601 * 602 * The Virtualization Controller MMIO region containing the IPI ESB 603 * pages and END ESB pages is sub-divided into "sets" which map 604 * portions of the VC region to the different ESB pages. It is 605 * configured at runtime through the EDT "Domain Table" to let the 606 * firmware decide how to split the VC address space between IPI ESB 607 * pages and END ESB pages. 608 */ 609 610 /* 611 * Computes the overall size of the IPI or the END ESB pages 612 */ 613 static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type) 614 { 615 uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive); 616 uint64_t size = 0; 617 int i; 618 619 for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) { 620 uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]); 621 622 if (edt_type == type) { 623 size += edt_size; 624 } 625 } 626 627 return size; 628 } 629 630 /* 631 * Maps an offset of the VC region in the IPI or END region using the 632 * layout defined by the EDT "Domaine Table" 633 */ 634 static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset, 635 uint64_t type) 636 { 637 int i; 638 uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive); 639 uint64_t edt_offset = vc_offset; 640 641 for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) { 642 uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]); 643 644 if (edt_type != type) { 645 edt_offset -= edt_size; 646 } 647 } 648 649 return edt_offset; 650 } 651 652 static void pnv_xive_edt_resize(PnvXive *xive) 653 { 654 uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI); 655 uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ); 656 657 memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size); 658 memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio); 659 660 memory_region_set_size(&xive->end_edt_mmio, end_edt_size); 661 memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio); 662 } 663 664 /* 665 * XIVE Table configuration. Only EDT is supported. 
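 * The BLK, MIG and VDT entries are recorded in their arrays but have no
 * further effect in this model; only a complete EDT update triggers a
 * resize of the IPI and END ESB windows (see pnv_xive_edt_resize()).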
666 */ 667 static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val) 668 { 669 uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL; 670 uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]); 671 uint64_t *xive_table; 672 uint8_t max_index; 673 674 switch (tsel) { 675 case CQ_TAR_TSEL_BLK: 676 max_index = ARRAY_SIZE(xive->blk); 677 xive_table = xive->blk; 678 break; 679 case CQ_TAR_TSEL_MIG: 680 max_index = ARRAY_SIZE(xive->mig); 681 xive_table = xive->mig; 682 break; 683 case CQ_TAR_TSEL_EDT: 684 max_index = ARRAY_SIZE(xive->edt); 685 xive_table = xive->edt; 686 break; 687 case CQ_TAR_TSEL_VDT: 688 max_index = ARRAY_SIZE(xive->vdt); 689 xive_table = xive->vdt; 690 break; 691 default: 692 xive_error(xive, "IC: invalid table %d", (int) tsel); 693 return -1; 694 } 695 696 if (tsel_index >= max_index) { 697 xive_error(xive, "IC: invalid index %d", (int) tsel_index); 698 return -1; 699 } 700 701 xive_table[tsel_index] = val; 702 703 if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) { 704 xive->regs[CQ_TAR >> 3] = 705 SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index); 706 } 707 708 /* 709 * EDT configuration is complete. Resize the MMIO windows exposing 710 * the IPI and the END ESBs in the VC region. 711 */ 712 if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) { 713 pnv_xive_edt_resize(xive); 714 } 715 716 return 0; 717 } 718 719 /* 720 * Virtual Structure Tables (VST) configuration 721 */ 722 static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type, 723 uint8_t blk, uint64_t vsd) 724 { 725 XiveENDSource *end_xsrc = &xive->end_source; 726 XiveSource *xsrc = &xive->ipi_source; 727 const XiveVstInfo *info = &vst_infos[type]; 728 uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; 729 uint64_t vst_tsize = 1ull << page_shift; 730 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; 731 732 /* Basic checks */ 733 734 if (VSD_INDIRECT & vsd) { 735 if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) { 736 xive_error(xive, "VST: %s indirect tables are not enabled", 737 info->name); 738 return; 739 } 740 741 if (!pnv_xive_vst_page_size_allowed(page_shift)) { 742 xive_error(xive, "VST: invalid %s page shift %d", info->name, 743 page_shift); 744 return; 745 } 746 } 747 748 if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) { 749 xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with" 750 " page shift %d", info->name, vst_addr, page_shift); 751 return; 752 } 753 754 /* Record the table configuration (in SRAM on HW) */ 755 xive->vsds[type][blk] = vsd; 756 757 /* Now tune the models with the configuration provided by the FW */ 758 759 switch (type) { 760 case VST_TSEL_IVT: /* Nothing to be done */ 761 break; 762 763 case VST_TSEL_EQDT: 764 /* 765 * Backing store pages for the END. 766 * 767 * If the table is direct, we can compute the number of PQ 768 * entries provisioned by FW (such as skiboot) and resize the 769 * END ESB window accordingly. 770 */ 771 if (!(VSD_INDIRECT & vsd)) { 772 memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size) 773 * (1ull << xsrc->esb_shift)); 774 } 775 memory_region_add_subregion(&xive->end_edt_mmio, 0, 776 &end_xsrc->esb_mmio); 777 break; 778 779 case VST_TSEL_SBE: 780 /* 781 * Backing store pages for the source PQ bits. The model does 782 * not use these PQ bits backed in RAM because the XiveSource 783 * model has its own. 
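         * (Each SBE byte backs SBE_PER_BYTE = 4 sources, i.e. 2 PQ bits
         * per source, which is why the ESB window below is sized for
         * vst_tsize * SBE_PER_BYTE sources.)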
784 * 785 * If the table is direct, we can compute the number of PQ 786 * entries provisioned by FW (such as skiboot) and resize the 787 * ESB window accordingly. 788 */ 789 if (!(VSD_INDIRECT & vsd)) { 790 memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE 791 * (1ull << xsrc->esb_shift)); 792 } 793 memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio); 794 break; 795 796 case VST_TSEL_VPDT: /* Not modeled */ 797 case VST_TSEL_IRQ: /* Not modeled */ 798 /* 799 * These tables contains the backing store pages for the 800 * interrupt fifos of the VC sub-engine in case of overflow. 801 */ 802 break; 803 804 default: 805 g_assert_not_reached(); 806 } 807 } 808 809 /* 810 * Both PC and VC sub-engines are configured as each use the Virtual 811 * Structure Tables : SBE, EAS, END and NVT. 812 */ 813 static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine) 814 { 815 uint8_t mode = GETFIELD(VSD_MODE, vsd); 816 uint8_t type = GETFIELD(VST_TABLE_SELECT, 817 xive->regs[VC_VSD_TABLE_ADDR >> 3]); 818 uint8_t blk = GETFIELD(VST_TABLE_BLOCK, 819 xive->regs[VC_VSD_TABLE_ADDR >> 3]); 820 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; 821 822 if (type > VST_TSEL_IRQ) { 823 xive_error(xive, "VST: invalid table type %d", type); 824 return; 825 } 826 827 if (blk >= vst_infos[type].max_blocks) { 828 xive_error(xive, "VST: invalid block id %d for" 829 " %s table", blk, vst_infos[type].name); 830 return; 831 } 832 833 /* 834 * Only take the VC sub-engine configuration into account because 835 * the XiveRouter model combines both VC and PC sub-engines 836 */ 837 if (pc_engine) { 838 return; 839 } 840 841 if (!vst_addr) { 842 xive_error(xive, "VST: invalid %s table address", vst_infos[type].name); 843 return; 844 } 845 846 switch (mode) { 847 case VSD_MODE_FORWARD: 848 xive->vsds[type][blk] = vsd; 849 break; 850 851 case VSD_MODE_EXCLUSIVE: 852 pnv_xive_vst_set_exclusive(xive, type, blk, vsd); 853 break; 854 855 default: 856 xive_error(xive, "VST: unsupported table mode %d", mode); 857 return; 858 } 859 } 860 861 /* 862 * Interrupt controller MMIO region. 
The layout is compatible between 863 * 4K and 64K pages : 864 * 865 * Page 0 sub-engine BARs 866 * 0x000 - 0x3FF IC registers 867 * 0x400 - 0x7FF PC registers 868 * 0x800 - 0xFFF VC registers 869 * 870 * Page 1 Notify page (writes only) 871 * 0x000 - 0x7FF HW interrupt triggers (PSI, PHB) 872 * 0x800 - 0xFFF forwards and syncs 873 * 874 * Page 2 LSI Trigger page (writes only) (not modeled) 875 * Page 3 LSI SB EOI page (reads only) (not modeled) 876 * 877 * Page 4-7 indirect TIMA 878 */ 879 880 /* 881 * IC - registers MMIO 882 */ 883 static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset, 884 uint64_t val, unsigned size) 885 { 886 PnvXive *xive = PNV_XIVE(opaque); 887 MemoryRegion *sysmem = get_system_memory(); 888 uint32_t reg = offset >> 3; 889 bool is_chip0 = xive->chip->chip_id == 0; 890 891 switch (offset) { 892 893 /* 894 * XIVE CQ (PowerBus bridge) settings 895 */ 896 case CQ_MSGSND: /* msgsnd for doorbells */ 897 case CQ_FIRMASK_OR: /* FIR error reporting */ 898 break; 899 case CQ_PBI_CTL: 900 if (val & CQ_PBI_PC_64K) { 901 xive->pc_shift = 16; 902 } 903 if (val & CQ_PBI_VC_64K) { 904 xive->vc_shift = 16; 905 } 906 break; 907 case CQ_CFG_PB_GEN: /* PowerBus General Configuration */ 908 /* 909 * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode 910 */ 911 break; 912 913 /* 914 * XIVE Virtualization Controller settings 915 */ 916 case VC_GLOBAL_CONFIG: 917 break; 918 919 /* 920 * XIVE Presenter Controller settings 921 */ 922 case PC_GLOBAL_CONFIG: 923 /* 924 * PC_GCONF_CHIPID_OVR 925 * Overrides Int command Chip ID with the Chip ID field (DEBUG) 926 */ 927 break; 928 case PC_TCTXT_CFG: 929 /* 930 * TODO: block group support 931 */ 932 break; 933 case PC_TCTXT_TRACK: 934 /* 935 * PC_TCTXT_TRACK_EN: 936 * enable block tracking and exchange of block ownership 937 * information between Interrupt controllers 938 */ 939 break; 940 941 /* 942 * Misc settings 943 */ 944 case VC_SBC_CONFIG: /* Store EOI configuration */ 945 /* 946 * Configure store EOI if required by firwmare (skiboot has removed 947 * support recently though) 948 */ 949 if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) { 950 xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI; 951 } 952 break; 953 954 case VC_EQC_CONFIG: /* TODO: silent escalation */ 955 case VC_AIB_TX_ORDER_TAG2: /* relax ordering */ 956 break; 957 958 /* 959 * XIVE BAR settings (XSCOM only) 960 */ 961 case CQ_RST_CTL: 962 /* bit4: resets all BAR registers */ 963 break; 964 965 case CQ_IC_BAR: /* IC BAR. 8 pages */ 966 xive->ic_shift = val & CQ_IC_BAR_64K ? 
16 : 12; 967 if (!(val & CQ_IC_BAR_VALID)) { 968 xive->ic_base = 0; 969 if (xive->regs[reg] & CQ_IC_BAR_VALID) { 970 memory_region_del_subregion(&xive->ic_mmio, 971 &xive->ic_reg_mmio); 972 memory_region_del_subregion(&xive->ic_mmio, 973 &xive->ic_notify_mmio); 974 memory_region_del_subregion(&xive->ic_mmio, 975 &xive->ic_lsi_mmio); 976 memory_region_del_subregion(&xive->ic_mmio, 977 &xive->tm_indirect_mmio); 978 979 memory_region_del_subregion(sysmem, &xive->ic_mmio); 980 } 981 } else { 982 xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K); 983 if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) { 984 memory_region_add_subregion(sysmem, xive->ic_base, 985 &xive->ic_mmio); 986 987 memory_region_add_subregion(&xive->ic_mmio, 0, 988 &xive->ic_reg_mmio); 989 memory_region_add_subregion(&xive->ic_mmio, 990 1ul << xive->ic_shift, 991 &xive->ic_notify_mmio); 992 memory_region_add_subregion(&xive->ic_mmio, 993 2ul << xive->ic_shift, 994 &xive->ic_lsi_mmio); 995 memory_region_add_subregion(&xive->ic_mmio, 996 4ull << xive->ic_shift, 997 &xive->tm_indirect_mmio); 998 } 999 } 1000 break; 1001 1002 case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */ 1003 case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */ 1004 xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12; 1005 if (!(val & CQ_TM_BAR_VALID)) { 1006 xive->tm_base = 0; 1007 if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) { 1008 memory_region_del_subregion(sysmem, &xive->tm_mmio); 1009 } 1010 } else { 1011 xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K); 1012 if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) { 1013 memory_region_add_subregion(sysmem, xive->tm_base, 1014 &xive->tm_mmio); 1015 } 1016 } 1017 break; 1018 1019 case CQ_PC_BARM: 1020 xive->regs[reg] = val; 1021 memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive)); 1022 break; 1023 case CQ_PC_BAR: /* From 32M to 512G */ 1024 if (!(val & CQ_PC_BAR_VALID)) { 1025 xive->pc_base = 0; 1026 if (xive->regs[reg] & CQ_PC_BAR_VALID) { 1027 memory_region_del_subregion(sysmem, &xive->pc_mmio); 1028 } 1029 } else { 1030 xive->pc_base = val & ~(CQ_PC_BAR_VALID); 1031 if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) { 1032 memory_region_add_subregion(sysmem, xive->pc_base, 1033 &xive->pc_mmio); 1034 } 1035 } 1036 break; 1037 1038 case CQ_VC_BARM: 1039 xive->regs[reg] = val; 1040 memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive)); 1041 break; 1042 case CQ_VC_BAR: /* From 64M to 4TB */ 1043 if (!(val & CQ_VC_BAR_VALID)) { 1044 xive->vc_base = 0; 1045 if (xive->regs[reg] & CQ_VC_BAR_VALID) { 1046 memory_region_del_subregion(sysmem, &xive->vc_mmio); 1047 } 1048 } else { 1049 xive->vc_base = val & ~(CQ_VC_BAR_VALID); 1050 if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) { 1051 memory_region_add_subregion(sysmem, xive->vc_base, 1052 &xive->vc_mmio); 1053 } 1054 } 1055 break; 1056 1057 /* 1058 * XIVE Table settings. 
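     * CQ_TAR selects the table and the index, CQ_TDR writes one entry at
     * a time and, with CQ_TAR_TBL_AUTOINC, the index advances
     * automatically (see pnv_xive_table_set_data()).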
1059 */ 1060 case CQ_TAR: /* Table Address */ 1061 break; 1062 case CQ_TDR: /* Table Data */ 1063 pnv_xive_table_set_data(xive, val); 1064 break; 1065 1066 /* 1067 * XIVE VC & PC Virtual Structure Table settings 1068 */ 1069 case VC_VSD_TABLE_ADDR: 1070 case PC_VSD_TABLE_ADDR: /* Virtual table selector */ 1071 break; 1072 case VC_VSD_TABLE_DATA: /* Virtual table setting */ 1073 case PC_VSD_TABLE_DATA: 1074 pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA); 1075 break; 1076 1077 /* 1078 * Interrupt fifo overflow in memory backing store (Not modeled) 1079 */ 1080 case VC_IRQ_CONFIG_IPI: 1081 case VC_IRQ_CONFIG_HW: 1082 case VC_IRQ_CONFIG_CASCADE1: 1083 case VC_IRQ_CONFIG_CASCADE2: 1084 case VC_IRQ_CONFIG_REDIST: 1085 case VC_IRQ_CONFIG_IPI_CASC: 1086 break; 1087 1088 /* 1089 * XIVE hardware thread enablement 1090 */ 1091 case PC_THREAD_EN_REG0: /* Physical Thread Enable */ 1092 case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */ 1093 break; 1094 1095 case PC_THREAD_EN_REG0_SET: 1096 xive->regs[PC_THREAD_EN_REG0 >> 3] |= val; 1097 break; 1098 case PC_THREAD_EN_REG1_SET: 1099 xive->regs[PC_THREAD_EN_REG1 >> 3] |= val; 1100 break; 1101 case PC_THREAD_EN_REG0_CLR: 1102 xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val; 1103 break; 1104 case PC_THREAD_EN_REG1_CLR: 1105 xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val; 1106 break; 1107 1108 /* 1109 * Indirect TIMA access set up. Defines the PIR of the HW thread 1110 * to use. 1111 */ 1112 case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3: 1113 break; 1114 1115 /* 1116 * XIVE PC & VC cache updates for EAS, NVT and END 1117 */ 1118 case VC_IVC_SCRUB_MASK: 1119 case VC_IVC_SCRUB_TRIG: 1120 break; 1121 1122 case VC_EQC_CWATCH_SPEC: 1123 val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */ 1124 break; 1125 case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3: 1126 break; 1127 case VC_EQC_CWATCH_DAT0: 1128 /* writing to DATA0 triggers the cache write */ 1129 xive->regs[reg] = val; 1130 pnv_xive_end_update(xive); 1131 break; 1132 case VC_EQC_SCRUB_MASK: 1133 case VC_EQC_SCRUB_TRIG: 1134 /* 1135 * The scrubbing registers flush the cache in RAM and can also 1136 * invalidate. 1137 */ 1138 break; 1139 1140 case PC_VPC_CWATCH_SPEC: 1141 val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */ 1142 break; 1143 case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7: 1144 break; 1145 case PC_VPC_CWATCH_DAT0: 1146 /* writing to DATA0 triggers the cache write */ 1147 xive->regs[reg] = val; 1148 pnv_xive_nvt_update(xive); 1149 break; 1150 case PC_VPC_SCRUB_MASK: 1151 case PC_VPC_SCRUB_TRIG: 1152 /* 1153 * The scrubbing registers flush the cache in RAM and can also 1154 * invalidate. 
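         * (In this model the VST entries are accessed directly in guest
         * RAM, so there is no cache state to flush; the write is only
         * recorded in the register array.)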
1155 */ 1156 break; 1157 1158 1159 /* 1160 * XIVE PC & VC cache invalidation 1161 */ 1162 case PC_AT_KILL: 1163 break; 1164 case VC_AT_MACRO_KILL: 1165 break; 1166 case PC_AT_KILL_MASK: 1167 case VC_AT_MACRO_KILL_MASK: 1168 break; 1169 1170 default: 1171 xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset); 1172 return; 1173 } 1174 1175 xive->regs[reg] = val; 1176 } 1177 1178 static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size) 1179 { 1180 PnvXive *xive = PNV_XIVE(opaque); 1181 uint64_t val = 0; 1182 uint32_t reg = offset >> 3; 1183 1184 switch (offset) { 1185 case CQ_CFG_PB_GEN: 1186 case CQ_IC_BAR: 1187 case CQ_TM1_BAR: 1188 case CQ_TM2_BAR: 1189 case CQ_PC_BAR: 1190 case CQ_PC_BARM: 1191 case CQ_VC_BAR: 1192 case CQ_VC_BARM: 1193 case CQ_TAR: 1194 case CQ_TDR: 1195 case CQ_PBI_CTL: 1196 1197 case PC_TCTXT_CFG: 1198 case PC_TCTXT_TRACK: 1199 case PC_TCTXT_INDIR0: 1200 case PC_TCTXT_INDIR1: 1201 case PC_TCTXT_INDIR2: 1202 case PC_TCTXT_INDIR3: 1203 case PC_GLOBAL_CONFIG: 1204 1205 case PC_VPC_SCRUB_MASK: 1206 1207 case VC_GLOBAL_CONFIG: 1208 case VC_AIB_TX_ORDER_TAG2: 1209 1210 case VC_IRQ_CONFIG_IPI: 1211 case VC_IRQ_CONFIG_HW: 1212 case VC_IRQ_CONFIG_CASCADE1: 1213 case VC_IRQ_CONFIG_CASCADE2: 1214 case VC_IRQ_CONFIG_REDIST: 1215 case VC_IRQ_CONFIG_IPI_CASC: 1216 1217 case VC_EQC_SCRUB_MASK: 1218 case VC_IVC_SCRUB_MASK: 1219 case VC_SBC_CONFIG: 1220 case VC_AT_MACRO_KILL_MASK: 1221 case VC_VSD_TABLE_ADDR: 1222 case PC_VSD_TABLE_ADDR: 1223 case VC_VSD_TABLE_DATA: 1224 case PC_VSD_TABLE_DATA: 1225 case PC_THREAD_EN_REG0: 1226 case PC_THREAD_EN_REG1: 1227 val = xive->regs[reg]; 1228 break; 1229 1230 /* 1231 * XIVE hardware thread enablement 1232 */ 1233 case PC_THREAD_EN_REG0_SET: 1234 case PC_THREAD_EN_REG0_CLR: 1235 val = xive->regs[PC_THREAD_EN_REG0 >> 3]; 1236 break; 1237 case PC_THREAD_EN_REG1_SET: 1238 case PC_THREAD_EN_REG1_CLR: 1239 val = xive->regs[PC_THREAD_EN_REG1 >> 3]; 1240 break; 1241 1242 case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */ 1243 val = 0xffffff0000000000; 1244 break; 1245 1246 /* 1247 * XIVE PC & VC cache updates for EAS, NVT and END 1248 */ 1249 case VC_EQC_CWATCH_SPEC: 1250 xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT); 1251 val = xive->regs[reg]; 1252 break; 1253 case VC_EQC_CWATCH_DAT0: 1254 /* 1255 * Load DATA registers from cache with data requested by the 1256 * SPEC register 1257 */ 1258 pnv_xive_end_cache_load(xive); 1259 val = xive->regs[reg]; 1260 break; 1261 case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3: 1262 val = xive->regs[reg]; 1263 break; 1264 1265 case PC_VPC_CWATCH_SPEC: 1266 xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT); 1267 val = xive->regs[reg]; 1268 break; 1269 case PC_VPC_CWATCH_DAT0: 1270 /* 1271 * Load DATA registers from cache with data requested by the 1272 * SPEC register 1273 */ 1274 pnv_xive_nvt_cache_load(xive); 1275 val = xive->regs[reg]; 1276 break; 1277 case PC_VPC_CWATCH_DAT1 ... 
PC_VPC_CWATCH_DAT7: 1278 val = xive->regs[reg]; 1279 break; 1280 1281 case PC_VPC_SCRUB_TRIG: 1282 case VC_IVC_SCRUB_TRIG: 1283 case VC_EQC_SCRUB_TRIG: 1284 xive->regs[reg] &= ~VC_SCRUB_VALID; 1285 val = xive->regs[reg]; 1286 break; 1287 1288 /* 1289 * XIVE PC & VC cache invalidation 1290 */ 1291 case PC_AT_KILL: 1292 xive->regs[reg] &= ~PC_AT_KILL_VALID; 1293 val = xive->regs[reg]; 1294 break; 1295 case VC_AT_MACRO_KILL: 1296 xive->regs[reg] &= ~VC_KILL_VALID; 1297 val = xive->regs[reg]; 1298 break; 1299 1300 /* 1301 * XIVE synchronisation 1302 */ 1303 case VC_EQC_CONFIG: 1304 val = VC_EQC_SYNC_MASK; 1305 break; 1306 1307 default: 1308 xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset); 1309 } 1310 1311 return val; 1312 } 1313 1314 static const MemoryRegionOps pnv_xive_ic_reg_ops = { 1315 .read = pnv_xive_ic_reg_read, 1316 .write = pnv_xive_ic_reg_write, 1317 .endianness = DEVICE_BIG_ENDIAN, 1318 .valid = { 1319 .min_access_size = 8, 1320 .max_access_size = 8, 1321 }, 1322 .impl = { 1323 .min_access_size = 8, 1324 .max_access_size = 8, 1325 }, 1326 }; 1327 1328 /* 1329 * IC - Notify MMIO port page (write only) 1330 */ 1331 #define PNV_XIVE_FORWARD_IPI 0x800 /* Forward IPI */ 1332 #define PNV_XIVE_FORWARD_HW 0x880 /* Forward HW */ 1333 #define PNV_XIVE_FORWARD_OS_ESC 0x900 /* Forward OS escalation */ 1334 #define PNV_XIVE_FORWARD_HW_ESC 0x980 /* Forward Hyp escalation */ 1335 #define PNV_XIVE_FORWARD_REDIS 0xa00 /* Forward Redistribution */ 1336 #define PNV_XIVE_RESERVED5 0xa80 /* Cache line 5 PowerBUS operation */ 1337 #define PNV_XIVE_RESERVED6 0xb00 /* Cache line 6 PowerBUS operation */ 1338 #define PNV_XIVE_RESERVED7 0xb80 /* Cache line 7 PowerBUS operation */ 1339 1340 /* VC synchronisation */ 1341 #define PNV_XIVE_SYNC_IPI 0xc00 /* Sync IPI */ 1342 #define PNV_XIVE_SYNC_HW 0xc80 /* Sync HW */ 1343 #define PNV_XIVE_SYNC_OS_ESC 0xd00 /* Sync OS escalation */ 1344 #define PNV_XIVE_SYNC_HW_ESC 0xd80 /* Sync Hyp escalation */ 1345 #define PNV_XIVE_SYNC_REDIS 0xe00 /* Sync Redistribution */ 1346 1347 /* PC synchronisation */ 1348 #define PNV_XIVE_SYNC_PULL 0xe80 /* Sync pull context */ 1349 #define PNV_XIVE_SYNC_PUSH 0xf00 /* Sync push context */ 1350 #define PNV_XIVE_SYNC_VPC 0xf80 /* Sync remove VPC store */ 1351 1352 static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val) 1353 { 1354 uint8_t blk; 1355 uint32_t idx; 1356 1357 trace_pnv_xive_ic_hw_trigger(addr, val); 1358 1359 if (val & XIVE_TRIGGER_END) { 1360 xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64, 1361 addr, val); 1362 return; 1363 } 1364 1365 /* 1366 * Forward the source event notification directly to the Router. 1367 * The source interrupt number should already be correctly encoded 1368 * with the chip block id by the sending device (PHB, PSI). 1369 */ 1370 blk = XIVE_EAS_BLOCK(val); 1371 idx = XIVE_EAS_INDEX(val); 1372 1373 xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx), 1374 !!(val & XIVE_TRIGGER_PQ)); 1375 } 1376 1377 static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val, 1378 unsigned size) 1379 { 1380 PnvXive *xive = PNV_XIVE(opaque); 1381 1382 /* VC: HW triggers */ 1383 switch (addr) { 1384 case 0x000 ... 0x7FF: 1385 pnv_xive_ic_hw_trigger(opaque, addr, val); 1386 break; 1387 1388 /* VC: Forwarded IRQs */ 1389 case PNV_XIVE_FORWARD_IPI: 1390 case PNV_XIVE_FORWARD_HW: 1391 case PNV_XIVE_FORWARD_OS_ESC: 1392 case PNV_XIVE_FORWARD_HW_ESC: 1393 case PNV_XIVE_FORWARD_REDIS: 1394 /* TODO: forwarded IRQs. 
Should be like HW triggers */ 1395 xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64, 1396 addr, val); 1397 break; 1398 1399 /* VC syncs */ 1400 case PNV_XIVE_SYNC_IPI: 1401 case PNV_XIVE_SYNC_HW: 1402 case PNV_XIVE_SYNC_OS_ESC: 1403 case PNV_XIVE_SYNC_HW_ESC: 1404 case PNV_XIVE_SYNC_REDIS: 1405 break; 1406 1407 /* PC syncs */ 1408 case PNV_XIVE_SYNC_PULL: 1409 case PNV_XIVE_SYNC_PUSH: 1410 case PNV_XIVE_SYNC_VPC: 1411 break; 1412 1413 default: 1414 xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr); 1415 } 1416 } 1417 1418 static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr, 1419 unsigned size) 1420 { 1421 PnvXive *xive = PNV_XIVE(opaque); 1422 1423 /* loads are invalid */ 1424 xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr); 1425 return -1; 1426 } 1427 1428 static const MemoryRegionOps pnv_xive_ic_notify_ops = { 1429 .read = pnv_xive_ic_notify_read, 1430 .write = pnv_xive_ic_notify_write, 1431 .endianness = DEVICE_BIG_ENDIAN, 1432 .valid = { 1433 .min_access_size = 8, 1434 .max_access_size = 8, 1435 }, 1436 .impl = { 1437 .min_access_size = 8, 1438 .max_access_size = 8, 1439 }, 1440 }; 1441 1442 /* 1443 * IC - LSI MMIO handlers (not modeled) 1444 */ 1445 1446 static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr, 1447 uint64_t val, unsigned size) 1448 { 1449 PnvXive *xive = PNV_XIVE(opaque); 1450 1451 xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr); 1452 } 1453 1454 static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size) 1455 { 1456 PnvXive *xive = PNV_XIVE(opaque); 1457 1458 xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr); 1459 return -1; 1460 } 1461 1462 static const MemoryRegionOps pnv_xive_ic_lsi_ops = { 1463 .read = pnv_xive_ic_lsi_read, 1464 .write = pnv_xive_ic_lsi_write, 1465 .endianness = DEVICE_BIG_ENDIAN, 1466 .valid = { 1467 .min_access_size = 8, 1468 .max_access_size = 8, 1469 }, 1470 .impl = { 1471 .min_access_size = 8, 1472 .max_access_size = 8, 1473 }, 1474 }; 1475 1476 /* 1477 * IC - Indirect TIMA MMIO handlers 1478 */ 1479 1480 /* 1481 * When the TIMA is accessed from the indirect page, the thread id of 1482 * the target CPU is configured in the PC_TCTXT_INDIR0 register before 1483 * use. This is used for resets and for debug purpose also. 
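 * The target PIR is rebuilt from the chip id and the PC_TCTXT_INDIR_THRDID
 * field: pir = (chip_id << 8) | thread_id, as done below.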
1484 */ 1485 static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive) 1486 { 1487 PnvChip *chip = xive->chip; 1488 uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3]; 1489 PowerPCCPU *cpu = NULL; 1490 int pir; 1491 1492 if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) { 1493 xive_error(xive, "IC: no indirect TIMA access in progress"); 1494 return NULL; 1495 } 1496 1497 pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir); 1498 cpu = pnv_chip_find_cpu(chip, pir); 1499 if (!cpu) { 1500 xive_error(xive, "IC: invalid PIR %x for indirect access", pir); 1501 return NULL; 1502 } 1503 1504 /* Check that HW thread is XIVE enabled */ 1505 if (!pnv_xive_is_cpu_enabled(xive, cpu)) { 1506 xive_error(xive, "IC: CPU %x is not enabled", pir); 1507 } 1508 1509 return XIVE_TCTX(pnv_cpu_state(cpu)->intc); 1510 } 1511 1512 static void xive_tm_indirect_write(void *opaque, hwaddr offset, 1513 uint64_t value, unsigned size) 1514 { 1515 XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque)); 1516 1517 xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size); 1518 } 1519 1520 static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset, 1521 unsigned size) 1522 { 1523 XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque)); 1524 1525 return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size); 1526 } 1527 1528 static const MemoryRegionOps xive_tm_indirect_ops = { 1529 .read = xive_tm_indirect_read, 1530 .write = xive_tm_indirect_write, 1531 .endianness = DEVICE_BIG_ENDIAN, 1532 .valid = { 1533 .min_access_size = 1, 1534 .max_access_size = 8, 1535 }, 1536 .impl = { 1537 .min_access_size = 1, 1538 .max_access_size = 8, 1539 }, 1540 }; 1541 1542 static void pnv_xive_tm_write(void *opaque, hwaddr offset, 1543 uint64_t value, unsigned size) 1544 { 1545 PowerPCCPU *cpu = POWERPC_CPU(current_cpu); 1546 PnvXive *xive = pnv_xive_tm_get_xive(cpu); 1547 XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); 1548 1549 xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size); 1550 } 1551 1552 static uint64_t pnv_xive_tm_read(void *opaque, hwaddr offset, unsigned size) 1553 { 1554 PowerPCCPU *cpu = POWERPC_CPU(current_cpu); 1555 PnvXive *xive = pnv_xive_tm_get_xive(cpu); 1556 XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); 1557 1558 return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size); 1559 } 1560 1561 const MemoryRegionOps pnv_xive_tm_ops = { 1562 .read = pnv_xive_tm_read, 1563 .write = pnv_xive_tm_write, 1564 .endianness = DEVICE_BIG_ENDIAN, 1565 .valid = { 1566 .min_access_size = 1, 1567 .max_access_size = 8, 1568 }, 1569 .impl = { 1570 .min_access_size = 1, 1571 .max_access_size = 8, 1572 }, 1573 }; 1574 1575 /* 1576 * Interrupt controller XSCOM region. 1577 */ 1578 static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size) 1579 { 1580 switch (addr >> 3) { 1581 case X_VC_EQC_CONFIG: 1582 /* FIXME (skiboot): This is the only XSCOM load. Bizarre. 
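         * Presumably the firmware reaches the other registers through the
         * IC MMIO BAR once it is set up; any other XSCOM read is simply
         * forwarded to pnv_xive_ic_reg_read() below.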
*/ 1583 return VC_EQC_SYNC_MASK; 1584 default: 1585 return pnv_xive_ic_reg_read(opaque, addr, size); 1586 } 1587 } 1588 1589 static void pnv_xive_xscom_write(void *opaque, hwaddr addr, 1590 uint64_t val, unsigned size) 1591 { 1592 pnv_xive_ic_reg_write(opaque, addr, val, size); 1593 } 1594 1595 static const MemoryRegionOps pnv_xive_xscom_ops = { 1596 .read = pnv_xive_xscom_read, 1597 .write = pnv_xive_xscom_write, 1598 .endianness = DEVICE_BIG_ENDIAN, 1599 .valid = { 1600 .min_access_size = 8, 1601 .max_access_size = 8, 1602 }, 1603 .impl = { 1604 .min_access_size = 8, 1605 .max_access_size = 8, 1606 } 1607 }; 1608 1609 /* 1610 * Virtualization Controller MMIO region containing the IPI and END ESB pages 1611 */ 1612 static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset, 1613 unsigned size) 1614 { 1615 PnvXive *xive = PNV_XIVE(opaque); 1616 uint64_t edt_index = offset >> pnv_xive_edt_shift(xive); 1617 uint64_t edt_type = 0; 1618 uint64_t edt_offset; 1619 MemTxResult result; 1620 AddressSpace *edt_as = NULL; 1621 uint64_t ret = -1; 1622 1623 if (edt_index < XIVE_TABLE_EDT_MAX) { 1624 edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]); 1625 } 1626 1627 switch (edt_type) { 1628 case CQ_TDR_EDT_IPI: 1629 edt_as = &xive->ipi_as; 1630 break; 1631 case CQ_TDR_EDT_EQ: 1632 edt_as = &xive->end_as; 1633 break; 1634 default: 1635 xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset); 1636 return -1; 1637 } 1638 1639 /* Remap the offset for the targeted address space */ 1640 edt_offset = pnv_xive_edt_offset(xive, offset, edt_type); 1641 1642 ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED, 1643 &result); 1644 1645 if (result != MEMTX_OK) { 1646 xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%" 1647 HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END", 1648 offset, edt_offset); 1649 return -1; 1650 } 1651 1652 return ret; 1653 } 1654 1655 static void pnv_xive_vc_write(void *opaque, hwaddr offset, 1656 uint64_t val, unsigned size) 1657 { 1658 PnvXive *xive = PNV_XIVE(opaque); 1659 uint64_t edt_index = offset >> pnv_xive_edt_shift(xive); 1660 uint64_t edt_type = 0; 1661 uint64_t edt_offset; 1662 MemTxResult result; 1663 AddressSpace *edt_as = NULL; 1664 1665 if (edt_index < XIVE_TABLE_EDT_MAX) { 1666 edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]); 1667 } 1668 1669 switch (edt_type) { 1670 case CQ_TDR_EDT_IPI: 1671 edt_as = &xive->ipi_as; 1672 break; 1673 case CQ_TDR_EDT_EQ: 1674 edt_as = &xive->end_as; 1675 break; 1676 default: 1677 xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx, 1678 offset); 1679 return; 1680 } 1681 1682 /* Remap the offset for the targeted address space */ 1683 edt_offset = pnv_xive_edt_offset(xive, offset, edt_type); 1684 1685 address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result); 1686 if (result != MEMTX_OK) { 1687 xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset); 1688 } 1689 } 1690 1691 static const MemoryRegionOps pnv_xive_vc_ops = { 1692 .read = pnv_xive_vc_read, 1693 .write = pnv_xive_vc_write, 1694 .endianness = DEVICE_BIG_ENDIAN, 1695 .valid = { 1696 .min_access_size = 8, 1697 .max_access_size = 8, 1698 }, 1699 .impl = { 1700 .min_access_size = 8, 1701 .max_access_size = 8, 1702 }, 1703 }; 1704 1705 /* 1706 * Presenter Controller MMIO region. The Virtualization Controller 1707 * updates the IPB in the NVT table when required. Not modeled. 
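 * Any access is reported as a guest error and reads return -1.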
1708 */ 1709 static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr, 1710 unsigned size) 1711 { 1712 PnvXive *xive = PNV_XIVE(opaque); 1713 1714 xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr); 1715 return -1; 1716 } 1717 1718 static void pnv_xive_pc_write(void *opaque, hwaddr addr, 1719 uint64_t value, unsigned size) 1720 { 1721 PnvXive *xive = PNV_XIVE(opaque); 1722 1723 xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr); 1724 } 1725 1726 static const MemoryRegionOps pnv_xive_pc_ops = { 1727 .read = pnv_xive_pc_read, 1728 .write = pnv_xive_pc_write, 1729 .endianness = DEVICE_BIG_ENDIAN, 1730 .valid = { 1731 .min_access_size = 8, 1732 .max_access_size = 8, 1733 }, 1734 .impl = { 1735 .min_access_size = 8, 1736 .max_access_size = 8, 1737 }, 1738 }; 1739 1740 static void xive_nvt_pic_print_info(XiveNVT *nvt, uint32_t nvt_idx, 1741 Monitor *mon) 1742 { 1743 uint8_t eq_blk = xive_get_field32(NVT_W1_EQ_BLOCK, nvt->w1); 1744 uint32_t eq_idx = xive_get_field32(NVT_W1_EQ_INDEX, nvt->w1); 1745 1746 if (!xive_nvt_is_valid(nvt)) { 1747 return; 1748 } 1749 1750 monitor_printf(mon, " %08x end:%02x/%04x IPB:%02x\n", nvt_idx, 1751 eq_blk, eq_idx, 1752 xive_get_field32(NVT_W4_IPB, nvt->w4)); 1753 } 1754 1755 void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon) 1756 { 1757 XiveRouter *xrtr = XIVE_ROUTER(xive); 1758 uint8_t blk = pnv_xive_block_id(xive); 1759 uint8_t chip_id = xive->chip->chip_id; 1760 uint32_t srcno0 = XIVE_EAS(blk, 0); 1761 uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk); 1762 XiveEAS eas; 1763 XiveEND end; 1764 XiveNVT nvt; 1765 int i; 1766 uint64_t xive_nvt_per_subpage; 1767 1768 monitor_printf(mon, "XIVE[%x] #%d Source %08x .. %08x\n", chip_id, blk, 1769 srcno0, srcno0 + nr_ipis - 1); 1770 xive_source_pic_print_info(&xive->ipi_source, srcno0, mon); 1771 1772 monitor_printf(mon, "XIVE[%x] #%d EAT %08x .. %08x\n", chip_id, blk, 1773 srcno0, srcno0 + nr_ipis - 1); 1774 for (i = 0; i < nr_ipis; i++) { 1775 if (xive_router_get_eas(xrtr, blk, i, &eas)) { 1776 break; 1777 } 1778 if (!xive_eas_is_masked(&eas)) { 1779 xive_eas_pic_print_info(&eas, i, mon); 1780 } 1781 } 1782 1783 monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk); 1784 i = 0; 1785 while (!xive_router_get_end(xrtr, blk, i, &end)) { 1786 xive_end_pic_print_info(&end, i++, mon); 1787 } 1788 1789 monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk); 1790 i = 0; 1791 while (!xive_router_get_end(xrtr, blk, i, &end)) { 1792 xive_end_eas_pic_print_info(&end, i++, mon); 1793 } 1794 1795 monitor_printf(mon, "XIVE[%x] #%d NVTT %08x .. 
%08x\n", chip_id, blk, 1796 0, XIVE_NVT_COUNT - 1); 1797 xive_nvt_per_subpage = pnv_xive_vst_per_subpage(xive, VST_TSEL_VPDT); 1798 for (i = 0; i < XIVE_NVT_COUNT; i += xive_nvt_per_subpage) { 1799 while (!xive_router_get_nvt(xrtr, blk, i, &nvt)) { 1800 xive_nvt_pic_print_info(&nvt, i++, mon); 1801 } 1802 } 1803 } 1804 1805 static void pnv_xive_reset(void *dev) 1806 { 1807 PnvXive *xive = PNV_XIVE(dev); 1808 XiveSource *xsrc = &xive->ipi_source; 1809 XiveENDSource *end_xsrc = &xive->end_source; 1810 1811 /* Default page size (Should be changed at runtime to 64k) */ 1812 xive->ic_shift = xive->vc_shift = xive->pc_shift = 12; 1813 1814 /* Clear subregions */ 1815 if (memory_region_is_mapped(&xsrc->esb_mmio)) { 1816 memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio); 1817 } 1818 1819 if (memory_region_is_mapped(&xive->ipi_edt_mmio)) { 1820 memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio); 1821 } 1822 1823 if (memory_region_is_mapped(&end_xsrc->esb_mmio)) { 1824 memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio); 1825 } 1826 1827 if (memory_region_is_mapped(&xive->end_edt_mmio)) { 1828 memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio); 1829 } 1830 } 1831 1832 static void pnv_xive_init(Object *obj) 1833 { 1834 PnvXive *xive = PNV_XIVE(obj); 1835 1836 object_initialize_child(obj, "ipi_source", &xive->ipi_source, 1837 TYPE_XIVE_SOURCE); 1838 object_initialize_child(obj, "end_source", &xive->end_source, 1839 TYPE_XIVE_END_SOURCE); 1840 } 1841 1842 /* 1843 * Maximum number of IRQs and ENDs supported by HW 1844 */ 1845 #define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE)) 1846 #define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE)) 1847 1848 static void pnv_xive_realize(DeviceState *dev, Error **errp) 1849 { 1850 PnvXive *xive = PNV_XIVE(dev); 1851 PnvXiveClass *pxc = PNV_XIVE_GET_CLASS(dev); 1852 XiveSource *xsrc = &xive->ipi_source; 1853 XiveENDSource *end_xsrc = &xive->end_source; 1854 Error *local_err = NULL; 1855 1856 pxc->parent_realize(dev, &local_err); 1857 if (local_err) { 1858 error_propagate(errp, local_err); 1859 return; 1860 } 1861 1862 assert(xive->chip); 1863 1864 /* 1865 * The XiveSource and XiveENDSource objects are realized with the 1866 * maximum allowed HW configuration. The ESB MMIO regions will be 1867 * resized dynamically when the controller is configured by the FW 1868 * to limit accesses to resources not provisioned. 1869 */ 1870 object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE_NR_IRQS, 1871 &error_fatal); 1872 object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort); 1873 if (!qdev_realize(DEVICE(xsrc), NULL, errp)) { 1874 return; 1875 } 1876 1877 object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE_NR_ENDS, 1878 &error_fatal); 1879 object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive), 1880 &error_abort); 1881 if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) { 1882 return; 1883 } 1884 1885 /* Default page size. 
Generally changed at runtime to 64k */ 1886 xive->ic_shift = xive->vc_shift = xive->pc_shift = 12; 1887 1888 /* XSCOM region, used for initial configuration of the BARs */ 1889 memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops, 1890 xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3); 1891 1892 /* Interrupt controller MMIO regions */ 1893 memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic", 1894 PNV9_XIVE_IC_SIZE); 1895 1896 memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops, 1897 xive, "xive-ic-reg", 1 << xive->ic_shift); 1898 memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev), 1899 &pnv_xive_ic_notify_ops, 1900 xive, "xive-ic-notify", 1 << xive->ic_shift); 1901 1902 /* The Pervasive LSI trigger and EOI pages (not modeled) */ 1903 memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops, 1904 xive, "xive-ic-lsi", 2 << xive->ic_shift); 1905 1906 /* Thread Interrupt Management Area (Indirect) */ 1907 memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev), 1908 &xive_tm_indirect_ops, 1909 xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE); 1910 /* 1911 * Overall Virtualization Controller MMIO region containing the 1912 * IPI ESB pages and END ESB pages. The layout is defined by the 1913 * EDT "Domain table" and the accesses are dispatched using 1914 * address spaces for each. 1915 */ 1916 memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive, 1917 "xive-vc", PNV9_XIVE_VC_SIZE); 1918 1919 memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi", 1920 PNV9_XIVE_VC_SIZE); 1921 address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi"); 1922 memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end", 1923 PNV9_XIVE_VC_SIZE); 1924 address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end"); 1925 1926 /* 1927 * The MMIO windows exposing the IPI ESBs and the END ESBs in the 1928 * VC region. Their size is configured by the FW in the EDT table. 
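     * They are created empty (size 0) and resized by pnv_xive_edt_resize()
     * once the EDT has been fully programmed through CQ_TAR/CQ_TDR.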
1929 */ 1930 memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0); 1931 memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0); 1932 1933 /* Presenter Controller MMIO region (not modeled) */ 1934 memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive, 1935 "xive-pc", PNV9_XIVE_PC_SIZE); 1936 1937 /* Thread Interrupt Management Area (Direct) */ 1938 memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &pnv_xive_tm_ops, 1939 xive, "xive-tima", PNV9_XIVE_TM_SIZE); 1940 1941 qemu_register_reset(pnv_xive_reset, dev); 1942 } 1943 1944 static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt, 1945 int xscom_offset) 1946 { 1947 const char compat[] = "ibm,power9-xive-x"; 1948 char *name; 1949 int offset; 1950 uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE; 1951 uint32_t reg[] = { 1952 cpu_to_be32(lpc_pcba), 1953 cpu_to_be32(PNV9_XSCOM_XIVE_SIZE) 1954 }; 1955 1956 name = g_strdup_printf("xive@%x", lpc_pcba); 1957 offset = fdt_add_subnode(fdt, xscom_offset, name); 1958 _FDT(offset); 1959 g_free(name); 1960 1961 _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)))); 1962 _FDT((fdt_setprop(fdt, offset, "compatible", compat, 1963 sizeof(compat)))); 1964 return 0; 1965 } 1966 1967 static Property pnv_xive_properties[] = { 1968 DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0), 1969 DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0), 1970 DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0), 1971 DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0), 1972 /* The PnvChip id identifies the XIVE interrupt controller. */ 1973 DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *), 1974 DEFINE_PROP_END_OF_LIST(), 1975 }; 1976 1977 static void pnv_xive_class_init(ObjectClass *klass, void *data) 1978 { 1979 DeviceClass *dc = DEVICE_CLASS(klass); 1980 PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); 1981 XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass); 1982 XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); 1983 XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass); 1984 PnvXiveClass *pxc = PNV_XIVE_CLASS(klass); 1985 1986 xdc->dt_xscom = pnv_xive_dt_xscom; 1987 1988 dc->desc = "PowerNV XIVE Interrupt Controller"; 1989 device_class_set_parent_realize(dc, pnv_xive_realize, &pxc->parent_realize); 1990 dc->realize = pnv_xive_realize; 1991 device_class_set_props(dc, pnv_xive_properties); 1992 1993 xrc->get_eas = pnv_xive_get_eas; 1994 xrc->get_pq = pnv_xive_get_pq; 1995 xrc->set_pq = pnv_xive_set_pq; 1996 xrc->get_end = pnv_xive_get_end; 1997 xrc->write_end = pnv_xive_write_end; 1998 xrc->get_nvt = pnv_xive_get_nvt; 1999 xrc->write_nvt = pnv_xive_write_nvt; 2000 xrc->get_block_id = pnv_xive_get_block_id; 2001 2002 xnc->notify = pnv_xive_notify; 2003 xpc->match_nvt = pnv_xive_match_nvt; 2004 xpc->get_config = pnv_xive_presenter_get_config; 2005 }; 2006 2007 static const TypeInfo pnv_xive_info = { 2008 .name = TYPE_PNV_XIVE, 2009 .parent = TYPE_XIVE_ROUTER, 2010 .instance_init = pnv_xive_init, 2011 .instance_size = sizeof(PnvXive), 2012 .class_init = pnv_xive_class_init, 2013 .class_size = sizeof(PnvXiveClass), 2014 .interfaces = (InterfaceInfo[]) { 2015 { TYPE_PNV_XSCOM_INTERFACE }, 2016 { } 2017 } 2018 }; 2019 2020 static void pnv_xive_register_types(void) 2021 { 2022 type_register_static(&pnv_xive_info); 2023 } 2024 2025 type_init(pnv_xive_register_types) 2026