/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"
#include "trace.h"

#include <libfdt.h>

#include "pnv_xive_regs.h"

#undef XIVE_DEBUG

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * Interrupt fifo backing store table (not modeled) :
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - First escalate,
     * 3 - Second escalate,
     * 4 - Redistribution,
     * 5 - IPI cascaded queue ?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};

#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);

/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * TODO: It might be better to use the existing extract64() and
 * deposit64() but this means that all the register definitions will
 * change and become incompatible with the ones found in skiboot.
 *
 * Keep it as it is for now until we find a common ground.
 */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}

static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}
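
/*
 * For illustration (values not taken from any register definition):
 * GETFIELD(0x00F0, 0x1234) extracts the bits selected by the mask and
 * shifts them down, giving 0x3, while SETFIELD(0x00F0, 0x1234, 0xA)
 * replaces the field and keeps the other bits, returning 0x12A4.
 */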

/*
 * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID
 * field overrides the hardwired chip ID in the Powerbus operations
 * and for CAM compares
 */
static uint8_t pnv_xive_block_id(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->regs[PC_TCTXT_CFG >> 3];

    if (cfg_val & PC_TCTXT_CHIPID_OVERRIDE) {
        blk = GETFIELD(PC_TCTXT_CHIPID, cfg_val);
    }

    return blk;
}

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive *pnv_xive_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
        PnvXive *xive = &chip9->xive;

        if (pnv_xive_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of same
 * size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                   info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}
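
/*
 * Worked example for the indirect case below (illustrative numbers,
 * not from the HW docs): with 64k indirect pages and 32-byte ENDs,
 * each page holds 2048 entries, so END index 5000 goes through VSD
 * slot 2 (5000 / 2048) and lands at offset 904 * 32 (5000 % 2048)
 * within the page that VSD points to.
 */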

static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED)) {
        xive_error(xive, "VST: failed to access %s entry %x @0x%" PRIx64,
                   info->name, idx, vsd_addr);
        return 0;
    }

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                       MEMTXATTRS_UNSPECIFIED)) {
            xive_error(xive, "VST: failed to access %s entry %x @0x%"
                       PRIx64, info->name, vsd_idx, vsd_addr);
            return 0;
        }

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differ !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}

static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
                                  uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive_get_remote(blk);

        return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    cpu_physical_memory_read(addr, data, info->size);
    return 0;
}

#define XIVE_VST_WORD_ALL -1

static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        cpu_physical_memory_write(addr, data, info->size);
    } else {
        cpu_physical_memory_write(addr + word_number * 4,
                                  data + word_number * 4, 4);
    }
    return 0;
}

static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEND *end)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
}

static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveEND *end, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
                              word_number);
}

static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t eqc_watch[4];

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}

static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveNVT *nvt)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
}

static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveNVT *nvt, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
                              word_number);
}
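
/*
 * Cache watch programming model, as implemented by the register
 * handlers further down: firmware selects the target entry through
 * the *_CWATCH_SPEC block/offset fields, a read of *_CWATCH_DAT0
 * loads the entry into the DAT registers, and a write to
 * *_CWATCH_DAT0 commits all the DAT words back to the VST in RAM.
 */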

static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t vpc_watch[8];

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}

static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    /*
     * EAT lookups should be local to the IC
     */
    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}

/*
 * One bit per thread id. The first register PC_THREAD_EN_REG0 covers
 * the first cores 0-15 (normal) of the chip or 0-7 (fused). The
 * second register covers cores 16-23 (normal) or 8-11 (fused).
 */
static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV9_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1;
    uint32_t bit = pir & 0x3f;

    return xive->regs[reg >> 3] & PPC_BIT(bit);
}

static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                              uint8_t nvt_blk, uint32_t nvt_idx,
                              bool cam_ignore, uint8_t priority,
                              uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive *xive = PNV_XIVE(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            /*
             * Check the thread context CAM lines and record matches.
             */
            ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                             nvt_idx, cam_ignore, logic_serv);
            /*
             * Save the context and follow on to catch duplicates, that we
             * don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return -1;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

static uint8_t pnv_xive_get_block_id(XiveRouter *xrtr)
{
    return pnv_xive_block_id(PNV_XIVE(xrtr));
}

/*
 * The TIMA MMIO space is shared among the chips and to identify the
 * chip from which the access is being done, we extract the chip id
 * from the PIR.
 */
static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive *xive = PNV_XIVE(xptr);

    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources (IPIs) of the interrupt controller have no
 * knowledge of the XIVE chip on which they reside. Encode the block
 * id in the source interrupt number before forwarding the source
 * event notification to the Router. This is required on a multichip
 * system.
 */
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno)
{
    PnvXive *xive = PNV_XIVE(xn);
    uint8_t blk = pnv_xive_block_id(xive);

    xive_router_notify(xn, XIVE_EAS(blk, srcno));
}

/*
 * XIVE helpers
 */

static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}

static uint64_t pnv_xive_edt_shift(PnvXive *xive)
{
    return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
}

static uint64_t pnv_xive_pc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
}

static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
{
    uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}

/*
 * Compute the number of entries per indirect subpage.
 */
static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type)
{
    uint8_t blk = pnv_xive_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(VSD_INDIRECT & vsd)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED)) {
        xive_error(xive, "VST: failed to access %s entry @0x%" PRIx64,
                   info->name, vsd_addr);
        return 0;
    }

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry @0x%" PRIx64 " !?",
                   info->name, vsd_addr);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}

/*
 * EDT Table
 *
 * The Virtualization Controller MMIO region containing the IPI ESB
 * pages and END ESB pages is sub-divided into "sets" which map
 * portions of the VC region to the different ESB pages. It is
 * configured at runtime through the EDT "Domain Table" to let the
 * firmware decide how to split the VC address space between IPI ESB
 * pages and END ESB pages.
 */

/*
 * Computes the overall size of the IPI or the END ESB pages
 */
static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t size = 0;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type == type) {
            size += edt_size;
        }
    }

    return size;
}

/*
 * Maps an offset of the VC region in the IPI or END region using the
 * layout defined by the EDT "Domain Table"
 */
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    int i;
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;

    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}

static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}

/*
 * XIVE Table configuration. Only EDT is supported.
 */
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_TSEL_IVT:  /* Nothing to be done */
        break;

    case VST_TSEL_EQDT:
        /*
         * Backing store pages for the END.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * END ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT: /* Not modeled */
    case VST_TSEL_IRQ:  /* Not modeled */
        /*
         * These tables contain the backing store pages for the
         * interrupt fifos of the VC sub-engine in case of overflow.
         */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both PC and VC sub-engines are configured as each uses the Virtual
 * Structure Tables : SBE, EAS, END and NVT.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                   " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

/*
 * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages :
 *
 * Page 0           sub-engine BARs
 *     0x000 - 0x3FF      IC registers
 *     0x400 - 0x7FF      PC registers
 *     0x800 - 0xFFF      VC registers
 *
 * Page 1           Notify page (writes only)
 *     0x000 - 0x7FF      HW interrupt triggers (PSI, PHB)
 *     0x800 - 0xFFF      forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */

/*
 * IC - registers MMIO
 */
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:     /* msgsnd for doorbells */
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
        /*
         * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
         */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /*
         * PC_GCONF_CHIPID_OVR
         *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
         */
        break;
    case PC_TCTXT_CFG:
        /*
         * TODO: block group support
         */
        break;
    case PC_TCTXT_TRACK:
        /*
         * PC_TCTXT_TRACK_EN:
         *   enable block tracking and exchange of block ownership
         *   information between Interrupt controllers
         */
        break;

    /*
     * Misc settings
     */
    case VC_SBC_CONFIG: /* Store EOI configuration */
        /*
         * Configure store EOI if required by firmware (skiboot has removed
         * support recently though)
         */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG: /* TODO: silent escalation */
    case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* bit4: resets all BAR registers */
        break;

    case CQ_IC_BAR: /* IC BAR. 8 pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio, 0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
    case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR: /* From 32M to 512G */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR: /* From 64M to 4TB */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings.
     */
    case CQ_TAR: /* Table Address */
        break;
    case CQ_TDR: /* Table Data */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR: /* Virtual table selector */
        break;
    case VC_VSD_TABLE_DATA: /* Virtual table setting */
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0: /* Physical Thread Enable */
    case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up. Defines the PIR of the HW thread
     * to use.
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
    case VC_IVC_SCRUB_TRIG:
        break;

    case VC_EQC_CWATCH_SPEC:
        val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_end_update(xive);
        break;
    case VC_EQC_SCRUB_MASK:
    case VC_EQC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    case PC_VPC_CWATCH_SPEC:
        val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_nvt_update(xive);
        break;
    case PC_VPC_SCRUB_MASK:
    case PC_VPC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;


    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}

static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BAR:
    case CQ_PC_BARM:
    case CQ_VC_BAR:
    case CQ_VC_BARM:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:
    case VC_SBC_CONFIG:
    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_EQC_CWATCH_SPEC:
        xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}

static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Notify MMIO port page (write only)
 */
#define PNV_XIVE_FORWARD_IPI     0x800 /* Forward IPI */
#define PNV_XIVE_FORWARD_HW      0x880 /* Forward HW */
#define PNV_XIVE_FORWARD_OS_ESC  0x900 /* Forward OS escalation */
#define PNV_XIVE_FORWARD_HW_ESC  0x980 /* Forward Hyp escalation */
#define PNV_XIVE_FORWARD_REDIS   0xa00 /* Forward Redistribution */
#define PNV_XIVE_RESERVED5       0xa80 /* Cache line 5 PowerBUS operation */
#define PNV_XIVE_RESERVED6       0xb00 /* Cache line 6 PowerBUS operation */
#define PNV_XIVE_RESERVED7       0xb80 /* Cache line 7 PowerBUS operation */

/* VC synchronisation */
#define PNV_XIVE_SYNC_IPI        0xc00 /* Sync IPI */
#define PNV_XIVE_SYNC_HW         0xc80 /* Sync HW */
#define PNV_XIVE_SYNC_OS_ESC     0xd00 /* Sync OS escalation */
#define PNV_XIVE_SYNC_HW_ESC     0xd80 /* Sync Hyp escalation */
#define PNV_XIVE_SYNC_REDIS      0xe00 /* Sync Redistribution */

/* PC synchronisation */
#define PNV_XIVE_SYNC_PULL       0xe80 /* Sync pull context */
#define PNV_XIVE_SYNC_PUSH       0xf00 /* Sync push context */
#define PNV_XIVE_SYNC_VPC        0xf80 /* Sync remove VPC store */
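
/*
 * Illustrative trigger encoding (assuming the XIVE_EAS() layout of
 * xive_regs.h, with the block in the top nibble of the low word): a
 * device store of 0x10000005 to the HW trigger range targets EAS 5
 * of block 1. Data with XIVE_TRIGGER_END set would address an END
 * instead, which this model rejects below.
 */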

static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    trace_pnv_xive_ic_hw_trigger(addr, val);

    if (val & XIVE_TRIGGER_END) {
        xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                   addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx));
}

static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* VC: HW triggers */
    switch (addr) {
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    /* VC: Forwarded IRQs */
    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        /* TODO: forwarded IRQs. Should be like HW triggers */
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}

static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* loads are invalid */
    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - LSI MMIO handlers (not modeled)
 */

static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Indirect TIMA MMIO handlers
 */

/*
 * When the TIMA is accessed from the indirect page, the thread id of
 * the target CPU is configured in the PC_TCTXT_INDIR0 register before
 * use. This is used for resets and also for debug purposes.
 */
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    PnvChip *chip = xive->chip;
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir);
    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    /* Check that HW thread is XIVE enabled */
    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static void xive_tm_indirect_write(void *opaque, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
}

static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
}

static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void pnv_xive_tm_write(void *opaque, hwaddr offset,
                              uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size);
}

static uint64_t pnv_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size);
}

const MemoryRegionOps pnv_xive_tm_ops = {
    .read = pnv_xive_tm_read,
    .write = pnv_xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * Interrupt controller XSCOM region.
 */
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
        return VC_EQC_SYNC_MASK;
    default:
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    }
};

/*
 * Virtualization Controller MMIO region containing the IPI and END ESB pages
 */
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret = -1;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}

static void pnv_xive_vc_write(void *opaque, hwaddr offset,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
                   offset);
        return;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
    }
}

static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Presenter Controller MMIO region. The Virtualization Controller
 * updates the IPB in the NVT table when required. Not modeled.
 */
static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static void pnv_xive_pc_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr);
}

static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static void xive_nvt_pic_print_info(XiveNVT *nvt, uint32_t nvt_idx,
                                    Monitor *mon)
{
    uint8_t eq_blk = xive_get_field32(NVT_W1_EQ_BLOCK, nvt->w1);
    uint32_t eq_idx = xive_get_field32(NVT_W1_EQ_INDEX, nvt->w1);

    if (!xive_nvt_is_valid(nvt)) {
        return;
    }

    monitor_printf(mon, "  %08x end:%02x/%04x IPB:%02x\n", nvt_idx,
                   eq_blk, eq_idx,
                   xive_get_field32(NVT_W4_IPB, nvt->w4));
}

void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
{
    XiveRouter *xrtr = XIVE_ROUTER(xive);
    uint8_t blk = pnv_xive_block_id(xive);
    uint8_t chip_id = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
    XiveEAS eas;
    XiveEND end;
    XiveNVT nvt;
    int i;
    uint64_t xive_nvt_per_subpage;

    monitor_printf(mon, "XIVE[%x] #%d Source %08x .. %08x\n", chip_id, blk,
                   srcno0, srcno0 + nr_ipis - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] #%d EAT %08x .. %08x\n", chip_id, blk,
                   srcno0, srcno0 + nr_ipis - 1);
    for (i = 0; i < nr_ipis; i++) {
        if (xive_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive_eas_is_masked(&eas)) {
            xive_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_eas_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d NVTT %08x .. %08x\n", chip_id, blk,
                   0, XIVE_NVT_COUNT - 1);
    xive_nvt_per_subpage = pnv_xive_vst_per_subpage(xive, VST_TSEL_VPDT);
    for (i = 0; i < XIVE_NVT_COUNT; i += xive_nvt_per_subpage) {
        while (!xive_router_get_nvt(xrtr, blk, i, &nvt)) {
            xive_nvt_pic_print_info(&nvt, i++, mon);
        }
    }
}

static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /* Default page size (Should be changed at runtime to 64k) */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* Clear subregions */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
        memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->end_edt_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
    }
}

static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            TYPE_XIVE_SOURCE);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE_END_SOURCE);
}

/*
 * Maximum number of IRQs and ENDs supported by HW
 */
#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
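
/*
 * In other words, one interrupt (or END) is provisioned for every
 * pair of 64k ESB pages that fits in the VC window.
 */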

static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    PnvXiveClass *pxc = PNV_XIVE_GET_CLASS(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    pxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    assert(xive->chip);

    /*
     * The XiveSource and XiveENDSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE_NR_IRQS,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
    if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE_NR_ENDS,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
        return;
    }

    /* Default page size. Generally changed at runtime to 64k */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);

    /* Interrupt controller MMIO regions */
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);

    /* The Pervasive LSI trigger and EOI pages (not modeled) */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Thread Interrupt Management Area (Indirect) */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);
    /*
     * Overall Virtualization Controller MMIO region containing the
     * IPI ESB pages and END ESB pages. The layout is defined by the
     * EDT "Domain table" and the accesses are dispatched using
     * address spaces for each.
     */
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
     */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);

    /* Presenter Controller MMIO region (not modeled) */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &pnv_xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}

static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
                             int xscom_offset)
{
    const char compat[] = "ibm,power9-xive-x";
    char *name;
    int offset;
    uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
    uint32_t reg[] = {
        cpu_to_be32(lpc_pcba),
        cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
    };

    name = g_strdup_printf("xive@%x", lpc_pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat,
                      sizeof(compat))));
    return 0;
}

static Property pnv_xive_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
    DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
    DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
    /* The PnvChip id identifies the XIVE interrupt controller. */
    DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};

static void pnv_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    PnvXiveClass *pxc = PNV_XIVE_CLASS(klass);

    xdc->dt_xscom = pnv_xive_dt_xscom;

    dc->desc = "PowerNV XIVE Interrupt Controller";
    device_class_set_parent_realize(dc, pnv_xive_realize, &pxc->parent_realize);
    dc->realize = pnv_xive_realize;
    device_class_set_props(dc, pnv_xive_properties);

    xrc->get_eas = pnv_xive_get_eas;
    xrc->get_end = pnv_xive_get_end;
    xrc->write_end = pnv_xive_write_end;
    xrc->get_nvt = pnv_xive_get_nvt;
    xrc->write_nvt = pnv_xive_write_nvt;
    xrc->get_block_id = pnv_xive_get_block_id;

    xnc->notify = pnv_xive_notify;
    xpc->match_nvt = pnv_xive_match_nvt;
};

static const TypeInfo pnv_xive_info = {
    .name          = TYPE_PNV_XIVE,
    .parent        = TYPE_XIVE_ROUTER,
    .instance_init = pnv_xive_init,
    .instance_size = sizeof(PnvXive),
    .class_init    = pnv_xive_class_init,
    .class_size    = sizeof(PnvXiveClass),
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive_register_types(void)
{
    type_register_static(&pnv_xive_info);
}

type_init(pnv_xive_register_types)