/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"

#include <libfdt.h>

#include "pnv_xive_regs.h"

#undef XIVE_DEBUG

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * Interrupt fifo backing store table (not modeled) :
     *
     *   0 - IPI,
     *   1 - HWD,
     *   2 - First escalate,
     *   3 - Second escalate,
     *   4 - Redistribution,
     *   5 - IPI cascaded queue ?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};

#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);

/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * TODO: It might be better to use the existing extract64() and
 * deposit64() but this means that all the register definitions will
 * change and become incompatible with the ones found in skiboot.
 *
 * Keep it as it is for now until we find a common ground.
 */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}

static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}

/*
 * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID
 * field overrides the hardwired chip ID in the Powerbus operations
 * and for CAM compares
 */
static uint8_t pnv_xive_block_id(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->regs[PC_TCTXT_CFG >> 3];

    if (cfg_val & PC_TCTXT_CHIPID_OVERRIDE) {
        blk = GETFIELD(PC_TCTXT_CHIPID, cfg_val);
    }

    return blk;
}

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive *pnv_xive_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
        PnvXive *xive = &chip9->xive;

        if (pnv_xive_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of same
 * size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8
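/*
 * Illustration of an indirect VST lookup, as implemented by
 * pnv_xive_vst_addr_indirect() below. Assuming 64K indirect pages
 * and the END table (a XiveEND entry is 32 bytes), an entry index
 * would be split as:
 *
 *   vst_per_page = (1ull << 16) / sizeof(XiveEND)   = 2048
 *   vsd_idx      = idx / vst_per_page                 (which VSD/page)
 *   offset       = idx % vst_per_page                 (entry in that page)
 *
 * The 64K page size is only an example; the actual page shift comes
 * from the VSD_TSIZE field of each VSD.
 */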
/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                   info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    vsd = ldq_be_dma(&address_space_memory, vsd_addr);

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        vsd = ldq_be_dma(&address_space_memory, vsd_addr);

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differs !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}

static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
                                  uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive_get_remote(blk);

        return xive ?
pnv_xive_vst_addr(xive, type, blk, idx) : 0; 239 } 240 241 if (VSD_INDIRECT & vsd) { 242 return pnv_xive_vst_addr_indirect(xive, type, vsd, idx); 243 } 244 245 return pnv_xive_vst_addr_direct(xive, type, vsd, idx); 246 } 247 248 static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk, 249 uint32_t idx, void *data) 250 { 251 const XiveVstInfo *info = &vst_infos[type]; 252 uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx); 253 254 if (!addr) { 255 return -1; 256 } 257 258 cpu_physical_memory_read(addr, data, info->size); 259 return 0; 260 } 261 262 #define XIVE_VST_WORD_ALL -1 263 264 static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk, 265 uint32_t idx, void *data, uint32_t word_number) 266 { 267 const XiveVstInfo *info = &vst_infos[type]; 268 uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx); 269 270 if (!addr) { 271 return -1; 272 } 273 274 if (word_number == XIVE_VST_WORD_ALL) { 275 cpu_physical_memory_write(addr, data, info->size); 276 } else { 277 cpu_physical_memory_write(addr + word_number * 4, 278 data + word_number * 4, 4); 279 } 280 return 0; 281 } 282 283 static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 284 XiveEND *end) 285 { 286 return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end); 287 } 288 289 static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 290 XiveEND *end, uint8_t word_number) 291 { 292 return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end, 293 word_number); 294 } 295 296 static int pnv_xive_end_update(PnvXive *xive) 297 { 298 uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID, 299 xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); 300 uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET, 301 xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); 302 int i; 303 uint64_t eqc_watch[4]; 304 305 for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) { 306 eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]); 307 } 308 309 return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch, 310 XIVE_VST_WORD_ALL); 311 } 312 313 static void pnv_xive_end_cache_load(PnvXive *xive) 314 { 315 uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID, 316 xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); 317 uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET, 318 xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); 319 uint64_t eqc_watch[4] = { 0 }; 320 int i; 321 322 if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) { 323 xive_error(xive, "VST: no END entry %x/%x !?", blk, idx); 324 } 325 326 for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) { 327 xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]); 328 } 329 } 330 331 static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 332 XiveNVT *nvt) 333 { 334 return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt); 335 } 336 337 static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 338 XiveNVT *nvt, uint8_t word_number) 339 { 340 return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt, 341 word_number); 342 } 343 344 static int pnv_xive_nvt_update(PnvXive *xive) 345 { 346 uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID, 347 xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); 348 uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET, 349 xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); 350 int i; 351 uint64_t vpc_watch[8]; 352 353 for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) { 354 vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]); 355 } 356 357 return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, 
vpc_watch, 358 XIVE_VST_WORD_ALL); 359 } 360 361 static void pnv_xive_nvt_cache_load(PnvXive *xive) 362 { 363 uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID, 364 xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); 365 uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET, 366 xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); 367 uint64_t vpc_watch[8] = { 0 }; 368 int i; 369 370 if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) { 371 xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx); 372 } 373 374 for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) { 375 xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]); 376 } 377 } 378 379 static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 380 XiveEAS *eas) 381 { 382 PnvXive *xive = PNV_XIVE(xrtr); 383 384 /* 385 * EAT lookups should be local to the IC 386 */ 387 if (pnv_xive_block_id(xive) != blk) { 388 xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx)); 389 return -1; 390 } 391 392 return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas); 393 } 394 395 /* 396 * One bit per thread id. The first register PC_THREAD_EN_REG0 covers 397 * the first cores 0-15 (normal) of the chip or 0-7 (fused). The 398 * second register covers cores 16-23 (normal) or 8-11 (fused). 399 */ 400 static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu) 401 { 402 int pir = ppc_cpu_pir(cpu); 403 uint32_t fc = PNV9_PIR2FUSEDCORE(pir); 404 uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1; 405 uint32_t bit = pir & 0x3f; 406 407 return xive->regs[reg >> 3] & PPC_BIT(bit); 408 } 409 410 static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format, 411 uint8_t nvt_blk, uint32_t nvt_idx, 412 bool cam_ignore, uint8_t priority, 413 uint32_t logic_serv, XiveTCTXMatch *match) 414 { 415 PnvXive *xive = PNV_XIVE(xptr); 416 PnvChip *chip = xive->chip; 417 int count = 0; 418 int i, j; 419 420 for (i = 0; i < chip->nr_cores; i++) { 421 PnvCore *pc = chip->cores[i]; 422 CPUCore *cc = CPU_CORE(pc); 423 424 for (j = 0; j < cc->nr_threads; j++) { 425 PowerPCCPU *cpu = pc->threads[j]; 426 XiveTCTX *tctx; 427 int ring; 428 429 if (!pnv_xive_is_cpu_enabled(xive, cpu)) { 430 continue; 431 } 432 433 tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); 434 435 /* 436 * Check the thread context CAM lines and record matches. 437 */ 438 ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, 439 nvt_idx, cam_ignore, logic_serv); 440 /* 441 * Save the context and follow on to catch duplicates, that we 442 * don't support yet. 443 */ 444 if (ring != -1) { 445 if (match->tctx) { 446 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a " 447 "thread context NVT %x/%x\n", 448 nvt_blk, nvt_idx); 449 return -1; 450 } 451 452 match->ring = ring; 453 match->tctx = tctx; 454 count++; 455 } 456 } 457 } 458 459 return count; 460 } 461 462 static uint8_t pnv_xive_get_block_id(XiveRouter *xrtr) 463 { 464 return pnv_xive_block_id(PNV_XIVE(xrtr)); 465 } 466 467 /* 468 * The TIMA MMIO space is shared among the chips and to identify the 469 * chip from which the access is being done, we extract the chip id 470 * from the PIR. 
471 */ 472 static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu) 473 { 474 int pir = ppc_cpu_pir(cpu); 475 PnvChip *chip; 476 PnvXive *xive; 477 478 chip = pnv_get_chip(PNV9_PIR2CHIP(pir)); 479 assert(chip); 480 xive = &PNV9_CHIP(chip)->xive; 481 482 if (!pnv_xive_is_cpu_enabled(xive, cpu)) { 483 xive_error(xive, "IC: CPU %x is not enabled", pir); 484 } 485 return xive; 486 } 487 488 /* 489 * The internal sources (IPIs) of the interrupt controller have no 490 * knowledge of the XIVE chip on which they reside. Encode the block 491 * id in the source interrupt number before forwarding the source 492 * event notification to the Router. This is required on a multichip 493 * system. 494 */ 495 static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno) 496 { 497 PnvXive *xive = PNV_XIVE(xn); 498 uint8_t blk = pnv_xive_block_id(xive); 499 500 xive_router_notify(xn, XIVE_EAS(blk, srcno)); 501 } 502 503 /* 504 * XIVE helpers 505 */ 506 507 static uint64_t pnv_xive_vc_size(PnvXive *xive) 508 { 509 return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK; 510 } 511 512 static uint64_t pnv_xive_edt_shift(PnvXive *xive) 513 { 514 return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX); 515 } 516 517 static uint64_t pnv_xive_pc_size(PnvXive *xive) 518 { 519 return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK; 520 } 521 522 static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk) 523 { 524 uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk]; 525 uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12); 526 527 return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE; 528 } 529 530 /* 531 * Compute the number of entries per indirect subpage. 532 */ 533 static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type) 534 { 535 uint8_t blk = pnv_xive_block_id(xive); 536 uint64_t vsd = xive->vsds[type][blk]; 537 const XiveVstInfo *info = &vst_infos[type]; 538 uint64_t vsd_addr; 539 uint32_t page_shift; 540 541 /* For direct tables, fake a valid value */ 542 if (!(VSD_INDIRECT & vsd)) { 543 return 1; 544 } 545 546 /* Get the page size of the indirect table. */ 547 vsd_addr = vsd & VSD_ADDRESS_MASK; 548 vsd = ldq_be_dma(&address_space_memory, vsd_addr); 549 550 if (!(vsd & VSD_ADDRESS_MASK)) { 551 #ifdef XIVE_DEBUG 552 xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx); 553 #endif 554 return 0; 555 } 556 557 page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; 558 559 if (!pnv_xive_vst_page_size_allowed(page_shift)) { 560 xive_error(xive, "VST: invalid %s page shift %d", info->name, 561 page_shift); 562 return 0; 563 } 564 565 return (1ull << page_shift) / info->size; 566 } 567 568 /* 569 * EDT Table 570 * 571 * The Virtualization Controller MMIO region containing the IPI ESB 572 * pages and END ESB pages is sub-divided into "sets" which map 573 * portions of the VC region to the different ESB pages. It is 574 * configured at runtime through the EDT "Domain Table" to let the 575 * firmware decide how to split the VC address space between IPI ESB 576 * pages and END ESB pages. 
577 */ 578 579 /* 580 * Computes the overall size of the IPI or the END ESB pages 581 */ 582 static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type) 583 { 584 uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive); 585 uint64_t size = 0; 586 int i; 587 588 for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) { 589 uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]); 590 591 if (edt_type == type) { 592 size += edt_size; 593 } 594 } 595 596 return size; 597 } 598 599 /* 600 * Maps an offset of the VC region in the IPI or END region using the 601 * layout defined by the EDT "Domaine Table" 602 */ 603 static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset, 604 uint64_t type) 605 { 606 int i; 607 uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive); 608 uint64_t edt_offset = vc_offset; 609 610 for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) { 611 uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]); 612 613 if (edt_type != type) { 614 edt_offset -= edt_size; 615 } 616 } 617 618 return edt_offset; 619 } 620 621 static void pnv_xive_edt_resize(PnvXive *xive) 622 { 623 uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI); 624 uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ); 625 626 memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size); 627 memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio); 628 629 memory_region_set_size(&xive->end_edt_mmio, end_edt_size); 630 memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio); 631 } 632 633 /* 634 * XIVE Table configuration. Only EDT is supported. 635 */ 636 static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val) 637 { 638 uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL; 639 uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]); 640 uint64_t *xive_table; 641 uint8_t max_index; 642 643 switch (tsel) { 644 case CQ_TAR_TSEL_BLK: 645 max_index = ARRAY_SIZE(xive->blk); 646 xive_table = xive->blk; 647 break; 648 case CQ_TAR_TSEL_MIG: 649 max_index = ARRAY_SIZE(xive->mig); 650 xive_table = xive->mig; 651 break; 652 case CQ_TAR_TSEL_EDT: 653 max_index = ARRAY_SIZE(xive->edt); 654 xive_table = xive->edt; 655 break; 656 case CQ_TAR_TSEL_VDT: 657 max_index = ARRAY_SIZE(xive->vdt); 658 xive_table = xive->vdt; 659 break; 660 default: 661 xive_error(xive, "IC: invalid table %d", (int) tsel); 662 return -1; 663 } 664 665 if (tsel_index >= max_index) { 666 xive_error(xive, "IC: invalid index %d", (int) tsel_index); 667 return -1; 668 } 669 670 xive_table[tsel_index] = val; 671 672 if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) { 673 xive->regs[CQ_TAR >> 3] = 674 SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index); 675 } 676 677 /* 678 * EDT configuration is complete. Resize the MMIO windows exposing 679 * the IPI and the END ESBs in the VC region. 
680 */ 681 if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) { 682 pnv_xive_edt_resize(xive); 683 } 684 685 return 0; 686 } 687 688 /* 689 * Virtual Structure Tables (VST) configuration 690 */ 691 static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type, 692 uint8_t blk, uint64_t vsd) 693 { 694 XiveENDSource *end_xsrc = &xive->end_source; 695 XiveSource *xsrc = &xive->ipi_source; 696 const XiveVstInfo *info = &vst_infos[type]; 697 uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; 698 uint64_t vst_tsize = 1ull << page_shift; 699 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; 700 701 /* Basic checks */ 702 703 if (VSD_INDIRECT & vsd) { 704 if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) { 705 xive_error(xive, "VST: %s indirect tables are not enabled", 706 info->name); 707 return; 708 } 709 710 if (!pnv_xive_vst_page_size_allowed(page_shift)) { 711 xive_error(xive, "VST: invalid %s page shift %d", info->name, 712 page_shift); 713 return; 714 } 715 } 716 717 if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) { 718 xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with" 719 " page shift %d", info->name, vst_addr, page_shift); 720 return; 721 } 722 723 /* Record the table configuration (in SRAM on HW) */ 724 xive->vsds[type][blk] = vsd; 725 726 /* Now tune the models with the configuration provided by the FW */ 727 728 switch (type) { 729 case VST_TSEL_IVT: /* Nothing to be done */ 730 break; 731 732 case VST_TSEL_EQDT: 733 /* 734 * Backing store pages for the END. 735 * 736 * If the table is direct, we can compute the number of PQ 737 * entries provisioned by FW (such as skiboot) and resize the 738 * END ESB window accordingly. 739 */ 740 if (!(VSD_INDIRECT & vsd)) { 741 memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size) 742 * (1ull << xsrc->esb_shift)); 743 } 744 memory_region_add_subregion(&xive->end_edt_mmio, 0, 745 &end_xsrc->esb_mmio); 746 break; 747 748 case VST_TSEL_SBE: 749 /* 750 * Backing store pages for the source PQ bits. The model does 751 * not use these PQ bits backed in RAM because the XiveSource 752 * model has its own. 753 * 754 * If the table is direct, we can compute the number of PQ 755 * entries provisioned by FW (such as skiboot) and resize the 756 * ESB window accordingly. 757 */ 758 if (!(VSD_INDIRECT & vsd)) { 759 memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE 760 * (1ull << xsrc->esb_shift)); 761 } 762 memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio); 763 break; 764 765 case VST_TSEL_VPDT: /* Not modeled */ 766 case VST_TSEL_IRQ: /* Not modeled */ 767 /* 768 * These tables contains the backing store pages for the 769 * interrupt fifos of the VC sub-engine in case of overflow. 770 */ 771 break; 772 773 default: 774 g_assert_not_reached(); 775 } 776 } 777 778 /* 779 * Both PC and VC sub-engines are configured as each use the Virtual 780 * Structure Tables : SBE, EAS, END and NVT. 
781 */ 782 static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine) 783 { 784 uint8_t mode = GETFIELD(VSD_MODE, vsd); 785 uint8_t type = GETFIELD(VST_TABLE_SELECT, 786 xive->regs[VC_VSD_TABLE_ADDR >> 3]); 787 uint8_t blk = GETFIELD(VST_TABLE_BLOCK, 788 xive->regs[VC_VSD_TABLE_ADDR >> 3]); 789 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; 790 791 if (type > VST_TSEL_IRQ) { 792 xive_error(xive, "VST: invalid table type %d", type); 793 return; 794 } 795 796 if (blk >= vst_infos[type].max_blocks) { 797 xive_error(xive, "VST: invalid block id %d for" 798 " %s table", blk, vst_infos[type].name); 799 return; 800 } 801 802 /* 803 * Only take the VC sub-engine configuration into account because 804 * the XiveRouter model combines both VC and PC sub-engines 805 */ 806 if (pc_engine) { 807 return; 808 } 809 810 if (!vst_addr) { 811 xive_error(xive, "VST: invalid %s table address", vst_infos[type].name); 812 return; 813 } 814 815 switch (mode) { 816 case VSD_MODE_FORWARD: 817 xive->vsds[type][blk] = vsd; 818 break; 819 820 case VSD_MODE_EXCLUSIVE: 821 pnv_xive_vst_set_exclusive(xive, type, blk, vsd); 822 break; 823 824 default: 825 xive_error(xive, "VST: unsupported table mode %d", mode); 826 return; 827 } 828 } 829 830 /* 831 * Interrupt controller MMIO region. The layout is compatible between 832 * 4K and 64K pages : 833 * 834 * Page 0 sub-engine BARs 835 * 0x000 - 0x3FF IC registers 836 * 0x400 - 0x7FF PC registers 837 * 0x800 - 0xFFF VC registers 838 * 839 * Page 1 Notify page (writes only) 840 * 0x000 - 0x7FF HW interrupt triggers (PSI, PHB) 841 * 0x800 - 0xFFF forwards and syncs 842 * 843 * Page 2 LSI Trigger page (writes only) (not modeled) 844 * Page 3 LSI SB EOI page (reads only) (not modeled) 845 * 846 * Page 4-7 indirect TIMA 847 */ 848 849 /* 850 * IC - registers MMIO 851 */ 852 static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset, 853 uint64_t val, unsigned size) 854 { 855 PnvXive *xive = PNV_XIVE(opaque); 856 MemoryRegion *sysmem = get_system_memory(); 857 uint32_t reg = offset >> 3; 858 bool is_chip0 = xive->chip->chip_id == 0; 859 860 switch (offset) { 861 862 /* 863 * XIVE CQ (PowerBus bridge) settings 864 */ 865 case CQ_MSGSND: /* msgsnd for doorbells */ 866 case CQ_FIRMASK_OR: /* FIR error reporting */ 867 break; 868 case CQ_PBI_CTL: 869 if (val & CQ_PBI_PC_64K) { 870 xive->pc_shift = 16; 871 } 872 if (val & CQ_PBI_VC_64K) { 873 xive->vc_shift = 16; 874 } 875 break; 876 case CQ_CFG_PB_GEN: /* PowerBus General Configuration */ 877 /* 878 * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode 879 */ 880 break; 881 882 /* 883 * XIVE Virtualization Controller settings 884 */ 885 case VC_GLOBAL_CONFIG: 886 break; 887 888 /* 889 * XIVE Presenter Controller settings 890 */ 891 case PC_GLOBAL_CONFIG: 892 /* 893 * PC_GCONF_CHIPID_OVR 894 * Overrides Int command Chip ID with the Chip ID field (DEBUG) 895 */ 896 break; 897 case PC_TCTXT_CFG: 898 /* 899 * TODO: block group support 900 */ 901 break; 902 case PC_TCTXT_TRACK: 903 /* 904 * PC_TCTXT_TRACK_EN: 905 * enable block tracking and exchange of block ownership 906 * information between Interrupt controllers 907 */ 908 break; 909 910 /* 911 * Misc settings 912 */ 913 case VC_SBC_CONFIG: /* Store EOI configuration */ 914 /* 915 * Configure store EOI if required by firwmare (skiboot has removed 916 * support recently though) 917 */ 918 if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) { 919 xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI; 920 } 921 break; 922 923 case VC_EQC_CONFIG: /* TODO: silent escalation 
*/ 924 case VC_AIB_TX_ORDER_TAG2: /* relax ordering */ 925 break; 926 927 /* 928 * XIVE BAR settings (XSCOM only) 929 */ 930 case CQ_RST_CTL: 931 /* bit4: resets all BAR registers */ 932 break; 933 934 case CQ_IC_BAR: /* IC BAR. 8 pages */ 935 xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12; 936 if (!(val & CQ_IC_BAR_VALID)) { 937 xive->ic_base = 0; 938 if (xive->regs[reg] & CQ_IC_BAR_VALID) { 939 memory_region_del_subregion(&xive->ic_mmio, 940 &xive->ic_reg_mmio); 941 memory_region_del_subregion(&xive->ic_mmio, 942 &xive->ic_notify_mmio); 943 memory_region_del_subregion(&xive->ic_mmio, 944 &xive->ic_lsi_mmio); 945 memory_region_del_subregion(&xive->ic_mmio, 946 &xive->tm_indirect_mmio); 947 948 memory_region_del_subregion(sysmem, &xive->ic_mmio); 949 } 950 } else { 951 xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K); 952 if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) { 953 memory_region_add_subregion(sysmem, xive->ic_base, 954 &xive->ic_mmio); 955 956 memory_region_add_subregion(&xive->ic_mmio, 0, 957 &xive->ic_reg_mmio); 958 memory_region_add_subregion(&xive->ic_mmio, 959 1ul << xive->ic_shift, 960 &xive->ic_notify_mmio); 961 memory_region_add_subregion(&xive->ic_mmio, 962 2ul << xive->ic_shift, 963 &xive->ic_lsi_mmio); 964 memory_region_add_subregion(&xive->ic_mmio, 965 4ull << xive->ic_shift, 966 &xive->tm_indirect_mmio); 967 } 968 } 969 break; 970 971 case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */ 972 case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */ 973 xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12; 974 if (!(val & CQ_TM_BAR_VALID)) { 975 xive->tm_base = 0; 976 if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) { 977 memory_region_del_subregion(sysmem, &xive->tm_mmio); 978 } 979 } else { 980 xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K); 981 if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) { 982 memory_region_add_subregion(sysmem, xive->tm_base, 983 &xive->tm_mmio); 984 } 985 } 986 break; 987 988 case CQ_PC_BARM: 989 xive->regs[reg] = val; 990 memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive)); 991 break; 992 case CQ_PC_BAR: /* From 32M to 512G */ 993 if (!(val & CQ_PC_BAR_VALID)) { 994 xive->pc_base = 0; 995 if (xive->regs[reg] & CQ_PC_BAR_VALID) { 996 memory_region_del_subregion(sysmem, &xive->pc_mmio); 997 } 998 } else { 999 xive->pc_base = val & ~(CQ_PC_BAR_VALID); 1000 if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) { 1001 memory_region_add_subregion(sysmem, xive->pc_base, 1002 &xive->pc_mmio); 1003 } 1004 } 1005 break; 1006 1007 case CQ_VC_BARM: 1008 xive->regs[reg] = val; 1009 memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive)); 1010 break; 1011 case CQ_VC_BAR: /* From 64M to 4TB */ 1012 if (!(val & CQ_VC_BAR_VALID)) { 1013 xive->vc_base = 0; 1014 if (xive->regs[reg] & CQ_VC_BAR_VALID) { 1015 memory_region_del_subregion(sysmem, &xive->vc_mmio); 1016 } 1017 } else { 1018 xive->vc_base = val & ~(CQ_VC_BAR_VALID); 1019 if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) { 1020 memory_region_add_subregion(sysmem, xive->vc_base, 1021 &xive->vc_mmio); 1022 } 1023 } 1024 break; 1025 1026 /* 1027 * XIVE Table settings. 
1028 */ 1029 case CQ_TAR: /* Table Address */ 1030 break; 1031 case CQ_TDR: /* Table Data */ 1032 pnv_xive_table_set_data(xive, val); 1033 break; 1034 1035 /* 1036 * XIVE VC & PC Virtual Structure Table settings 1037 */ 1038 case VC_VSD_TABLE_ADDR: 1039 case PC_VSD_TABLE_ADDR: /* Virtual table selector */ 1040 break; 1041 case VC_VSD_TABLE_DATA: /* Virtual table setting */ 1042 case PC_VSD_TABLE_DATA: 1043 pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA); 1044 break; 1045 1046 /* 1047 * Interrupt fifo overflow in memory backing store (Not modeled) 1048 */ 1049 case VC_IRQ_CONFIG_IPI: 1050 case VC_IRQ_CONFIG_HW: 1051 case VC_IRQ_CONFIG_CASCADE1: 1052 case VC_IRQ_CONFIG_CASCADE2: 1053 case VC_IRQ_CONFIG_REDIST: 1054 case VC_IRQ_CONFIG_IPI_CASC: 1055 break; 1056 1057 /* 1058 * XIVE hardware thread enablement 1059 */ 1060 case PC_THREAD_EN_REG0: /* Physical Thread Enable */ 1061 case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */ 1062 break; 1063 1064 case PC_THREAD_EN_REG0_SET: 1065 xive->regs[PC_THREAD_EN_REG0 >> 3] |= val; 1066 break; 1067 case PC_THREAD_EN_REG1_SET: 1068 xive->regs[PC_THREAD_EN_REG1 >> 3] |= val; 1069 break; 1070 case PC_THREAD_EN_REG0_CLR: 1071 xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val; 1072 break; 1073 case PC_THREAD_EN_REG1_CLR: 1074 xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val; 1075 break; 1076 1077 /* 1078 * Indirect TIMA access set up. Defines the PIR of the HW thread 1079 * to use. 1080 */ 1081 case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3: 1082 break; 1083 1084 /* 1085 * XIVE PC & VC cache updates for EAS, NVT and END 1086 */ 1087 case VC_IVC_SCRUB_MASK: 1088 case VC_IVC_SCRUB_TRIG: 1089 break; 1090 1091 case VC_EQC_CWATCH_SPEC: 1092 val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */ 1093 break; 1094 case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3: 1095 break; 1096 case VC_EQC_CWATCH_DAT0: 1097 /* writing to DATA0 triggers the cache write */ 1098 xive->regs[reg] = val; 1099 pnv_xive_end_update(xive); 1100 break; 1101 case VC_EQC_SCRUB_MASK: 1102 case VC_EQC_SCRUB_TRIG: 1103 /* 1104 * The scrubbing registers flush the cache in RAM and can also 1105 * invalidate. 1106 */ 1107 break; 1108 1109 case PC_VPC_CWATCH_SPEC: 1110 val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */ 1111 break; 1112 case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7: 1113 break; 1114 case PC_VPC_CWATCH_DAT0: 1115 /* writing to DATA0 triggers the cache write */ 1116 xive->regs[reg] = val; 1117 pnv_xive_nvt_update(xive); 1118 break; 1119 case PC_VPC_SCRUB_MASK: 1120 case PC_VPC_SCRUB_TRIG: 1121 /* 1122 * The scrubbing registers flush the cache in RAM and can also 1123 * invalidate. 
1124 */ 1125 break; 1126 1127 1128 /* 1129 * XIVE PC & VC cache invalidation 1130 */ 1131 case PC_AT_KILL: 1132 break; 1133 case VC_AT_MACRO_KILL: 1134 break; 1135 case PC_AT_KILL_MASK: 1136 case VC_AT_MACRO_KILL_MASK: 1137 break; 1138 1139 default: 1140 xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset); 1141 return; 1142 } 1143 1144 xive->regs[reg] = val; 1145 } 1146 1147 static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size) 1148 { 1149 PnvXive *xive = PNV_XIVE(opaque); 1150 uint64_t val = 0; 1151 uint32_t reg = offset >> 3; 1152 1153 switch (offset) { 1154 case CQ_CFG_PB_GEN: 1155 case CQ_IC_BAR: 1156 case CQ_TM1_BAR: 1157 case CQ_TM2_BAR: 1158 case CQ_PC_BAR: 1159 case CQ_PC_BARM: 1160 case CQ_VC_BAR: 1161 case CQ_VC_BARM: 1162 case CQ_TAR: 1163 case CQ_TDR: 1164 case CQ_PBI_CTL: 1165 1166 case PC_TCTXT_CFG: 1167 case PC_TCTXT_TRACK: 1168 case PC_TCTXT_INDIR0: 1169 case PC_TCTXT_INDIR1: 1170 case PC_TCTXT_INDIR2: 1171 case PC_TCTXT_INDIR3: 1172 case PC_GLOBAL_CONFIG: 1173 1174 case PC_VPC_SCRUB_MASK: 1175 1176 case VC_GLOBAL_CONFIG: 1177 case VC_AIB_TX_ORDER_TAG2: 1178 1179 case VC_IRQ_CONFIG_IPI: 1180 case VC_IRQ_CONFIG_HW: 1181 case VC_IRQ_CONFIG_CASCADE1: 1182 case VC_IRQ_CONFIG_CASCADE2: 1183 case VC_IRQ_CONFIG_REDIST: 1184 case VC_IRQ_CONFIG_IPI_CASC: 1185 1186 case VC_EQC_SCRUB_MASK: 1187 case VC_IVC_SCRUB_MASK: 1188 case VC_SBC_CONFIG: 1189 case VC_AT_MACRO_KILL_MASK: 1190 case VC_VSD_TABLE_ADDR: 1191 case PC_VSD_TABLE_ADDR: 1192 case VC_VSD_TABLE_DATA: 1193 case PC_VSD_TABLE_DATA: 1194 case PC_THREAD_EN_REG0: 1195 case PC_THREAD_EN_REG1: 1196 val = xive->regs[reg]; 1197 break; 1198 1199 /* 1200 * XIVE hardware thread enablement 1201 */ 1202 case PC_THREAD_EN_REG0_SET: 1203 case PC_THREAD_EN_REG0_CLR: 1204 val = xive->regs[PC_THREAD_EN_REG0 >> 3]; 1205 break; 1206 case PC_THREAD_EN_REG1_SET: 1207 case PC_THREAD_EN_REG1_CLR: 1208 val = xive->regs[PC_THREAD_EN_REG1 >> 3]; 1209 break; 1210 1211 case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */ 1212 val = 0xffffff0000000000; 1213 break; 1214 1215 /* 1216 * XIVE PC & VC cache updates for EAS, NVT and END 1217 */ 1218 case VC_EQC_CWATCH_SPEC: 1219 xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT); 1220 val = xive->regs[reg]; 1221 break; 1222 case VC_EQC_CWATCH_DAT0: 1223 /* 1224 * Load DATA registers from cache with data requested by the 1225 * SPEC register 1226 */ 1227 pnv_xive_end_cache_load(xive); 1228 val = xive->regs[reg]; 1229 break; 1230 case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3: 1231 val = xive->regs[reg]; 1232 break; 1233 1234 case PC_VPC_CWATCH_SPEC: 1235 xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT); 1236 val = xive->regs[reg]; 1237 break; 1238 case PC_VPC_CWATCH_DAT0: 1239 /* 1240 * Load DATA registers from cache with data requested by the 1241 * SPEC register 1242 */ 1243 pnv_xive_nvt_cache_load(xive); 1244 val = xive->regs[reg]; 1245 break; 1246 case PC_VPC_CWATCH_DAT1 ... 
PC_VPC_CWATCH_DAT7: 1247 val = xive->regs[reg]; 1248 break; 1249 1250 case PC_VPC_SCRUB_TRIG: 1251 case VC_IVC_SCRUB_TRIG: 1252 case VC_EQC_SCRUB_TRIG: 1253 xive->regs[reg] &= ~VC_SCRUB_VALID; 1254 val = xive->regs[reg]; 1255 break; 1256 1257 /* 1258 * XIVE PC & VC cache invalidation 1259 */ 1260 case PC_AT_KILL: 1261 xive->regs[reg] &= ~PC_AT_KILL_VALID; 1262 val = xive->regs[reg]; 1263 break; 1264 case VC_AT_MACRO_KILL: 1265 xive->regs[reg] &= ~VC_KILL_VALID; 1266 val = xive->regs[reg]; 1267 break; 1268 1269 /* 1270 * XIVE synchronisation 1271 */ 1272 case VC_EQC_CONFIG: 1273 val = VC_EQC_SYNC_MASK; 1274 break; 1275 1276 default: 1277 xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset); 1278 } 1279 1280 return val; 1281 } 1282 1283 static const MemoryRegionOps pnv_xive_ic_reg_ops = { 1284 .read = pnv_xive_ic_reg_read, 1285 .write = pnv_xive_ic_reg_write, 1286 .endianness = DEVICE_BIG_ENDIAN, 1287 .valid = { 1288 .min_access_size = 8, 1289 .max_access_size = 8, 1290 }, 1291 .impl = { 1292 .min_access_size = 8, 1293 .max_access_size = 8, 1294 }, 1295 }; 1296 1297 /* 1298 * IC - Notify MMIO port page (write only) 1299 */ 1300 #define PNV_XIVE_FORWARD_IPI 0x800 /* Forward IPI */ 1301 #define PNV_XIVE_FORWARD_HW 0x880 /* Forward HW */ 1302 #define PNV_XIVE_FORWARD_OS_ESC 0x900 /* Forward OS escalation */ 1303 #define PNV_XIVE_FORWARD_HW_ESC 0x980 /* Forward Hyp escalation */ 1304 #define PNV_XIVE_FORWARD_REDIS 0xa00 /* Forward Redistribution */ 1305 #define PNV_XIVE_RESERVED5 0xa80 /* Cache line 5 PowerBUS operation */ 1306 #define PNV_XIVE_RESERVED6 0xb00 /* Cache line 6 PowerBUS operation */ 1307 #define PNV_XIVE_RESERVED7 0xb80 /* Cache line 7 PowerBUS operation */ 1308 1309 /* VC synchronisation */ 1310 #define PNV_XIVE_SYNC_IPI 0xc00 /* Sync IPI */ 1311 #define PNV_XIVE_SYNC_HW 0xc80 /* Sync HW */ 1312 #define PNV_XIVE_SYNC_OS_ESC 0xd00 /* Sync OS escalation */ 1313 #define PNV_XIVE_SYNC_HW_ESC 0xd80 /* Sync Hyp escalation */ 1314 #define PNV_XIVE_SYNC_REDIS 0xe00 /* Sync Redistribution */ 1315 1316 /* PC synchronisation */ 1317 #define PNV_XIVE_SYNC_PULL 0xe80 /* Sync pull context */ 1318 #define PNV_XIVE_SYNC_PUSH 0xf00 /* Sync push context */ 1319 #define PNV_XIVE_SYNC_VPC 0xf80 /* Sync remove VPC store */ 1320 1321 static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val) 1322 { 1323 uint8_t blk; 1324 uint32_t idx; 1325 1326 if (val & XIVE_TRIGGER_END) { 1327 xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64, 1328 addr, val); 1329 return; 1330 } 1331 1332 /* 1333 * Forward the source event notification directly to the Router. 1334 * The source interrupt number should already be correctly encoded 1335 * with the chip block id by the sending device (PHB, PSI). 1336 */ 1337 blk = XIVE_EAS_BLOCK(val); 1338 idx = XIVE_EAS_INDEX(val); 1339 1340 xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx)); 1341 } 1342 1343 static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val, 1344 unsigned size) 1345 { 1346 PnvXive *xive = PNV_XIVE(opaque); 1347 1348 /* VC: HW triggers */ 1349 switch (addr) { 1350 case 0x000 ... 0x7FF: 1351 pnv_xive_ic_hw_trigger(opaque, addr, val); 1352 break; 1353 1354 /* VC: Forwarded IRQs */ 1355 case PNV_XIVE_FORWARD_IPI: 1356 case PNV_XIVE_FORWARD_HW: 1357 case PNV_XIVE_FORWARD_OS_ESC: 1358 case PNV_XIVE_FORWARD_HW_ESC: 1359 case PNV_XIVE_FORWARD_REDIS: 1360 /* TODO: forwarded IRQs. 
Should be like HW triggers */
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}

static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* loads are invalid */
    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - LSI MMIO handlers (not modeled)
 */

static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Indirect TIMA MMIO handlers
 */

/*
 * When the TIMA is accessed from the indirect page, the thread id of
 * the target CPU is configured in the PC_TCTXT_INDIR0 register before
 * use. This is used for resets and for debug purposes.
1450 */ 1451 static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive) 1452 { 1453 PnvChip *chip = xive->chip; 1454 uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3]; 1455 PowerPCCPU *cpu = NULL; 1456 int pir; 1457 1458 if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) { 1459 xive_error(xive, "IC: no indirect TIMA access in progress"); 1460 return NULL; 1461 } 1462 1463 pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir); 1464 cpu = pnv_chip_find_cpu(chip, pir); 1465 if (!cpu) { 1466 xive_error(xive, "IC: invalid PIR %x for indirect access", pir); 1467 return NULL; 1468 } 1469 1470 /* Check that HW thread is XIVE enabled */ 1471 if (!pnv_xive_is_cpu_enabled(xive, cpu)) { 1472 xive_error(xive, "IC: CPU %x is not enabled", pir); 1473 } 1474 1475 return XIVE_TCTX(pnv_cpu_state(cpu)->intc); 1476 } 1477 1478 static void xive_tm_indirect_write(void *opaque, hwaddr offset, 1479 uint64_t value, unsigned size) 1480 { 1481 XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque)); 1482 1483 xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size); 1484 } 1485 1486 static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset, 1487 unsigned size) 1488 { 1489 XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque)); 1490 1491 return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size); 1492 } 1493 1494 static const MemoryRegionOps xive_tm_indirect_ops = { 1495 .read = xive_tm_indirect_read, 1496 .write = xive_tm_indirect_write, 1497 .endianness = DEVICE_BIG_ENDIAN, 1498 .valid = { 1499 .min_access_size = 1, 1500 .max_access_size = 8, 1501 }, 1502 .impl = { 1503 .min_access_size = 1, 1504 .max_access_size = 8, 1505 }, 1506 }; 1507 1508 static void pnv_xive_tm_write(void *opaque, hwaddr offset, 1509 uint64_t value, unsigned size) 1510 { 1511 PowerPCCPU *cpu = POWERPC_CPU(current_cpu); 1512 PnvXive *xive = pnv_xive_tm_get_xive(cpu); 1513 XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); 1514 1515 xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size); 1516 } 1517 1518 static uint64_t pnv_xive_tm_read(void *opaque, hwaddr offset, unsigned size) 1519 { 1520 PowerPCCPU *cpu = POWERPC_CPU(current_cpu); 1521 PnvXive *xive = pnv_xive_tm_get_xive(cpu); 1522 XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); 1523 1524 return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size); 1525 } 1526 1527 const MemoryRegionOps pnv_xive_tm_ops = { 1528 .read = pnv_xive_tm_read, 1529 .write = pnv_xive_tm_write, 1530 .endianness = DEVICE_BIG_ENDIAN, 1531 .valid = { 1532 .min_access_size = 1, 1533 .max_access_size = 8, 1534 }, 1535 .impl = { 1536 .min_access_size = 1, 1537 .max_access_size = 8, 1538 }, 1539 }; 1540 1541 /* 1542 * Interrupt controller XSCOM region. 1543 */ 1544 static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size) 1545 { 1546 switch (addr >> 3) { 1547 case X_VC_EQC_CONFIG: 1548 /* FIXME (skiboot): This is the only XSCOM load. Bizarre. 
*/ 1549 return VC_EQC_SYNC_MASK; 1550 default: 1551 return pnv_xive_ic_reg_read(opaque, addr, size); 1552 } 1553 } 1554 1555 static void pnv_xive_xscom_write(void *opaque, hwaddr addr, 1556 uint64_t val, unsigned size) 1557 { 1558 pnv_xive_ic_reg_write(opaque, addr, val, size); 1559 } 1560 1561 static const MemoryRegionOps pnv_xive_xscom_ops = { 1562 .read = pnv_xive_xscom_read, 1563 .write = pnv_xive_xscom_write, 1564 .endianness = DEVICE_BIG_ENDIAN, 1565 .valid = { 1566 .min_access_size = 8, 1567 .max_access_size = 8, 1568 }, 1569 .impl = { 1570 .min_access_size = 8, 1571 .max_access_size = 8, 1572 } 1573 }; 1574 1575 /* 1576 * Virtualization Controller MMIO region containing the IPI and END ESB pages 1577 */ 1578 static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset, 1579 unsigned size) 1580 { 1581 PnvXive *xive = PNV_XIVE(opaque); 1582 uint64_t edt_index = offset >> pnv_xive_edt_shift(xive); 1583 uint64_t edt_type = 0; 1584 uint64_t edt_offset; 1585 MemTxResult result; 1586 AddressSpace *edt_as = NULL; 1587 uint64_t ret = -1; 1588 1589 if (edt_index < XIVE_TABLE_EDT_MAX) { 1590 edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]); 1591 } 1592 1593 switch (edt_type) { 1594 case CQ_TDR_EDT_IPI: 1595 edt_as = &xive->ipi_as; 1596 break; 1597 case CQ_TDR_EDT_EQ: 1598 edt_as = &xive->end_as; 1599 break; 1600 default: 1601 xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset); 1602 return -1; 1603 } 1604 1605 /* Remap the offset for the targeted address space */ 1606 edt_offset = pnv_xive_edt_offset(xive, offset, edt_type); 1607 1608 ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED, 1609 &result); 1610 1611 if (result != MEMTX_OK) { 1612 xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%" 1613 HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END", 1614 offset, edt_offset); 1615 return -1; 1616 } 1617 1618 return ret; 1619 } 1620 1621 static void pnv_xive_vc_write(void *opaque, hwaddr offset, 1622 uint64_t val, unsigned size) 1623 { 1624 PnvXive *xive = PNV_XIVE(opaque); 1625 uint64_t edt_index = offset >> pnv_xive_edt_shift(xive); 1626 uint64_t edt_type = 0; 1627 uint64_t edt_offset; 1628 MemTxResult result; 1629 AddressSpace *edt_as = NULL; 1630 1631 if (edt_index < XIVE_TABLE_EDT_MAX) { 1632 edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]); 1633 } 1634 1635 switch (edt_type) { 1636 case CQ_TDR_EDT_IPI: 1637 edt_as = &xive->ipi_as; 1638 break; 1639 case CQ_TDR_EDT_EQ: 1640 edt_as = &xive->end_as; 1641 break; 1642 default: 1643 xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx, 1644 offset); 1645 return; 1646 } 1647 1648 /* Remap the offset for the targeted address space */ 1649 edt_offset = pnv_xive_edt_offset(xive, offset, edt_type); 1650 1651 address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result); 1652 if (result != MEMTX_OK) { 1653 xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset); 1654 } 1655 } 1656 1657 static const MemoryRegionOps pnv_xive_vc_ops = { 1658 .read = pnv_xive_vc_read, 1659 .write = pnv_xive_vc_write, 1660 .endianness = DEVICE_BIG_ENDIAN, 1661 .valid = { 1662 .min_access_size = 8, 1663 .max_access_size = 8, 1664 }, 1665 .impl = { 1666 .min_access_size = 8, 1667 .max_access_size = 8, 1668 }, 1669 }; 1670 1671 /* 1672 * Presenter Controller MMIO region. The Virtualization Controller 1673 * updates the IPB in the NVT table when required. Not modeled. 
1674 */ 1675 static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr, 1676 unsigned size) 1677 { 1678 PnvXive *xive = PNV_XIVE(opaque); 1679 1680 xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr); 1681 return -1; 1682 } 1683 1684 static void pnv_xive_pc_write(void *opaque, hwaddr addr, 1685 uint64_t value, unsigned size) 1686 { 1687 PnvXive *xive = PNV_XIVE(opaque); 1688 1689 xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr); 1690 } 1691 1692 static const MemoryRegionOps pnv_xive_pc_ops = { 1693 .read = pnv_xive_pc_read, 1694 .write = pnv_xive_pc_write, 1695 .endianness = DEVICE_BIG_ENDIAN, 1696 .valid = { 1697 .min_access_size = 8, 1698 .max_access_size = 8, 1699 }, 1700 .impl = { 1701 .min_access_size = 8, 1702 .max_access_size = 8, 1703 }, 1704 }; 1705 1706 static void xive_nvt_pic_print_info(XiveNVT *nvt, uint32_t nvt_idx, 1707 Monitor *mon) 1708 { 1709 uint8_t eq_blk = xive_get_field32(NVT_W1_EQ_BLOCK, nvt->w1); 1710 uint32_t eq_idx = xive_get_field32(NVT_W1_EQ_INDEX, nvt->w1); 1711 1712 if (!xive_nvt_is_valid(nvt)) { 1713 return; 1714 } 1715 1716 monitor_printf(mon, " %08x end:%02x/%04x IPB:%02x\n", nvt_idx, 1717 eq_blk, eq_idx, 1718 xive_get_field32(NVT_W4_IPB, nvt->w4)); 1719 } 1720 1721 void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon) 1722 { 1723 XiveRouter *xrtr = XIVE_ROUTER(xive); 1724 uint8_t blk = pnv_xive_block_id(xive); 1725 uint8_t chip_id = xive->chip->chip_id; 1726 uint32_t srcno0 = XIVE_EAS(blk, 0); 1727 uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk); 1728 XiveEAS eas; 1729 XiveEND end; 1730 XiveNVT nvt; 1731 int i; 1732 uint64_t xive_nvt_per_subpage; 1733 1734 monitor_printf(mon, "XIVE[%x] #%d Source %08x .. %08x\n", chip_id, blk, 1735 srcno0, srcno0 + nr_ipis - 1); 1736 xive_source_pic_print_info(&xive->ipi_source, srcno0, mon); 1737 1738 monitor_printf(mon, "XIVE[%x] #%d EAT %08x .. %08x\n", chip_id, blk, 1739 srcno0, srcno0 + nr_ipis - 1); 1740 for (i = 0; i < nr_ipis; i++) { 1741 if (xive_router_get_eas(xrtr, blk, i, &eas)) { 1742 break; 1743 } 1744 if (!xive_eas_is_masked(&eas)) { 1745 xive_eas_pic_print_info(&eas, i, mon); 1746 } 1747 } 1748 1749 monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk); 1750 i = 0; 1751 while (!xive_router_get_end(xrtr, blk, i, &end)) { 1752 xive_end_pic_print_info(&end, i++, mon); 1753 } 1754 1755 monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk); 1756 i = 0; 1757 while (!xive_router_get_end(xrtr, blk, i, &end)) { 1758 xive_end_eas_pic_print_info(&end, i++, mon); 1759 } 1760 1761 monitor_printf(mon, "XIVE[%x] #%d NVTT %08x .. 
%08x\n", chip_id, blk, 1762 0, XIVE_NVT_COUNT - 1); 1763 xive_nvt_per_subpage = pnv_xive_vst_per_subpage(xive, VST_TSEL_VPDT); 1764 for (i = 0; i < XIVE_NVT_COUNT; i += xive_nvt_per_subpage) { 1765 while (!xive_router_get_nvt(xrtr, blk, i, &nvt)) { 1766 xive_nvt_pic_print_info(&nvt, i++, mon); 1767 } 1768 } 1769 } 1770 1771 static void pnv_xive_reset(void *dev) 1772 { 1773 PnvXive *xive = PNV_XIVE(dev); 1774 XiveSource *xsrc = &xive->ipi_source; 1775 XiveENDSource *end_xsrc = &xive->end_source; 1776 1777 /* Default page size (Should be changed at runtime to 64k) */ 1778 xive->ic_shift = xive->vc_shift = xive->pc_shift = 12; 1779 1780 /* Clear subregions */ 1781 if (memory_region_is_mapped(&xsrc->esb_mmio)) { 1782 memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio); 1783 } 1784 1785 if (memory_region_is_mapped(&xive->ipi_edt_mmio)) { 1786 memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio); 1787 } 1788 1789 if (memory_region_is_mapped(&end_xsrc->esb_mmio)) { 1790 memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio); 1791 } 1792 1793 if (memory_region_is_mapped(&xive->end_edt_mmio)) { 1794 memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio); 1795 } 1796 } 1797 1798 static void pnv_xive_init(Object *obj) 1799 { 1800 PnvXive *xive = PNV_XIVE(obj); 1801 1802 object_initialize_child(obj, "ipi_source", &xive->ipi_source, 1803 sizeof(xive->ipi_source), TYPE_XIVE_SOURCE, 1804 &error_abort, NULL); 1805 object_initialize_child(obj, "end_source", &xive->end_source, 1806 sizeof(xive->end_source), TYPE_XIVE_END_SOURCE, 1807 &error_abort, NULL); 1808 } 1809 1810 /* 1811 * Maximum number of IRQs and ENDs supported by HW 1812 */ 1813 #define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE)) 1814 #define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE)) 1815 1816 static void pnv_xive_realize(DeviceState *dev, Error **errp) 1817 { 1818 PnvXive *xive = PNV_XIVE(dev); 1819 XiveSource *xsrc = &xive->ipi_source; 1820 XiveENDSource *end_xsrc = &xive->end_source; 1821 Error *local_err = NULL; 1822 1823 assert(xive->chip); 1824 1825 /* 1826 * The XiveSource and XiveENDSource objects are realized with the 1827 * maximum allowed HW configuration. The ESB MMIO regions will be 1828 * resized dynamically when the controller is configured by the FW 1829 * to limit accesses to resources not provisioned. 1830 */ 1831 object_property_set_int(OBJECT(xsrc), PNV_XIVE_NR_IRQS, "nr-irqs", 1832 &error_fatal); 1833 object_property_set_link(OBJECT(xsrc), OBJECT(xive), "xive", 1834 &error_abort); 1835 object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err); 1836 if (local_err) { 1837 error_propagate(errp, local_err); 1838 return; 1839 } 1840 1841 object_property_set_int(OBJECT(end_xsrc), PNV_XIVE_NR_ENDS, "nr-ends", 1842 &error_fatal); 1843 object_property_set_link(OBJECT(end_xsrc), OBJECT(xive), "xive", 1844 &error_abort); 1845 object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err); 1846 if (local_err) { 1847 error_propagate(errp, local_err); 1848 return; 1849 } 1850 1851 /* Default page size. 
Generally changed at runtime to 64k */ 1852 xive->ic_shift = xive->vc_shift = xive->pc_shift = 12; 1853 1854 /* XSCOM region, used for initial configuration of the BARs */ 1855 memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops, 1856 xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3); 1857 1858 /* Interrupt controller MMIO regions */ 1859 memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic", 1860 PNV9_XIVE_IC_SIZE); 1861 1862 memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops, 1863 xive, "xive-ic-reg", 1 << xive->ic_shift); 1864 memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev), 1865 &pnv_xive_ic_notify_ops, 1866 xive, "xive-ic-notify", 1 << xive->ic_shift); 1867 1868 /* The Pervasive LSI trigger and EOI pages (not modeled) */ 1869 memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops, 1870 xive, "xive-ic-lsi", 2 << xive->ic_shift); 1871 1872 /* Thread Interrupt Management Area (Indirect) */ 1873 memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev), 1874 &xive_tm_indirect_ops, 1875 xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE); 1876 /* 1877 * Overall Virtualization Controller MMIO region containing the 1878 * IPI ESB pages and END ESB pages. The layout is defined by the 1879 * EDT "Domain table" and the accesses are dispatched using 1880 * address spaces for each. 1881 */ 1882 memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive, 1883 "xive-vc", PNV9_XIVE_VC_SIZE); 1884 1885 memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi", 1886 PNV9_XIVE_VC_SIZE); 1887 address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi"); 1888 memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end", 1889 PNV9_XIVE_VC_SIZE); 1890 address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end"); 1891 1892 /* 1893 * The MMIO windows exposing the IPI ESBs and the END ESBs in the 1894 * VC region. Their size is configured by the FW in the EDT table. 
1895 */ 1896 memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0); 1897 memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0); 1898 1899 /* Presenter Controller MMIO region (not modeled) */ 1900 memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive, 1901 "xive-pc", PNV9_XIVE_PC_SIZE); 1902 1903 /* Thread Interrupt Management Area (Direct) */ 1904 memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &pnv_xive_tm_ops, 1905 xive, "xive-tima", PNV9_XIVE_TM_SIZE); 1906 1907 qemu_register_reset(pnv_xive_reset, dev); 1908 } 1909 1910 static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt, 1911 int xscom_offset) 1912 { 1913 const char compat[] = "ibm,power9-xive-x"; 1914 char *name; 1915 int offset; 1916 uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE; 1917 uint32_t reg[] = { 1918 cpu_to_be32(lpc_pcba), 1919 cpu_to_be32(PNV9_XSCOM_XIVE_SIZE) 1920 }; 1921 1922 name = g_strdup_printf("xive@%x", lpc_pcba); 1923 offset = fdt_add_subnode(fdt, xscom_offset, name); 1924 _FDT(offset); 1925 g_free(name); 1926 1927 _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)))); 1928 _FDT((fdt_setprop(fdt, offset, "compatible", compat, 1929 sizeof(compat)))); 1930 return 0; 1931 } 1932 1933 static Property pnv_xive_properties[] = { 1934 DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0), 1935 DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0), 1936 DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0), 1937 DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0), 1938 /* The PnvChip id identifies the XIVE interrupt controller. */ 1939 DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *), 1940 DEFINE_PROP_END_OF_LIST(), 1941 }; 1942 1943 static void pnv_xive_class_init(ObjectClass *klass, void *data) 1944 { 1945 DeviceClass *dc = DEVICE_CLASS(klass); 1946 PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); 1947 XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass); 1948 XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); 1949 XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass); 1950 1951 xdc->dt_xscom = pnv_xive_dt_xscom; 1952 1953 dc->desc = "PowerNV XIVE Interrupt Controller"; 1954 dc->realize = pnv_xive_realize; 1955 dc->props = pnv_xive_properties; 1956 1957 xrc->get_eas = pnv_xive_get_eas; 1958 xrc->get_end = pnv_xive_get_end; 1959 xrc->write_end = pnv_xive_write_end; 1960 xrc->get_nvt = pnv_xive_get_nvt; 1961 xrc->write_nvt = pnv_xive_write_nvt; 1962 xrc->get_block_id = pnv_xive_get_block_id; 1963 1964 xnc->notify = pnv_xive_notify; 1965 xpc->match_nvt = pnv_xive_match_nvt; 1966 }; 1967 1968 static const TypeInfo pnv_xive_info = { 1969 .name = TYPE_PNV_XIVE, 1970 .parent = TYPE_XIVE_ROUTER, 1971 .instance_init = pnv_xive_init, 1972 .instance_size = sizeof(PnvXive), 1973 .class_init = pnv_xive_class_init, 1974 .interfaces = (InterfaceInfo[]) { 1975 { TYPE_PNV_XSCOM_INTERFACE }, 1976 { } 1977 } 1978 }; 1979 1980 static void pnv_xive_register_types(void) 1981 { 1982 type_register_static(&pnv_xive_info); 1983 } 1984 1985 type_init(pnv_xive_register_types) 1986