1 /* 2 * QEMU PowerPC XIVE interrupt controller model 3 * 4 * Copyright (c) 2017-2019, IBM Corporation. 5 * 6 * This code is licensed under the GPL version 2 or later. See the 7 * COPYING file in the top-level directory. 8 */ 9 10 #include "qemu/osdep.h" 11 #include "qemu/log.h" 12 #include "qemu/module.h" 13 #include "qapi/error.h" 14 #include "target/ppc/cpu.h" 15 #include "sysemu/cpus.h" 16 #include "sysemu/dma.h" 17 #include "sysemu/reset.h" 18 #include "monitor/monitor.h" 19 #include "hw/ppc/fdt.h" 20 #include "hw/ppc/pnv.h" 21 #include "hw/ppc/pnv_core.h" 22 #include "hw/ppc/pnv_xscom.h" 23 #include "hw/ppc/pnv_xive.h" 24 #include "hw/ppc/xive_regs.h" 25 #include "hw/qdev-properties.h" 26 #include "hw/ppc/ppc.h" 27 28 #include <libfdt.h> 29 30 #include "pnv_xive_regs.h" 31 32 #undef XIVE_DEBUG 33 34 /* 35 * Virtual structures table (VST) 36 */ 37 #define SBE_PER_BYTE 4 38 39 typedef struct XiveVstInfo { 40 const char *name; 41 uint32_t size; 42 uint32_t max_blocks; 43 } XiveVstInfo; 44 45 static const XiveVstInfo vst_infos[] = { 46 [VST_TSEL_IVT] = { "EAT", sizeof(XiveEAS), 16 }, 47 [VST_TSEL_SBE] = { "SBE", 1, 16 }, 48 [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 }, 49 [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 }, 50 51 /* 52 * Interrupt fifo backing store table (not modeled) : 53 * 54 * 0 - IPI, 55 * 1 - HWD, 56 * 2 - First escalate, 57 * 3 - Second escalate, 58 * 4 - Redistribution, 59 * 5 - IPI cascaded queue ? 60 */ 61 [VST_TSEL_IRQ] = { "IRQ", 1, 6 }, 62 }; 63 64 #define xive_error(xive, fmt, ...) \ 65 qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n", \ 66 (xive)->chip->chip_id, ## __VA_ARGS__); 67 68 /* 69 * QEMU version of the GETFIELD/SETFIELD macros 70 * 71 * TODO: It might be better to use the existing extract64() and 72 * deposit64() but this means that all the register definitions will 73 * change and become incompatible with the ones found in skiboot. 74 * 75 * Keep it as it is for now until we find a common ground. 76 */ 77 static inline uint64_t GETFIELD(uint64_t mask, uint64_t word) 78 { 79 return (word & mask) >> ctz64(mask); 80 } 81 82 static inline uint64_t SETFIELD(uint64_t mask, uint64_t word, 83 uint64_t value) 84 { 85 return (word & ~mask) | ((value << ctz64(mask)) & mask); 86 } 87 88 /* 89 * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID 90 * field overrides the hardwired chip ID in the Powerbus operations 91 * and for CAM compares 92 */ 93 static uint8_t pnv_xive_block_id(PnvXive *xive) 94 { 95 uint8_t blk = xive->chip->chip_id; 96 uint64_t cfg_val = xive->regs[PC_TCTXT_CFG >> 3]; 97 98 if (cfg_val & PC_TCTXT_CHIPID_OVERRIDE) { 99 blk = GETFIELD(PC_TCTXT_CHIPID, cfg_val); 100 } 101 102 return blk; 103 } 104 105 /* 106 * Remote access to controllers. HW uses MMIOs. For now, a simple scan 107 * of the chips is good enough. 108 * 109 * TODO: Block scope support 110 */ 111 static PnvXive *pnv_xive_get_remote(uint8_t blk) 112 { 113 PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine()); 114 int i; 115 116 for (i = 0; i < pnv->num_chips; i++) { 117 Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]); 118 PnvXive *xive = &chip9->xive; 119 120 if (pnv_xive_block_id(xive) == blk) { 121 return xive; 122 } 123 } 124 return NULL; 125 } 126 127 /* 128 * VST accessors for SBE, EAT, ENDT, NVT 129 * 130 * Indirect VST tables are arrays of VSDs pointing to a page (of same 131 * size). Each page is a direct VST table. 132 */ 133 134 #define XIVE_VSD_SIZE 8 135 136 /* Indirect page size can be 4K, 64K, 2M, 16M. */ 137 static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift) 138 { 139 return page_shift == 12 || page_shift == 16 || 140 page_shift == 21 || page_shift == 24; 141 } 142 143 static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type, 144 uint64_t vsd, uint32_t idx) 145 { 146 const XiveVstInfo *info = &vst_infos[type]; 147 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; 148 uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12); 149 uint32_t idx_max; 150 151 idx_max = vst_tsize / info->size - 1; 152 if (idx > idx_max) { 153 #ifdef XIVE_DEBUG 154 xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?", 155 info->name, idx, idx_max); 156 #endif 157 return 0; 158 } 159 160 return vst_addr + idx * info->size; 161 } 162 163 static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type, 164 uint64_t vsd, uint32_t idx) 165 { 166 const XiveVstInfo *info = &vst_infos[type]; 167 uint64_t vsd_addr; 168 uint32_t vsd_idx; 169 uint32_t page_shift; 170 uint32_t vst_per_page; 171 172 /* Get the page size of the indirect table. */ 173 vsd_addr = vsd & VSD_ADDRESS_MASK; 174 vsd = ldq_be_dma(&address_space_memory, vsd_addr); 175 176 if (!(vsd & VSD_ADDRESS_MASK)) { 177 #ifdef XIVE_DEBUG 178 xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx); 179 #endif 180 return 0; 181 } 182 183 page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; 184 185 if (!pnv_xive_vst_page_size_allowed(page_shift)) { 186 xive_error(xive, "VST: invalid %s page shift %d", info->name, 187 page_shift); 188 return 0; 189 } 190 191 vst_per_page = (1ull << page_shift) / info->size; 192 vsd_idx = idx / vst_per_page; 193 194 /* Load the VSD we are looking for, if not already done */ 195 if (vsd_idx) { 196 vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE; 197 vsd = ldq_be_dma(&address_space_memory, vsd_addr); 198 199 if (!(vsd & VSD_ADDRESS_MASK)) { 200 #ifdef XIVE_DEBUG 201 xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx); 202 #endif 203 return 0; 204 } 205 206 /* 207 * Check that the pages have a consistent size across the 208 * indirect table 209 */ 210 if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) { 211 xive_error(xive, "VST: %s entry %x indirect page size differ !?", 212 info->name, idx); 213 return 0; 214 } 215 } 216 217 return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page)); 218 } 219 220 static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk, 221 uint32_t idx) 222 { 223 const XiveVstInfo *info = &vst_infos[type]; 224 uint64_t vsd; 225 226 if (blk >= info->max_blocks) { 227 xive_error(xive, "VST: invalid block id %d for VST %s %d !?", 228 blk, info->name, idx); 229 return 0; 230 } 231 232 vsd = xive->vsds[type][blk]; 233 234 /* Remote VST access */ 235 if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) { 236 xive = pnv_xive_get_remote(blk); 237 238 return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0; 239 } 240 241 if (VSD_INDIRECT & vsd) { 242 return pnv_xive_vst_addr_indirect(xive, type, vsd, idx); 243 } 244 245 return pnv_xive_vst_addr_direct(xive, type, vsd, idx); 246 } 247 248 static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk, 249 uint32_t idx, void *data) 250 { 251 const XiveVstInfo *info = &vst_infos[type]; 252 uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx); 253 254 if (!addr) { 255 return -1; 256 } 257 258 cpu_physical_memory_read(addr, data, info->size); 259 return 0; 260 } 261 262 #define XIVE_VST_WORD_ALL -1 263 264 static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk, 265 uint32_t idx, void *data, uint32_t word_number) 266 { 267 const XiveVstInfo *info = &vst_infos[type]; 268 uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx); 269 270 if (!addr) { 271 return -1; 272 } 273 274 if (word_number == XIVE_VST_WORD_ALL) { 275 cpu_physical_memory_write(addr, data, info->size); 276 } else { 277 cpu_physical_memory_write(addr + word_number * 4, 278 data + word_number * 4, 4); 279 } 280 return 0; 281 } 282 283 static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 284 XiveEND *end) 285 { 286 return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end); 287 } 288 289 static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 290 XiveEND *end, uint8_t word_number) 291 { 292 return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end, 293 word_number); 294 } 295 296 static int pnv_xive_end_update(PnvXive *xive) 297 { 298 uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID, 299 xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); 300 uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET, 301 xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); 302 int i; 303 uint64_t eqc_watch[4]; 304 305 for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) { 306 eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]); 307 } 308 309 return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch, 310 XIVE_VST_WORD_ALL); 311 } 312 313 static void pnv_xive_end_cache_load(PnvXive *xive) 314 { 315 uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID, 316 xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); 317 uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET, 318 xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); 319 uint64_t eqc_watch[4] = { 0 }; 320 int i; 321 322 if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) { 323 xive_error(xive, "VST: no END entry %x/%x !?", blk, idx); 324 } 325 326 for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) { 327 xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]); 328 } 329 } 330 331 static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 332 XiveNVT *nvt) 333 { 334 return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt); 335 } 336 337 static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 338 XiveNVT *nvt, uint8_t word_number) 339 { 340 return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt, 341 word_number); 342 } 343 344 static int pnv_xive_nvt_update(PnvXive *xive) 345 { 346 uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID, 347 xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); 348 uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET, 349 xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); 350 int i; 351 uint64_t vpc_watch[8]; 352 353 for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) { 354 vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]); 355 } 356 357 return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch, 358 XIVE_VST_WORD_ALL); 359 } 360 361 static void pnv_xive_nvt_cache_load(PnvXive *xive) 362 { 363 uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID, 364 xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); 365 uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET, 366 xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); 367 uint64_t vpc_watch[8] = { 0 }; 368 int i; 369 370 if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) { 371 xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx); 372 } 373 374 for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) { 375 xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]); 376 } 377 } 378 379 static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx, 380 XiveEAS *eas) 381 { 382 PnvXive *xive = PNV_XIVE(xrtr); 383 384 /* 385 * EAT lookups should be local to the IC 386 */ 387 if (pnv_xive_block_id(xive) != blk) { 388 xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx)); 389 return -1; 390 } 391 392 return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas); 393 } 394 395 /* 396 * One bit per thread id. The first register PC_THREAD_EN_REG0 covers 397 * the first cores 0-15 (normal) of the chip or 0-7 (fused). The 398 * second register covers cores 16-23 (normal) or 8-11 (fused). 399 */ 400 static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu) 401 { 402 int pir = ppc_cpu_pir(cpu); 403 uint32_t fc = PNV9_PIR2FUSEDCORE(pir); 404 uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1; 405 uint32_t bit = pir & 0x3f; 406 407 return xive->regs[reg >> 3] & PPC_BIT(bit); 408 } 409 410 static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format, 411 uint8_t nvt_blk, uint32_t nvt_idx, 412 bool cam_ignore, uint8_t priority, 413 uint32_t logic_serv, XiveTCTXMatch *match) 414 { 415 PnvXive *xive = PNV_XIVE(xptr); 416 PnvChip *chip = xive->chip; 417 int count = 0; 418 int i, j; 419 420 for (i = 0; i < chip->nr_cores; i++) { 421 PnvCore *pc = chip->cores[i]; 422 CPUCore *cc = CPU_CORE(pc); 423 424 for (j = 0; j < cc->nr_threads; j++) { 425 PowerPCCPU *cpu = pc->threads[j]; 426 XiveTCTX *tctx; 427 int ring; 428 429 if (!pnv_xive_is_cpu_enabled(xive, cpu)) { 430 continue; 431 } 432 433 tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); 434 435 /* 436 * Check the thread context CAM lines and record matches. 437 */ 438 ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, 439 nvt_idx, cam_ignore, logic_serv); 440 /* 441 * Save the context and follow on to catch duplicates, that we 442 * don't support yet. 443 */ 444 if (ring != -1) { 445 if (match->tctx) { 446 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a " 447 "thread context NVT %x/%x\n", 448 nvt_blk, nvt_idx); 449 return -1; 450 } 451 452 match->ring = ring; 453 match->tctx = tctx; 454 count++; 455 } 456 } 457 } 458 459 return count; 460 } 461 462 static uint8_t pnv_xive_get_block_id(XiveRouter *xrtr) 463 { 464 return pnv_xive_block_id(PNV_XIVE(xrtr)); 465 } 466 467 /* 468 * The TIMA MMIO space is shared among the chips and to identify the 469 * chip from which the access is being done, we extract the chip id 470 * from the PIR. 471 */ 472 static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu) 473 { 474 int pir = ppc_cpu_pir(cpu); 475 XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr; 476 PnvXive *xive = PNV_XIVE(xptr); 477 478 if (!pnv_xive_is_cpu_enabled(xive, cpu)) { 479 xive_error(xive, "IC: CPU %x is not enabled", pir); 480 } 481 return xive; 482 } 483 484 /* 485 * The internal sources (IPIs) of the interrupt controller have no 486 * knowledge of the XIVE chip on which they reside. Encode the block 487 * id in the source interrupt number before forwarding the source 488 * event notification to the Router. This is required on a multichip 489 * system. 490 */ 491 static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno) 492 { 493 PnvXive *xive = PNV_XIVE(xn); 494 uint8_t blk = pnv_xive_block_id(xive); 495 496 xive_router_notify(xn, XIVE_EAS(blk, srcno)); 497 } 498 499 /* 500 * XIVE helpers 501 */ 502 503 static uint64_t pnv_xive_vc_size(PnvXive *xive) 504 { 505 return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK; 506 } 507 508 static uint64_t pnv_xive_edt_shift(PnvXive *xive) 509 { 510 return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX); 511 } 512 513 static uint64_t pnv_xive_pc_size(PnvXive *xive) 514 { 515 return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK; 516 } 517 518 static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk) 519 { 520 uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk]; 521 uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12); 522 523 return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE; 524 } 525 526 /* 527 * Compute the number of entries per indirect subpage. 528 */ 529 static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type) 530 { 531 uint8_t blk = pnv_xive_block_id(xive); 532 uint64_t vsd = xive->vsds[type][blk]; 533 const XiveVstInfo *info = &vst_infos[type]; 534 uint64_t vsd_addr; 535 uint32_t page_shift; 536 537 /* For direct tables, fake a valid value */ 538 if (!(VSD_INDIRECT & vsd)) { 539 return 1; 540 } 541 542 /* Get the page size of the indirect table. */ 543 vsd_addr = vsd & VSD_ADDRESS_MASK; 544 vsd = ldq_be_dma(&address_space_memory, vsd_addr); 545 546 if (!(vsd & VSD_ADDRESS_MASK)) { 547 #ifdef XIVE_DEBUG 548 xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx); 549 #endif 550 return 0; 551 } 552 553 page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; 554 555 if (!pnv_xive_vst_page_size_allowed(page_shift)) { 556 xive_error(xive, "VST: invalid %s page shift %d", info->name, 557 page_shift); 558 return 0; 559 } 560 561 return (1ull << page_shift) / info->size; 562 } 563 564 /* 565 * EDT Table 566 * 567 * The Virtualization Controller MMIO region containing the IPI ESB 568 * pages and END ESB pages is sub-divided into "sets" which map 569 * portions of the VC region to the different ESB pages. It is 570 * configured at runtime through the EDT "Domain Table" to let the 571 * firmware decide how to split the VC address space between IPI ESB 572 * pages and END ESB pages. 573 */ 574 575 /* 576 * Computes the overall size of the IPI or the END ESB pages 577 */ 578 static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type) 579 { 580 uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive); 581 uint64_t size = 0; 582 int i; 583 584 for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) { 585 uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]); 586 587 if (edt_type == type) { 588 size += edt_size; 589 } 590 } 591 592 return size; 593 } 594 595 /* 596 * Maps an offset of the VC region in the IPI or END region using the 597 * layout defined by the EDT "Domaine Table" 598 */ 599 static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset, 600 uint64_t type) 601 { 602 int i; 603 uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive); 604 uint64_t edt_offset = vc_offset; 605 606 for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) { 607 uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]); 608 609 if (edt_type != type) { 610 edt_offset -= edt_size; 611 } 612 } 613 614 return edt_offset; 615 } 616 617 static void pnv_xive_edt_resize(PnvXive *xive) 618 { 619 uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI); 620 uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ); 621 622 memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size); 623 memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio); 624 625 memory_region_set_size(&xive->end_edt_mmio, end_edt_size); 626 memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio); 627 } 628 629 /* 630 * XIVE Table configuration. Only EDT is supported. 631 */ 632 static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val) 633 { 634 uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL; 635 uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]); 636 uint64_t *xive_table; 637 uint8_t max_index; 638 639 switch (tsel) { 640 case CQ_TAR_TSEL_BLK: 641 max_index = ARRAY_SIZE(xive->blk); 642 xive_table = xive->blk; 643 break; 644 case CQ_TAR_TSEL_MIG: 645 max_index = ARRAY_SIZE(xive->mig); 646 xive_table = xive->mig; 647 break; 648 case CQ_TAR_TSEL_EDT: 649 max_index = ARRAY_SIZE(xive->edt); 650 xive_table = xive->edt; 651 break; 652 case CQ_TAR_TSEL_VDT: 653 max_index = ARRAY_SIZE(xive->vdt); 654 xive_table = xive->vdt; 655 break; 656 default: 657 xive_error(xive, "IC: invalid table %d", (int) tsel); 658 return -1; 659 } 660 661 if (tsel_index >= max_index) { 662 xive_error(xive, "IC: invalid index %d", (int) tsel_index); 663 return -1; 664 } 665 666 xive_table[tsel_index] = val; 667 668 if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) { 669 xive->regs[CQ_TAR >> 3] = 670 SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index); 671 } 672 673 /* 674 * EDT configuration is complete. Resize the MMIO windows exposing 675 * the IPI and the END ESBs in the VC region. 676 */ 677 if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) { 678 pnv_xive_edt_resize(xive); 679 } 680 681 return 0; 682 } 683 684 /* 685 * Virtual Structure Tables (VST) configuration 686 */ 687 static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type, 688 uint8_t blk, uint64_t vsd) 689 { 690 XiveENDSource *end_xsrc = &xive->end_source; 691 XiveSource *xsrc = &xive->ipi_source; 692 const XiveVstInfo *info = &vst_infos[type]; 693 uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; 694 uint64_t vst_tsize = 1ull << page_shift; 695 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; 696 697 /* Basic checks */ 698 699 if (VSD_INDIRECT & vsd) { 700 if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) { 701 xive_error(xive, "VST: %s indirect tables are not enabled", 702 info->name); 703 return; 704 } 705 706 if (!pnv_xive_vst_page_size_allowed(page_shift)) { 707 xive_error(xive, "VST: invalid %s page shift %d", info->name, 708 page_shift); 709 return; 710 } 711 } 712 713 if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) { 714 xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with" 715 " page shift %d", info->name, vst_addr, page_shift); 716 return; 717 } 718 719 /* Record the table configuration (in SRAM on HW) */ 720 xive->vsds[type][blk] = vsd; 721 722 /* Now tune the models with the configuration provided by the FW */ 723 724 switch (type) { 725 case VST_TSEL_IVT: /* Nothing to be done */ 726 break; 727 728 case VST_TSEL_EQDT: 729 /* 730 * Backing store pages for the END. 731 * 732 * If the table is direct, we can compute the number of PQ 733 * entries provisioned by FW (such as skiboot) and resize the 734 * END ESB window accordingly. 735 */ 736 if (!(VSD_INDIRECT & vsd)) { 737 memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size) 738 * (1ull << xsrc->esb_shift)); 739 } 740 memory_region_add_subregion(&xive->end_edt_mmio, 0, 741 &end_xsrc->esb_mmio); 742 break; 743 744 case VST_TSEL_SBE: 745 /* 746 * Backing store pages for the source PQ bits. The model does 747 * not use these PQ bits backed in RAM because the XiveSource 748 * model has its own. 749 * 750 * If the table is direct, we can compute the number of PQ 751 * entries provisioned by FW (such as skiboot) and resize the 752 * ESB window accordingly. 753 */ 754 if (!(VSD_INDIRECT & vsd)) { 755 memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE 756 * (1ull << xsrc->esb_shift)); 757 } 758 memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio); 759 break; 760 761 case VST_TSEL_VPDT: /* Not modeled */ 762 case VST_TSEL_IRQ: /* Not modeled */ 763 /* 764 * These tables contains the backing store pages for the 765 * interrupt fifos of the VC sub-engine in case of overflow. 766 */ 767 break; 768 769 default: 770 g_assert_not_reached(); 771 } 772 } 773 774 /* 775 * Both PC and VC sub-engines are configured as each use the Virtual 776 * Structure Tables : SBE, EAS, END and NVT. 777 */ 778 static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine) 779 { 780 uint8_t mode = GETFIELD(VSD_MODE, vsd); 781 uint8_t type = GETFIELD(VST_TABLE_SELECT, 782 xive->regs[VC_VSD_TABLE_ADDR >> 3]); 783 uint8_t blk = GETFIELD(VST_TABLE_BLOCK, 784 xive->regs[VC_VSD_TABLE_ADDR >> 3]); 785 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; 786 787 if (type > VST_TSEL_IRQ) { 788 xive_error(xive, "VST: invalid table type %d", type); 789 return; 790 } 791 792 if (blk >= vst_infos[type].max_blocks) { 793 xive_error(xive, "VST: invalid block id %d for" 794 " %s table", blk, vst_infos[type].name); 795 return; 796 } 797 798 /* 799 * Only take the VC sub-engine configuration into account because 800 * the XiveRouter model combines both VC and PC sub-engines 801 */ 802 if (pc_engine) { 803 return; 804 } 805 806 if (!vst_addr) { 807 xive_error(xive, "VST: invalid %s table address", vst_infos[type].name); 808 return; 809 } 810 811 switch (mode) { 812 case VSD_MODE_FORWARD: 813 xive->vsds[type][blk] = vsd; 814 break; 815 816 case VSD_MODE_EXCLUSIVE: 817 pnv_xive_vst_set_exclusive(xive, type, blk, vsd); 818 break; 819 820 default: 821 xive_error(xive, "VST: unsupported table mode %d", mode); 822 return; 823 } 824 } 825 826 /* 827 * Interrupt controller MMIO region. The layout is compatible between 828 * 4K and 64K pages : 829 * 830 * Page 0 sub-engine BARs 831 * 0x000 - 0x3FF IC registers 832 * 0x400 - 0x7FF PC registers 833 * 0x800 - 0xFFF VC registers 834 * 835 * Page 1 Notify page (writes only) 836 * 0x000 - 0x7FF HW interrupt triggers (PSI, PHB) 837 * 0x800 - 0xFFF forwards and syncs 838 * 839 * Page 2 LSI Trigger page (writes only) (not modeled) 840 * Page 3 LSI SB EOI page (reads only) (not modeled) 841 * 842 * Page 4-7 indirect TIMA 843 */ 844 845 /* 846 * IC - registers MMIO 847 */ 848 static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset, 849 uint64_t val, unsigned size) 850 { 851 PnvXive *xive = PNV_XIVE(opaque); 852 MemoryRegion *sysmem = get_system_memory(); 853 uint32_t reg = offset >> 3; 854 bool is_chip0 = xive->chip->chip_id == 0; 855 856 switch (offset) { 857 858 /* 859 * XIVE CQ (PowerBus bridge) settings 860 */ 861 case CQ_MSGSND: /* msgsnd for doorbells */ 862 case CQ_FIRMASK_OR: /* FIR error reporting */ 863 break; 864 case CQ_PBI_CTL: 865 if (val & CQ_PBI_PC_64K) { 866 xive->pc_shift = 16; 867 } 868 if (val & CQ_PBI_VC_64K) { 869 xive->vc_shift = 16; 870 } 871 break; 872 case CQ_CFG_PB_GEN: /* PowerBus General Configuration */ 873 /* 874 * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode 875 */ 876 break; 877 878 /* 879 * XIVE Virtualization Controller settings 880 */ 881 case VC_GLOBAL_CONFIG: 882 break; 883 884 /* 885 * XIVE Presenter Controller settings 886 */ 887 case PC_GLOBAL_CONFIG: 888 /* 889 * PC_GCONF_CHIPID_OVR 890 * Overrides Int command Chip ID with the Chip ID field (DEBUG) 891 */ 892 break; 893 case PC_TCTXT_CFG: 894 /* 895 * TODO: block group support 896 */ 897 break; 898 case PC_TCTXT_TRACK: 899 /* 900 * PC_TCTXT_TRACK_EN: 901 * enable block tracking and exchange of block ownership 902 * information between Interrupt controllers 903 */ 904 break; 905 906 /* 907 * Misc settings 908 */ 909 case VC_SBC_CONFIG: /* Store EOI configuration */ 910 /* 911 * Configure store EOI if required by firwmare (skiboot has removed 912 * support recently though) 913 */ 914 if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) { 915 xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI; 916 } 917 break; 918 919 case VC_EQC_CONFIG: /* TODO: silent escalation */ 920 case VC_AIB_TX_ORDER_TAG2: /* relax ordering */ 921 break; 922 923 /* 924 * XIVE BAR settings (XSCOM only) 925 */ 926 case CQ_RST_CTL: 927 /* bit4: resets all BAR registers */ 928 break; 929 930 case CQ_IC_BAR: /* IC BAR. 8 pages */ 931 xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12; 932 if (!(val & CQ_IC_BAR_VALID)) { 933 xive->ic_base = 0; 934 if (xive->regs[reg] & CQ_IC_BAR_VALID) { 935 memory_region_del_subregion(&xive->ic_mmio, 936 &xive->ic_reg_mmio); 937 memory_region_del_subregion(&xive->ic_mmio, 938 &xive->ic_notify_mmio); 939 memory_region_del_subregion(&xive->ic_mmio, 940 &xive->ic_lsi_mmio); 941 memory_region_del_subregion(&xive->ic_mmio, 942 &xive->tm_indirect_mmio); 943 944 memory_region_del_subregion(sysmem, &xive->ic_mmio); 945 } 946 } else { 947 xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K); 948 if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) { 949 memory_region_add_subregion(sysmem, xive->ic_base, 950 &xive->ic_mmio); 951 952 memory_region_add_subregion(&xive->ic_mmio, 0, 953 &xive->ic_reg_mmio); 954 memory_region_add_subregion(&xive->ic_mmio, 955 1ul << xive->ic_shift, 956 &xive->ic_notify_mmio); 957 memory_region_add_subregion(&xive->ic_mmio, 958 2ul << xive->ic_shift, 959 &xive->ic_lsi_mmio); 960 memory_region_add_subregion(&xive->ic_mmio, 961 4ull << xive->ic_shift, 962 &xive->tm_indirect_mmio); 963 } 964 } 965 break; 966 967 case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */ 968 case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */ 969 xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12; 970 if (!(val & CQ_TM_BAR_VALID)) { 971 xive->tm_base = 0; 972 if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) { 973 memory_region_del_subregion(sysmem, &xive->tm_mmio); 974 } 975 } else { 976 xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K); 977 if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) { 978 memory_region_add_subregion(sysmem, xive->tm_base, 979 &xive->tm_mmio); 980 } 981 } 982 break; 983 984 case CQ_PC_BARM: 985 xive->regs[reg] = val; 986 memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive)); 987 break; 988 case CQ_PC_BAR: /* From 32M to 512G */ 989 if (!(val & CQ_PC_BAR_VALID)) { 990 xive->pc_base = 0; 991 if (xive->regs[reg] & CQ_PC_BAR_VALID) { 992 memory_region_del_subregion(sysmem, &xive->pc_mmio); 993 } 994 } else { 995 xive->pc_base = val & ~(CQ_PC_BAR_VALID); 996 if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) { 997 memory_region_add_subregion(sysmem, xive->pc_base, 998 &xive->pc_mmio); 999 } 1000 } 1001 break; 1002 1003 case CQ_VC_BARM: 1004 xive->regs[reg] = val; 1005 memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive)); 1006 break; 1007 case CQ_VC_BAR: /* From 64M to 4TB */ 1008 if (!(val & CQ_VC_BAR_VALID)) { 1009 xive->vc_base = 0; 1010 if (xive->regs[reg] & CQ_VC_BAR_VALID) { 1011 memory_region_del_subregion(sysmem, &xive->vc_mmio); 1012 } 1013 } else { 1014 xive->vc_base = val & ~(CQ_VC_BAR_VALID); 1015 if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) { 1016 memory_region_add_subregion(sysmem, xive->vc_base, 1017 &xive->vc_mmio); 1018 } 1019 } 1020 break; 1021 1022 /* 1023 * XIVE Table settings. 1024 */ 1025 case CQ_TAR: /* Table Address */ 1026 break; 1027 case CQ_TDR: /* Table Data */ 1028 pnv_xive_table_set_data(xive, val); 1029 break; 1030 1031 /* 1032 * XIVE VC & PC Virtual Structure Table settings 1033 */ 1034 case VC_VSD_TABLE_ADDR: 1035 case PC_VSD_TABLE_ADDR: /* Virtual table selector */ 1036 break; 1037 case VC_VSD_TABLE_DATA: /* Virtual table setting */ 1038 case PC_VSD_TABLE_DATA: 1039 pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA); 1040 break; 1041 1042 /* 1043 * Interrupt fifo overflow in memory backing store (Not modeled) 1044 */ 1045 case VC_IRQ_CONFIG_IPI: 1046 case VC_IRQ_CONFIG_HW: 1047 case VC_IRQ_CONFIG_CASCADE1: 1048 case VC_IRQ_CONFIG_CASCADE2: 1049 case VC_IRQ_CONFIG_REDIST: 1050 case VC_IRQ_CONFIG_IPI_CASC: 1051 break; 1052 1053 /* 1054 * XIVE hardware thread enablement 1055 */ 1056 case PC_THREAD_EN_REG0: /* Physical Thread Enable */ 1057 case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */ 1058 break; 1059 1060 case PC_THREAD_EN_REG0_SET: 1061 xive->regs[PC_THREAD_EN_REG0 >> 3] |= val; 1062 break; 1063 case PC_THREAD_EN_REG1_SET: 1064 xive->regs[PC_THREAD_EN_REG1 >> 3] |= val; 1065 break; 1066 case PC_THREAD_EN_REG0_CLR: 1067 xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val; 1068 break; 1069 case PC_THREAD_EN_REG1_CLR: 1070 xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val; 1071 break; 1072 1073 /* 1074 * Indirect TIMA access set up. Defines the PIR of the HW thread 1075 * to use. 1076 */ 1077 case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3: 1078 break; 1079 1080 /* 1081 * XIVE PC & VC cache updates for EAS, NVT and END 1082 */ 1083 case VC_IVC_SCRUB_MASK: 1084 case VC_IVC_SCRUB_TRIG: 1085 break; 1086 1087 case VC_EQC_CWATCH_SPEC: 1088 val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */ 1089 break; 1090 case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3: 1091 break; 1092 case VC_EQC_CWATCH_DAT0: 1093 /* writing to DATA0 triggers the cache write */ 1094 xive->regs[reg] = val; 1095 pnv_xive_end_update(xive); 1096 break; 1097 case VC_EQC_SCRUB_MASK: 1098 case VC_EQC_SCRUB_TRIG: 1099 /* 1100 * The scrubbing registers flush the cache in RAM and can also 1101 * invalidate. 1102 */ 1103 break; 1104 1105 case PC_VPC_CWATCH_SPEC: 1106 val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */ 1107 break; 1108 case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7: 1109 break; 1110 case PC_VPC_CWATCH_DAT0: 1111 /* writing to DATA0 triggers the cache write */ 1112 xive->regs[reg] = val; 1113 pnv_xive_nvt_update(xive); 1114 break; 1115 case PC_VPC_SCRUB_MASK: 1116 case PC_VPC_SCRUB_TRIG: 1117 /* 1118 * The scrubbing registers flush the cache in RAM and can also 1119 * invalidate. 1120 */ 1121 break; 1122 1123 1124 /* 1125 * XIVE PC & VC cache invalidation 1126 */ 1127 case PC_AT_KILL: 1128 break; 1129 case VC_AT_MACRO_KILL: 1130 break; 1131 case PC_AT_KILL_MASK: 1132 case VC_AT_MACRO_KILL_MASK: 1133 break; 1134 1135 default: 1136 xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset); 1137 return; 1138 } 1139 1140 xive->regs[reg] = val; 1141 } 1142 1143 static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size) 1144 { 1145 PnvXive *xive = PNV_XIVE(opaque); 1146 uint64_t val = 0; 1147 uint32_t reg = offset >> 3; 1148 1149 switch (offset) { 1150 case CQ_CFG_PB_GEN: 1151 case CQ_IC_BAR: 1152 case CQ_TM1_BAR: 1153 case CQ_TM2_BAR: 1154 case CQ_PC_BAR: 1155 case CQ_PC_BARM: 1156 case CQ_VC_BAR: 1157 case CQ_VC_BARM: 1158 case CQ_TAR: 1159 case CQ_TDR: 1160 case CQ_PBI_CTL: 1161 1162 case PC_TCTXT_CFG: 1163 case PC_TCTXT_TRACK: 1164 case PC_TCTXT_INDIR0: 1165 case PC_TCTXT_INDIR1: 1166 case PC_TCTXT_INDIR2: 1167 case PC_TCTXT_INDIR3: 1168 case PC_GLOBAL_CONFIG: 1169 1170 case PC_VPC_SCRUB_MASK: 1171 1172 case VC_GLOBAL_CONFIG: 1173 case VC_AIB_TX_ORDER_TAG2: 1174 1175 case VC_IRQ_CONFIG_IPI: 1176 case VC_IRQ_CONFIG_HW: 1177 case VC_IRQ_CONFIG_CASCADE1: 1178 case VC_IRQ_CONFIG_CASCADE2: 1179 case VC_IRQ_CONFIG_REDIST: 1180 case VC_IRQ_CONFIG_IPI_CASC: 1181 1182 case VC_EQC_SCRUB_MASK: 1183 case VC_IVC_SCRUB_MASK: 1184 case VC_SBC_CONFIG: 1185 case VC_AT_MACRO_KILL_MASK: 1186 case VC_VSD_TABLE_ADDR: 1187 case PC_VSD_TABLE_ADDR: 1188 case VC_VSD_TABLE_DATA: 1189 case PC_VSD_TABLE_DATA: 1190 case PC_THREAD_EN_REG0: 1191 case PC_THREAD_EN_REG1: 1192 val = xive->regs[reg]; 1193 break; 1194 1195 /* 1196 * XIVE hardware thread enablement 1197 */ 1198 case PC_THREAD_EN_REG0_SET: 1199 case PC_THREAD_EN_REG0_CLR: 1200 val = xive->regs[PC_THREAD_EN_REG0 >> 3]; 1201 break; 1202 case PC_THREAD_EN_REG1_SET: 1203 case PC_THREAD_EN_REG1_CLR: 1204 val = xive->regs[PC_THREAD_EN_REG1 >> 3]; 1205 break; 1206 1207 case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */ 1208 val = 0xffffff0000000000; 1209 break; 1210 1211 /* 1212 * XIVE PC & VC cache updates for EAS, NVT and END 1213 */ 1214 case VC_EQC_CWATCH_SPEC: 1215 xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT); 1216 val = xive->regs[reg]; 1217 break; 1218 case VC_EQC_CWATCH_DAT0: 1219 /* 1220 * Load DATA registers from cache with data requested by the 1221 * SPEC register 1222 */ 1223 pnv_xive_end_cache_load(xive); 1224 val = xive->regs[reg]; 1225 break; 1226 case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3: 1227 val = xive->regs[reg]; 1228 break; 1229 1230 case PC_VPC_CWATCH_SPEC: 1231 xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT); 1232 val = xive->regs[reg]; 1233 break; 1234 case PC_VPC_CWATCH_DAT0: 1235 /* 1236 * Load DATA registers from cache with data requested by the 1237 * SPEC register 1238 */ 1239 pnv_xive_nvt_cache_load(xive); 1240 val = xive->regs[reg]; 1241 break; 1242 case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7: 1243 val = xive->regs[reg]; 1244 break; 1245 1246 case PC_VPC_SCRUB_TRIG: 1247 case VC_IVC_SCRUB_TRIG: 1248 case VC_EQC_SCRUB_TRIG: 1249 xive->regs[reg] &= ~VC_SCRUB_VALID; 1250 val = xive->regs[reg]; 1251 break; 1252 1253 /* 1254 * XIVE PC & VC cache invalidation 1255 */ 1256 case PC_AT_KILL: 1257 xive->regs[reg] &= ~PC_AT_KILL_VALID; 1258 val = xive->regs[reg]; 1259 break; 1260 case VC_AT_MACRO_KILL: 1261 xive->regs[reg] &= ~VC_KILL_VALID; 1262 val = xive->regs[reg]; 1263 break; 1264 1265 /* 1266 * XIVE synchronisation 1267 */ 1268 case VC_EQC_CONFIG: 1269 val = VC_EQC_SYNC_MASK; 1270 break; 1271 1272 default: 1273 xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset); 1274 } 1275 1276 return val; 1277 } 1278 1279 static const MemoryRegionOps pnv_xive_ic_reg_ops = { 1280 .read = pnv_xive_ic_reg_read, 1281 .write = pnv_xive_ic_reg_write, 1282 .endianness = DEVICE_BIG_ENDIAN, 1283 .valid = { 1284 .min_access_size = 8, 1285 .max_access_size = 8, 1286 }, 1287 .impl = { 1288 .min_access_size = 8, 1289 .max_access_size = 8, 1290 }, 1291 }; 1292 1293 /* 1294 * IC - Notify MMIO port page (write only) 1295 */ 1296 #define PNV_XIVE_FORWARD_IPI 0x800 /* Forward IPI */ 1297 #define PNV_XIVE_FORWARD_HW 0x880 /* Forward HW */ 1298 #define PNV_XIVE_FORWARD_OS_ESC 0x900 /* Forward OS escalation */ 1299 #define PNV_XIVE_FORWARD_HW_ESC 0x980 /* Forward Hyp escalation */ 1300 #define PNV_XIVE_FORWARD_REDIS 0xa00 /* Forward Redistribution */ 1301 #define PNV_XIVE_RESERVED5 0xa80 /* Cache line 5 PowerBUS operation */ 1302 #define PNV_XIVE_RESERVED6 0xb00 /* Cache line 6 PowerBUS operation */ 1303 #define PNV_XIVE_RESERVED7 0xb80 /* Cache line 7 PowerBUS operation */ 1304 1305 /* VC synchronisation */ 1306 #define PNV_XIVE_SYNC_IPI 0xc00 /* Sync IPI */ 1307 #define PNV_XIVE_SYNC_HW 0xc80 /* Sync HW */ 1308 #define PNV_XIVE_SYNC_OS_ESC 0xd00 /* Sync OS escalation */ 1309 #define PNV_XIVE_SYNC_HW_ESC 0xd80 /* Sync Hyp escalation */ 1310 #define PNV_XIVE_SYNC_REDIS 0xe00 /* Sync Redistribution */ 1311 1312 /* PC synchronisation */ 1313 #define PNV_XIVE_SYNC_PULL 0xe80 /* Sync pull context */ 1314 #define PNV_XIVE_SYNC_PUSH 0xf00 /* Sync push context */ 1315 #define PNV_XIVE_SYNC_VPC 0xf80 /* Sync remove VPC store */ 1316 1317 static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val) 1318 { 1319 uint8_t blk; 1320 uint32_t idx; 1321 1322 if (val & XIVE_TRIGGER_END) { 1323 xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64, 1324 addr, val); 1325 return; 1326 } 1327 1328 /* 1329 * Forward the source event notification directly to the Router. 1330 * The source interrupt number should already be correctly encoded 1331 * with the chip block id by the sending device (PHB, PSI). 1332 */ 1333 blk = XIVE_EAS_BLOCK(val); 1334 idx = XIVE_EAS_INDEX(val); 1335 1336 xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx)); 1337 } 1338 1339 static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val, 1340 unsigned size) 1341 { 1342 PnvXive *xive = PNV_XIVE(opaque); 1343 1344 /* VC: HW triggers */ 1345 switch (addr) { 1346 case 0x000 ... 0x7FF: 1347 pnv_xive_ic_hw_trigger(opaque, addr, val); 1348 break; 1349 1350 /* VC: Forwarded IRQs */ 1351 case PNV_XIVE_FORWARD_IPI: 1352 case PNV_XIVE_FORWARD_HW: 1353 case PNV_XIVE_FORWARD_OS_ESC: 1354 case PNV_XIVE_FORWARD_HW_ESC: 1355 case PNV_XIVE_FORWARD_REDIS: 1356 /* TODO: forwarded IRQs. Should be like HW triggers */ 1357 xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64, 1358 addr, val); 1359 break; 1360 1361 /* VC syncs */ 1362 case PNV_XIVE_SYNC_IPI: 1363 case PNV_XIVE_SYNC_HW: 1364 case PNV_XIVE_SYNC_OS_ESC: 1365 case PNV_XIVE_SYNC_HW_ESC: 1366 case PNV_XIVE_SYNC_REDIS: 1367 break; 1368 1369 /* PC syncs */ 1370 case PNV_XIVE_SYNC_PULL: 1371 case PNV_XIVE_SYNC_PUSH: 1372 case PNV_XIVE_SYNC_VPC: 1373 break; 1374 1375 default: 1376 xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr); 1377 } 1378 } 1379 1380 static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr, 1381 unsigned size) 1382 { 1383 PnvXive *xive = PNV_XIVE(opaque); 1384 1385 /* loads are invalid */ 1386 xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr); 1387 return -1; 1388 } 1389 1390 static const MemoryRegionOps pnv_xive_ic_notify_ops = { 1391 .read = pnv_xive_ic_notify_read, 1392 .write = pnv_xive_ic_notify_write, 1393 .endianness = DEVICE_BIG_ENDIAN, 1394 .valid = { 1395 .min_access_size = 8, 1396 .max_access_size = 8, 1397 }, 1398 .impl = { 1399 .min_access_size = 8, 1400 .max_access_size = 8, 1401 }, 1402 }; 1403 1404 /* 1405 * IC - LSI MMIO handlers (not modeled) 1406 */ 1407 1408 static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr, 1409 uint64_t val, unsigned size) 1410 { 1411 PnvXive *xive = PNV_XIVE(opaque); 1412 1413 xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr); 1414 } 1415 1416 static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size) 1417 { 1418 PnvXive *xive = PNV_XIVE(opaque); 1419 1420 xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr); 1421 return -1; 1422 } 1423 1424 static const MemoryRegionOps pnv_xive_ic_lsi_ops = { 1425 .read = pnv_xive_ic_lsi_read, 1426 .write = pnv_xive_ic_lsi_write, 1427 .endianness = DEVICE_BIG_ENDIAN, 1428 .valid = { 1429 .min_access_size = 8, 1430 .max_access_size = 8, 1431 }, 1432 .impl = { 1433 .min_access_size = 8, 1434 .max_access_size = 8, 1435 }, 1436 }; 1437 1438 /* 1439 * IC - Indirect TIMA MMIO handlers 1440 */ 1441 1442 /* 1443 * When the TIMA is accessed from the indirect page, the thread id of 1444 * the target CPU is configured in the PC_TCTXT_INDIR0 register before 1445 * use. This is used for resets and for debug purpose also. 1446 */ 1447 static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive) 1448 { 1449 PnvChip *chip = xive->chip; 1450 uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3]; 1451 PowerPCCPU *cpu = NULL; 1452 int pir; 1453 1454 if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) { 1455 xive_error(xive, "IC: no indirect TIMA access in progress"); 1456 return NULL; 1457 } 1458 1459 pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir); 1460 cpu = pnv_chip_find_cpu(chip, pir); 1461 if (!cpu) { 1462 xive_error(xive, "IC: invalid PIR %x for indirect access", pir); 1463 return NULL; 1464 } 1465 1466 /* Check that HW thread is XIVE enabled */ 1467 if (!pnv_xive_is_cpu_enabled(xive, cpu)) { 1468 xive_error(xive, "IC: CPU %x is not enabled", pir); 1469 } 1470 1471 return XIVE_TCTX(pnv_cpu_state(cpu)->intc); 1472 } 1473 1474 static void xive_tm_indirect_write(void *opaque, hwaddr offset, 1475 uint64_t value, unsigned size) 1476 { 1477 XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque)); 1478 1479 xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size); 1480 } 1481 1482 static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset, 1483 unsigned size) 1484 { 1485 XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque)); 1486 1487 return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size); 1488 } 1489 1490 static const MemoryRegionOps xive_tm_indirect_ops = { 1491 .read = xive_tm_indirect_read, 1492 .write = xive_tm_indirect_write, 1493 .endianness = DEVICE_BIG_ENDIAN, 1494 .valid = { 1495 .min_access_size = 1, 1496 .max_access_size = 8, 1497 }, 1498 .impl = { 1499 .min_access_size = 1, 1500 .max_access_size = 8, 1501 }, 1502 }; 1503 1504 static void pnv_xive_tm_write(void *opaque, hwaddr offset, 1505 uint64_t value, unsigned size) 1506 { 1507 PowerPCCPU *cpu = POWERPC_CPU(current_cpu); 1508 PnvXive *xive = pnv_xive_tm_get_xive(cpu); 1509 XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); 1510 1511 xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size); 1512 } 1513 1514 static uint64_t pnv_xive_tm_read(void *opaque, hwaddr offset, unsigned size) 1515 { 1516 PowerPCCPU *cpu = POWERPC_CPU(current_cpu); 1517 PnvXive *xive = pnv_xive_tm_get_xive(cpu); 1518 XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); 1519 1520 return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size); 1521 } 1522 1523 const MemoryRegionOps pnv_xive_tm_ops = { 1524 .read = pnv_xive_tm_read, 1525 .write = pnv_xive_tm_write, 1526 .endianness = DEVICE_BIG_ENDIAN, 1527 .valid = { 1528 .min_access_size = 1, 1529 .max_access_size = 8, 1530 }, 1531 .impl = { 1532 .min_access_size = 1, 1533 .max_access_size = 8, 1534 }, 1535 }; 1536 1537 /* 1538 * Interrupt controller XSCOM region. 1539 */ 1540 static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size) 1541 { 1542 switch (addr >> 3) { 1543 case X_VC_EQC_CONFIG: 1544 /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */ 1545 return VC_EQC_SYNC_MASK; 1546 default: 1547 return pnv_xive_ic_reg_read(opaque, addr, size); 1548 } 1549 } 1550 1551 static void pnv_xive_xscom_write(void *opaque, hwaddr addr, 1552 uint64_t val, unsigned size) 1553 { 1554 pnv_xive_ic_reg_write(opaque, addr, val, size); 1555 } 1556 1557 static const MemoryRegionOps pnv_xive_xscom_ops = { 1558 .read = pnv_xive_xscom_read, 1559 .write = pnv_xive_xscom_write, 1560 .endianness = DEVICE_BIG_ENDIAN, 1561 .valid = { 1562 .min_access_size = 8, 1563 .max_access_size = 8, 1564 }, 1565 .impl = { 1566 .min_access_size = 8, 1567 .max_access_size = 8, 1568 } 1569 }; 1570 1571 /* 1572 * Virtualization Controller MMIO region containing the IPI and END ESB pages 1573 */ 1574 static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset, 1575 unsigned size) 1576 { 1577 PnvXive *xive = PNV_XIVE(opaque); 1578 uint64_t edt_index = offset >> pnv_xive_edt_shift(xive); 1579 uint64_t edt_type = 0; 1580 uint64_t edt_offset; 1581 MemTxResult result; 1582 AddressSpace *edt_as = NULL; 1583 uint64_t ret = -1; 1584 1585 if (edt_index < XIVE_TABLE_EDT_MAX) { 1586 edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]); 1587 } 1588 1589 switch (edt_type) { 1590 case CQ_TDR_EDT_IPI: 1591 edt_as = &xive->ipi_as; 1592 break; 1593 case CQ_TDR_EDT_EQ: 1594 edt_as = &xive->end_as; 1595 break; 1596 default: 1597 xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset); 1598 return -1; 1599 } 1600 1601 /* Remap the offset for the targeted address space */ 1602 edt_offset = pnv_xive_edt_offset(xive, offset, edt_type); 1603 1604 ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED, 1605 &result); 1606 1607 if (result != MEMTX_OK) { 1608 xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%" 1609 HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END", 1610 offset, edt_offset); 1611 return -1; 1612 } 1613 1614 return ret; 1615 } 1616 1617 static void pnv_xive_vc_write(void *opaque, hwaddr offset, 1618 uint64_t val, unsigned size) 1619 { 1620 PnvXive *xive = PNV_XIVE(opaque); 1621 uint64_t edt_index = offset >> pnv_xive_edt_shift(xive); 1622 uint64_t edt_type = 0; 1623 uint64_t edt_offset; 1624 MemTxResult result; 1625 AddressSpace *edt_as = NULL; 1626 1627 if (edt_index < XIVE_TABLE_EDT_MAX) { 1628 edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]); 1629 } 1630 1631 switch (edt_type) { 1632 case CQ_TDR_EDT_IPI: 1633 edt_as = &xive->ipi_as; 1634 break; 1635 case CQ_TDR_EDT_EQ: 1636 edt_as = &xive->end_as; 1637 break; 1638 default: 1639 xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx, 1640 offset); 1641 return; 1642 } 1643 1644 /* Remap the offset for the targeted address space */ 1645 edt_offset = pnv_xive_edt_offset(xive, offset, edt_type); 1646 1647 address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result); 1648 if (result != MEMTX_OK) { 1649 xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset); 1650 } 1651 } 1652 1653 static const MemoryRegionOps pnv_xive_vc_ops = { 1654 .read = pnv_xive_vc_read, 1655 .write = pnv_xive_vc_write, 1656 .endianness = DEVICE_BIG_ENDIAN, 1657 .valid = { 1658 .min_access_size = 8, 1659 .max_access_size = 8, 1660 }, 1661 .impl = { 1662 .min_access_size = 8, 1663 .max_access_size = 8, 1664 }, 1665 }; 1666 1667 /* 1668 * Presenter Controller MMIO region. The Virtualization Controller 1669 * updates the IPB in the NVT table when required. Not modeled. 1670 */ 1671 static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr, 1672 unsigned size) 1673 { 1674 PnvXive *xive = PNV_XIVE(opaque); 1675 1676 xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr); 1677 return -1; 1678 } 1679 1680 static void pnv_xive_pc_write(void *opaque, hwaddr addr, 1681 uint64_t value, unsigned size) 1682 { 1683 PnvXive *xive = PNV_XIVE(opaque); 1684 1685 xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr); 1686 } 1687 1688 static const MemoryRegionOps pnv_xive_pc_ops = { 1689 .read = pnv_xive_pc_read, 1690 .write = pnv_xive_pc_write, 1691 .endianness = DEVICE_BIG_ENDIAN, 1692 .valid = { 1693 .min_access_size = 8, 1694 .max_access_size = 8, 1695 }, 1696 .impl = { 1697 .min_access_size = 8, 1698 .max_access_size = 8, 1699 }, 1700 }; 1701 1702 static void xive_nvt_pic_print_info(XiveNVT *nvt, uint32_t nvt_idx, 1703 Monitor *mon) 1704 { 1705 uint8_t eq_blk = xive_get_field32(NVT_W1_EQ_BLOCK, nvt->w1); 1706 uint32_t eq_idx = xive_get_field32(NVT_W1_EQ_INDEX, nvt->w1); 1707 1708 if (!xive_nvt_is_valid(nvt)) { 1709 return; 1710 } 1711 1712 monitor_printf(mon, " %08x end:%02x/%04x IPB:%02x\n", nvt_idx, 1713 eq_blk, eq_idx, 1714 xive_get_field32(NVT_W4_IPB, nvt->w4)); 1715 } 1716 1717 void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon) 1718 { 1719 XiveRouter *xrtr = XIVE_ROUTER(xive); 1720 uint8_t blk = pnv_xive_block_id(xive); 1721 uint8_t chip_id = xive->chip->chip_id; 1722 uint32_t srcno0 = XIVE_EAS(blk, 0); 1723 uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk); 1724 XiveEAS eas; 1725 XiveEND end; 1726 XiveNVT nvt; 1727 int i; 1728 uint64_t xive_nvt_per_subpage; 1729 1730 monitor_printf(mon, "XIVE[%x] #%d Source %08x .. %08x\n", chip_id, blk, 1731 srcno0, srcno0 + nr_ipis - 1); 1732 xive_source_pic_print_info(&xive->ipi_source, srcno0, mon); 1733 1734 monitor_printf(mon, "XIVE[%x] #%d EAT %08x .. %08x\n", chip_id, blk, 1735 srcno0, srcno0 + nr_ipis - 1); 1736 for (i = 0; i < nr_ipis; i++) { 1737 if (xive_router_get_eas(xrtr, blk, i, &eas)) { 1738 break; 1739 } 1740 if (!xive_eas_is_masked(&eas)) { 1741 xive_eas_pic_print_info(&eas, i, mon); 1742 } 1743 } 1744 1745 monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk); 1746 i = 0; 1747 while (!xive_router_get_end(xrtr, blk, i, &end)) { 1748 xive_end_pic_print_info(&end, i++, mon); 1749 } 1750 1751 monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk); 1752 i = 0; 1753 while (!xive_router_get_end(xrtr, blk, i, &end)) { 1754 xive_end_eas_pic_print_info(&end, i++, mon); 1755 } 1756 1757 monitor_printf(mon, "XIVE[%x] #%d NVTT %08x .. %08x\n", chip_id, blk, 1758 0, XIVE_NVT_COUNT - 1); 1759 xive_nvt_per_subpage = pnv_xive_vst_per_subpage(xive, VST_TSEL_VPDT); 1760 for (i = 0; i < XIVE_NVT_COUNT; i += xive_nvt_per_subpage) { 1761 while (!xive_router_get_nvt(xrtr, blk, i, &nvt)) { 1762 xive_nvt_pic_print_info(&nvt, i++, mon); 1763 } 1764 } 1765 } 1766 1767 static void pnv_xive_reset(void *dev) 1768 { 1769 PnvXive *xive = PNV_XIVE(dev); 1770 XiveSource *xsrc = &xive->ipi_source; 1771 XiveENDSource *end_xsrc = &xive->end_source; 1772 1773 /* Default page size (Should be changed at runtime to 64k) */ 1774 xive->ic_shift = xive->vc_shift = xive->pc_shift = 12; 1775 1776 /* Clear subregions */ 1777 if (memory_region_is_mapped(&xsrc->esb_mmio)) { 1778 memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio); 1779 } 1780 1781 if (memory_region_is_mapped(&xive->ipi_edt_mmio)) { 1782 memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio); 1783 } 1784 1785 if (memory_region_is_mapped(&end_xsrc->esb_mmio)) { 1786 memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio); 1787 } 1788 1789 if (memory_region_is_mapped(&xive->end_edt_mmio)) { 1790 memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio); 1791 } 1792 } 1793 1794 static void pnv_xive_init(Object *obj) 1795 { 1796 PnvXive *xive = PNV_XIVE(obj); 1797 1798 object_initialize_child(obj, "ipi_source", &xive->ipi_source, 1799 TYPE_XIVE_SOURCE); 1800 object_initialize_child(obj, "end_source", &xive->end_source, 1801 TYPE_XIVE_END_SOURCE); 1802 } 1803 1804 /* 1805 * Maximum number of IRQs and ENDs supported by HW 1806 */ 1807 #define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE)) 1808 #define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE)) 1809 1810 static void pnv_xive_realize(DeviceState *dev, Error **errp) 1811 { 1812 PnvXive *xive = PNV_XIVE(dev); 1813 PnvXiveClass *pxc = PNV_XIVE_GET_CLASS(dev); 1814 XiveSource *xsrc = &xive->ipi_source; 1815 XiveENDSource *end_xsrc = &xive->end_source; 1816 Error *local_err = NULL; 1817 1818 pxc->parent_realize(dev, &local_err); 1819 if (local_err) { 1820 error_propagate(errp, local_err); 1821 return; 1822 } 1823 1824 assert(xive->chip); 1825 1826 /* 1827 * The XiveSource and XiveENDSource objects are realized with the 1828 * maximum allowed HW configuration. The ESB MMIO regions will be 1829 * resized dynamically when the controller is configured by the FW 1830 * to limit accesses to resources not provisioned. 1831 */ 1832 object_property_set_int(OBJECT(xsrc), PNV_XIVE_NR_IRQS, "nr-irqs", 1833 &error_fatal); 1834 object_property_set_link(OBJECT(xsrc), OBJECT(xive), "xive", 1835 &error_abort); 1836 qdev_realize(DEVICE(xsrc), NULL, &local_err); 1837 if (local_err) { 1838 error_propagate(errp, local_err); 1839 return; 1840 } 1841 1842 object_property_set_int(OBJECT(end_xsrc), PNV_XIVE_NR_ENDS, "nr-ends", 1843 &error_fatal); 1844 object_property_set_link(OBJECT(end_xsrc), OBJECT(xive), "xive", 1845 &error_abort); 1846 qdev_realize(DEVICE(end_xsrc), NULL, &local_err); 1847 if (local_err) { 1848 error_propagate(errp, local_err); 1849 return; 1850 } 1851 1852 /* Default page size. Generally changed at runtime to 64k */ 1853 xive->ic_shift = xive->vc_shift = xive->pc_shift = 12; 1854 1855 /* XSCOM region, used for initial configuration of the BARs */ 1856 memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops, 1857 xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3); 1858 1859 /* Interrupt controller MMIO regions */ 1860 memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic", 1861 PNV9_XIVE_IC_SIZE); 1862 1863 memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops, 1864 xive, "xive-ic-reg", 1 << xive->ic_shift); 1865 memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev), 1866 &pnv_xive_ic_notify_ops, 1867 xive, "xive-ic-notify", 1 << xive->ic_shift); 1868 1869 /* The Pervasive LSI trigger and EOI pages (not modeled) */ 1870 memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops, 1871 xive, "xive-ic-lsi", 2 << xive->ic_shift); 1872 1873 /* Thread Interrupt Management Area (Indirect) */ 1874 memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev), 1875 &xive_tm_indirect_ops, 1876 xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE); 1877 /* 1878 * Overall Virtualization Controller MMIO region containing the 1879 * IPI ESB pages and END ESB pages. The layout is defined by the 1880 * EDT "Domain table" and the accesses are dispatched using 1881 * address spaces for each. 1882 */ 1883 memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive, 1884 "xive-vc", PNV9_XIVE_VC_SIZE); 1885 1886 memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi", 1887 PNV9_XIVE_VC_SIZE); 1888 address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi"); 1889 memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end", 1890 PNV9_XIVE_VC_SIZE); 1891 address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end"); 1892 1893 /* 1894 * The MMIO windows exposing the IPI ESBs and the END ESBs in the 1895 * VC region. Their size is configured by the FW in the EDT table. 1896 */ 1897 memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0); 1898 memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0); 1899 1900 /* Presenter Controller MMIO region (not modeled) */ 1901 memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive, 1902 "xive-pc", PNV9_XIVE_PC_SIZE); 1903 1904 /* Thread Interrupt Management Area (Direct) */ 1905 memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &pnv_xive_tm_ops, 1906 xive, "xive-tima", PNV9_XIVE_TM_SIZE); 1907 1908 qemu_register_reset(pnv_xive_reset, dev); 1909 } 1910 1911 static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt, 1912 int xscom_offset) 1913 { 1914 const char compat[] = "ibm,power9-xive-x"; 1915 char *name; 1916 int offset; 1917 uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE; 1918 uint32_t reg[] = { 1919 cpu_to_be32(lpc_pcba), 1920 cpu_to_be32(PNV9_XSCOM_XIVE_SIZE) 1921 }; 1922 1923 name = g_strdup_printf("xive@%x", lpc_pcba); 1924 offset = fdt_add_subnode(fdt, xscom_offset, name); 1925 _FDT(offset); 1926 g_free(name); 1927 1928 _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)))); 1929 _FDT((fdt_setprop(fdt, offset, "compatible", compat, 1930 sizeof(compat)))); 1931 return 0; 1932 } 1933 1934 static Property pnv_xive_properties[] = { 1935 DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0), 1936 DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0), 1937 DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0), 1938 DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0), 1939 /* The PnvChip id identifies the XIVE interrupt controller. */ 1940 DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *), 1941 DEFINE_PROP_END_OF_LIST(), 1942 }; 1943 1944 static void pnv_xive_class_init(ObjectClass *klass, void *data) 1945 { 1946 DeviceClass *dc = DEVICE_CLASS(klass); 1947 PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); 1948 XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass); 1949 XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); 1950 XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass); 1951 PnvXiveClass *pxc = PNV_XIVE_CLASS(klass); 1952 1953 xdc->dt_xscom = pnv_xive_dt_xscom; 1954 1955 dc->desc = "PowerNV XIVE Interrupt Controller"; 1956 device_class_set_parent_realize(dc, pnv_xive_realize, &pxc->parent_realize); 1957 dc->realize = pnv_xive_realize; 1958 device_class_set_props(dc, pnv_xive_properties); 1959 1960 xrc->get_eas = pnv_xive_get_eas; 1961 xrc->get_end = pnv_xive_get_end; 1962 xrc->write_end = pnv_xive_write_end; 1963 xrc->get_nvt = pnv_xive_get_nvt; 1964 xrc->write_nvt = pnv_xive_write_nvt; 1965 xrc->get_block_id = pnv_xive_get_block_id; 1966 1967 xnc->notify = pnv_xive_notify; 1968 xpc->match_nvt = pnv_xive_match_nvt; 1969 }; 1970 1971 static const TypeInfo pnv_xive_info = { 1972 .name = TYPE_PNV_XIVE, 1973 .parent = TYPE_XIVE_ROUTER, 1974 .instance_init = pnv_xive_init, 1975 .instance_size = sizeof(PnvXive), 1976 .class_init = pnv_xive_class_init, 1977 .class_size = sizeof(PnvXiveClass), 1978 .interfaces = (InterfaceInfo[]) { 1979 { TYPE_PNV_XSCOM_INTERFACE }, 1980 { } 1981 } 1982 }; 1983 1984 static void pnv_xive_register_types(void) 1985 { 1986 type_register_static(&pnv_xive_info); 1987 } 1988 1989 type_init(pnv_xive_register_types) 1990