/*
 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
 *
 * Copyright (c) 2019-2022, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/ppc/xive2_regs.h"
#include "hw/ppc/ppc.h"
#include "hw/qdev-properties.h"
#include "sysemu/reset.h"

#include <libfdt.h>

#include "pnv_xive2_regs.h"

#undef XIVE2_DEBUG

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {

    [VST_EAS]  = { "EAT",  sizeof(Xive2Eas),  16 },
    [VST_ESB]  = { "ESB",  1,                 16 },
    [VST_END]  = { "ENDT", sizeof(Xive2End),  16 },

    [VST_NVP]  = { "NVPT", sizeof(Xive2Nvp),  16 },
    [VST_NVG]  = { "NVGT", sizeof(Xive2Nvgc), 16 },
    [VST_NVC]  = { "NVCT", sizeof(Xive2Nvgc), 16 },

    [VST_IC]   = { "IC",   1 /* ? */,         16 }, /* Topology # */
    [VST_SYNC] = { "SYNC", 1 /* ? */,         16 }, /* Topology # */

    /*
     * This table contains the backing store pages for the interrupt
     * fifos of the VC sub-engine in case of overflow.
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - NxC,
     * 3 - INT,
     * 4 - OS-Queue,
     * 5 - Pool-Queue,
     * 6 - Hard-Queue
     */
    [VST_ERQ]  = { "ERQ",  1,                 VC_QUEUE_COUNT },
};

#define xive2_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",               \
                  (xive)->chip->chip_id, ## __VA_ARGS__);

/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * TODO: It might be better to use the existing extract64() and
 * deposit64() but this means that all the register definitions will
 * change and become incompatible with the ones found in skiboot.
 *
 * Keep it as it is for now until we find a common ground.
 */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}

static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}
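/*
 * Illustration (not used by the model): with mask 0x00ff0000,
 * GETFIELD(0x00ff0000, 0x12345678) shifts the masked bits down by
 * ctz64(mask) = 16 and returns 0x34, while
 * SETFIELD(0x00ff0000, 0x12345678, 0xab) clears the field and
 * deposits the new value, returning 0x12ab5678.
 */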
/*
 * TODO: Document block id override
 */
static uint32_t pnv_xive2_block_id(PnvXive2 *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->cq_regs[CQ_XIVE_CFG >> 3];

    if (cfg_val & CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE) {
        blk = GETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, cfg_val);
    }

    return blk;
}

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive2 *pnv_xive2_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
        PnvXive2 *xive = &chip10->xive;

        if (pnv_xive2_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for ESB, EAT, ENDT, NVP
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of same
 * size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive2_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive2_vst_addr_direct(PnvXive2 *xive, uint32_t type,
                                          uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE2_DEBUG
        xive2_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                    info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type,
                                            uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
        xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                    page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED);

        if (!(vsd & VSD_ADDRESS_MASK)) {
            xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive2_error(xive, "VST: %s entry %x indirect page size differs !?",
                        info->name, idx);
            return 0;
        }
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
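/*
 * Worked example, assuming a 64K indirect page (page_shift = 16) and
 * the 32-byte END entries of this model: vst_per_page = 64K / 32 =
 * 2048, so looking up END index 5000 reads the third VSD of the
 * indirect table (vsd_idx = 2) and then resolves entry
 * 5000 % 2048 = 904 within the page it points to.
 */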
static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
                                   uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for VST %s %d !?",
                    blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive2_get_remote(blk);

        return xive ? pnv_xive2_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive2_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive2_vst_read(PnvXive2 *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    cpu_physical_memory_read(addr, data, info->size);
    return 0;
}

#define XIVE_VST_WORD_ALL -1

static int pnv_xive2_vst_write(PnvXive2 *xive, uint32_t type, uint8_t blk,
                               uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        cpu_physical_memory_write(addr, data, info->size);
    } else {
        cpu_physical_memory_write(addr + word_number * 4,
                                  data + word_number * 4, 4);
    }
    return 0;
}

static int pnv_xive2_get_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                            uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_get(&xive->ipi_source, idx);
    return 0;
}

static int pnv_xive2_set_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                            uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
    return 0;
}

static int pnv_xive2_get_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2End *end)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_END, blk, idx, end);
}

static int pnv_xive2_write_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2End *end, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_END, blk, idx, end,
                               word_number);
}

static int pnv_xive2_end_update(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    int i;
    uint64_t endc_watch[4];

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        endc_watch[i] =
            cpu_to_be64(xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i]);
    }

    return pnv_xive2_vst_write(xive, VST_END, blk, idx, endc_watch,
                               XIVE_VST_WORD_ALL);
}

static void pnv_xive2_end_cache_load(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint64_t endc_watch[4] = { 0 };
    int i;

    if (pnv_xive2_vst_read(xive, VST_END, blk, idx, endc_watch)) {
        xive2_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i] =
            be64_to_cpu(endc_watch[i]);
    }
}
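/*
 * The two helpers above model the ENDC watch engine that FW uses to
 * access the END table coherently: a write to VC_ENDC_WATCH0_SPEC
 * selects a block/index, a read of VC_ENDC_WATCH0_DATA0 loads the
 * entry into the DATA registers, and a write to VC_ENDC_WATCH0_DATA0
 * (done last, after DATA1-3) commits the modified entry back to the
 * VST.
 */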
static int pnv_xive2_get_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Nvp *nvp)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp);
}

static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2Nvp *nvp, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp,
                               word_number);
}

static int pnv_xive2_nvp_update(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    int i;
    uint64_t nxc_watch[4];

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        nxc_watch[i] =
            cpu_to_be64(xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i]);
    }

    return pnv_xive2_vst_write(xive, VST_NVP, blk, idx, nxc_watch,
                               XIVE_VST_WORD_ALL);
}

static void pnv_xive2_nvp_cache_load(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint64_t nxc_watch[4] = { 0 };
    int i;

    if (pnv_xive2_vst_read(xive, VST_NVP, blk, idx, nxc_watch)) {
        xive2_error(xive, "VST: no NVP entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i] =
            be64_to_cpu(nxc_watch[i]);
    }
}

static int pnv_xive2_get_eas(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Eas *eas)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive2_vst_read(xive, VST_EAS, blk, idx, eas);
}

static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV10_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? TCTXT_EN0 : TCTXT_EN1;
    uint32_t bit = pir & 0x3f;

    return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit);
}
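/*
 * Illustration of the mapping above: the low 6 PIR bits select the
 * enable bit within a register, and the fused-core number picks
 * TCTXT_EN0 (fused cores 0-7) or TCTXT_EN1 (the others), so a PIR of
 * 0x45 (hypothetical value) would be checked against PPC_BIT(5) of
 * the selected register.
 */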
static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool cam_ignore, uint8_t priority,
                               uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive2 *xive = PNV_XIVE2(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            if (gen1_tima_os) {
                ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                 nvt_idx, cam_ignore,
                                                 logic_serv);
            } else {
                ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                  nvt_idx, cam_ignore,
                                                  logic_serv);
            }

            /*
             * Save the context and follow on to catch duplicates,
             * which we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return false;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr)
{
    return pnv_xive2_block_id(PNV_XIVE2(xrtr));
}

/*
 * The TIMA MMIO space is shared among the chips and to identify the
 * chip from which the access is being done, we extract the chip id
 * from the PIR.
 */
static PnvXive2 *pnv_xive2_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive2 *xive = PNV_XIVE2(xptr);

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources of the interrupt controller have no knowledge
 * of the XIVE2 chip on which they reside. Encode the block id in the
 * source interrupt number before forwarding the source event
 * notification to the Router. This is required on a multichip system.
 */
static void pnv_xive2_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
{
    PnvXive2 *xive = PNV_XIVE2(xn);
    uint8_t blk = pnv_xive2_block_id(xive);

    xive2_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
}

/*
 * Set Translation Tables
 *
 * TODO add support for multiple sets
 */
static int pnv_xive2_stt_set_data(PnvXive2 *xive, uint64_t val)
{
    uint8_t tsel = GETFIELD(CQ_TAR_SELECT, xive->cq_regs[CQ_TAR >> 3]);
    uint8_t entry = GETFIELD(CQ_TAR_ENTRY_SELECT,
                             xive->cq_regs[CQ_TAR >> 3]);

    switch (tsel) {
    case CQ_TAR_NVPG:
    case CQ_TAR_ESB:
    case CQ_TAR_END:
        xive->tables[tsel][entry] = val;
        break;
    default:
        xive2_error(xive, "IC: unsupported table %d", tsel);
        return -1;
    }

    if (xive->cq_regs[CQ_TAR >> 3] & CQ_TAR_AUTOINC) {
        xive->cq_regs[CQ_TAR >> 3] = SETFIELD(CQ_TAR_ENTRY_SELECT,
                                              xive->cq_regs[CQ_TAR >> 3],
                                              ++entry);
    }

    return 0;
}
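/*
 * FW usage sketch: CQ_TAR is written once with the table select, the
 * starting entry and CQ_TAR_AUTOINC, after which each CQ_TDR write
 * stores one translation entry and bumps the entry select
 * automatically.
 */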
/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
                                        uint8_t blk, uint64_t vsd)
{
    Xive2EndSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
            xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                        page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive2_error(xive, "VST: %s table address 0x%"PRIx64
                    " is not aligned with page shift %d",
                    info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_ESB:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }

        memory_region_add_subregion(&xive->esb_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_EAS:  /* Nothing to be done */
        break;

    case VST_END:
        /*
         * Backing store pages for the END.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio,
                                   (vst_tsize / info->size)
                                   * (1ull << end_xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_mmio, 0, &end_xsrc->esb_mmio);
        break;

    case VST_NVP:  /* Not modeled */
    case VST_NVG:  /* Not modeled */
    case VST_NVC:  /* Not modeled */
    case VST_IC:   /* Not modeled */
    case VST_SYNC: /* Not modeled */
    case VST_ERQ:  /* Not modeled */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both the PC and VC sub-engines are configured this way, as each
 * uses the Virtual Structure Tables.
 */
static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VC_VSD_TABLE_SELECT,
                            xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VC_VSD_TABLE_ADDRESS,
                           xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_ERQ) {
        xive2_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for"
                    " %s table", blk, vst_infos[type].name);
        return;
    }

    if (!vst_addr) {
        xive2_error(xive, "VST: invalid %s table address",
                    vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive2_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive2_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}
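/*
 * Illustrative VSD encoding for a direct 64K EAT in exclusive mode:
 * VSD_MODE = VSD_MODE_EXCLUSIVE, VSD_ADDRESS_MASK covers the table
 * address, VSD_INDIRECT is clear and VSD_TSIZE = 4 (table size is
 * 1 << (4 + 12) = 64K), which provisions 8K EAS entries of 8 bytes
 * each.
 */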
/*
 * MMIO handlers
 */


/*
 * IC BAR layout
 *
 * Page 0: Internal CQ register accesses (reads & writes)
 * Page 1: Internal VC register accesses (reads & writes)
 * Page 2: Internal PC register accesses (reads & writes)
 * Page 3: Internal TCTXT (TIMA) reg accesses (read & writes)
 * Page 4: Notify Port page (writes only, w/data),
 * Page 5: Reserved
 * Page 6: Sync Poll page (writes only, dataless)
 * Page 7: Sync Inject page (writes only, dataless)
 * Page 8: LSI Trigger page (writes only, dataless)
 * Page 9: LSI SB Management page (reads & writes dataless)
 * Pages 10-255: Reserved
 * Pages 256-383: Direct mapped Thread Context Area (reads & writes)
 *                covering the 128 threads in P10.
 * Pages 384-511: Reserved
 */
typedef struct PnvXive2Region {
    const char *name;
    uint32_t pgoff;
    uint32_t pgsize;
    const MemoryRegionOps *ops;
} PnvXive2Region;

static const MemoryRegionOps pnv_xive2_ic_cq_ops;
static const MemoryRegionOps pnv_xive2_ic_pc_ops;
static const MemoryRegionOps pnv_xive2_ic_vc_ops;
static const MemoryRegionOps pnv_xive2_ic_tctxt_ops;
static const MemoryRegionOps pnv_xive2_ic_notify_ops;
static const MemoryRegionOps pnv_xive2_ic_sync_ops;
static const MemoryRegionOps pnv_xive2_ic_lsi_ops;
static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops;

/* 512 pages. 4K: 2M range, 64K: 32M range */
static const PnvXive2Region pnv_xive2_ic_regions[] = {
    { "xive-ic-cq",          0,   1,   &pnv_xive2_ic_cq_ops     },
    { "xive-ic-vc",          1,   1,   &pnv_xive2_ic_vc_ops     },
    { "xive-ic-pc",          2,   1,   &pnv_xive2_ic_pc_ops     },
    { "xive-ic-tctxt",       3,   1,   &pnv_xive2_ic_tctxt_ops  },
    { "xive-ic-notify",      4,   1,   &pnv_xive2_ic_notify_ops },
    /* page 5 reserved */
    { "xive-ic-sync",        6,   2,   &pnv_xive2_ic_sync_ops   },
    { "xive-ic-lsi",         8,   2,   &pnv_xive2_ic_lsi_ops    },
    /* pages 10-255 reserved */
    { "xive-ic-tm-indirect", 256, 128, &pnv_xive2_ic_tm_indirect_ops },
    /* pages 384-511 reserved */
};

/*
 * CQ operations
 */

static uint64_t pnv_xive2_ic_cq_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;
    uint64_t val = 0;

    switch (offset) {
    case CQ_XIVE_CAP: /* Set at reset */
    case CQ_XIVE_CFG:
        val = xive->cq_regs[reg];
        break;
    case CQ_MSGSND: /* TODO check the #cores of the machine */
        val = 0xffffffff00000000;
        break;
    case CQ_CFG_PB_GEN:
        val = CQ_CFG_PB_GEN_PB_INIT; /* TODO: fix CQ_CFG_PB_GEN default value */
        break;
    default:
        xive2_error(xive, "CQ: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static uint64_t pnv_xive2_bar_size(uint64_t val)
{
    return 1ull << (GETFIELD(CQ_BAR_RANGE, val) + 24);
}
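/*
 * Example of the BAR range decoding above: CQ_BAR_RANGE = 0 yields
 * the minimum window of 1 << 24 = 16M, and CQ_BAR_RANGE = 2 a 64M
 * window.
 */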
static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    int i;

    switch (offset) {
    case CQ_XIVE_CFG:
    case CQ_RST_CTL: /* TODO: reset all BARs */
        break;

    case CQ_IC_BAR:
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->cq_regs[reg] & CQ_IC_BAR_VALID) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_del_subregion(&xive->ic_mmio,
                                                &xive->ic_mmios[i]);
                }
                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_IC_BAR_VALID)) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_add_subregion(&xive->ic_mmio,
                               pnv_xive2_ic_regions[i].pgoff << xive->ic_shift,
                               &xive->ic_mmios[i]);
                }
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);
            }
        }
        break;

    case CQ_TM_BAR:
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->cq_regs[reg] & CQ_TM_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_TM_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_ESB_BAR:
        xive->esb_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->esb_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->esb_mmio);
            }
        } else {
            xive->esb_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->esb_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->esb_base,
                                            &xive->esb_mmio);
            }
        }
        break;

    case CQ_END_BAR:
        xive->end_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->end_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->end_mmio);
            }
        } else {
            xive->end_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->end_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->end_base,
                                            &xive->end_mmio);
            }
        }
        break;

    case CQ_NVC_BAR:
        xive->nvc_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvc_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvc_mmio);
            }
        } else {
            xive->nvc_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvc_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvc_base,
                                            &xive->nvc_mmio);
            }
        }
        break;

    case CQ_NVPG_BAR:
        xive->nvpg_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvpg_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvpg_mmio);
            }
        } else {
            xive->nvpg_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvpg_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvpg_base,
                                            &xive->nvpg_mmio);
            }
        }
        break;

    case CQ_TAR: /* Set Translation Table Address */
        break;
    case CQ_TDR: /* Set Translation Table Data */
        pnv_xive2_stt_set_data(xive, val);
        break;
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    default:
        xive2_error(xive, "CQ: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->cq_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_cq_ops = {
    .read = pnv_xive2_ic_cq_read,
    .write = pnv_xive2_ic_cq_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
        val = xive->vc_regs[reg];
        break;

    /*
     * ESB cache updates (not modeled)
     */
    case VC_ESBC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_ESBC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * EAS cache updates (not modeled)
     */
    case VC_EASC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_EASC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
        xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | VC_ENDC_WATCH_CONFLICT);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive2_end_cache_load(xive);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_ENDC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL_MASK:
        val = xive->vc_regs[reg];
        break;

    case VC_AT_MACRO_KILL:
        xive->vc_regs[reg] &= ~VC_AT_MACRO_KILL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        val = xive->vc_regs[reg];
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        val = VC_ENDC_SYNC_POLL_DONE;
        break;
    default:
        xive2_error(xive, "VC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}
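/*
 * Cache flush handshake, as modeled here: FW writes a *_FLUSH_POLL
 * register, which sets the POLL_VALID bit in the matching
 * *_FLUSH_CTRL register, and then polls *_FLUSH_CTRL until the bit
 * clears. Since the model has no real caches to flush, the read side
 * above clears POLL_VALID immediately, so the first poll terminates.
 */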
static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
        break;
    case VC_VSD_TABLE_DATA:
        pnv_xive2_vst_set_data(xive, val);
        break;

    /*
     * ESB cache updates (not modeled)
     */
    /* case VC_ESBC_FLUSH_CTRL: */
    case VC_ESBC_FLUSH_POLL:
        xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID;
        /* ESB update */
        break;

    /*
     * EAS cache updates (not modeled)
     */
    /* case VC_EASC_FLUSH_CTRL: */
    case VC_EASC_FLUSH_POLL:
        xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID;
        /* EAS update */
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
        val &= ~VC_ENDC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
        break;
    case VC_ENDC_WATCH0_DATA0:
        /* writing to DATA0 triggers the cache write */
        xive->vc_regs[reg] = val;
        pnv_xive2_end_update(xive);
        break;


    /* case VC_ENDC_FLUSH_CTRL: */
    case VC_ENDC_FLUSH_POLL:
        xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL:
    case VC_AT_MACRO_KILL_MASK:
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        break;

    default:
        xive2_error(xive, "VC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->vc_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_vc_ops = {
    .read = pnv_xive2_ic_vc_read,
    .write = pnv_xive2_ic_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        val = xive->pc_regs[reg];
        break;

    /*
     * cache updates
     */
    case PC_NXC_WATCH0_SPEC:
        xive->pc_regs[reg] &= ~(PC_NXC_WATCH_FULL | PC_NXC_WATCH_CONFLICT);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive2_nvp_cache_load(xive);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_FLUSH_CTRL:
        xive->pc_regs[reg] &= ~PC_NXC_FLUSH_CTRL_POLL_VALID;
        val = xive->pc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
        xive->pc_regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->pc_regs[reg];
        break;

    default:
        xive2_error(xive, "PC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}
static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {

    /*
     * VSD table settings. Only taken into account in the VC
     * sub-engine because the Xive2Router model combines both VC and PC
     * sub-engines
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        break;

    /*
     * cache updates
     */
    case PC_NXC_WATCH0_SPEC:
        val &= ~PC_NXC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
        break;
    case PC_NXC_WATCH0_DATA0:
        /* writing to DATA0 triggers the cache write */
        xive->pc_regs[reg] = val;
        pnv_xive2_nvp_update(xive);
        break;

    /* case PC_NXC_FLUSH_CTRL: */
    case PC_NXC_FLUSH_POLL:
        xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
    case PC_AT_KILL_MASK:
        break;

    default:
        xive2_error(xive, "PC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->pc_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_pc_ops = {
    .read = pnv_xive2_ic_pc_read,
    .write = pnv_xive2_ic_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};


static uint64_t pnv_xive2_ic_tctxt_read(void *opaque, hwaddr offset,
                                        unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * XIVE2 hardware thread enablement
     */
    case TCTXT_EN0:
    case TCTXT_EN1:
        val = xive->tctxt_regs[reg];
        break;

    case TCTXT_EN0_SET:
    case TCTXT_EN0_RESET:
        val = xive->tctxt_regs[TCTXT_EN0 >> 3];
        break;
    case TCTXT_EN1_SET:
    case TCTXT_EN1_RESET:
        val = xive->tctxt_regs[TCTXT_EN1 >> 3];
        break;
    default:
        xive2_error(xive, "TCTXT: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset,
                                     uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * XIVE2 hardware thread enablement
     */
    case TCTXT_EN0: /* Physical Thread Enable */
    case TCTXT_EN1: /* Physical Thread Enable (fused core) */
        break;

    case TCTXT_EN0_SET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] |= val;
        break;
    case TCTXT_EN1_SET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] |= val;
        break;
    case TCTXT_EN0_RESET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] &= ~val;
        break;
    case TCTXT_EN1_RESET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] &= ~val;
        break;

    default:
        xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->tctxt_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_tctxt_ops = {
    .read = pnv_xive2_ic_tctxt_read,
    .write = pnv_xive2_ic_tctxt_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Redirect XSCOM to MMIO handlers
 */
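/*
 * Decoding sketch: the XSCOM space mirrors the first 256 registers of
 * each sub-engine. offset >> 3 gives the XSCOM register number, whose
 * high byte selects the sub-engine (0x0xx CQ, 0x1xx VC, 0x2xx PC,
 * 0x3xx TCTXT) and whose low byte, shifted back by 3, is the register
 * offset within the corresponding IC MMIO page. E.g. an XSCOM access
 * at offset 0x808 is serviced by the VC handlers at MMIO offset 0x8.
 */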
static uint64_t pnv_xive2_xscom_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;

    switch (xscom_reg) {
    case 0x000 ... 0x0FF:
        val = pnv_xive2_ic_cq_read(opaque, mmio_offset, size);
        break;
    case 0x100 ... 0x1FF:
        val = pnv_xive2_ic_vc_read(opaque, mmio_offset, size);
        break;
    case 0x200 ... 0x2FF:
        val = pnv_xive2_ic_pc_read(opaque, mmio_offset, size);
        break;
    case 0x300 ... 0x3FF:
        val = pnv_xive2_ic_tctxt_read(opaque, mmio_offset, size);
        break;
    default:
        xive2_error(xive, "XSCOM: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_xscom_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;

    switch (xscom_reg) {
    case 0x000 ... 0x0FF:
        pnv_xive2_ic_cq_write(opaque, mmio_offset, val, size);
        break;
    case 0x100 ... 0x1FF:
        pnv_xive2_ic_vc_write(opaque, mmio_offset, val, size);
        break;
    case 0x200 ... 0x2FF:
        pnv_xive2_ic_pc_write(opaque, mmio_offset, val, size);
        break;
    case 0x300 ... 0x3FF:
        pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size);
        break;
    default:
        xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx, offset);
    }
}

static const MemoryRegionOps pnv_xive2_xscom_ops = {
    .read = pnv_xive2_xscom_read,
    .write = pnv_xive2_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Notify port page. The layout is compatible between 4K and 64K pages:
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   IPI interrupt (NPU)
 *  0x800 - 0xFFF   HW interrupt triggers (PSI, PHB)
 */

static void pnv_xive2_ic_hw_trigger(PnvXive2 *xive, hwaddr addr,
                                    uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    if (val & XIVE_TRIGGER_END) {
        xive2_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                    addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive2_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
                        !!(val & XIVE_TRIGGER_PQ));
}
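/*
 * The 8-byte trigger payload written by a device carries the event
 * data: XIVE_TRIGGER_PQ tells the Router whether the PQ state was
 * already checked at the source, XIVE_TRIGGER_END flags (unsupported)
 * END triggers, and the block/index pair locating the EAS is
 * recovered with XIVE_EAS_BLOCK()/XIVE_EAS_INDEX().
 */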
static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset,
                                      uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* VC: IPI triggers */
    switch (offset) {
    case 0x000 ... 0x7FF:
        /* TODO: check IPI notify sub-page routing */
        pnv_xive2_ic_hw_trigger(opaque, offset, val);
        break;

    /* VC: HW triggers */
    case 0x800 ... 0xFFF:
        pnv_xive2_ic_hw_trigger(opaque, offset, val);
        break;

    default:
        xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx, offset);
    }
}

static uint64_t pnv_xive2_ic_notify_read(void *opaque, hwaddr offset,
                                         unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* loads are invalid */
    xive2_error(xive, "NOTIFY: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static const MemoryRegionOps pnv_xive2_ic_notify_ops = {
    .read = pnv_xive2_ic_notify_read,
    .write = pnv_xive2_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_lsi_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset,
                                   uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_ic_lsi_ops = {
    .read = pnv_xive2_ic_lsi_read,
    .write = pnv_xive2_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Sync MMIO page (write only)
 */
#define PNV_XIVE2_SYNC_IPI      0x000
#define PNV_XIVE2_SYNC_HW       0x080
#define PNV_XIVE2_SYNC_NxC      0x100
#define PNV_XIVE2_SYNC_INT      0x180
#define PNV_XIVE2_SYNC_OS_ESC   0x200
#define PNV_XIVE2_SYNC_POOL_ESC 0x280
#define PNV_XIVE2_SYNC_HARD_ESC 0x300

static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset,
                                       unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* loads are invalid */
    xive2_error(xive, "SYNC: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset,
                                    uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    switch (offset) {
    case PNV_XIVE2_SYNC_IPI:
    case PNV_XIVE2_SYNC_HW:
    case PNV_XIVE2_SYNC_NxC:
    case PNV_XIVE2_SYNC_INT:
    case PNV_XIVE2_SYNC_OS_ESC:
    case PNV_XIVE2_SYNC_POOL_ESC:
    case PNV_XIVE2_SYNC_HARD_ESC:
        break;
    default:
        xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset);
    }
}

static const MemoryRegionOps pnv_xive2_ic_sync_ops = {
    .read = pnv_xive2_ic_sync_read,
    .write = pnv_xive2_ic_sync_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * When the TM direct pages of the IC controller are accessed, the
 * target HW thread is deduced from the page offset.
 */
static XiveTCTX *pnv_xive2_get_indirect_tctx(PnvXive2 *xive, uint32_t pir)
{
    PnvChip *chip = xive->chip;
    PowerPCCPU *cpu = NULL;

    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive2_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static uint64_t pnv_xive2_ic_tm_indirect_read(void *opaque, hwaddr offset,
                                              unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t pir = offset >> xive->ic_shift;
    XiveTCTX *tctx = pnv_xive2_get_indirect_tctx(xive, pir);
    uint64_t val = -1;

    if (tctx) {
        val = xive_tctx_tm_read(NULL, tctx, offset, size);
    }

    return val;
}

static void pnv_xive2_ic_tm_indirect_write(void *opaque, hwaddr offset,
                                           uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t pir = offset >> xive->ic_shift;
    XiveTCTX *tctx = pnv_xive2_get_indirect_tctx(xive, pir);

    if (tctx) {
        xive_tctx_tm_write(NULL, tctx, offset, val, size);
    }
}

static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops = {
    .read = pnv_xive2_ic_tm_indirect_read,
    .write = pnv_xive2_ic_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * TIMA ops
 */

/*
 * Special TIMA offsets to handle accesses in a POWER10 way.
 *
 * Only the CAM line updates done by the hypervisor should be handled
 * specifically.
 */
#define HV_PAGE_OFFSET         (XIVE_TM_HV_PAGE << TM_SHIFT)
#define HV_PUSH_OS_CTX_OFFSET  (HV_PAGE_OFFSET | (TM_QW1_OS + TM_WORD2))
#define HV_PULL_OS_CTX_OFFSET  (HV_PAGE_OFFSET | TM_SPC_PULL_OS_CTX)

static void pnv_xive2_tm_write(void *opaque, hwaddr offset,
                               uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;

    /* TODO: should we switch the TM ops table instead ? */
    if (!gen1_tima_os && offset == HV_PUSH_OS_CTX_OFFSET) {
        xive2_tm_push_os_ctx(xptr, tctx, offset, value, size);
        return;
    }

    /* Other TM ops are the same as XIVE1 */
    xive_tctx_tm_write(xptr, tctx, offset, value, size);
}
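/*
 * A note on the offsets above: as the names suggest, XIVE_TM_HV_PAGE
 * selects the hypervisor view of the TIMA, so HV_PUSH_OS_CTX_OFFSET
 * is the OS CAM line (TM_QW1_OS + TM_WORD2) accessed through the HV
 * page and HV_PULL_OS_CTX_OFFSET the OS context pull special
 * operation. Only these two accesses need the XIVE2 handlers
 * (xive2_tm_push_os_ctx/xive2_tm_pull_os_ctx); all other TIMA
 * accesses go through the XIVE1 code.
 */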
static uint64_t pnv_xive2_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;

    /* TODO: should we switch the TM ops table instead ? */
    if (!gen1_tima_os && offset == HV_PULL_OS_CTX_OFFSET) {
        return xive2_tm_pull_os_ctx(xptr, tctx, offset, size);
    }

    /* Other TM ops are the same as XIVE1 */
    return xive_tctx_tm_read(xptr, tctx, offset, size);
}

static const MemoryRegionOps pnv_xive2_tm_ops = {
    .read = pnv_xive2_tm_read,
    .write = pnv_xive2_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset,
                                   unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_nvc_write(void *opaque, hwaddr offset,
                                uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_nvc_ops = {
    .read = pnv_xive2_nvc_read,
    .write = pnv_xive2_nvc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset,
                                    unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset,
                                 uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_nvpg_ops = {
    .read = pnv_xive2_nvpg_read,
    .write = pnv_xive2_nvpg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * POWER10 default capabilities: 0x2000120076f000FC
 */
#define PNV_XIVE2_CAPABILITIES  0x2000120076f000FC

/*
 * POWER10 default configuration: 0x0030000033000000
 *
 * The 8-bit thread id was dropped for P10
 */
#define PNV_XIVE2_CONFIGURATION 0x0030000033000000

static void pnv_xive2_reset(void *dev)
{
    PnvXive2 *xive = PNV_XIVE2(dev);
    XiveSource *xsrc = &xive->ipi_source;
    Xive2EndSource *end_xsrc = &xive->end_source;

    xive->cq_regs[CQ_XIVE_CAP >> 3] = xive->capabilities;
    xive->cq_regs[CQ_XIVE_CFG >> 3] = xive->config;

    /* HW hardwires the #Topology of the chip in the block field */
    xive->cq_regs[CQ_XIVE_CFG >> 3] |=
        SETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, 0ull, xive->chip->chip_id);

    /* Set default page size to 64k */
    xive->ic_shift = xive->esb_shift = xive->end_shift = 16;
    xive->nvc_shift = xive->nvpg_shift = xive->tm_shift = 16;

    /* Clear source MMIOs */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
    }
}
/*
 * Maximum number of IRQs and ENDs supported by HW. Will be tuned by
 * software.
 */
#define PNV_XIVE2_NR_IRQS (PNV10_XIVE2_ESB_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE2_NR_ENDS (PNV10_XIVE2_END_SIZE / (1ull << XIVE_ESB_64K_2PAGE))

static void pnv_xive2_realize(DeviceState *dev, Error **errp)
{
    PnvXive2 *xive = PNV_XIVE2(dev);
    PnvXive2Class *pxc = PNV_XIVE2_GET_CLASS(dev);
    XiveSource *xsrc = &xive->ipi_source;
    Xive2EndSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;
    int i;

    pxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    assert(xive->chip);

    /*
     * The XiveSource and Xive2EndSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), "flags", XIVE_SRC_STORE_EOI,
                            &error_fatal);
    object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE2_NR_IRQS,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive),
                             &error_fatal);
    qdev_realize(DEVICE(xsrc), NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE2_NR_ENDS,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    qdev_realize(DEVICE(end_xsrc), NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev),
                          &pnv_xive2_xscom_ops, xive, "xscom-xive",
                          PNV10_XSCOM_XIVE2_SIZE << 3);

    /* Interrupt controller MMIO regions */
    xive->ic_shift = 16;
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV10_XIVE2_IC_SIZE);

    for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
        memory_region_init_io(&xive->ic_mmios[i], OBJECT(dev),
                              pnv_xive2_ic_regions[i].ops, xive,
                              pnv_xive2_ic_regions[i].name,
                              pnv_xive2_ic_regions[i].pgsize << xive->ic_shift);
    }
    /*
     * VC MMIO regions.
     */
    xive->esb_shift = 16;
    xive->end_shift = 16;
    memory_region_init(&xive->esb_mmio, OBJECT(xive), "xive-esb",
                       PNV10_XIVE2_ESB_SIZE);
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-end",
                       PNV10_XIVE2_END_SIZE);

    /* Presenter Controller MMIO region (not modeled) */
    xive->nvc_shift = 16;
    xive->nvpg_shift = 16;
    memory_region_init_io(&xive->nvc_mmio, OBJECT(dev),
                          &pnv_xive2_nvc_ops, xive,
                          "xive-nvc", PNV10_XIVE2_NVC_SIZE);

    memory_region_init_io(&xive->nvpg_mmio, OBJECT(dev),
                          &pnv_xive2_nvpg_ops, xive,
                          "xive-nvpg", PNV10_XIVE2_NVPG_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    xive->tm_shift = 16;
    memory_region_init_io(&xive->tm_mmio, OBJECT(dev), &pnv_xive2_tm_ops,
                          xive, "xive-tima", PNV10_XIVE2_TM_SIZE);

    qemu_register_reset(pnv_xive2_reset, dev);
}

static Property pnv_xive2_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive2, ic_base, 0),
    DEFINE_PROP_UINT64("esb-bar", PnvXive2, esb_base, 0),
    DEFINE_PROP_UINT64("end-bar", PnvXive2, end_base, 0),
    DEFINE_PROP_UINT64("nvc-bar", PnvXive2, nvc_base, 0),
    DEFINE_PROP_UINT64("nvpg-bar", PnvXive2, nvpg_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive2, tm_base, 0),
    DEFINE_PROP_UINT64("capabilities", PnvXive2, capabilities,
                       PNV_XIVE2_CAPABILITIES),
    DEFINE_PROP_UINT64("config", PnvXive2, config,
                       PNV_XIVE2_CONFIGURATION),
    DEFINE_PROP_LINK("chip", PnvXive2, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};

static void pnv_xive2_instance_init(Object *obj)
{
    PnvXive2 *xive = PNV_XIVE2(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            TYPE_XIVE_SOURCE);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE2_END_SOURCE);
}

static int pnv_xive2_dt_xscom(PnvXScomInterface *dev, void *fdt,
                              int xscom_offset)
{
    const char compat_p10[] = "ibm,power10-xive-x";
    char *name;
    int offset;
    uint32_t reg[] = {
        cpu_to_be32(PNV10_XSCOM_XIVE2_BASE),
        cpu_to_be32(PNV10_XSCOM_XIVE2_SIZE)
    };

    name = g_strdup_printf("xive@%x", PNV10_XSCOM_XIVE2_BASE);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT(fdt_setprop(fdt, offset, "compatible", compat_p10,
                     sizeof(compat_p10)));
    return 0;
}

static void pnv_xive2_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    Xive2RouterClass *xrc = XIVE2_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    PnvXive2Class *pxc = PNV_XIVE2_CLASS(klass);

    xdc->dt_xscom = pnv_xive2_dt_xscom;

    dc->desc = "PowerNV XIVE2 Interrupt Controller (POWER10)";
    device_class_set_parent_realize(dc, pnv_xive2_realize,
                                    &pxc->parent_realize);
    device_class_set_props(dc, pnv_xive2_properties);

    xrc->get_eas = pnv_xive2_get_eas;
    xrc->get_pq = pnv_xive2_get_pq;
    xrc->set_pq = pnv_xive2_set_pq;
    xrc->get_end = pnv_xive2_get_end;
    xrc->write_end = pnv_xive2_write_end;
    xrc->get_nvp = pnv_xive2_get_nvp;
    xrc->write_nvp = pnv_xive2_write_nvp;
    xrc->get_block_id = pnv_xive2_get_block_id;

    xnc->notify = pnv_xive2_notify;

    xpc->match_nvt = pnv_xive2_match_nvt;
}

static const TypeInfo pnv_xive2_info = {
    .name          = TYPE_PNV_XIVE2,
    .parent        = TYPE_XIVE2_ROUTER,
    .instance_init = pnv_xive2_instance_init,
    .instance_size = sizeof(PnvXive2),
    .class_init    = pnv_xive2_class_init,
    .class_size    = sizeof(PnvXive2Class),
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive2_register_types(void)
{
    type_register_static(&pnv_xive2_info);
}

type_init(pnv_xive2_register_types)

static void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx,
                                     Monitor *mon)
{
    uint8_t  eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
    uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);

    if (!xive2_nvp_is_valid(nvp)) {
        return;
    }

    monitor_printf(mon, "  %08x end:%02x/%04x IPB:%02x\n",
                   nvp_idx, eq_blk, eq_idx,
                   xive_get_field32(NVP2_W2_IPB, nvp->w2));
}

/*
 * If the table is direct, we can compute the number of PQ entries
 * provisioned by FW.
 */
static uint32_t pnv_xive2_nr_esbs(PnvXive2 *xive)
{
    uint8_t blk = pnv_xive2_block_id(xive);
    uint64_t vsd = xive->vsds[VST_ESB][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}

/*
 * Compute the number of entries per indirect subpage.
 */
static uint64_t pnv_xive2_vst_per_subpage(PnvXive2 *xive, uint32_t type)
{
    uint8_t blk = pnv_xive2_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(VSD_INDIRECT & vsd)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE2_DEBUG
        xive2_error(xive, "VST: invalid %s entry!?", info->name);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                    page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}
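/*
 * Example for the helper above, assuming FW (such as skiboot)
 * provisioned a direct 64K ESB table: each backing store byte covers
 * SBE_PER_BYTE = 4 PQ pairs (2 bits each), so pnv_xive2_nr_esbs()
 * reports 64K * 4 = 256K interrupt sources for the block.
 */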
%08x\n", blk, srcno0, 2064 srcno0 + nr_esbs - 1); 2065 for (i = 0; i < nr_esbs; i++) { 2066 if (xive2_router_get_eas(xrtr, blk, i, &eas)) { 2067 break; 2068 } 2069 if (!xive2_eas_is_masked(&eas)) { 2070 xive2_eas_pic_print_info(&eas, i, mon); 2071 } 2072 } 2073 2074 monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk); 2075 i = 0; 2076 while (!xive2_router_get_end(xrtr, blk, i, &end)) { 2077 xive2_end_eas_pic_print_info(&end, i++, mon); 2078 } 2079 2080 monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk); 2081 i = 0; 2082 while (!xive2_router_get_end(xrtr, blk, i, &end)) { 2083 xive2_end_pic_print_info(&end, i++, mon); 2084 } 2085 2086 monitor_printf(mon, "XIVE[%x] #%d NVPT %08x .. %08x\n", chip_id, blk, 2087 0, XIVE2_NVP_COUNT - 1); 2088 xive_nvp_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP); 2089 for (i = 0; i < XIVE2_NVP_COUNT; i += xive_nvp_per_subpage) { 2090 while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) { 2091 xive2_nvp_pic_print_info(&nvp, i++, mon); 2092 } 2093 } 2094 } 2095