/*
 * QEMU PowerPC PowerNV (POWER9) PHB4 model
 *
 * Copyright (c) 2018-2020, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/visitor.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "monitor/monitor.h"
#include "target/ppc/cpu.h"
#include "hw/pci-host/pnv_phb4_regs.h"
#include "hw/pci-host/pnv_phb4.h"
#include "hw/pci/pcie_host.h"
#include "hw/pci/pcie_port.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "qom/object.h"
#include "sysemu/sysemu.h"
#include "trace.h"

#define phb_error(phb, fmt, ...)                                        \
    qemu_log_mask(LOG_GUEST_ERROR, "phb4[%d:%d]: " fmt "\n",            \
                  (phb)->chip_id, (phb)->phb_id, ## __VA_ARGS__)

#define phb_pec_error(pec, fmt, ...)                                    \
    qemu_log_mask(LOG_GUEST_ERROR, "phb4_pec[%d:%d]: " fmt "\n",        \
                  (pec)->chip_id, (pec)->index, ## __VA_ARGS__)

/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * These are common with the PnvXive model.
 */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}

static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}
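
/*
 * Worked example (illustrative only): with mask = 0x0000ff00,
 * ctz64(mask) = 8, so GETFIELD(0x0000ff00, 0x1234) extracts
 * (0x1234 & 0xff00) >> 8 = 0x12, and SETFIELD(0x0000ff00, 0, 0x12)
 * rebuilds 0x1200. Out-of-range values are silently truncated by the
 * final "& mask".
 */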

static PCIDevice *pnv_phb4_find_cfg_dev(PnvPHB4 *phb)
{
    PCIHostState *pci = PCI_HOST_BRIDGE(phb);
    uint64_t addr = phb->regs[PHB_CONFIG_ADDRESS >> 3];
    uint8_t bus, devfn;

    if (!(addr >> 63)) {
        return NULL;
    }
    bus = (addr >> 52) & 0xff;
    devfn = (addr >> 44) & 0xff;

    /* We don't access the root complex this way */
    if (bus == 0 && devfn == 0) {
        return NULL;
    }
    return pci_find_device(pci->bus, bus, devfn);
}

/*
 * The CONFIG_DATA register expects little endian accesses, but as the
 * region is big endian, we have to swap the value.
 */
static void pnv_phb4_config_write(PnvPHB4 *phb, unsigned off,
                                  unsigned size, uint64_t val)
{
    uint32_t cfg_addr, limit;
    PCIDevice *pdev;

    pdev = pnv_phb4_find_cfg_dev(phb);
    if (!pdev) {
        return;
    }
    cfg_addr = (phb->regs[PHB_CONFIG_ADDRESS >> 3] >> 32) & 0xffc;
    cfg_addr |= off;
    limit = pci_config_size(pdev);
    if (limit <= cfg_addr) {
        /*
         * conventional pci device can be behind pcie-to-pci bridge.
         * 256 <= addr < 4K has no effects.
         */
        return;
    }
    switch (size) {
    case 1:
        break;
    case 2:
        val = bswap16(val);
        break;
    case 4:
        val = bswap32(val);
        break;
    default:
        g_assert_not_reached();
    }
    pci_host_config_write_common(pdev, cfg_addr, limit, val, size);
}

static uint64_t pnv_phb4_config_read(PnvPHB4 *phb, unsigned off,
                                     unsigned size)
{
    uint32_t cfg_addr, limit;
    PCIDevice *pdev;
    uint64_t val;

    pdev = pnv_phb4_find_cfg_dev(phb);
    if (!pdev) {
        return ~0ull;
    }
    cfg_addr = (phb->regs[PHB_CONFIG_ADDRESS >> 3] >> 32) & 0xffc;
    cfg_addr |= off;
    limit = pci_config_size(pdev);
    if (limit <= cfg_addr) {
        /*
         * conventional pci device can be behind pcie-to-pci bridge.
         * 256 <= addr < 4K has no effects.
         */
        return ~0ull;
    }
    val = pci_host_config_read_common(pdev, cfg_addr, limit, size);
    switch (size) {
    case 1:
        return val;
    case 2:
        return bswap16(val);
    case 4:
        return bswap32(val);
    default:
        g_assert_not_reached();
    }
}
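
/*
 * Illustrative programming sequence for the pair above (not firmware
 * code): PHB_CONFIG_ADDRESS is loaded as
 * | enable(63) | bus(59:52) | devfn(51:44) | reg(43:32) |, e.g.
 * 0x8000000000000000ull | (1ull << 52) | (0x10ull << 32) selects
 * bus 1, devfn 0, config offset 0x10; the access then goes through
 * the CONFIG_DATA window at the matching byte offset.
 */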

/*
 * Root complex register accesses are memory mapped.
 */
static void pnv_phb4_rc_config_write(PnvPHB4 *phb, unsigned off,
                                     unsigned size, uint64_t val)
{
    PCIHostState *pci = PCI_HOST_BRIDGE(phb);
    PCIDevice *pdev;

    if (size != 4) {
        phb_error(phb, "rc_config_write invalid size %d", size);
        return;
    }

    pdev = pci_find_device(pci->bus, 0, 0);
    if (!pdev) {
        phb_error(phb, "rc_config_write device not found");
        return;
    }

    pci_host_config_write_common(pdev, off, PHB_RC_CONFIG_SIZE,
                                 bswap32(val), 4);
}

static uint64_t pnv_phb4_rc_config_read(PnvPHB4 *phb, unsigned off,
                                        unsigned size)
{
    PCIHostState *pci = PCI_HOST_BRIDGE(phb);
    PCIDevice *pdev;
    uint64_t val;

    if (size != 4) {
        phb_error(phb, "rc_config_read invalid size %d", size);
        return ~0ull;
    }

    pdev = pci_find_device(pci->bus, 0, 0);
    if (!pdev) {
        phb_error(phb, "rc_config_read device not found");
        return ~0ull;
    }

    val = pci_host_config_read_common(pdev, off, PHB_RC_CONFIG_SIZE, 4);
    return bswap32(val);
}

static void pnv_phb4_check_mbt(PnvPHB4 *phb, uint32_t index)
{
    uint64_t base, start, size, mbe0, mbe1;
    MemoryRegion *parent;
    char name[64];

    /* Unmap first */
    if (memory_region_is_mapped(&phb->mr_mmio[index])) {
        /* Should we destroy it in RCU friendly way... ? */
        memory_region_del_subregion(phb->mr_mmio[index].container,
                                    &phb->mr_mmio[index]);
    }

    /* Get table entry */
    mbe0 = phb->ioda_MBT[(index << 1)];
    mbe1 = phb->ioda_MBT[(index << 1) + 1];

    if (!(mbe0 & IODA3_MBT0_ENABLE)) {
        return;
    }

    /* Grab geometry from registers */
    base = GETFIELD(IODA3_MBT0_BASE_ADDR, mbe0) << 12;
    size = GETFIELD(IODA3_MBT1_MASK, mbe1) << 12;
    size |= 0xff00000000000000ull;
    size = ~size + 1;
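
    /*
     * Illustrative decode: the MASK field holds the ones' complement
     * of (window_size - 1) >> 12, so after the shift and the OR of
     * the top byte, "size" is e.g. 0xffffffffffff0000 for a 64KB
     * window and ~size + 1 recovers 0x10000.
     */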

    /* Calculate PCI side start address based on M32/M64 window type */
    if (mbe0 & IODA3_MBT0_TYPE_M32) {
        start = phb->regs[PHB_M32_START_ADDR >> 3];
        if ((start + size) > 0x100000000ull) {
            phb_error(phb, "M32 set beyond 4GB boundary !");
            size = 0x100000000 - start;
        }
    } else {
        start = base | (phb->regs[PHB_M64_UPPER_BITS >> 3]);
    }

    /* TODO: Figure out how to implement/decode AOMASK */

    /* Check if it matches an enabled MMIO region in the PEC stack */
    if (memory_region_is_mapped(&phb->stack->mmbar0) &&
        base >= phb->stack->mmio0_base &&
        (base + size) <= (phb->stack->mmio0_base + phb->stack->mmio0_size)) {
        parent = &phb->stack->mmbar0;
        base -= phb->stack->mmio0_base;
    } else if (memory_region_is_mapped(&phb->stack->mmbar1) &&
               base >= phb->stack->mmio1_base &&
               (base + size) <= (phb->stack->mmio1_base + phb->stack->mmio1_size)) {
        parent = &phb->stack->mmbar1;
        base -= phb->stack->mmio1_base;
    } else {
        phb_error(phb, "PHB MBAR %d out of parent bounds", index);
        return;
    }

    /* Create alias (better name ?) */
    snprintf(name, sizeof(name), "phb4-mbar%d", index);
    memory_region_init_alias(&phb->mr_mmio[index], OBJECT(phb), name,
                             &phb->pci_mmio, start, size);
    memory_region_add_subregion(parent, base, &phb->mr_mmio[index]);
}

static void pnv_phb4_check_all_mbt(PnvPHB4 *phb)
{
    uint64_t i;
    uint32_t num_windows = phb->big_phb ? PNV_PHB4_MAX_MMIO_WINDOWS :
        PNV_PHB4_MIN_MMIO_WINDOWS;

    for (i = 0; i < num_windows; i++) {
        pnv_phb4_check_mbt(phb, i);
    }
}

static uint64_t *pnv_phb4_ioda_access(PnvPHB4 *phb,
                                      unsigned *out_table, unsigned *out_idx)
{
    uint64_t adreg = phb->regs[PHB_IODA_ADDR >> 3];
    unsigned int index = GETFIELD(PHB_IODA_AD_TADR, adreg);
    unsigned int table = GETFIELD(PHB_IODA_AD_TSEL, adreg);
    unsigned int mask;
    uint64_t *tptr = NULL;

    switch (table) {
    case IODA3_TBL_LIST:
        tptr = phb->ioda_LIST;
        mask = 7;
        break;
    case IODA3_TBL_MIST:
        tptr = phb->ioda_MIST;
        mask = phb->big_phb ? PNV_PHB4_MAX_MIST : (PNV_PHB4_MAX_MIST >> 1);
        mask -= 1;
        break;
    case IODA3_TBL_RCAM:
        mask = phb->big_phb ? 127 : 63;
        break;
    case IODA3_TBL_MRT:
        mask = phb->big_phb ? 15 : 7;
        break;
    case IODA3_TBL_PESTA:
    case IODA3_TBL_PESTB:
        mask = phb->big_phb ? PNV_PHB4_MAX_PEs : (PNV_PHB4_MAX_PEs >> 1);
        mask -= 1;
        break;
    case IODA3_TBL_TVT:
        tptr = phb->ioda_TVT;
        mask = phb->big_phb ? PNV_PHB4_MAX_TVEs : (PNV_PHB4_MAX_TVEs >> 1);
        mask -= 1;
        break;
    case IODA3_TBL_TCR:
    case IODA3_TBL_TDR:
        mask = phb->big_phb ? 1023 : 511;
        break;
    case IODA3_TBL_MBT:
        tptr = phb->ioda_MBT;
        mask = phb->big_phb ? PNV_PHB4_MAX_MBEs : (PNV_PHB4_MAX_MBEs >> 1);
        mask -= 1;
        break;
    case IODA3_TBL_MDT:
        tptr = phb->ioda_MDT;
        mask = phb->big_phb ? PNV_PHB4_MAX_PEs : (PNV_PHB4_MAX_PEs >> 1);
        mask -= 1;
        break;
    case IODA3_TBL_PEEV:
        tptr = phb->ioda_PEEV;
        mask = phb->big_phb ? PNV_PHB4_MAX_PEEVs : (PNV_PHB4_MAX_PEEVs >> 1);
        mask -= 1;
        break;
    default:
        phb_error(phb, "invalid IODA table %d", table);
        return NULL;
    }
    index &= mask;
    if (out_idx) {
        *out_idx = index;
    }
    if (out_table) {
        *out_table = table;
    }
    if (tptr) {
        tptr += index;
    }
    if (adreg & PHB_IODA_AD_AUTOINC) {
        index = (index + 1) & mask;
        adreg = SETFIELD(PHB_IODA_AD_TADR, adreg, index);
    }

    phb->regs[PHB_IODA_ADDR >> 3] = adreg;
    return tptr;
}

static uint64_t pnv_phb4_ioda_read(PnvPHB4 *phb)
{
    unsigned table, idx;
    uint64_t *tptr;

    tptr = pnv_phb4_ioda_access(phb, &table, &idx);
    if (!tptr) {
        /* Special PESTA case */
        if (table == IODA3_TBL_PESTA) {
            return ((uint64_t)(phb->ioda_PEST_AB[idx] & 1)) << 63;
        } else if (table == IODA3_TBL_PESTB) {
            return ((uint64_t)(phb->ioda_PEST_AB[idx] & 2)) << 62;
        }
        /* Return 0 on unsupported tables, not ff's */
        return 0;
    }
    return *tptr;
}

static void pnv_phb4_ioda_write(PnvPHB4 *phb, uint64_t val)
{
    unsigned table, idx;
    uint64_t *tptr;

    tptr = pnv_phb4_ioda_access(phb, &table, &idx);
    if (!tptr) {
        /* Special PESTA case */
        if (table == IODA3_TBL_PESTA) {
            phb->ioda_PEST_AB[idx] &= ~1;
            phb->ioda_PEST_AB[idx] |= (val >> 63) & 1;
        } else if (table == IODA3_TBL_PESTB) {
            phb->ioda_PEST_AB[idx] &= ~2;
            phb->ioda_PEST_AB[idx] |= (val >> 62) & 2;
        }
        return;
    }

    /* Handle side effects */
    switch (table) {
    case IODA3_TBL_LIST:
        break;
    case IODA3_TBL_MIST: {
        /* Special mask for MIST partial write */
        uint64_t adreg = phb->regs[PHB_IODA_ADDR >> 3];
        uint32_t mmask = GETFIELD(PHB_IODA_AD_MIST_PWV, adreg);
        uint64_t v = *tptr;
        if (mmask == 0) {
            mmask = 0xf;
        }
        if (mmask & 8) {
            v &= 0x0000ffffffffffffull;
            v |= 0xcfff000000000000ull & val;
        }
        if (mmask & 4) {
            v &= 0xffff0000ffffffffull;
            v |= 0x0000cfff00000000ull & val;
        }
        if (mmask & 2) {
            v &= 0xffffffff0000ffffull;
            v |= 0x00000000cfff0000ull & val;
        }
        if (mmask & 1) {
            v &= 0xffffffffffff0000ull;
            v |= 0x000000000000cfffull & val;
        }
        *tptr = v;
        break;
    }
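    /*
     * Worked example of the partial-write masks above (illustrative):
     * PWV = 0x8 updates only the top 16-bit quarter of the entry, and
     * the 0xcfff pattern keeps bits 61:60 of the word unwritable.
     * PWV = 0 is treated as a full write (0xf).
     */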
    case IODA3_TBL_MBT:
        *tptr = val;

        /* Copy across the valid bit to the other half */
        phb->ioda_MBT[idx ^ 1] &= 0x7fffffffffffffffull;
        phb->ioda_MBT[idx ^ 1] |= 0x8000000000000000ull & val;

        /* Update mappings */
        pnv_phb4_check_mbt(phb, idx >> 1);
        break;
    default:
        *tptr = val;
    }
}

static void pnv_phb4_rtc_invalidate(PnvPHB4 *phb, uint64_t val)
{
    PnvPhb4DMASpace *ds;

    /* Always invalidate all for now ... */
    QLIST_FOREACH(ds, &phb->dma_spaces, list) {
        ds->pe_num = PHB_INVALID_PE;
    }
}

static void pnv_phb4_update_msi_regions(PnvPhb4DMASpace *ds)
{
    uint64_t cfg = ds->phb->regs[PHB_PHB4_CONFIG >> 3];

    if (cfg & PHB_PHB4C_32BIT_MSI_EN) {
        if (!memory_region_is_mapped(MEMORY_REGION(&ds->msi32_mr))) {
            memory_region_add_subregion(MEMORY_REGION(&ds->dma_mr),
                                        0xffff0000, &ds->msi32_mr);
        }
    } else {
        if (memory_region_is_mapped(MEMORY_REGION(&ds->msi32_mr))) {
            memory_region_del_subregion(MEMORY_REGION(&ds->dma_mr),
                                        &ds->msi32_mr);
        }
    }

    if (cfg & PHB_PHB4C_64BIT_MSI_EN) {
        if (!memory_region_is_mapped(MEMORY_REGION(&ds->msi64_mr))) {
            memory_region_add_subregion(MEMORY_REGION(&ds->dma_mr),
                                        (1ull << 60), &ds->msi64_mr);
        }
    } else {
        if (memory_region_is_mapped(MEMORY_REGION(&ds->msi64_mr))) {
            memory_region_del_subregion(MEMORY_REGION(&ds->dma_mr),
                                        &ds->msi64_mr);
        }
    }
}

static void pnv_phb4_update_all_msi_regions(PnvPHB4 *phb)
{
    PnvPhb4DMASpace *ds;

    QLIST_FOREACH(ds, &phb->dma_spaces, list) {
        pnv_phb4_update_msi_regions(ds);
    }
}

static void pnv_phb4_update_xsrc(PnvPHB4 *phb)
{
    int shift, flags, i, lsi_base;
    XiveSource *xsrc = &phb->xsrc;

    /* The XIVE source characteristics can be set at run time */
    if (phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_PGSZ_64K) {
        shift = XIVE_ESB_64K;
    } else {
        shift = XIVE_ESB_4K;
    }
    if (phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_STORE_EOI) {
        flags = XIVE_SRC_STORE_EOI;
    } else {
        flags = 0;
    }

    phb->xsrc.esb_shift = shift;
    phb->xsrc.esb_flags = flags;

    lsi_base = GETFIELD(PHB_LSI_SRC_ID, phb->regs[PHB_LSI_SOURCE_ID >> 3]);
    lsi_base <<= 3;

    /* TODO: handle reset values of PHB_LSI_SRC_ID */
    if (!lsi_base) {
        return;
    }

    /* TODO: need a xive_source_irq_reset_lsi() */
    bitmap_zero(xsrc->lsi_map, xsrc->nr_irqs);

    for (i = 0; i < xsrc->nr_irqs; i++) {
        bool msi = (i < lsi_base || i >= (lsi_base + 8));
        if (!msi) {
            xive_source_irq_set_lsi(xsrc, i);
        }
    }
}
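
/*
 * Illustration of the LSI placement computed above: PHB_LSI_SRC_ID
 * selects a block of 8 sources, so a field value of 0xff places the
 * 8 LSIs at source numbers 0x7f8..0x7ff and everything below them is
 * left as MSI.
 */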

static void pnv_phb4_reg_write(void *opaque, hwaddr off, uint64_t val,
                               unsigned size)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    bool changed;

    /* Special case outbound configuration data */
    if ((off & 0xfffc) == PHB_CONFIG_DATA) {
        pnv_phb4_config_write(phb, off & 0x3, size, val);
        return;
    }

    /* Special case RC configuration space */
    if ((off & 0xf800) == PHB_RC_CONFIG_BASE) {
        pnv_phb4_rc_config_write(phb, off & 0x7ff, size, val);
        return;
    }

    /* Other registers are 64-bit only */
    if (size != 8 || off & 0x7) {
        phb_error(phb, "Invalid register access, offset: 0x%"PRIx64" size: %d",
                  off, size);
        return;
    }

    /* Handle masking */
    switch (off) {
    case PHB_LSI_SOURCE_ID:
        val &= PHB_LSI_SRC_ID;
        break;
    case PHB_M64_UPPER_BITS:
        val &= 0xff00000000000000ull;
        break;
    /* TCE Kill */
    case PHB_TCE_KILL:
        /* Clear top 3 bits which HW does to indicate successful queuing */
        val &= ~(PHB_TCE_KILL_ALL | PHB_TCE_KILL_PE | PHB_TCE_KILL_ONE);
        break;
    case PHB_Q_DMA_R:
        /*
         * This is enough logic to make SW happy but we aren't
         * actually quiescing the DMAs
         */
        if (val & PHB_Q_DMA_R_AUTORESET) {
            val = 0;
        } else {
            val &= PHB_Q_DMA_R_QUIESCE_DMA;
        }
        break;
    /* LEM stuff */
    case PHB_LEM_FIR_AND_MASK:
        phb->regs[PHB_LEM_FIR_ACCUM >> 3] &= val;
        return;
    case PHB_LEM_FIR_OR_MASK:
        phb->regs[PHB_LEM_FIR_ACCUM >> 3] |= val;
        return;
    case PHB_LEM_ERROR_AND_MASK:
        phb->regs[PHB_LEM_ERROR_MASK >> 3] &= val;
        return;
    case PHB_LEM_ERROR_OR_MASK:
        phb->regs[PHB_LEM_ERROR_MASK >> 3] |= val;
        return;
    case PHB_LEM_WOF:
        val = 0;
        break;
    /* TODO: More regs ..., maybe create a table with masks... */

    /* Read only registers */
    case PHB_CPU_LOADSTORE_STATUS:
    case PHB_ETU_ERR_SUMMARY:
    case PHB_PHB4_GEN_CAP:
    case PHB_PHB4_TCE_CAP:
    case PHB_PHB4_IRQ_CAP:
    case PHB_PHB4_EEH_CAP:
        return;
    }

    /* Record whether it changed */
    changed = phb->regs[off >> 3] != val;

    /* Store in register cache first */
    phb->regs[off >> 3] = val;

    /* Handle side effects */
    switch (off) {
    case PHB_PHB4_CONFIG:
        if (changed) {
            pnv_phb4_update_all_msi_regions(phb);
        }
        break;
    case PHB_M32_START_ADDR:
    case PHB_M64_UPPER_BITS:
        if (changed) {
            pnv_phb4_check_all_mbt(phb);
        }
        break;

    /* IODA table accesses */
    case PHB_IODA_DATA0:
        pnv_phb4_ioda_write(phb, val);
        break;

    /* RTC invalidation */
    case PHB_RTC_INVALIDATE:
        pnv_phb4_rtc_invalidate(phb, val);
        break;

    /* PHB Control (Affects XIVE source) */
    case PHB_CTRLR:
    case PHB_LSI_SOURCE_ID:
        pnv_phb4_update_xsrc(phb);
        break;

    /* Silent simple writes */
    case PHB_ASN_CMPM:
    case PHB_CONFIG_ADDRESS:
    case PHB_IODA_ADDR:
    case PHB_TCE_KILL:
    case PHB_TCE_SPEC_CTL:
    case PHB_PEST_BAR:
    case PHB_PELTV_BAR:
    case PHB_RTT_BAR:
    case PHB_LEM_FIR_ACCUM:
    case PHB_LEM_ERROR_MASK:
    case PHB_LEM_ACTION0:
    case PHB_LEM_ACTION1:
    case PHB_TCE_TAG_ENABLE:
    case PHB_INT_NOTIFY_ADDR:
    case PHB_INT_NOTIFY_INDEX:
    case PHB_DMARD_SYNC:
        break;

    /* Noise on anything else */
    default:
        qemu_log_mask(LOG_UNIMP, "phb4: reg_write 0x%"PRIx64"=%"PRIx64"\n",
                      off, val);
    }
}

static uint64_t pnv_phb4_reg_read(void *opaque, hwaddr off, unsigned size)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    uint64_t val;

    if ((off & 0xfffc) == PHB_CONFIG_DATA) {
        return pnv_phb4_config_read(phb, off & 0x3, size);
    }

    /* Special case RC configuration space */
    if ((off & 0xf800) == PHB_RC_CONFIG_BASE) {
        return pnv_phb4_rc_config_read(phb, off & 0x7ff, size);
    }

    /* Other registers are 64-bit only */
    if (size != 8 || off & 0x7) {
        phb_error(phb, "Invalid register access, offset: 0x%"PRIx64" size: %d",
                  off, size);
        return ~0ull;
    }

    /* Default read from cache */
    val = phb->regs[off >> 3];

    switch (off) {
    case PHB_VERSION:
        return phb->version;

    /* Read-only */
    case PHB_PHB4_GEN_CAP:
        return 0xe4b8000000000000ull;
    case PHB_PHB4_TCE_CAP:
        return phb->big_phb ? 0x4008440000000400ull : 0x2008440000000200ull;
    case PHB_PHB4_IRQ_CAP:
        return phb->big_phb ? 0x0800000000001000ull : 0x0800000000000800ull;
    case PHB_PHB4_EEH_CAP:
        return phb->big_phb ? 0x2000000000000000ull : 0x1000000000000000ull;

    /* IODA table accesses */
    case PHB_IODA_DATA0:
        return pnv_phb4_ioda_read(phb);

    /* Link training always appears trained */
    case PHB_PCIE_DLP_TRAIN_CTL:
        /* TODO: Do something sensible with speed ? */
        return PHB_PCIE_DLP_INBAND_PRESENCE | PHB_PCIE_DLP_TL_LINKACT;

    /* DMA read sync: make it look like it's complete */
    case PHB_DMARD_SYNC:
        return PHB_DMARD_SYNC_COMPLETE;

    /* Silent simple reads */
    case PHB_LSI_SOURCE_ID:
    case PHB_CPU_LOADSTORE_STATUS:
    case PHB_ASN_CMPM:
    case PHB_PHB4_CONFIG:
    case PHB_M32_START_ADDR:
    case PHB_CONFIG_ADDRESS:
    case PHB_IODA_ADDR:
    case PHB_RTC_INVALIDATE:
    case PHB_TCE_KILL:
    case PHB_TCE_SPEC_CTL:
    case PHB_PEST_BAR:
    case PHB_PELTV_BAR:
    case PHB_RTT_BAR:
    case PHB_M64_UPPER_BITS:
    case PHB_CTRLR:
    case PHB_LEM_FIR_ACCUM:
    case PHB_LEM_ERROR_MASK:
    case PHB_LEM_ACTION0:
    case PHB_LEM_ACTION1:
    case PHB_TCE_TAG_ENABLE:
    case PHB_INT_NOTIFY_ADDR:
    case PHB_INT_NOTIFY_INDEX:
    case PHB_Q_DMA_R:
    case PHB_ETU_ERR_SUMMARY:
        break;

    /* Noise on anything else */
    default:
        qemu_log_mask(LOG_UNIMP, "phb4: reg_read 0x%"PRIx64"=%"PRIx64"\n",
                      off, val);
    }
    return val;
}

static const MemoryRegionOps pnv_phb4_reg_ops = {
    .read = pnv_phb4_reg_read,
    .write = pnv_phb4_reg_write,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .endianness = DEVICE_BIG_ENDIAN,
};
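
/*
 * The XSCOM interface below exposes the MMIO register space through a
 * pair of indirect registers. A minimal access sequence, under the
 * layout modelled here (illustration, not firmware code): write
 * PHB_SCOM_HV_IND_ADDR with the VALID bit, the optional 4-byte and
 * AUTOINC flags and a register offset, then read or write
 * PHB_SCOM_HV_IND_DATA; with AUTOINC set, the offset advances by the
 * access size after each data access, wrapping at 0x3fff.
 */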

static uint64_t pnv_phb4_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    uint32_t reg = addr >> 3;
    uint64_t val;
    hwaddr offset;

    switch (reg) {
    case PHB_SCOM_HV_IND_ADDR:
        return phb->scom_hv_ind_addr_reg;

    case PHB_SCOM_HV_IND_DATA:
        if (!(phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_VALID)) {
            phb_error(phb, "Invalid indirect address");
            return ~0ull;
        }
        size = (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_4B) ? 4 : 8;
        offset = GETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR, phb->scom_hv_ind_addr_reg);
        val = pnv_phb4_reg_read(phb, offset, size);
        if (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_AUTOINC) {
            offset += size;
            offset &= 0x3fff;
            phb->scom_hv_ind_addr_reg = SETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR,
                                                 phb->scom_hv_ind_addr_reg,
                                                 offset);
        }
        return val;
    case PHB_SCOM_ETU_LEM_FIR:
    case PHB_SCOM_ETU_LEM_FIR_AND:
    case PHB_SCOM_ETU_LEM_FIR_OR:
    case PHB_SCOM_ETU_LEM_FIR_MSK:
    case PHB_SCOM_ETU_LEM_ERR_MSK_AND:
    case PHB_SCOM_ETU_LEM_ERR_MSK_OR:
    case PHB_SCOM_ETU_LEM_ACT0:
    case PHB_SCOM_ETU_LEM_ACT1:
    case PHB_SCOM_ETU_LEM_WOF:
        offset = ((reg - PHB_SCOM_ETU_LEM_FIR) << 3) + PHB_LEM_FIR_ACCUM;
        return pnv_phb4_reg_read(phb, offset, size);
    case PHB_SCOM_ETU_PMON_CONFIG:
    case PHB_SCOM_ETU_PMON_CTR0:
    case PHB_SCOM_ETU_PMON_CTR1:
    case PHB_SCOM_ETU_PMON_CTR2:
    case PHB_SCOM_ETU_PMON_CTR3:
        offset = ((reg - PHB_SCOM_ETU_PMON_CONFIG) << 3) + PHB_PERFMON_CONFIG;
        return pnv_phb4_reg_read(phb, offset, size);

    default:
        qemu_log_mask(LOG_UNIMP, "phb4: xscom_read 0x%"HWADDR_PRIx"\n", addr);
        return ~0ull;
    }
}

static void pnv_phb4_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    uint32_t reg = addr >> 3;
    hwaddr offset;

    switch (reg) {
    case PHB_SCOM_HV_IND_ADDR:
        phb->scom_hv_ind_addr_reg = val & 0xe000000000001fff;
        break;
    case PHB_SCOM_HV_IND_DATA:
        if (!(phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_VALID)) {
            phb_error(phb, "Invalid indirect address");
            break;
        }
        size = (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_4B) ? 4 : 8;
        offset = GETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR, phb->scom_hv_ind_addr_reg);
        pnv_phb4_reg_write(phb, offset, val, size);
        if (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_AUTOINC) {
            offset += size;
            offset &= 0x3fff;
            phb->scom_hv_ind_addr_reg = SETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR,
                                                 phb->scom_hv_ind_addr_reg,
                                                 offset);
        }
        break;
    case PHB_SCOM_ETU_LEM_FIR:
    case PHB_SCOM_ETU_LEM_FIR_AND:
    case PHB_SCOM_ETU_LEM_FIR_OR:
    case PHB_SCOM_ETU_LEM_FIR_MSK:
    case PHB_SCOM_ETU_LEM_ERR_MSK_AND:
    case PHB_SCOM_ETU_LEM_ERR_MSK_OR:
    case PHB_SCOM_ETU_LEM_ACT0:
    case PHB_SCOM_ETU_LEM_ACT1:
    case PHB_SCOM_ETU_LEM_WOF:
        offset = ((reg - PHB_SCOM_ETU_LEM_FIR) << 3) + PHB_LEM_FIR_ACCUM;
        pnv_phb4_reg_write(phb, offset, val, size);
        break;
    case PHB_SCOM_ETU_PMON_CONFIG:
    case PHB_SCOM_ETU_PMON_CTR0:
    case PHB_SCOM_ETU_PMON_CTR1:
    case PHB_SCOM_ETU_PMON_CTR2:
    case PHB_SCOM_ETU_PMON_CTR3:
        offset = ((reg - PHB_SCOM_ETU_PMON_CONFIG) << 3) + PHB_PERFMON_CONFIG;
        pnv_phb4_reg_write(phb, offset, val, size);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "phb4: xscom_write 0x%"HWADDR_PRIx
                      "=%"PRIx64"\n", addr, val);
    }
}

const MemoryRegionOps pnv_phb4_xscom_ops = {
    .read = pnv_phb4_xscom_read,
    .write = pnv_phb4_xscom_write,
    .valid.min_access_size = 8,
    .valid.max_access_size = 8,
    .impl.min_access_size = 8,
    .impl.max_access_size = 8,
    .endianness = DEVICE_BIG_ENDIAN,
};

static uint64_t pnv_pec_stk_nest_xscom_read(void *opaque, hwaddr addr,
                                            unsigned size)
{
    PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(opaque);
    uint32_t reg = addr >> 3;

    /* TODO: add list of allowed registers and error out if not */
    return stack->nest_regs[reg];
}
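
/*
 * Illustrative BAR decode, matching pnv_pec_stk_update_map() below:
 * the MMIO BAR registers hold the system address shifted left by 8,
 * and the MASK register encodes the window size as
 * size = ((~mask) >> 8) + 1, so a MASK of 0xfffffc0000000000 decodes
 * to 0x400000000, i.e. a 16GB window.
 */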

static void pnv_pec_stk_update_map(PnvPhb4PecStack *stack)
{
    PnvPhb4PecState *pec = stack->pec;
    MemoryRegion *sysmem = get_system_memory();
    uint64_t bar_en = stack->nest_regs[PEC_NEST_STK_BAR_EN];
    uint64_t bar, mask, size;
    char name[64];

    /*
     * NOTE: This will really not work well if those are remapped
     * after the PHB has created its sub regions. We could do better
     * if we had a way to resize regions but we don't really care
     * that much in practice as the stuff below really only happens
     * once early during boot
     */

    /* Handle unmaps */
    if (memory_region_is_mapped(&stack->mmbar0) &&
        !(bar_en & PEC_NEST_STK_BAR_EN_MMIO0)) {
        memory_region_del_subregion(sysmem, &stack->mmbar0);
    }
    if (memory_region_is_mapped(&stack->mmbar1) &&
        !(bar_en & PEC_NEST_STK_BAR_EN_MMIO1)) {
        memory_region_del_subregion(sysmem, &stack->mmbar1);
    }
    if (memory_region_is_mapped(&stack->phbbar) &&
        !(bar_en & PEC_NEST_STK_BAR_EN_PHB)) {
        memory_region_del_subregion(sysmem, &stack->phbbar);
    }
    if (memory_region_is_mapped(&stack->intbar) &&
        !(bar_en & PEC_NEST_STK_BAR_EN_INT)) {
        memory_region_del_subregion(sysmem, &stack->intbar);
    }

    /* Update PHB */
    pnv_phb4_update_regions(stack);

    /* Handle maps */
    if (!memory_region_is_mapped(&stack->mmbar0) &&
        (bar_en & PEC_NEST_STK_BAR_EN_MMIO0)) {
        bar = stack->nest_regs[PEC_NEST_STK_MMIO_BAR0] >> 8;
        mask = stack->nest_regs[PEC_NEST_STK_MMIO_BAR0_MASK];
        size = ((~mask) >> 8) + 1;
        snprintf(name, sizeof(name), "pec-%d.%d-stack-%d-mmio0",
                 pec->chip_id, pec->index, stack->stack_no);
        memory_region_init(&stack->mmbar0, OBJECT(stack), name, size);
        memory_region_add_subregion(sysmem, bar, &stack->mmbar0);
        stack->mmio0_base = bar;
        stack->mmio0_size = size;
    }
    if (!memory_region_is_mapped(&stack->mmbar1) &&
        (bar_en & PEC_NEST_STK_BAR_EN_MMIO1)) {
        bar = stack->nest_regs[PEC_NEST_STK_MMIO_BAR1] >> 8;
        mask = stack->nest_regs[PEC_NEST_STK_MMIO_BAR1_MASK];
        size = ((~mask) >> 8) + 1;
        snprintf(name, sizeof(name), "pec-%d.%d-stack-%d-mmio1",
                 pec->chip_id, pec->index, stack->stack_no);
        memory_region_init(&stack->mmbar1, OBJECT(stack), name, size);
        memory_region_add_subregion(sysmem, bar, &stack->mmbar1);
        stack->mmio1_base = bar;
        stack->mmio1_size = size;
    }
    if (!memory_region_is_mapped(&stack->phbbar) &&
        (bar_en & PEC_NEST_STK_BAR_EN_PHB)) {
        bar = stack->nest_regs[PEC_NEST_STK_PHB_REGS_BAR] >> 8;
        size = PNV_PHB4_NUM_REGS << 3;
        snprintf(name, sizeof(name), "pec-%d.%d-stack-%d-phb",
                 pec->chip_id, pec->index, stack->stack_no);
        memory_region_init(&stack->phbbar, OBJECT(stack), name, size);
        memory_region_add_subregion(sysmem, bar, &stack->phbbar);
    }
    if (!memory_region_is_mapped(&stack->intbar) &&
        (bar_en & PEC_NEST_STK_BAR_EN_INT)) {
        bar = stack->nest_regs[PEC_NEST_STK_INT_BAR] >> 8;
        size = PNV_PHB4_MAX_INTs << 16;
        snprintf(name, sizeof(name), "pec-%d.%d-stack-%d-int",
                 stack->pec->chip_id, stack->pec->index, stack->stack_no);
        memory_region_init(&stack->intbar, OBJECT(stack), name, size);
        memory_region_add_subregion(sysmem, bar, &stack->intbar);
    }

    /* Update PHB */
    pnv_phb4_update_regions(stack);
}
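
/*
 * Usage note (inferred from the checks in the write handler below):
 * firmware is expected to program the BAR and MASK registers first
 * and only then set the corresponding enable bit in
 * PEC_NEST_STK_BAR_EN, which triggers pnv_pec_stk_update_map();
 * rewriting a BAR while it is enabled is flagged as unsupported.
 */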

static void pnv_pec_stk_nest_xscom_write(void *opaque, hwaddr addr,
                                         uint64_t val, unsigned size)
{
    PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(opaque);
    PnvPhb4PecState *pec = stack->pec;
    uint32_t reg = addr >> 3;

    switch (reg) {
    case PEC_NEST_STK_PCI_NEST_FIR:
        stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR] = val;
        break;
    case PEC_NEST_STK_PCI_NEST_FIR_CLR:
        stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR] &= val;
        break;
    case PEC_NEST_STK_PCI_NEST_FIR_SET:
        stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR] |= val;
        break;
    case PEC_NEST_STK_PCI_NEST_FIR_MSK:
        stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR_MSK] = val;
        break;
    case PEC_NEST_STK_PCI_NEST_FIR_MSKC:
        stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR_MSK] &= val;
        break;
    case PEC_NEST_STK_PCI_NEST_FIR_MSKS:
        stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR_MSK] |= val;
        break;
    case PEC_NEST_STK_PCI_NEST_FIR_ACT0:
    case PEC_NEST_STK_PCI_NEST_FIR_ACT1:
        stack->nest_regs[reg] = val;
        break;
    case PEC_NEST_STK_PCI_NEST_FIR_WOF:
        stack->nest_regs[reg] = 0;
        break;
    case PEC_NEST_STK_ERR_REPORT_0:
    case PEC_NEST_STK_ERR_REPORT_1:
    case PEC_NEST_STK_PBCQ_GNRL_STATUS:
        /* Flag error ? */
        break;
    case PEC_NEST_STK_PBCQ_MODE:
        stack->nest_regs[reg] = val & 0xff00000000000000ull;
        break;
    case PEC_NEST_STK_MMIO_BAR0:
    case PEC_NEST_STK_MMIO_BAR0_MASK:
    case PEC_NEST_STK_MMIO_BAR1:
    case PEC_NEST_STK_MMIO_BAR1_MASK:
        if (stack->nest_regs[PEC_NEST_STK_BAR_EN] &
            (PEC_NEST_STK_BAR_EN_MMIO0 |
             PEC_NEST_STK_BAR_EN_MMIO1)) {
            phb_pec_error(pec, "Changing enabled BAR unsupported");
        }
        stack->nest_regs[reg] = val & 0xffffffffff000000ull;
        break;
    case PEC_NEST_STK_PHB_REGS_BAR:
        if (stack->nest_regs[PEC_NEST_STK_BAR_EN] & PEC_NEST_STK_BAR_EN_PHB) {
            phb_pec_error(pec, "Changing enabled BAR unsupported");
        }
        stack->nest_regs[reg] = val & 0xffffffffffc00000ull;
        break;
    case PEC_NEST_STK_INT_BAR:
        if (stack->nest_regs[PEC_NEST_STK_BAR_EN] & PEC_NEST_STK_BAR_EN_INT) {
            phb_pec_error(pec, "Changing enabled BAR unsupported");
        }
        stack->nest_regs[reg] = val & 0xfffffff000000000ull;
        break;
    case PEC_NEST_STK_BAR_EN:
        stack->nest_regs[reg] = val & 0xf000000000000000ull;
        pnv_pec_stk_update_map(stack);
        break;
    case PEC_NEST_STK_DATA_FRZ_TYPE:
    case PEC_NEST_STK_PBCQ_TUN_BAR:
        /* Not used for now */
        stack->nest_regs[reg] = val;
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "phb4_pec: nest_xscom_write 0x%"HWADDR_PRIx
                      "=%"PRIx64"\n", addr, val);
    }
}

static const MemoryRegionOps pnv_pec_stk_nest_xscom_ops = {
    .read = pnv_pec_stk_nest_xscom_read,
    .write = pnv_pec_stk_nest_xscom_write,
    .valid.min_access_size = 8,
    .valid.max_access_size = 8,
    .impl.min_access_size = 8,
    .impl.max_access_size = 8,
    .endianness = DEVICE_BIG_ENDIAN,
};

static uint64_t pnv_pec_stk_pci_xscom_read(void *opaque, hwaddr addr,
                                           unsigned size)
{
    PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(opaque);
    uint32_t reg = addr >> 3;

    /* TODO: add list of allowed registers and error out if not */
    return stack->pci_regs[reg];
}

static void pnv_pec_stk_pci_xscom_write(void *opaque, hwaddr addr,
                                        uint64_t val, unsigned size)
{
    PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(opaque);
    uint32_t reg = addr >> 3;

    /*
     * These are the PCI-side stack registers: store them in pci_regs,
     * not nest_regs, so that the read handler above returns what was
     * written.
     */
    switch (reg) {
    case PEC_PCI_STK_PCI_FIR:
        stack->pci_regs[reg] = val;
        break;
    case PEC_PCI_STK_PCI_FIR_CLR:
        stack->pci_regs[PEC_PCI_STK_PCI_FIR] &= val;
        break;
    case PEC_PCI_STK_PCI_FIR_SET:
        stack->pci_regs[PEC_PCI_STK_PCI_FIR] |= val;
        break;
    case PEC_PCI_STK_PCI_FIR_MSK:
        stack->pci_regs[reg] = val;
        break;
    case PEC_PCI_STK_PCI_FIR_MSKC:
        stack->pci_regs[PEC_PCI_STK_PCI_FIR_MSK] &= val;
        break;
    case PEC_PCI_STK_PCI_FIR_MSKS:
        stack->pci_regs[PEC_PCI_STK_PCI_FIR_MSK] |= val;
        break;
    case PEC_PCI_STK_PCI_FIR_ACT0:
    case PEC_PCI_STK_PCI_FIR_ACT1:
        stack->pci_regs[reg] = val;
        break;
    case PEC_PCI_STK_PCI_FIR_WOF:
        stack->pci_regs[reg] = 0;
        break;
    case PEC_PCI_STK_ETU_RESET:
        stack->pci_regs[reg] = val & 0x8000000000000000ull;
        /* TODO: Implement reset */
        break;
    case PEC_PCI_STK_PBAIB_ERR_REPORT:
        break;
    case PEC_PCI_STK_PBAIB_TX_CMD_CRED:
    case PEC_PCI_STK_PBAIB_TX_DAT_CRED:
        stack->pci_regs[reg] = val;
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "phb4_pec_stk: pci_xscom_write 0x%"HWADDR_PRIx
                      "=%"PRIx64"\n", addr, val);
    }
}

static const MemoryRegionOps pnv_pec_stk_pci_xscom_ops = {
    .read = pnv_pec_stk_pci_xscom_read,
    .write = pnv_pec_stk_pci_xscom_write,
    .valid.min_access_size = 8,
    .valid.max_access_size = 8,
    .impl.min_access_size = 8,
    .impl.max_access_size = 8,
    .endianness = DEVICE_BIG_ENDIAN,
};

static int pnv_phb4_map_irq(PCIDevice *pci_dev, int irq_num)
{
    /* Check that out properly ... */
    return irq_num & 3;
}

static void pnv_phb4_set_irq(void *opaque, int irq_num, int level)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    uint32_t lsi_base;

    /* LSI only ... */
    if (irq_num > 3) {
        phb_error(phb, "IRQ %x is not an LSI", irq_num);
    }
    lsi_base = GETFIELD(PHB_LSI_SRC_ID, phb->regs[PHB_LSI_SOURCE_ID >> 3]);
    lsi_base <<= 3;
    qemu_set_irq(phb->qirqs[lsi_base + irq_num], level);
}

static bool pnv_phb4_resolve_pe(PnvPhb4DMASpace *ds)
{
    uint64_t rtt, addr;
    uint16_t rte;
    int bus_num;
    int num_PEs;

    /* Already resolved ? */
    if (ds->pe_num != PHB_INVALID_PE) {
        return true;
    }

    /* We need to lookup the RTT */
    rtt = ds->phb->regs[PHB_RTT_BAR >> 3];
    if (!(rtt & PHB_RTT_BAR_ENABLE)) {
        phb_error(ds->phb, "DMA with RTT BAR disabled !");
        /* Set error bits ? fence ? ... */
        return false;
    }

    /* Read RTE */
    bus_num = pci_bus_num(ds->bus);
    addr = rtt & PHB_RTT_BASE_ADDRESS_MASK;
    addr += 2 * PCI_BUILD_BDF(bus_num, ds->devfn);
    if (dma_memory_read(&address_space_memory, addr, &rte,
                        sizeof(rte), MEMTXATTRS_UNSPECIFIED)) {
        phb_error(ds->phb, "Failed to read RTT entry at 0x%"PRIx64, addr);
        /* Set error bits ? fence ? ... */
        return false;
    }
    rte = be16_to_cpu(rte);

    /* Fail upon reading of invalid PE# */
    num_PEs = ds->phb->big_phb ? PNV_PHB4_MAX_PEs : (PNV_PHB4_MAX_PEs >> 1);
    if (rte >= num_PEs) {
        phb_error(ds->phb, "RTE for RID 0x%x invalid (%04x)", ds->devfn, rte);
        rte &= num_PEs - 1;
    }
    ds->pe_num = rte;
    return true;
}

static void pnv_phb4_translate_tve(PnvPhb4DMASpace *ds, hwaddr addr,
                                   bool is_write, uint64_t tve,
                                   IOMMUTLBEntry *tlb)
{
    uint64_t tta = GETFIELD(IODA3_TVT_TABLE_ADDR, tve);
    int32_t lev = GETFIELD(IODA3_TVT_NUM_LEVELS, tve);
    uint32_t tts = GETFIELD(IODA3_TVT_TCE_TABLE_SIZE, tve);
    uint32_t tps = GETFIELD(IODA3_TVT_IO_PSIZE, tve);

    /* Invalid levels */
    if (lev > 4) {
        phb_error(ds->phb, "Invalid #levels in TVE %d", lev);
        return;
    }

    /* Invalid entry */
    if (tts == 0) {
        phb_error(ds->phb, "Access to invalid TVE");
        return;
    }

    /* IO Page Size of 0 means untranslated, else use TCEs */
    if (tps == 0) {
        /* TODO: Handle boundaries */

        /* Use 4k pages like q35 ... for now */
        tlb->iova = addr & 0xfffffffffffff000ull;
        tlb->translated_addr = addr & 0x0003fffffffff000ull;
        tlb->addr_mask = 0xfffull;
        tlb->perm = IOMMU_RW;
    } else {
        uint32_t tce_shift, tbl_shift, sh;
        uint64_t base, taddr, tce, tce_mask;

        /* Address bits per bottom level TCE entry */
        tce_shift = tps + 11;

        /* Address bits per table level */
        tbl_shift = tts + 8;

        /* Top level table base address */
        base = tta << 12;

        /* Total shift to first level */
        sh = tbl_shift * lev + tce_shift;
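
        /*
         * Worked example (illustrative): a single-level table
         * (lev = 0) with tps = 1 (4K IO pages) and tts = 2 gives
         * tce_shift = 12, tbl_shift = 10 and sh = 12, so the TCE
         * index is addr[21:12] and the walk below reads one 8-byte
         * TCE at base + index * 8.
         */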

        /* TODO: Limit to support IO page sizes */

        /* TODO: Multi-level untested */
        while ((lev--) >= 0) {
            /* Grab the TCE address */
            taddr = base | (((addr >> sh) & ((1ul << tbl_shift) - 1)) << 3);
            if (dma_memory_read(&address_space_memory, taddr, &tce,
                                sizeof(tce), MEMTXATTRS_UNSPECIFIED)) {
                phb_error(ds->phb, "Failed to read TCE at 0x%"PRIx64, taddr);
                return;
            }
            tce = be64_to_cpu(tce);

            /* Check permission for indirect TCE */
            if ((lev >= 0) && !(tce & 3)) {
                phb_error(ds->phb, "Invalid indirect TCE at 0x%"PRIx64, taddr);
                phb_error(ds->phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr,
                          is_write ? 'W' : 'R', tve);
                phb_error(ds->phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d",
                          tta, lev, tts, tps);
                return;
            }
            sh -= tbl_shift;
            base = tce & ~0xfffull;
        }

        /* We exit the loop with TCE being the final TCE */
        tce_mask = ~((1ull << tce_shift) - 1);
        tlb->iova = addr & tce_mask;
        tlb->translated_addr = tce & tce_mask;
        tlb->addr_mask = ~tce_mask;
        tlb->perm = tce & 3;
        if ((is_write && !(tce & 2)) || (!is_write && !(tce & 1))) {
            phb_error(ds->phb, "TCE access fault at 0x%"PRIx64, taddr);
            phb_error(ds->phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr,
                      is_write ? 'W' : 'R', tve);
            phb_error(ds->phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d",
                      tta, lev, tts, tps);
        }
    }
}

static IOMMUTLBEntry pnv_phb4_translate_iommu(IOMMUMemoryRegion *iommu,
                                              hwaddr addr,
                                              IOMMUAccessFlags flag,
                                              int iommu_idx)
{
    PnvPhb4DMASpace *ds = container_of(iommu, PnvPhb4DMASpace, dma_mr);
    int tve_sel;
    uint64_t tve, cfg;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    /* Resolve PE# */
    if (!pnv_phb4_resolve_pe(ds)) {
        phb_error(ds->phb, "Failed to resolve PE# for bus @%p (%d) devfn 0x%x",
                  ds->bus, pci_bus_num(ds->bus), ds->devfn);
        return ret;
    }

    /* Check top bits */
    switch (addr >> 60) {
    case 00:
        /* DMA or 32-bit MSI ? */
        cfg = ds->phb->regs[PHB_PHB4_CONFIG >> 3];
        if ((cfg & PHB_PHB4C_32BIT_MSI_EN) &&
            ((addr & 0xffffffffffff0000ull) == 0xffff0000ull)) {
            phb_error(ds->phb, "xlate on 32-bit MSI region");
            return ret;
        }
        /* Choose TVE XXX Use PHB4 Control Register */
        tve_sel = (addr >> 59) & 1;
        tve = ds->phb->ioda_TVT[ds->pe_num * 2 + tve_sel];
        pnv_phb4_translate_tve(ds, addr, flag & IOMMU_WO, tve, &ret);
        break;
    case 01:
        phb_error(ds->phb, "xlate on 64-bit MSI region");
        break;
    default:
        phb_error(ds->phb, "xlate on unsupported address 0x%"PRIx64, addr);
    }
    return ret;
}

#define TYPE_PNV_PHB4_IOMMU_MEMORY_REGION "pnv-phb4-iommu-memory-region"
DECLARE_INSTANCE_CHECKER(IOMMUMemoryRegion, PNV_PHB4_IOMMU_MEMORY_REGION,
                         TYPE_PNV_PHB4_IOMMU_MEMORY_REGION)

static void pnv_phb4_iommu_memory_region_class_init(ObjectClass *klass,
                                                    void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = pnv_phb4_translate_iommu;
}

static const TypeInfo pnv_phb4_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_PNV_PHB4_IOMMU_MEMORY_REGION,
    .class_init = pnv_phb4_iommu_memory_region_class_init,
};

/*
 * Return the index/phb-id of a PHB4 that belongs to a
 * pec->stacks[stack_index] stack.
 */
int pnv_phb4_pec_get_phb_id(PnvPhb4PecState *pec, int stack_index)
{
    PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec);
    int index = pec->index;
    int offset = 0;

    while (index--) {
        offset += pecc->num_stacks[index];
    }

    return offset + stack_index;
}
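
/*
 * Example (assuming the POWER9 layout of 1, 2 and 3 stacks on PEC0,
 * PEC1 and PEC2 respectively): PEC0 provides PHB0, PEC1 provides
 * PHB1..PHB2 and PEC2 provides PHB3..PHB5, so
 * pnv_phb4_pec_get_phb_id(pec2, 1) returns 4.
 */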

/*
 * MSI/MSIX memory region implementation.
 * The handler handles both MSI and MSIX.
 */
static void pnv_phb4_msi_write(void *opaque, hwaddr addr,
                               uint64_t data, unsigned size)
{
    PnvPhb4DMASpace *ds = opaque;
    PnvPHB4 *phb = ds->phb;

    uint32_t src = ((addr >> 4) & 0xffff) | (data & 0x1f);

    /* Resolve PE# */
    if (!pnv_phb4_resolve_pe(ds)) {
        phb_error(phb, "Failed to resolve PE# for bus @%p (%d) devfn 0x%x",
                  ds->bus, pci_bus_num(ds->bus), ds->devfn);
        return;
    }

    /* TODO: Check it doesn't collide with LSIs */
    if (src >= phb->xsrc.nr_irqs) {
        phb_error(phb, "MSI %d out of bounds", src);
        return;
    }

    /* TODO: check PE/MSI assignment */

    qemu_irq_pulse(phb->qirqs[src]);
}

/* There is no .read as the read result is undefined by PCI spec */
static uint64_t pnv_phb4_msi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvPhb4DMASpace *ds = opaque;

    phb_error(ds->phb, "Invalid MSI read @ 0x%" HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_phb4_msi_ops = {
    .read = pnv_phb4_msi_read,
    .write = pnv_phb4_msi_write,
    .endianness = DEVICE_LITTLE_ENDIAN
};

static PnvPhb4DMASpace *pnv_phb4_dma_find(PnvPHB4 *phb, PCIBus *bus, int devfn)
{
    PnvPhb4DMASpace *ds;

    QLIST_FOREACH(ds, &phb->dma_spaces, list) {
        if (ds->bus == bus && ds->devfn == devfn) {
            break;
        }
    }
    return ds;
}

static AddressSpace *pnv_phb4_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    PnvPHB4 *phb = opaque;
    PnvPhb4DMASpace *ds;
    char name[32];

    ds = pnv_phb4_dma_find(phb, bus, devfn);

    if (ds == NULL) {
        ds = g_malloc0(sizeof(PnvPhb4DMASpace));
        ds->bus = bus;
        ds->devfn = devfn;
        ds->pe_num = PHB_INVALID_PE;
        ds->phb = phb;
        snprintf(name, sizeof(name), "phb4-%d.%d-iommu", phb->chip_id,
                 phb->phb_id);
        memory_region_init_iommu(&ds->dma_mr, sizeof(ds->dma_mr),
                                 TYPE_PNV_PHB4_IOMMU_MEMORY_REGION,
                                 OBJECT(phb), name, UINT64_MAX);
        address_space_init(&ds->dma_as, MEMORY_REGION(&ds->dma_mr),
                           name);
        memory_region_init_io(&ds->msi32_mr, OBJECT(phb), &pnv_phb4_msi_ops,
                              ds, "msi32", 0x10000);
        memory_region_init_io(&ds->msi64_mr, OBJECT(phb), &pnv_phb4_msi_ops,
                              ds, "msi64", 0x100000);
        pnv_phb4_update_msi_regions(ds);

        QLIST_INSERT_HEAD(&phb->dma_spaces, ds, list);
    }
    return &ds->dma_as;
}
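
/*
 * Note on the lazy allocation above: a PnvPhb4DMASpace is created the
 * first time the IOMMU hook routes a (bus, devfn) pair here, and it
 * lives on the phb->dma_spaces list afterwards; the MSI windows are
 * mapped into it according to PHB_PHB4_CONFIG at creation time and
 * updated again on later config writes.
 */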

static void pnv_phb4_xscom_realize(PnvPHB4 *phb)
{
    PnvPhb4PecStack *stack = phb->stack;
    PnvPhb4PecState *pec = stack->pec;
    PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec);
    uint32_t pec_nest_base;
    uint32_t pec_pci_base;
    char name[64];

    assert(pec);

    /* Initialize the XSCOM regions for the stack registers */
    snprintf(name, sizeof(name), "xscom-pec-%d.%d-nest-stack-%d",
             pec->chip_id, pec->index, stack->stack_no);
    pnv_xscom_region_init(&stack->nest_regs_mr, OBJECT(stack),
                          &pnv_pec_stk_nest_xscom_ops, stack, name,
                          PHB4_PEC_NEST_STK_REGS_COUNT);

    snprintf(name, sizeof(name), "xscom-pec-%d.%d-pci-stack-%d",
             pec->chip_id, pec->index, stack->stack_no);
    pnv_xscom_region_init(&stack->pci_regs_mr, OBJECT(stack),
                          &pnv_pec_stk_pci_xscom_ops, stack, name,
                          PHB4_PEC_PCI_STK_REGS_COUNT);

    /* PHB pass-through */
    snprintf(name, sizeof(name), "xscom-pec-%d.%d-pci-stack-%d-phb",
             pec->chip_id, pec->index, stack->stack_no);
    pnv_xscom_region_init(&stack->phb_regs_mr, OBJECT(phb),
                          &pnv_phb4_xscom_ops, phb, name, 0x40);

    pec_nest_base = pecc->xscom_nest_base(pec);
    pec_pci_base = pecc->xscom_pci_base(pec);

    /* Populate the XSCOM address space. */
    pnv_xscom_add_subregion(pec->chip,
                            pec_nest_base + 0x40 * (stack->stack_no + 1),
                            &stack->nest_regs_mr);
    pnv_xscom_add_subregion(pec->chip,
                            pec_pci_base + 0x40 * (stack->stack_no + 1),
                            &stack->pci_regs_mr);
    pnv_xscom_add_subregion(pec->chip,
                            pec_pci_base + PNV9_XSCOM_PEC_PCI_STK0 +
                            0x40 * stack->stack_no,
                            &stack->phb_regs_mr);
}

static void pnv_phb4_instance_init(Object *obj)
{
    PnvPHB4 *phb = PNV_PHB4(obj);

    QLIST_INIT(&phb->dma_spaces);

    /* XIVE interrupt source object */
    object_initialize_child(obj, "source", &phb->xsrc, TYPE_XIVE_SOURCE);
}

static void pnv_phb4_realize(DeviceState *dev, Error **errp)
{
    PnvPHB4 *phb = PNV_PHB4(dev);
    PCIHostState *pci = PCI_HOST_BRIDGE(dev);
    XiveSource *xsrc = &phb->xsrc;
    int nr_irqs;
    char name[32];

    assert(phb->stack);

    /* Set the "big_phb" flag */
    phb->big_phb = phb->phb_id == 0 || phb->phb_id == 3;
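    /*
     * Assumption behind the check above: on POWER9, PHB0 (PEC0) and
     * PHB3 (first stack of PEC2) are the "big" PHBs, with roughly
     * twice the PEs, TVEs, MMIO windows and interrupt sources of the
     * small ones, as reflected by the big_phb conditionals in this
     * file.
     */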

    /* Controller Registers */
    snprintf(name, sizeof(name), "phb4-%d.%d-regs", phb->chip_id,
             phb->phb_id);
    memory_region_init_io(&phb->mr_regs, OBJECT(phb), &pnv_phb4_reg_ops, phb,
                          name, 0x2000);

    /*
     * PHB4 doesn't support IO space. However, qemu gets very upset if
     * we don't have an IO region to anchor IO BARs onto so we just
     * initialize one which we never hook up to anything
     */
    snprintf(name, sizeof(name), "phb4-%d.%d-pci-io", phb->chip_id,
             phb->phb_id);
    memory_region_init(&phb->pci_io, OBJECT(phb), name, 0x10000);

    snprintf(name, sizeof(name), "phb4-%d.%d-pci-mmio", phb->chip_id,
             phb->phb_id);
    memory_region_init(&phb->pci_mmio, OBJECT(phb), name,
                       PCI_MMIO_TOTAL_SIZE);

    pci->bus = pci_register_root_bus(dev, dev->id,
                                     pnv_phb4_set_irq, pnv_phb4_map_irq, phb,
                                     &phb->pci_mmio, &phb->pci_io,
                                     0, 4, TYPE_PNV_PHB4_ROOT_BUS);
    pci_setup_iommu(pci->bus, pnv_phb4_dma_iommu, phb);
    pci->bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;

    /* Add a single Root port if running with defaults */
    if (defaults_enabled()) {
        pnv_phb_attach_root_port(PCI_HOST_BRIDGE(phb),
                                 TYPE_PNV_PHB4_ROOT_PORT);
    }

    /* Setup XIVE Source */
    if (phb->big_phb) {
        nr_irqs = PNV_PHB4_MAX_INTs;
    } else {
        nr_irqs = PNV_PHB4_MAX_INTs >> 1;
    }
    object_property_set_int(OBJECT(xsrc), "nr-irqs", nr_irqs, &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(phb), &error_fatal);
    if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
        return;
    }

    pnv_phb4_update_xsrc(phb);

    phb->qirqs = qemu_allocate_irqs(xive_source_set_irq, xsrc, xsrc->nr_irqs);

    pnv_phb4_xscom_realize(phb);
}

static const char *pnv_phb4_root_bus_path(PCIHostState *host_bridge,
                                          PCIBus *rootbus)
{
    PnvPHB4 *phb = PNV_PHB4(host_bridge);

    snprintf(phb->bus_path, sizeof(phb->bus_path), "00%02x:%02x",
             phb->chip_id, phb->phb_id);
    return phb->bus_path;
}

static void pnv_phb4_xive_notify(XiveNotifier *xf, uint32_t srcno)
{
    PnvPHB4 *phb = PNV_PHB4(xf);
    uint64_t notif_port = phb->regs[PHB_INT_NOTIFY_ADDR >> 3];
    uint32_t offset = phb->regs[PHB_INT_NOTIFY_INDEX >> 3];
    uint64_t data = XIVE_TRIGGER_PQ | offset | srcno;
    MemTxResult result;

    trace_pnv_phb4_xive_notify(notif_port, data);

    address_space_stq_be(&address_space_memory, notif_port, data,
                         MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        phb_error(phb, "trigger failed @%"HWADDR_PRIx, notif_port);
        return;
    }
}

static Property pnv_phb4_properties[] = {
    DEFINE_PROP_UINT32("index", PnvPHB4, phb_id, 0),
    DEFINE_PROP_UINT32("chip-id", PnvPHB4, chip_id, 0),
    DEFINE_PROP_UINT64("version", PnvPHB4, version, 0),
    DEFINE_PROP_LINK("stack", PnvPHB4, stack, TYPE_PNV_PHB4_PEC_STACK,
                     PnvPhb4PecStack *),
    DEFINE_PROP_END_OF_LIST(),
};

static void pnv_phb4_class_init(ObjectClass *klass, void *data)
{
    PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xfc = XIVE_NOTIFIER_CLASS(klass);

    hc->root_bus_path = pnv_phb4_root_bus_path;
    dc->realize = pnv_phb4_realize;
    device_class_set_props(dc, pnv_phb4_properties);
    set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
    dc->user_creatable = false;

    xfc->notify = pnv_phb4_xive_notify;
}

static const TypeInfo pnv_phb4_type_info = {
    .name = TYPE_PNV_PHB4,
    .parent = TYPE_PCIE_HOST_BRIDGE,
    .instance_init = pnv_phb4_instance_init,
    .instance_size = sizeof(PnvPHB4),
    .class_init = pnv_phb4_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { },
    }
};

static void pnv_phb4_root_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);

    /*
     * PHB4 has only a single root complex. Enforce the limit on the
     * parent bus
     */
    k->max_dev = 1;
}

static const TypeInfo pnv_phb4_root_bus_info = {
    .name = TYPE_PNV_PHB4_ROOT_BUS,
    .parent = TYPE_PCIE_BUS,
    .class_init = pnv_phb4_root_bus_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};

static void pnv_phb4_root_port_reset(DeviceState *dev)
{
    PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev);
    PCIDevice *d = PCI_DEVICE(dev);
    uint8_t *conf = d->config;

    rpc->parent_reset(dev);

    pci_byte_test_and_set_mask(conf + PCI_IO_BASE,
                               PCI_IO_RANGE_MASK & 0xff);
    pci_byte_test_and_clear_mask(conf + PCI_IO_LIMIT,
                                 PCI_IO_RANGE_MASK & 0xff);
    pci_set_word(conf + PCI_MEMORY_BASE, 0);
    pci_set_word(conf + PCI_MEMORY_LIMIT, 0xfff0);
    pci_set_word(conf + PCI_PREF_MEMORY_BASE, 0x1);
    pci_set_word(conf + PCI_PREF_MEMORY_LIMIT, 0xfff1);
    pci_set_long(conf + PCI_PREF_BASE_UPPER32, 0x1); /* Hack */
    pci_set_long(conf + PCI_PREF_LIMIT_UPPER32, 0xffffffff);
}

static void pnv_phb4_root_port_realize(DeviceState *dev, Error **errp)
{
    PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev);
    PCIDevice *pci = PCI_DEVICE(dev);
    PCIBus *bus = pci_get_bus(pci);
    PnvPHB4 *phb = NULL;
    Error *local_err = NULL;

    phb = (PnvPHB4 *) object_dynamic_cast(OBJECT(bus->qbus.parent),
                                          TYPE_PNV_PHB4);

    if (!phb) {
        error_setg(errp, "%s must be connected to pnv-phb4 buses", dev->id);
        return;
    }

    /* Set unique chassis/slot values for the root port */
    qdev_prop_set_uint8(&pci->qdev, "chassis", phb->chip_id);
    qdev_prop_set_uint16(&pci->qdev, "slot", phb->phb_id);

    rpc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}

static void pnv_phb4_root_port_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    PCIERootPortClass *rpc = PCIE_ROOT_PORT_CLASS(klass);

    dc->desc = "IBM PHB4 PCIE Root Port";
    dc->user_creatable = true;

    device_class_set_parent_realize(dc, pnv_phb4_root_port_realize,
                                    &rpc->parent_realize);
    device_class_set_parent_reset(dc, pnv_phb4_root_port_reset,
                                  &rpc->parent_reset);

    k->vendor_id = PCI_VENDOR_ID_IBM;
    k->device_id = PNV_PHB4_DEVICE_ID;
    k->revision = 0;

    rpc->exp_offset = 0x48;
    rpc->aer_offset = 0x100;

    dc->reset = &pnv_phb4_root_port_reset;
}

static const TypeInfo pnv_phb4_root_port_info = {
    .name = TYPE_PNV_PHB4_ROOT_PORT,
    .parent = TYPE_PCIE_ROOT_PORT,
    .instance_size = sizeof(PnvPHB4RootPort),
    .class_init = pnv_phb4_root_port_class_init,
};

static void pnv_phb4_register_types(void)
{
    type_register_static(&pnv_phb4_root_bus_info);
    type_register_static(&pnv_phb4_root_port_info);
    type_register_static(&pnv_phb4_type_info);
    type_register_static(&pnv_phb4_iommu_memory_region_info);
}

type_init(pnv_phb4_register_types);

void pnv_phb4_update_regions(PnvPhb4PecStack *stack)
{
    PnvPHB4 *phb = stack->phb;

    /* Unmap first always */
    if (memory_region_is_mapped(&phb->mr_regs)) {
        memory_region_del_subregion(&stack->phbbar, &phb->mr_regs);
    }
    if (memory_region_is_mapped(&phb->xsrc.esb_mmio)) {
        memory_region_del_subregion(&stack->intbar, &phb->xsrc.esb_mmio);
    }

    /* Map registers if enabled */
    if (memory_region_is_mapped(&stack->phbbar)) {
        memory_region_add_subregion(&stack->phbbar, 0, &phb->mr_regs);
    }

    /* Map ESB if enabled */
    if (memory_region_is_mapped(&stack->intbar)) {
        memory_region_add_subregion(&stack->intbar, 0, &phb->xsrc.esb_mmio);
    }

    /* Check/update m32 */
    pnv_phb4_check_all_mbt(phb);
}

void pnv_phb4_pic_print_info(PnvPHB4 *phb, Monitor *mon)
{
    uint32_t offset = phb->regs[PHB_INT_NOTIFY_INDEX >> 3];

    monitor_printf(mon, "PHB4[%x:%x] Source %08x .. %08x\n",
                   phb->chip_id, phb->phb_id,
                   offset, offset + phb->xsrc.nr_irqs - 1);
    xive_source_pic_print_info(&phb->xsrc, 0, mon);
}