/*
 * s390 PCI BUS
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-kvm.h"
#include "hw/s390x/s390-pci-vfio.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/msi.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"

#ifndef DEBUG_S390PCI_BUS
#define DEBUG_S390PCI_BUS  0
#endif

#define DPRINTF(fmt, ...)                                          \
    do {                                                           \
        if (DEBUG_S390PCI_BUS) {                                   \
            fprintf(stderr, "S390pci-bus: " fmt, ## __VA_ARGS__);  \
        }                                                          \
    } while (0)

S390pciState *s390_get_phb(void)
{
    static S390pciState *phb;

    if (!phb) {
        phb = S390_PCI_HOST_BRIDGE(
            object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL));
        assert(phb != NULL);
    }

    return phb;
}

int pci_chsc_sei_nt2_get_event(void *res)
{
    ChscSeiNt2Res *nt2_res = (ChscSeiNt2Res *)res;
    PciCcdfAvail *accdf;
    PciCcdfErr *eccdf;
    int rc = 1;
    SeiContainer *sei_cont;
    S390pciState *s = s390_get_phb();

    sei_cont = QTAILQ_FIRST(&s->pending_sei);
    if (sei_cont) {
        QTAILQ_REMOVE(&s->pending_sei, sei_cont, link);
        nt2_res->nt = 2;
        nt2_res->cc = sei_cont->cc;
        nt2_res->length = cpu_to_be16(sizeof(ChscSeiNt2Res));
        switch (sei_cont->cc) {
        case 1: /* error event */
            eccdf = (PciCcdfErr *)nt2_res->ccdf;
            eccdf->fid = cpu_to_be32(sei_cont->fid);
            eccdf->fh = cpu_to_be32(sei_cont->fh);
            eccdf->e = cpu_to_be32(sei_cont->e);
            eccdf->faddr = cpu_to_be64(sei_cont->faddr);
            eccdf->pec = cpu_to_be16(sei_cont->pec);
            break;
        case 2: /* availability event */
            accdf = (PciCcdfAvail *)nt2_res->ccdf;
            accdf->fid = cpu_to_be32(sei_cont->fid);
            accdf->fh = cpu_to_be32(sei_cont->fh);
            accdf->pec = cpu_to_be16(sei_cont->pec);
            break;
        default:
            abort();
        }
        g_free(sei_cont);
        rc = 0;
    }

    return rc;
}

int pci_chsc_sei_nt2_have_event(void)
{
    S390pciState *s = s390_get_phb();

    return !QTAILQ_EMPTY(&s->pending_sei);
}

S390PCIBusDevice *s390_pci_find_next_avail_dev(S390pciState *s,
                                               S390PCIBusDevice *pbdev)
{
    S390PCIBusDevice *ret = pbdev ?
        QTAILQ_NEXT(pbdev, link) :
        QTAILQ_FIRST(&s->zpci_devs);

    while (ret && ret->state == ZPCI_FS_RESERVED) {
        ret = QTAILQ_NEXT(ret, link);
    }

    return ret;
}

S390PCIBusDevice *s390_pci_find_dev_by_fid(S390pciState *s, uint32_t fid)
{
    S390PCIBusDevice *pbdev;

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (pbdev->fid == fid) {
            return pbdev;
        }
    }

    return NULL;
}

void s390_pci_sclp_configure(SCCB *sccb)
{
    IoaCfgSccb *psccb = (IoaCfgSccb *)sccb;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(),
                                                       be32_to_cpu(psccb->aid));
    uint16_t rc;

    if (!pbdev) {
        DPRINTF("sclp config no dev found\n");
        rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED;
        goto out;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE;
        break;
    case ZPCI_FS_STANDBY:
        pbdev->state = ZPCI_FS_DISABLED;
        rc = SCLP_RC_NORMAL_COMPLETION;
        break;
    default:
        rc = SCLP_RC_NO_ACTION_REQUIRED;
    }
out:
    psccb->header.response_code = cpu_to_be16(rc);
}

static void s390_pci_shutdown_notifier(Notifier *n, void *opaque)
{
    S390PCIBusDevice *pbdev = container_of(n, S390PCIBusDevice,
                                           shutdown_notifier);

    pci_device_reset(pbdev->pdev);
}

static void s390_pci_reset_cb(void *opaque)
{
    S390PCIBusDevice *pbdev = opaque;

    pci_device_reset(pbdev->pdev);
}

static void s390_pci_perform_unplug(S390PCIBusDevice *pbdev)
{
    HotplugHandler *hotplug_ctrl;

    if (pbdev->pft == ZPCI_PFT_ISM) {
        notifier_remove(&pbdev->shutdown_notifier);
        qemu_unregister_reset(s390_pci_reset_cb, pbdev);
    }

    /* Unplug the PCI device */
    if (pbdev->pdev) {
        DeviceState *pdev = DEVICE(pbdev->pdev);

        hotplug_ctrl = qdev_get_hotplug_handler(pdev);
        hotplug_handler_unplug(hotplug_ctrl, pdev, &error_abort);
        object_unparent(OBJECT(pdev));
    }

    /* Unplug the zPCI device */
    hotplug_ctrl = qdev_get_hotplug_handler(DEVICE(pbdev));
    hotplug_handler_unplug(hotplug_ctrl, DEVICE(pbdev), &error_abort);
    object_unparent(OBJECT(pbdev));
}

void s390_pci_sclp_deconfigure(SCCB *sccb)
{
    IoaCfgSccb *psccb = (IoaCfgSccb *)sccb;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(),
                                                       be32_to_cpu(psccb->aid));
    uint16_t rc;

    if (!pbdev) {
        DPRINTF("sclp deconfig no dev found\n");
        rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED;
        goto out;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE;
        break;
    case ZPCI_FS_STANDBY:
        rc = SCLP_RC_NO_ACTION_REQUIRED;
        break;
    default:
        if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
            /* Interpreted devices were using interrupt forwarding */
            s390_pci_kvm_aif_disable(pbdev);
        } else if (pbdev->summary_ind) {
            pci_dereg_irqs(pbdev);
        }
        if (pbdev->iommu->enabled) {
            pci_dereg_ioat(pbdev->iommu);
        }
        pbdev->state = ZPCI_FS_STANDBY;
        rc = SCLP_RC_NORMAL_COMPLETION;

        if (pbdev->unplug_requested) {
            s390_pci_perform_unplug(pbdev);
        }
    }
out:
    psccb->header.response_code = cpu_to_be16(rc);
}

static S390PCIBusDevice *s390_pci_find_dev_by_uid(S390pciState *s, uint16_t uid)
{
    S390PCIBusDevice *pbdev;

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (pbdev->uid == uid) {
            return pbdev;
        }
    }

    return NULL;
}

S390PCIBusDevice *s390_pci_find_dev_by_target(S390pciState *s,
                                              const char *target)
{
    S390PCIBusDevice *pbdev;

    if (!target) {
        return NULL;
    }

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (!strcmp(pbdev->target, target)) {
            return pbdev;
        }
    }

    return NULL;
}

static S390PCIBusDevice *s390_pci_find_dev_by_pci(S390pciState *s,
                                                  PCIDevice *pci_dev)
{
    S390PCIBusDevice *pbdev;

    if (!pci_dev) {
        return NULL;
    }

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (pbdev->pdev == pci_dev) {
            return pbdev;
        }
    }

    return NULL;
}

S390PCIBusDevice *s390_pci_find_dev_by_idx(S390pciState *s, uint32_t idx)
{
    return g_hash_table_lookup(s->zpci_table, &idx);
}

S390PCIBusDevice *s390_pci_find_dev_by_fh(S390pciState *s, uint32_t fh)
{
    uint32_t idx = FH_MASK_INDEX & fh;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_idx(s, idx);

    if (pbdev && pbdev->fh == fh) {
        return pbdev;
    }

    return NULL;
}

static void s390_pci_generate_event(uint8_t cc, uint16_t pec, uint32_t fh,
                                    uint32_t fid, uint64_t faddr, uint32_t e)
{
    SeiContainer *sei_cont;
    S390pciState *s = s390_get_phb();

    sei_cont = g_new0(SeiContainer, 1);
    sei_cont->fh = fh;
    sei_cont->fid = fid;
    sei_cont->cc = cc;
    sei_cont->pec = pec;
    sei_cont->faddr = faddr;
    sei_cont->e = e;

    QTAILQ_INSERT_TAIL(&s->pending_sei, sei_cont, link);
    css_generate_css_crws(0);
}

static void s390_pci_generate_plug_event(uint16_t pec, uint32_t fh,
                                         uint32_t fid)
{
    s390_pci_generate_event(2, pec, fh, fid, 0, 0);
}

void s390_pci_generate_error_event(uint16_t pec, uint32_t fh, uint32_t fid,
                                   uint64_t faddr, uint32_t e)
{
    s390_pci_generate_event(1, pec, fh, fid, faddr, e);
}

static void s390_pci_set_irq(void *opaque, int irq, int level)
{
    /* nothing to do */
}

static int s390_pci_map_irq(PCIDevice *pci_dev, int irq_num)
{
    /* nothing to do */
    return 0;
}

static uint64_t s390_pci_get_table_origin(uint64_t iota)
{
    return iota & ~ZPCI_IOTA_RTTO_FLAG;
}

static unsigned int calc_rtx(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
}

static unsigned int calc_sx(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
}

static unsigned int calc_px(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> TARGET_PAGE_BITS) & ZPCI_PT_MASK;
}

static uint64_t get_rt_sto(uint64_t entry)
{
    return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
            ? (entry & ZPCI_RTE_ADDR_MASK)
            : 0;
}

static uint64_t get_st_pto(uint64_t entry)
{
    return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
            ?
              (entry & ZPCI_STE_ADDR_MASK)
            : 0;
}

static bool rt_entry_isvalid(uint64_t entry)
{
    return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
}

static bool pt_entry_isvalid(uint64_t entry)
{
    return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}

static bool entry_isprotected(uint64_t entry)
{
    return (entry & ZPCI_TABLE_PROT_MASK) == ZPCI_TABLE_PROTECTED;
}

/* ett is expected table type, -1 page table, 0 segment table, 1 region table */
static uint64_t get_table_index(uint64_t iova, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return calc_px(iova);
    case ZPCI_ETT_ST:
        return calc_sx(iova);
    case ZPCI_ETT_RT:
        return calc_rtx(iova);
    }

    return -1;
}

static bool entry_isvalid(uint64_t entry, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return pt_entry_isvalid(entry);
    case ZPCI_ETT_ST:
    case ZPCI_ETT_RT:
        return rt_entry_isvalid(entry);
    }

    return false;
}

/* Return true if address translation is done */
static bool translate_iscomplete(uint64_t entry, int8_t ett)
{
    switch (ett) {
    case 0:
        return (entry & ZPCI_TABLE_FC) ? true : false;
    case 1:
        return false;
    }

    return true;
}

static uint64_t get_frame_size(int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return 1ULL << 12;
    case ZPCI_ETT_ST:
        return 1ULL << 20;
    case ZPCI_ETT_RT:
        return 1ULL << 31;
    }

    return 0;
}

static uint64_t get_next_table_origin(uint64_t entry, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return entry & ZPCI_PTE_ADDR_MASK;
    case ZPCI_ETT_ST:
        return get_st_pto(entry);
    case ZPCI_ETT_RT:
        return get_rt_sto(entry);
    }

    return 0;
}

/**
 * table_translate: do translation within one table and return the following
 *                  table origin
 *
 * @entry: the entry being translated, the result is stored in this.
 * @to: the address of table origin.
 * @ett: expected table type, 1 region table, 0 segment table and -1 page table.
 * @error: error code
 */
static uint64_t table_translate(S390IOTLBEntry *entry, uint64_t to, int8_t ett,
                                uint16_t *error)
{
    uint64_t tx, te, nto = 0;
    uint16_t err = 0;

    tx = get_table_index(entry->iova, ett);
    te = address_space_ldq(&address_space_memory, to + tx * sizeof(uint64_t),
                           MEMTXATTRS_UNSPECIFIED, NULL);

    if (!te) {
        err = ERR_EVENT_INVALTE;
        goto out;
    }

    if (!entry_isvalid(te, ett)) {
        entry->perm &= IOMMU_NONE;
        goto out;
    }

    if (ett == ZPCI_ETT_RT && ((te & ZPCI_TABLE_LEN_RTX) != ZPCI_TABLE_LEN_RTX
                               || te & ZPCI_TABLE_OFFSET_MASK)) {
        err = ERR_EVENT_INVALTL;
        goto out;
    }

    nto = get_next_table_origin(te, ett);
    if (!nto) {
        err = ERR_EVENT_TT;
        goto out;
    }

    if (entry_isprotected(te)) {
        entry->perm &= IOMMU_RO;
    } else {
        entry->perm &= IOMMU_RW;
    }

    if (translate_iscomplete(te, ett)) {
        switch (ett) {
        case ZPCI_ETT_PT:
            entry->translated_addr = te & ZPCI_PTE_ADDR_MASK;
            break;
        case ZPCI_ETT_ST:
            entry->translated_addr = (te & ZPCI_SFAA_MASK) |
                (entry->iova & ~ZPCI_SFAA_MASK);
            break;
        }
        nto = 0;
    }
out:
    if (err) {
        entry->perm = IOMMU_NONE;
        *error = err;
    }
    entry->len = get_frame_size(ett);
    return nto;
}

uint16_t s390_guest_io_table_walk(uint64_t g_iota, hwaddr addr,
                                  S390IOTLBEntry *entry)
{
    uint64_t to = s390_pci_get_table_origin(g_iota);
    int8_t ett = 1;
    uint16_t error = 0;

    entry->iova = addr & TARGET_PAGE_MASK;
    entry->translated_addr = 0;
    entry->perm = IOMMU_RW;

    if (entry_isprotected(g_iota)) {
        entry->perm &= IOMMU_RO;
    }

    while (to) {
        to = table_translate(entry, to, ett--, &error);
    }

    return error;
}

static IOMMUTLBEntry s390_translate_iommu(IOMMUMemoryRegion *mr, hwaddr addr,
                                          IOMMUAccessFlags flag, int iommu_idx)
{
    S390PCIIOMMU *iommu = container_of(mr, S390PCIIOMMU, iommu_mr);
    S390IOTLBEntry *entry;
    uint64_t iova = addr & TARGET_PAGE_MASK;
    uint16_t error = 0;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = 0,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    switch (iommu->pbdev->state) {
    case ZPCI_FS_ENABLED:
    case ZPCI_FS_BLOCKED:
        if (!iommu->enabled) {
            return ret;
        }
        break;
    default:
        return ret;
    }

    DPRINTF("iommu trans addr 0x%" PRIx64 "\n", addr);

    if (addr < iommu->pba || addr > iommu->pal) {
        error = ERR_EVENT_OORANGE;
        goto err;
    }

    entry = g_hash_table_lookup(iommu->iotlb, &iova);
    if (entry) {
        ret.iova = entry->iova;
        ret.translated_addr = entry->translated_addr;
        ret.addr_mask = entry->len - 1;
        ret.perm = entry->perm;
    } else {
        ret.iova = iova;
        ret.addr_mask = ~TARGET_PAGE_MASK;
        ret.perm = IOMMU_NONE;
    }

    if (flag != IOMMU_NONE && !(flag & ret.perm)) {
        error = ERR_EVENT_TPROTE;
    }
err:
    if (error) {
        iommu->pbdev->state = ZPCI_FS_ERROR;
        s390_pci_generate_error_event(error, iommu->pbdev->fh,
                                      iommu->pbdev->fid, addr, 0);
    }
    return ret;
}

static void s390_pci_iommu_replay(IOMMUMemoryRegion *iommu,
                                  IOMMUNotifier *notifier)
{
    /* It's impossible to plug a pci device on s390x that already has iommu
     * mappings which need to be replayed, that is due to the "one iommu per
     * zpci device" construct.
     * But when we support migration of vfio-pci
     * devices in future, we need to revisit this.
     */
    return;
}

static S390PCIIOMMU *s390_pci_get_iommu(S390pciState *s, PCIBus *bus,
                                        int devfn)
{
    uint64_t key = (uintptr_t)bus;
    S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key);
    S390PCIIOMMU *iommu;

    if (!table) {
        table = g_new0(S390PCIIOMMUTable, 1);
        table->key = key;
        g_hash_table_insert(s->iommu_table, &table->key, table);
    }

    iommu = table->iommu[PCI_SLOT(devfn)];
    if (!iommu) {
        iommu = S390_PCI_IOMMU(object_new(TYPE_S390_PCI_IOMMU));

        char *mr_name = g_strdup_printf("iommu-root-%02x:%02x.%01x",
                                        pci_bus_num(bus),
                                        PCI_SLOT(devfn),
                                        PCI_FUNC(devfn));
        char *as_name = g_strdup_printf("iommu-pci-%02x:%02x.%01x",
                                        pci_bus_num(bus),
                                        PCI_SLOT(devfn),
                                        PCI_FUNC(devfn));
        memory_region_init(&iommu->mr, OBJECT(iommu), mr_name, UINT64_MAX);
        address_space_init(&iommu->as, &iommu->mr, as_name);
        iommu->iotlb = g_hash_table_new_full(g_int64_hash, g_int64_equal,
                                             NULL, g_free);
        table->iommu[PCI_SLOT(devfn)] = iommu;

        g_free(mr_name);
        g_free(as_name);
    }

    return iommu;
}

static AddressSpace *s390_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    S390pciState *s = opaque;
    S390PCIIOMMU *iommu = s390_pci_get_iommu(s, bus, devfn);

    return &iommu->as;
}

static uint8_t set_ind_atomic(uint64_t ind_loc, uint8_t to_be_set)
{
    uint8_t expected, actual;
    hwaddr len = 1;
    /* avoid multiple fetches */
    uint8_t volatile *ind_addr;

    ind_addr = cpu_physical_memory_map(ind_loc, &len, true);
    if (!ind_addr) {
        s390_pci_generate_error_event(ERR_EVENT_AIRERR, 0, 0, 0, 0);
        return -1;
    }
    actual = *ind_addr;
    do {
        expected = actual;
        actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set);
    } while (actual != expected);
    cpu_physical_memory_unmap((void *)ind_addr, len, 1, len);

    return actual;
}

static void s390_msi_ctrl_write(void *opaque, hwaddr addr, uint64_t data,
                                unsigned int size)
{
    S390PCIBusDevice *pbdev = opaque;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;
    uint64_t ind_bit;
    uint32_t sum_bit;

    assert(pbdev);
    DPRINTF("write_msix data 0x%" PRIx64 " idx %d vec 0x%x\n", data,
            pbdev->idx, vec);

    if (pbdev->state != ZPCI_FS_ENABLED) {
        return;
    }

    ind_bit = pbdev->routes.adapter.ind_offset;
    sum_bit = pbdev->routes.adapter.summary_offset;

    set_ind_atomic(pbdev->routes.adapter.ind_addr + (ind_bit + vec) / 8,
                   0x80 >> ((ind_bit + vec) % 8));
    if (!set_ind_atomic(pbdev->routes.adapter.summary_addr + sum_bit / 8,
                        0x80 >> (sum_bit % 8))) {
        css_adapter_interrupt(CSS_IO_ADAPTER_PCI, pbdev->isc);
    }
}

static uint64_t s390_msi_ctrl_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0xffffffff;
}

static const MemoryRegionOps s390_msi_ctrl_ops = {
    .write = s390_msi_ctrl_write,
    .read = s390_msi_ctrl_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

void s390_pci_iommu_enable(S390PCIIOMMU *iommu)
{
    /*
     * The iommu region is initialized against a 0-mapped address space,
     * so the smallest IOMMU region we can define runs from 0 to the end
     * of the PCI address space.
     */
    char *name = g_strdup_printf("iommu-s390-%04x", iommu->pbdev->uid);
    memory_region_init_iommu(&iommu->iommu_mr, sizeof(iommu->iommu_mr),
                             TYPE_S390_IOMMU_MEMORY_REGION, OBJECT(&iommu->mr),
                             name, iommu->pal + 1);
    iommu->enabled = true;
    memory_region_add_subregion(&iommu->mr, 0, MEMORY_REGION(&iommu->iommu_mr));
    g_free(name);
}

void s390_pci_iommu_disable(S390PCIIOMMU *iommu)
{
    iommu->enabled = false;
    g_hash_table_remove_all(iommu->iotlb);
    memory_region_del_subregion(&iommu->mr, MEMORY_REGION(&iommu->iommu_mr));
    object_unparent(OBJECT(&iommu->iommu_mr));
}

static void s390_pci_iommu_free(S390pciState *s, PCIBus *bus, int32_t devfn)
{
    uint64_t key = (uintptr_t)bus;
    S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key);
    S390PCIIOMMU *iommu = table ? table->iommu[PCI_SLOT(devfn)] : NULL;

    if (!table || !iommu) {
        return;
    }

    table->iommu[PCI_SLOT(devfn)] = NULL;
    g_hash_table_destroy(iommu->iotlb);
    /*
     * An attached PCI device may have memory listeners, eg. VFIO PCI.
     * The associated subregion will already have been unmapped in
     * s390_pci_iommu_disable in response to the guest deconfigure request.
     * Remove the listeners now before destroying the address space.
     */
    address_space_remove_listeners(&iommu->as);
    address_space_destroy(&iommu->as);
    object_unparent(OBJECT(&iommu->mr));
    object_unparent(OBJECT(iommu));
    object_unref(OBJECT(iommu));
}

S390PCIGroup *s390_group_create(int id, int host_id)
{
    S390PCIGroup *group;
    S390pciState *s = s390_get_phb();

    group = g_new0(S390PCIGroup, 1);
    group->id = id;
    group->host_id = host_id;
    QTAILQ_INSERT_TAIL(&s->zpci_groups, group, link);
    return group;
}

S390PCIGroup *s390_group_find(int id)
{
    S390PCIGroup *group;
    S390pciState *s = s390_get_phb();

    QTAILQ_FOREACH(group, &s->zpci_groups, link) {
        if (group->id == id) {
            return group;
        }
    }
    return NULL;
}

S390PCIGroup *s390_group_find_host_sim(int host_id)
{
    S390PCIGroup *group;
    S390pciState *s = s390_get_phb();

    QTAILQ_FOREACH(group, &s->zpci_groups, link) {
        if (group->id >= ZPCI_SIM_GRP_START && group->host_id == host_id) {
            return group;
        }
    }
    return NULL;
}

static void s390_pci_init_default_group(void)
{
    S390PCIGroup *group;
    ClpRspQueryPciGrp *resgrp;

    group = s390_group_create(ZPCI_DEFAULT_FN_GRP, ZPCI_DEFAULT_FN_GRP);
    resgrp = &group->zpci_group;
    resgrp->fr = 1;
    resgrp->dasm = 0;
    resgrp->msia = ZPCI_MSI_ADDR;
    resgrp->mui = DEFAULT_MUI;
    resgrp->i = 128;
    resgrp->maxstbl = 128;
    resgrp->version = 0;
    resgrp->dtsm = ZPCI_DTSM;
}

static void set_pbdev_info(S390PCIBusDevice *pbdev)
{
    pbdev->zpci_fn.sdma = ZPCI_SDMA_ADDR;
    pbdev->zpci_fn.edma = ZPCI_EDMA_ADDR;
    pbdev->zpci_fn.pchid = 0;
    pbdev->zpci_fn.pfgid = ZPCI_DEFAULT_FN_GRP;
    pbdev->zpci_fn.fid = pbdev->fid;
    pbdev->zpci_fn.uid = pbdev->uid;
    pbdev->pci_group = s390_group_find(ZPCI_DEFAULT_FN_GRP);
}

static void s390_pcihost_realize(DeviceState *dev, Error **errp)
{
    PCIBus *b;
    BusState *bus;
    PCIHostState *phb = PCI_HOST_BRIDGE(dev);
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);

    DPRINTF("host_init\n");

    b = pci_register_root_bus(dev, NULL, s390_pci_set_irq, s390_pci_map_irq,
                              NULL, get_system_memory(),
                              get_system_io(), 0,
                              64, TYPE_PCI_BUS);
    pci_setup_iommu(b, s390_pci_dma_iommu, s);

    bus = BUS(b);
    qbus_set_hotplug_handler(bus, OBJECT(dev));
    phb->bus = b;

    s->bus = S390_PCI_BUS(qbus_new(TYPE_S390_PCI_BUS, dev, NULL));
    qbus_set_hotplug_handler(BUS(s->bus), OBJECT(dev));

    s->iommu_table = g_hash_table_new_full(g_int64_hash, g_int64_equal,
                                           NULL, g_free);
    s->zpci_table = g_hash_table_new_full(g_int_hash, g_int_equal, NULL, NULL);
    s->bus_no = 0;
    s->next_sim_grp = ZPCI_SIM_GRP_START;
    QTAILQ_INIT(&s->pending_sei);
    QTAILQ_INIT(&s->zpci_devs);
    QTAILQ_INIT(&s->zpci_dma_limit);
    QTAILQ_INIT(&s->zpci_groups);

    s390_pci_init_default_group();
    css_register_io_adapters(CSS_IO_ADAPTER_PCI, true, false,
                             S390_ADAPTER_SUPPRESSIBLE, errp);
}

static void s390_pcihost_unrealize(DeviceState *dev)
{
    S390PCIGroup *group;
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);

    while (!QTAILQ_EMPTY(&s->zpci_groups)) {
        group = QTAILQ_FIRST(&s->zpci_groups);
        QTAILQ_REMOVE(&s->zpci_groups, group, link);
    }
}

static int s390_pci_msix_init(S390PCIBusDevice *pbdev)
{
    char *name;
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;

    pos = pci_find_capability(pbdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return -1;
    }

    ctrl = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_FLAGS,
                                       pci_config_size(pbdev->pdev),
                                       sizeof(ctrl));
    table = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_TABLE,
                                        pci_config_size(pbdev->pdev),
                                        sizeof(table));
    pba = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_PBA,
                                      pci_config_size(pbdev->pdev),
                                      sizeof(pba));

    pbdev->msix.table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    name = g_strdup_printf("msix-s390-%04x", pbdev->uid);
    memory_region_init_io(&pbdev->msix_notify_mr, OBJECT(pbdev),
                          &s390_msi_ctrl_ops, pbdev, name, TARGET_PAGE_SIZE);
    memory_region_add_subregion(&pbdev->iommu->mr,
                                pbdev->pci_group->zpci_group.msia,
                                &pbdev->msix_notify_mr);
    g_free(name);

    return 0;
}

static void s390_pci_msix_free(S390PCIBusDevice *pbdev)
{
    if (pbdev->msix.entries == 0) {
        return;
    }

    memory_region_del_subregion(&pbdev->iommu->mr, &pbdev->msix_notify_mr);
    object_unparent(OBJECT(&pbdev->msix_notify_mr));
}

static S390PCIBusDevice *s390_pci_device_new(S390pciState *s,
                                             const char *target, Error **errp)
{
    Error *local_err = NULL;
    DeviceState *dev;

    dev = qdev_try_new(TYPE_S390_PCI_DEVICE);
    if (!dev) {
        error_setg(errp, "zPCI device could not be created");
        return NULL;
    }

    if (!object_property_set_str(OBJECT(dev), "target", target, &local_err)) {
        object_unparent(OBJECT(dev));
        error_propagate_prepend(errp, local_err,
                                "zPCI device could not be created: ");
        return NULL;
    }
    if (!qdev_realize_and_unref(dev, BUS(s->bus), &local_err)) {
        object_unparent(OBJECT(dev));
        error_propagate_prepend(errp, local_err,
                                "zPCI device could not be created: ");
        return NULL;
    }

    return S390_PCI_DEVICE(dev);
}

static bool s390_pci_alloc_idx(S390pciState *s, S390PCIBusDevice *pbdev)
{
    uint32_t idx;

    idx = s->next_idx;
    while (s390_pci_find_dev_by_idx(s, idx)) {
        idx = (idx + 1) & FH_MASK_INDEX;
        if (idx == s->next_idx) {
            return false;
        }
    }

    pbdev->idx = idx;
    return true;
}

static void s390_pcihost_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);

    if (!s390_has_feat(S390_FEAT_ZPCI)) {
        warn_report("Plugging a PCI/zPCI device without the 'zpci' CPU "
                    "feature enabled; the guest will not be able to see/use "
                    "this device");
    }

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pdev = PCI_DEVICE(dev);

        if (pdev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
            error_setg(errp, "multifunction not supported in s390");
            return;
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev);

        if (!s390_pci_alloc_idx(s, pbdev)) {
            error_setg(errp, "no slot for plugging zpci device");
            return;
        }
    }
}

static void s390_pci_update_subordinate(PCIDevice *dev, uint32_t nr)
{
    uint32_t old_nr;

    pci_default_write_config(dev, PCI_SUBORDINATE_BUS, nr, 1);
    while (!pci_bus_is_root(pci_get_bus(dev))) {
        dev = pci_get_bus(dev)->parent_dev;

        old_nr = pci_default_read_config(dev, PCI_SUBORDINATE_BUS, 1);
        if (old_nr < nr) {
            pci_default_write_config(dev, PCI_SUBORDINATE_BUS, nr, 1);
        }
    }
}

static int s390_pci_interp_plug(S390pciState *s, S390PCIBusDevice *pbdev)
{
    uint32_t idx, fh;

    if (!s390_pci_get_host_fh(pbdev, &fh)) {
        return -EPERM;
    }

    /*
     * The host device is already in an enabled state, but we always present
     * the initial device state to the guest as disabled (ZPCI_FS_DISABLED).
     * Therefore, mask off the enable bit from the passthrough handle until
     * the guest issues a CLP SET PCI FN later to enable the device.
     */
    pbdev->fh = fh & ~FH_MASK_ENABLE;

    /* Next, see if the idx is already in-use */
    idx = pbdev->fh & FH_MASK_INDEX;
    if (pbdev->idx != idx) {
        if (s390_pci_find_dev_by_idx(s, idx)) {
            return -EINVAL;
        }
        /*
         * Update the idx entry with the passed through idx
         * If the relinquished idx is lower than next_idx, use it
         * to replace next_idx
         */
        g_hash_table_remove(s->zpci_table, &pbdev->idx);
        if (idx < s->next_idx) {
            s->next_idx = idx;
        }
        pbdev->idx = idx;
        g_hash_table_insert(s->zpci_table, &pbdev->idx, pbdev);
    }

    return 0;
}

static void s390_pcihost_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                              Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    PCIDevice *pdev = NULL;
    S390PCIBusDevice *pbdev = NULL;
    int rc;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
        PCIBridge *pb = PCI_BRIDGE(dev);

        pdev = PCI_DEVICE(dev);
        pci_bridge_map_irq(pb, dev->id, s390_pci_map_irq);
        pci_setup_iommu(&pb->sec_bus, s390_pci_dma_iommu, s);

        qbus_set_hotplug_handler(BUS(&pb->sec_bus), OBJECT(s));

        if (dev->hotplugged) {
            pci_default_write_config(pdev, PCI_PRIMARY_BUS,
                                     pci_dev_bus_num(pdev), 1);
            s->bus_no += 1;
            pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1);

            s390_pci_update_subordinate(pdev, s->bus_no);
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        pdev = PCI_DEVICE(dev);

        if (!dev->id) {
            /* In the case the PCI device does not define an id */
            /* we generate one based on the PCI address */
            dev->id = g_strdup_printf("auto_%02x:%02x.%01x",
                                      pci_dev_bus_num(pdev),
                                      PCI_SLOT(pdev->devfn),
                                      PCI_FUNC(pdev->devfn));
        }

        pbdev = s390_pci_find_dev_by_target(s, dev->id);
        if (!pbdev) {
            pbdev = s390_pci_device_new(s, dev->id, errp);
            if (!pbdev) {
                return;
            }
        }

        pbdev->pdev = pdev;
        pbdev->iommu = s390_pci_get_iommu(s, pci_get_bus(pdev), pdev->devfn);
        pbdev->iommu->pbdev = pbdev;
        pbdev->state = ZPCI_FS_DISABLED;
        set_pbdev_info(pbdev);

        if (object_dynamic_cast(OBJECT(dev), "vfio-pci")) {
            /*
             * By default, interpretation is always requested; if the available
             * facilities indicate it is not available, fallback to the
             * interception model.
             */
            if (pbdev->interp) {
                if (s390_pci_kvm_interp_allowed()) {
                    rc = s390_pci_interp_plug(s, pbdev);
                    if (rc) {
                        error_setg(errp, "Plug failed for zPCI device in "
                                   "interpretation mode: %d", rc);
                        return;
                    }
                } else {
                    DPRINTF("zPCI interpretation facilities missing.\n");
                    pbdev->interp = false;
                    pbdev->forwarding_assist = false;
                }
            }
            pbdev->iommu->dma_limit = s390_pci_start_dma_count(s, pbdev);
            /* Fill in CLP information passed via the vfio region */
            s390_pci_get_clp_info(pbdev);
            if (!pbdev->interp) {
                /* Do vfio passthrough but intercept for I/O */
                pbdev->fh |= FH_SHM_VFIO;
                pbdev->forwarding_assist = false;
            }
            /* Register shutdown notifier and reset callback for ISM devices */
            if (pbdev->pft == ZPCI_PFT_ISM) {
                pbdev->shutdown_notifier.notify = s390_pci_shutdown_notifier;
                qemu_register_shutdown_notifier(&pbdev->shutdown_notifier);
                qemu_register_reset(s390_pci_reset_cb, pbdev);
            }
        } else {
            pbdev->fh |= FH_SHM_EMUL;
            /* Always intercept emulated devices */
            pbdev->interp = false;
            pbdev->forwarding_assist = false;
        }

        if (s390_pci_msix_init(pbdev) && !pbdev->interp) {
            error_setg(errp, "MSI-X support is mandatory "
                       "in the S390 architecture");
            return;
        }

        if (dev->hotplugged) {
            s390_pci_generate_plug_event(HP_EVENT_TO_CONFIGURED,
                                         pbdev->fh, pbdev->fid);
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);

        /* the allocated idx is actually getting used */
        s->next_idx = (pbdev->idx + 1) & FH_MASK_INDEX;
        pbdev->fh = pbdev->idx;
        QTAILQ_INSERT_TAIL(&s->zpci_devs, pbdev, link);
        g_hash_table_insert(s->zpci_table, &pbdev->idx, pbdev);
    } else {
        g_assert_not_reached();
    }
}

static void s390_pcihost_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    S390PCIBusDevice *pbdev = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        PCIBus *bus;
        int32_t devfn;

        pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev));
        g_assert(pbdev);

        s390_pci_generate_plug_event(HP_EVENT_STANDBY_TO_RESERVED,
                                     pbdev->fh, pbdev->fid);
        bus = pci_get_bus(pci_dev);
        devfn = pci_dev->devfn;
        qdev_unrealize(dev);

        s390_pci_msix_free(pbdev);
        s390_pci_iommu_free(s, bus, devfn);
        pbdev->pdev = NULL;
        pbdev->state = ZPCI_FS_RESERVED;
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);
        pbdev->fid = 0;
        QTAILQ_REMOVE(&s->zpci_devs, pbdev, link);
        g_hash_table_remove(s->zpci_table, &pbdev->idx);
        if (pbdev->iommu->dma_limit) {
            s390_pci_end_dma_count(s, pbdev->iommu->dma_limit);
        }
        qdev_unrealize(dev);
    }
}

static void s390_pcihost_unplug_request(HotplugHandler *hotplug_dev,
                                        DeviceState *dev,
                                        Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    S390PCIBusDevice *pbdev;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
        error_setg(errp, "PCI bridge hot unplug currently not supported");
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        /*
         * Redirect the unplug request to the zPCI device and remember that
         * we've checked the PCI device already (to prevent endless recursion).
         */
        pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev));
        g_assert(pbdev);
        pbdev->pci_unplug_request_processed = true;
        qdev_unplug(DEVICE(pbdev), errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);

        /*
         * If unplug was initially requested for the zPCI device, we
         * first have to redirect to the PCI device, which will in return
         * redirect back to us after performing its checks (if the request
         * is not blocked, e.g. because it's a PCI bridge).
         */
        if (pbdev->pdev && !pbdev->pci_unplug_request_processed) {
            qdev_unplug(DEVICE(pbdev->pdev), errp);
            return;
        }
        pbdev->pci_unplug_request_processed = false;

        switch (pbdev->state) {
        case ZPCI_FS_STANDBY:
        case ZPCI_FS_RESERVED:
            s390_pci_perform_unplug(pbdev);
            break;
        default:
            /*
             * Allow to send multiple requests, e.g. if the guest crashed
             * before releasing the device, we would not be able to send
             * another request to the same VM (e.g. fresh OS).
             */
            pbdev->unplug_requested = true;
            s390_pci_generate_plug_event(HP_EVENT_DECONFIGURE_REQUEST,
                                         pbdev->fh, pbdev->fid);
        }
    } else {
        g_assert_not_reached();
    }
}

static void s390_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev,
                                      void *opaque)
{
    S390pciState *s = opaque;
    PCIBus *sec_bus = NULL;

    if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
         PCI_HEADER_TYPE_BRIDGE)) {
        return;
    }

    (s->bus_no)++;
    pci_default_write_config(pdev, PCI_PRIMARY_BUS, pci_dev_bus_num(pdev), 1);
    pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1);
    pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1);

    sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
    if (!sec_bus) {
        return;
    }

    /* Assign numbers to all child bridges. The last is the highest number. */
    pci_for_each_device_under_bus(sec_bus, s390_pci_enumerate_bridge, s);
    pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1);
}

static void s390_pcihost_reset(DeviceState *dev)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);
    PCIBus *bus = s->parent_obj.bus;
    S390PCIBusDevice *pbdev, *next;

    /* Process all pending unplug requests */
    QTAILQ_FOREACH_SAFE(pbdev, &s->zpci_devs, link, next) {
        if (pbdev->unplug_requested) {
            if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
                /* Interpreted devices were using interrupt forwarding */
                s390_pci_kvm_aif_disable(pbdev);
            } else if (pbdev->summary_ind) {
                pci_dereg_irqs(pbdev);
            }
            if (pbdev->iommu->enabled) {
                pci_dereg_ioat(pbdev->iommu);
            }
            pbdev->state = ZPCI_FS_STANDBY;
            s390_pci_perform_unplug(pbdev);
        }
    }

    /*
     * When resetting a PCI bridge, the assigned numbers are set to 0. So
     * on every system reset, we also have to reassign numbers.
     */
    s->bus_no = 0;
    pci_for_each_device_under_bus(bus, s390_pci_enumerate_bridge, s);
}

static void s390_pcihost_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    dc->reset = s390_pcihost_reset;
    dc->realize = s390_pcihost_realize;
    dc->unrealize = s390_pcihost_unrealize;
    hc->pre_plug = s390_pcihost_pre_plug;
    hc->plug = s390_pcihost_plug;
    hc->unplug_request = s390_pcihost_unplug_request;
    hc->unplug = s390_pcihost_unplug;
    msi_nonbroken = true;
}

static const TypeInfo s390_pcihost_info = {
    .name          = TYPE_S390_PCI_HOST_BRIDGE,
    .parent        = TYPE_PCI_HOST_BRIDGE,
    .instance_size = sizeof(S390pciState),
    .class_init    = s390_pcihost_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

static const TypeInfo s390_pcibus_info = {
    .name = TYPE_S390_PCI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(S390PCIBus),
};

static uint16_t s390_pci_generate_uid(S390pciState *s)
{
    uint16_t uid = 0;

    do {
        uid++;
        if (!s390_pci_find_dev_by_uid(s, uid)) {
            return uid;
        }
    } while (uid < ZPCI_MAX_UID);

    return UID_UNDEFINED;
}

static uint32_t s390_pci_generate_fid(S390pciState *s, Error **errp)
{
    uint32_t fid = 0;

    do {
        if (!s390_pci_find_dev_by_fid(s, fid)) {
            return fid;
        }
    } while (fid++ != ZPCI_MAX_FID);

    error_setg(errp, "no free fid could be found");
    return 0;
}

static void s390_pci_device_realize(DeviceState *dev, Error **errp)
{
    S390PCIBusDevice *zpci = S390_PCI_DEVICE(dev);
    S390pciState *s = s390_get_phb();

    if (!zpci->target) {
        error_setg(errp, "target must be defined");
        return;
    }

    if (s390_pci_find_dev_by_target(s, zpci->target)) {
        error_setg(errp, "target %s already has an associated zpci device",
                   zpci->target);
        return;
    }

    if (zpci->uid == UID_UNDEFINED) {
        zpci->uid = s390_pci_generate_uid(s);
        if (!zpci->uid) {
            error_setg(errp, "no free uid could be found");
            return;
        }
    } else if (s390_pci_find_dev_by_uid(s, zpci->uid)) {
        error_setg(errp, "uid %u already in use", zpci->uid);
        return;
    }

    if (!zpci->fid_defined) {
        Error *local_error = NULL;

        zpci->fid = s390_pci_generate_fid(s, &local_error);
        if (local_error) {
            error_propagate(errp, local_error);
            return;
        }
    } else if (s390_pci_find_dev_by_fid(s, zpci->fid)) {
        error_setg(errp, "fid %u already in use", zpci->fid);
        return;
    }

    zpci->state = ZPCI_FS_RESERVED;
    zpci->fmb.format = ZPCI_FMB_FORMAT;
}

static void s390_pci_device_reset(DeviceState *dev)
{
    S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev);

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        return;
    case ZPCI_FS_STANDBY:
        break;
    default:
        pbdev->fh &= ~FH_MASK_ENABLE;
        pbdev->state = ZPCI_FS_DISABLED;
        break;
    }

    if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
        /* Interpreted devices were using interrupt forwarding */
        s390_pci_kvm_aif_disable(pbdev);
    } else if (pbdev->summary_ind) {
        pci_dereg_irqs(pbdev);
    }
    if (pbdev->iommu->enabled) {
        pci_dereg_ioat(pbdev->iommu);
    }

    fmb_timer_free(pbdev);
}

static void s390_pci_get_fid(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    Property *prop = opaque;
    uint32_t *ptr = object_field_prop_ptr(obj, prop);

    visit_type_uint32(v, name, ptr, errp);
}

static void s390_pci_set_fid(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    S390PCIBusDevice *zpci = S390_PCI_DEVICE(obj);
    Property *prop = opaque;
    uint32_t *ptr = object_field_prop_ptr(obj, prop);

    if (!visit_type_uint32(v, name, ptr, errp)) {
        return;
    }
    zpci->fid_defined = true;
}

static const PropertyInfo s390_pci_fid_propinfo = {
    .name = "zpci_fid",
    .get = s390_pci_get_fid,
    .set = s390_pci_set_fid,
};

#define DEFINE_PROP_S390_PCI_FID(_n, _s, _f) \
    DEFINE_PROP(_n, _s, _f, s390_pci_fid_propinfo, uint32_t)

static Property s390_pci_device_properties[] = {
    DEFINE_PROP_UINT16("uid", S390PCIBusDevice, uid, UID_UNDEFINED),
    DEFINE_PROP_S390_PCI_FID("fid", S390PCIBusDevice, fid),
    DEFINE_PROP_STRING("target", S390PCIBusDevice, target),
    DEFINE_PROP_BOOL("interpret", S390PCIBusDevice, interp, true),
    DEFINE_PROP_BOOL("forwarding-assist", S390PCIBusDevice, forwarding_assist,
                     true),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription s390_pci_device_vmstate = {
    .name = TYPE_S390_PCI_DEVICE,
    /*
     * TODO: add state handling here, so migration works at least with
     * emulated pci devices on s390x
     */
    .unmigratable = 1,
};

static void s390_pci_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "zpci device";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->reset = s390_pci_device_reset;
    dc->bus_type = TYPE_S390_PCI_BUS;
    dc->realize = s390_pci_device_realize;
    device_class_set_props(dc, s390_pci_device_properties);
    dc->vmsd = &s390_pci_device_vmstate;
}

static const TypeInfo s390_pci_device_info = {
    .name = TYPE_S390_PCI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(S390PCIBusDevice),
    .class_init = s390_pci_device_class_init,
};

static const TypeInfo s390_pci_iommu_info = {
    .name = TYPE_S390_PCI_IOMMU,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(S390PCIIOMMU),
};

static void s390_iommu_memory_region_class_init(ObjectClass *klass, void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = s390_translate_iommu;
    imrc->replay = s390_pci_iommu_replay;
}

static const TypeInfo s390_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_S390_IOMMU_MEMORY_REGION,
    .class_init = s390_iommu_memory_region_class_init,
};

static void s390_pci_register_types(void)
{
    type_register_static(&s390_pcihost_info);
    type_register_static(&s390_pcibus_info);
    type_register_static(&s390_pci_device_info);
    type_register_static(&s390_pci_iommu_info);
    type_register_static(&s390_iommu_memory_region_info);
}

type_init(s390_pci_register_types)