/*
 * s390 PCI BUS
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-kvm.h"
#include "hw/s390x/s390-pci-vfio.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/msi.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"

#include "trace.h"

S390pciState *s390_get_phb(void)
{
    static S390pciState *phb;

    if (!phb) {
        phb = S390_PCI_HOST_BRIDGE(
            object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL));
        assert(phb != NULL);
    }

    return phb;
}

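/*
 * PCI availability and error events are queued on s->pending_sei by
 * s390_pci_generate_event() below and delivered to the guest one at a
 * time via CHSC store-event-information (SEI) with notification type 2.
 * The two helpers here only drain that queue; the CRW prompting the
 * guest to issue CHSC SEI is raised by css_generate_css_crws() at
 * queueing time.
 */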
int pci_chsc_sei_nt2_get_event(void *res)
{
    ChscSeiNt2Res *nt2_res = (ChscSeiNt2Res *)res;
    PciCcdfAvail *accdf;
    PciCcdfErr *eccdf;
    int rc = 1;
    SeiContainer *sei_cont;
    S390pciState *s = s390_get_phb();

    sei_cont = QTAILQ_FIRST(&s->pending_sei);
    if (sei_cont) {
        QTAILQ_REMOVE(&s->pending_sei, sei_cont, link);
        nt2_res->nt = 2;
        nt2_res->cc = sei_cont->cc;
        nt2_res->length = cpu_to_be16(sizeof(ChscSeiNt2Res));
        switch (sei_cont->cc) {
        case 1: /* error event */
            eccdf = (PciCcdfErr *)nt2_res->ccdf;
            eccdf->fid = cpu_to_be32(sei_cont->fid);
            eccdf->fh = cpu_to_be32(sei_cont->fh);
            eccdf->e = cpu_to_be32(sei_cont->e);
            eccdf->faddr = cpu_to_be64(sei_cont->faddr);
            eccdf->pec = cpu_to_be16(sei_cont->pec);
            break;
        case 2: /* availability event */
            accdf = (PciCcdfAvail *)nt2_res->ccdf;
            accdf->fid = cpu_to_be32(sei_cont->fid);
            accdf->fh = cpu_to_be32(sei_cont->fh);
            accdf->pec = cpu_to_be16(sei_cont->pec);
            break;
        default:
            abort();
        }
        g_free(sei_cont);
        rc = 0;
    }

    return rc;
}

int pci_chsc_sei_nt2_have_event(void)
{
    S390pciState *s = s390_get_phb();

    return !QTAILQ_EMPTY(&s->pending_sei);
}

S390PCIBusDevice *s390_pci_find_next_avail_dev(S390pciState *s,
                                               S390PCIBusDevice *pbdev)
{
    S390PCIBusDevice *ret = pbdev ? QTAILQ_NEXT(pbdev, link) :
        QTAILQ_FIRST(&s->zpci_devs);

    while (ret && ret->state == ZPCI_FS_RESERVED) {
        ret = QTAILQ_NEXT(ret, link);
    }

    return ret;
}

S390PCIBusDevice *s390_pci_find_dev_by_fid(S390pciState *s, uint32_t fid)
{
    S390PCIBusDevice *pbdev;

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (pbdev->fid == fid) {
            return pbdev;
        }
    }

    return NULL;
}

void s390_pci_sclp_configure(SCCB *sccb)
{
    IoaCfgSccb *psccb = (IoaCfgSccb *)sccb;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(),
                                                       be32_to_cpu(psccb->aid));
    uint16_t rc;

    if (!pbdev) {
        trace_s390_pci_sclp_nodev("configure", be32_to_cpu(psccb->aid));
        rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED;
        goto out;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE;
        break;
    case ZPCI_FS_STANDBY:
        pbdev->state = ZPCI_FS_DISABLED;
        rc = SCLP_RC_NORMAL_COMPLETION;
        break;
    default:
        rc = SCLP_RC_NO_ACTION_REQUIRED;
    }
out:
    psccb->header.response_code = cpu_to_be16(rc);
}

static void s390_pci_shutdown_notifier(Notifier *n, void *opaque)
{
    S390PCIBusDevice *pbdev = container_of(n, S390PCIBusDevice,
                                           shutdown_notifier);

    pci_device_reset(pbdev->pdev);
}

static void s390_pci_reset_cb(void *opaque)
{
    S390PCIBusDevice *pbdev = opaque;

    pci_device_reset(pbdev->pdev);
}

static void s390_pci_perform_unplug(S390PCIBusDevice *pbdev)
{
    HotplugHandler *hotplug_ctrl;

    if (pbdev->pft == ZPCI_PFT_ISM) {
        notifier_remove(&pbdev->shutdown_notifier);
        qemu_unregister_reset(s390_pci_reset_cb, pbdev);
    }

    /* Unplug the PCI device */
    if (pbdev->pdev) {
        DeviceState *pdev = DEVICE(pbdev->pdev);

        hotplug_ctrl = qdev_get_hotplug_handler(pdev);
        hotplug_handler_unplug(hotplug_ctrl, pdev, &error_abort);
        object_unparent(OBJECT(pdev));
    }

    /* Unplug the zPCI device */
    hotplug_ctrl = qdev_get_hotplug_handler(DEVICE(pbdev));
    hotplug_handler_unplug(hotplug_ctrl, DEVICE(pbdev), &error_abort);
    object_unparent(OBJECT(pbdev));
}

void s390_pci_sclp_deconfigure(SCCB *sccb)
{
    IoaCfgSccb *psccb = (IoaCfgSccb *)sccb;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(),
                                                       be32_to_cpu(psccb->aid));
    uint16_t rc;

    if (!pbdev) {
        trace_s390_pci_sclp_nodev("deconfigure", be32_to_cpu(psccb->aid));
        rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED;
        goto out;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE;
        break;
    case ZPCI_FS_STANDBY:
        rc = SCLP_RC_NO_ACTION_REQUIRED;
        break;
    default:
        if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
            /* Interpreted devices were using interrupt forwarding */
            s390_pci_kvm_aif_disable(pbdev);
        } else if (pbdev->summary_ind) {
            pci_dereg_irqs(pbdev);
        }
        if (pbdev->iommu->enabled) {
            pci_dereg_ioat(pbdev->iommu);
        }
        pbdev->state = ZPCI_FS_STANDBY;
        rc = SCLP_RC_NORMAL_COMPLETION;

        if (pbdev->unplug_requested) {
            s390_pci_perform_unplug(pbdev);
        }
    }
out:
    psccb->header.response_code = cpu_to_be16(rc);
}

static S390PCIBusDevice *s390_pci_find_dev_by_uid(S390pciState *s, uint16_t uid)
{
    S390PCIBusDevice *pbdev;

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (pbdev->uid == uid) {
            return pbdev;
        }
    }

    return NULL;
}

S390PCIBusDevice *s390_pci_find_dev_by_target(S390pciState *s,
                                              const char *target)
{
    S390PCIBusDevice *pbdev;

    if (!target) {
        return NULL;
    }

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (!strcmp(pbdev->target, target)) {
            return pbdev;
        }
    }

    return NULL;
}

static S390PCIBusDevice *s390_pci_find_dev_by_pci(S390pciState *s,
                                                  PCIDevice *pci_dev)
{
    S390PCIBusDevice *pbdev;

    if (!pci_dev) {
        return NULL;
    }

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (pbdev->pdev == pci_dev) {
            return pbdev;
        }
    }

    return NULL;
}

S390PCIBusDevice *s390_pci_find_dev_by_idx(S390pciState *s, uint32_t idx)
{
    return g_hash_table_lookup(s->zpci_table, &idx);
}

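/*
 * The low FH_MASK_INDEX bits of a function handle hold the index used as
 * the zpci_table hash key; the remaining bits carry state such as the
 * enable bit (FH_MASK_ENABLE) and the shared-handle markers assigned at
 * plug time (FH_SHM_VFIO/FH_SHM_EMUL).  The lookup below resolves the
 * index first and then requires an exact handle match, so a stale handle
 * with the right index but outdated state bits is rejected.
 */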
S390PCIBusDevice *s390_pci_find_dev_by_fh(S390pciState *s, uint32_t fh)
{
    uint32_t idx = FH_MASK_INDEX & fh;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_idx(s, idx);

    if (pbdev && pbdev->fh == fh) {
        return pbdev;
    }

    return NULL;
}

static void s390_pci_generate_event(uint8_t cc, uint16_t pec, uint32_t fh,
                                    uint32_t fid, uint64_t faddr, uint32_t e)
{
    SeiContainer *sei_cont;
    S390pciState *s = s390_get_phb();

    sei_cont = g_new0(SeiContainer, 1);
    sei_cont->fh = fh;
    sei_cont->fid = fid;
    sei_cont->cc = cc;
    sei_cont->pec = pec;
    sei_cont->faddr = faddr;
    sei_cont->e = e;

    QTAILQ_INSERT_TAIL(&s->pending_sei, sei_cont, link);
    css_generate_css_crws(0);
}

static void s390_pci_generate_plug_event(uint16_t pec, uint32_t fh,
                                         uint32_t fid)
{
    s390_pci_generate_event(2, pec, fh, fid, 0, 0);
}

void s390_pci_generate_error_event(uint16_t pec, uint32_t fh, uint32_t fid,
                                   uint64_t faddr, uint32_t e)
{
    s390_pci_generate_event(1, pec, fh, fid, faddr, e);
}

static void s390_pci_set_irq(void *opaque, int irq, int level)
{
    /* nothing to do */
}

static int s390_pci_map_irq(PCIDevice *pci_dev, int irq_num)
{
    /* nothing to do */
    return 0;
}

static uint64_t s390_pci_get_table_origin(uint64_t iota)
{
    return iota & ~ZPCI_IOTA_RTTO_FLAG;
}

static unsigned int calc_rtx(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
}

static unsigned int calc_sx(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
}

static unsigned int calc_px(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> TARGET_PAGE_BITS) & ZPCI_PT_MASK;
}

static uint64_t get_rt_sto(uint64_t entry)
{
    return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
           ? (entry & ZPCI_RTE_ADDR_MASK)
           : 0;
}

static uint64_t get_st_pto(uint64_t entry)
{
    return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
           ? (entry & ZPCI_STE_ADDR_MASK)
           : 0;
}

static bool rt_entry_isvalid(uint64_t entry)
{
    return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
}

static bool pt_entry_isvalid(uint64_t entry)
{
    return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}

static bool entry_isprotected(uint64_t entry)
{
    return (entry & ZPCI_TABLE_PROT_MASK) == ZPCI_TABLE_PROTECTED;
}

/* ett is expected table type, -1 page table, 0 segment table, 1 region table */
static uint64_t get_table_index(uint64_t iova, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return calc_px(iova);
    case ZPCI_ETT_ST:
        return calc_sx(iova);
    case ZPCI_ETT_RT:
        return calc_rtx(iova);
    }

    return -1;
}

static bool entry_isvalid(uint64_t entry, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return pt_entry_isvalid(entry);
    case ZPCI_ETT_ST:
    case ZPCI_ETT_RT:
        return rt_entry_isvalid(entry);
    }

    return false;
}

/* Return true if address translation is done */
static bool translate_iscomplete(uint64_t entry, int8_t ett)
{
    switch (ett) {
    case 0:
        return (entry & ZPCI_TABLE_FC) ? true : false;
    case 1:
        return false;
    }

    return true;
}

static uint64_t get_frame_size(int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return 1ULL << 12;
    case ZPCI_ETT_ST:
        return 1ULL << 20;
    case ZPCI_ETT_RT:
        return 1ULL << 31;
    }

    return 0;
}

static uint64_t get_next_table_origin(uint64_t entry, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return entry & ZPCI_PTE_ADDR_MASK;
    case ZPCI_ETT_ST:
        return get_st_pto(entry);
    case ZPCI_ETT_RT:
        return get_rt_sto(entry);
    }

    return 0;
}

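/*
 * The guest DMA tables walked below form a three-level hierarchy rooted
 * at the I/O translation anchor (IOTA): region third table entries each
 * cover 2 GiB and designate a segment table, segment table entries each
 * cover 1 MiB and designate a page table, and page table entries map
 * 4 KiB frames (the sizes reported by get_frame_size() above).  A
 * segment table entry with ZPCI_TABLE_FC set maps its 1 MiB frame
 * directly and ends the walk one level early.
 */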
/**
 * table_translate: do translation within one table and return the following
 * table origin
 *
 * @entry: the entry being translated, the result is stored in this.
 * @to: the address of table origin.
 * @ett: expected table type, 1 region table, 0 segment table and -1 page table.
 * @error: error code
 */
static uint64_t table_translate(S390IOTLBEntry *entry, uint64_t to, int8_t ett,
                                uint16_t *error)
{
    uint64_t tx, te, nto = 0;
    uint16_t err = 0;

    tx = get_table_index(entry->iova, ett);
    te = address_space_ldq(&address_space_memory, to + tx * sizeof(uint64_t),
                           MEMTXATTRS_UNSPECIFIED, NULL);

    if (!te) {
        err = ERR_EVENT_INVALTE;
        goto out;
    }

    if (!entry_isvalid(te, ett)) {
        entry->perm &= IOMMU_NONE;
        goto out;
    }

    if (ett == ZPCI_ETT_RT && ((te & ZPCI_TABLE_LEN_RTX) != ZPCI_TABLE_LEN_RTX
                               || te & ZPCI_TABLE_OFFSET_MASK)) {
        err = ERR_EVENT_INVALTL;
        goto out;
    }

    nto = get_next_table_origin(te, ett);
    if (!nto) {
        err = ERR_EVENT_TT;
        goto out;
    }

    if (entry_isprotected(te)) {
        entry->perm &= IOMMU_RO;
    } else {
        entry->perm &= IOMMU_RW;
    }

    if (translate_iscomplete(te, ett)) {
        switch (ett) {
        case ZPCI_ETT_PT:
            entry->translated_addr = te & ZPCI_PTE_ADDR_MASK;
            break;
        case ZPCI_ETT_ST:
            entry->translated_addr = (te & ZPCI_SFAA_MASK) |
                (entry->iova & ~ZPCI_SFAA_MASK);
            break;
        }
        nto = 0;
    }
out:
    if (err) {
        entry->perm = IOMMU_NONE;
        *error = err;
    }
    entry->len = get_frame_size(ett);
    return nto;
}

uint16_t s390_guest_io_table_walk(uint64_t g_iota, hwaddr addr,
                                  S390IOTLBEntry *entry)
{
    uint64_t to = s390_pci_get_table_origin(g_iota);
    int8_t ett = 1;
    uint16_t error = 0;

    entry->iova = addr & TARGET_PAGE_MASK;
    entry->translated_addr = 0;
    entry->perm = IOMMU_RW;

    if (entry_isprotected(g_iota)) {
        entry->perm &= IOMMU_RO;
    }

    while (to) {
        to = table_translate(entry, to, ett--, &error);
    }

    return error;
}

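/*
 * Guest-visible DMA translation consults only the per-device IOTLB cache
 * (iommu->iotlb); entries are produced by s390_guest_io_table_walk() when
 * the guest refreshes translations (see the RPCIT handling in
 * s390-pci-inst.c).  A cache miss yields a zero-permission mapping for
 * the page, and an access that exceeds the cached permissions raises an
 * error event and puts the device into the error state.
 */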
static IOMMUTLBEntry s390_translate_iommu(IOMMUMemoryRegion *mr, hwaddr addr,
                                          IOMMUAccessFlags flag, int iommu_idx)
{
    S390PCIIOMMU *iommu = container_of(mr, S390PCIIOMMU, iommu_mr);
    S390IOTLBEntry *entry;
    uint64_t iova = addr & TARGET_PAGE_MASK;
    uint16_t error = 0;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = 0,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    switch (iommu->pbdev->state) {
    case ZPCI_FS_ENABLED:
    case ZPCI_FS_BLOCKED:
        if (!iommu->enabled) {
            return ret;
        }
        break;
    default:
        return ret;
    }

    trace_s390_pci_iommu_xlate(addr);

    if (addr < iommu->pba || addr > iommu->pal) {
        error = ERR_EVENT_OORANGE;
        goto err;
    }

    entry = g_hash_table_lookup(iommu->iotlb, &iova);
    if (entry) {
        ret.iova = entry->iova;
        ret.translated_addr = entry->translated_addr;
        ret.addr_mask = entry->len - 1;
        ret.perm = entry->perm;
    } else {
        ret.iova = iova;
        ret.addr_mask = ~TARGET_PAGE_MASK;
        ret.perm = IOMMU_NONE;
    }

    if (flag != IOMMU_NONE && !(flag & ret.perm)) {
        error = ERR_EVENT_TPROTE;
    }
err:
    if (error) {
        iommu->pbdev->state = ZPCI_FS_ERROR;
        s390_pci_generate_error_event(error, iommu->pbdev->fh,
                                      iommu->pbdev->fid, addr, 0);
    }
    return ret;
}

static void s390_pci_iommu_replay(IOMMUMemoryRegion *iommu,
                                  IOMMUNotifier *notifier)
{
    /* It's impossible to plug a pci device on s390x that already has iommu
     * mappings which need to be replayed, that is due to the "one iommu per
     * zpci device" construct. But when we support migration of vfio-pci
     * devices in future, we need to revisit this.
     */
    return;
}

static S390PCIIOMMU *s390_pci_get_iommu(S390pciState *s, PCIBus *bus,
                                        int devfn)
{
    uint64_t key = (uintptr_t)bus;
    S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key);
    S390PCIIOMMU *iommu;

    if (!table) {
        table = g_new0(S390PCIIOMMUTable, 1);
        table->key = key;
        g_hash_table_insert(s->iommu_table, &table->key, table);
    }

    iommu = table->iommu[PCI_SLOT(devfn)];
    if (!iommu) {
        iommu = S390_PCI_IOMMU(object_new(TYPE_S390_PCI_IOMMU));

        char *mr_name = g_strdup_printf("iommu-root-%02x:%02x.%01x",
                                        pci_bus_num(bus),
                                        PCI_SLOT(devfn),
                                        PCI_FUNC(devfn));
        char *as_name = g_strdup_printf("iommu-pci-%02x:%02x.%01x",
                                        pci_bus_num(bus),
                                        PCI_SLOT(devfn),
                                        PCI_FUNC(devfn));
        memory_region_init(&iommu->mr, OBJECT(iommu), mr_name, UINT64_MAX);
        address_space_init(&iommu->as, &iommu->mr, as_name);
        iommu->iotlb = g_hash_table_new_full(g_int64_hash, g_int64_equal,
                                             NULL, g_free);
        table->iommu[PCI_SLOT(devfn)] = iommu;

        g_free(mr_name);
        g_free(as_name);
    }

    return iommu;
}

static AddressSpace *s390_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    S390pciState *s = opaque;
    S390PCIIOMMU *iommu = s390_pci_get_iommu(s, bus, devfn);

    return &iommu->as;
}

static const PCIIOMMUOps s390_iommu_ops = {
    .get_address_space = s390_pci_dma_iommu,
};

static uint8_t set_ind_atomic(uint64_t ind_loc, uint8_t to_be_set)
{
    uint8_t expected, actual;
    hwaddr len = 1;
    /* avoid multiple fetches */
    uint8_t volatile *ind_addr;

    ind_addr = cpu_physical_memory_map(ind_loc, &len, true);
    if (!ind_addr) {
        s390_pci_generate_error_event(ERR_EVENT_AIRERR, 0, 0, 0, 0);
        return -1;
    }
    actual = *ind_addr;
    do {
        expected = actual;
        actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set);
    } while (actual != expected);
    cpu_physical_memory_unmap((void *)ind_addr, len, 1, len);

    return actual;
}

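/*
 * Writes to the MSI-X notification region are converted into adapter
 * interrupts: the vector comes from the low bits of the written data
 * (ZPCI_MSI_VEC_MASK), the matching bit in the guest's interruption
 * indicator area is set, and then the summary indicator is set.
 * css_adapter_interrupt() is only injected when the summary bit was
 * previously clear (set_ind_atomic() returns the prior byte value), so
 * already-signalled but not yet processed interrupts are coalesced.
 */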
static void s390_msi_ctrl_write(void *opaque, hwaddr addr, uint64_t data,
                                unsigned int size)
{
    S390PCIBusDevice *pbdev = opaque;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;
    uint64_t ind_bit;
    uint32_t sum_bit;

    assert(pbdev);

    trace_s390_pci_msi_ctrl_write(data, pbdev->idx, vec);

    if (pbdev->state != ZPCI_FS_ENABLED) {
        return;
    }

    ind_bit = pbdev->routes.adapter.ind_offset;
    sum_bit = pbdev->routes.adapter.summary_offset;

    set_ind_atomic(pbdev->routes.adapter.ind_addr + (ind_bit + vec) / 8,
                   0x80 >> ((ind_bit + vec) % 8));
    if (!set_ind_atomic(pbdev->routes.adapter.summary_addr + sum_bit / 8,
                        0x80 >> (sum_bit % 8))) {
        css_adapter_interrupt(CSS_IO_ADAPTER_PCI, pbdev->isc);
    }
}

static uint64_t s390_msi_ctrl_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0xffffffff;
}

static const MemoryRegionOps s390_msi_ctrl_ops = {
    .write = s390_msi_ctrl_write,
    .read = s390_msi_ctrl_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

void s390_pci_iommu_enable(S390PCIIOMMU *iommu)
{
    /*
     * The iommu region is initialized against a 0-mapped address space,
     * so the smallest IOMMU region we can define runs from 0 to the end
     * of the PCI address space.
     */
    char *name = g_strdup_printf("iommu-s390-%04x", iommu->pbdev->uid);
    memory_region_init_iommu(&iommu->iommu_mr, sizeof(iommu->iommu_mr),
                             TYPE_S390_IOMMU_MEMORY_REGION, OBJECT(&iommu->mr),
                             name, iommu->pal + 1);
    iommu->enabled = true;
    memory_region_add_subregion(&iommu->mr, 0, MEMORY_REGION(&iommu->iommu_mr));
    g_free(name);
}

void s390_pci_iommu_disable(S390PCIIOMMU *iommu)
{
    iommu->enabled = false;
    g_hash_table_remove_all(iommu->iotlb);
    memory_region_del_subregion(&iommu->mr, MEMORY_REGION(&iommu->iommu_mr));
    object_unparent(OBJECT(&iommu->iommu_mr));
}

static void s390_pci_iommu_free(S390pciState *s, PCIBus *bus, int32_t devfn)
{
    uint64_t key = (uintptr_t)bus;
    S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key);
    S390PCIIOMMU *iommu = table ? table->iommu[PCI_SLOT(devfn)] : NULL;

    if (!table || !iommu) {
        return;
    }

    table->iommu[PCI_SLOT(devfn)] = NULL;
    g_hash_table_destroy(iommu->iotlb);
    /*
     * An attached PCI device may have memory listeners, eg. VFIO PCI.
     * The associated subregion will already have been unmapped in
     * s390_pci_iommu_disable in response to the guest deconfigure request.
     * Remove the listeners now before destroying the address space.
     */
    address_space_remove_listeners(&iommu->as);
    address_space_destroy(&iommu->as);
    object_unparent(OBJECT(&iommu->mr));
    object_unparent(OBJECT(iommu));
    object_unref(OBJECT(iommu));
}

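/*
 * Function group handling: every zPCI device belongs to a group whose
 * CLP Query PCI Function Group response is cached in zpci_group.
 * Emulated devices use the default group set up at realize time, while
 * passthrough devices may be assigned simulated groups (ids starting at
 * ZPCI_SIM_GRP_START) that mirror a host group id; see
 * s390_group_find_host_sim() and the CLP handling in s390-pci-vfio.c.
 */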
S390PCIGroup *s390_group_create(int id, int host_id)
{
    S390PCIGroup *group;
    S390pciState *s = s390_get_phb();

    group = g_new0(S390PCIGroup, 1);
    group->id = id;
    group->host_id = host_id;
    QTAILQ_INSERT_TAIL(&s->zpci_groups, group, link);
    return group;
}

S390PCIGroup *s390_group_find(int id)
{
    S390PCIGroup *group;
    S390pciState *s = s390_get_phb();

    QTAILQ_FOREACH(group, &s->zpci_groups, link) {
        if (group->id == id) {
            return group;
        }
    }
    return NULL;
}

S390PCIGroup *s390_group_find_host_sim(int host_id)
{
    S390PCIGroup *group;
    S390pciState *s = s390_get_phb();

    QTAILQ_FOREACH(group, &s->zpci_groups, link) {
        if (group->id >= ZPCI_SIM_GRP_START && group->host_id == host_id) {
            return group;
        }
    }
    return NULL;
}

static void s390_pci_init_default_group(void)
{
    S390PCIGroup *group;
    ClpRspQueryPciGrp *resgrp;

    group = s390_group_create(ZPCI_DEFAULT_FN_GRP, ZPCI_DEFAULT_FN_GRP);
    resgrp = &group->zpci_group;
    resgrp->fr = 1;
    resgrp->dasm = 0;
    resgrp->msia = ZPCI_MSI_ADDR;
    resgrp->mui = DEFAULT_MUI;
    resgrp->i = 128;
    resgrp->maxstbl = 128;
    resgrp->version = 0;
    resgrp->dtsm = ZPCI_DTSM;
}

static void set_pbdev_info(S390PCIBusDevice *pbdev)
{
    pbdev->zpci_fn.sdma = ZPCI_SDMA_ADDR;
    pbdev->zpci_fn.edma = ZPCI_EDMA_ADDR;
    pbdev->zpci_fn.pchid = 0;
    pbdev->zpci_fn.pfgid = ZPCI_DEFAULT_FN_GRP;
    pbdev->zpci_fn.fid = pbdev->fid;
    pbdev->zpci_fn.uid = pbdev->uid;
    pbdev->pci_group = s390_group_find(ZPCI_DEFAULT_FN_GRP);
}

static void s390_pcihost_realize(DeviceState *dev, Error **errp)
{
    PCIBus *b;
    BusState *bus;
    PCIHostState *phb = PCI_HOST_BRIDGE(dev);
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);

    trace_s390_pcihost("realize");

    b = pci_register_root_bus(dev, NULL, s390_pci_set_irq, s390_pci_map_irq,
                              NULL, get_system_memory(), get_system_io(), 0,
                              64, TYPE_PCI_BUS);
    pci_setup_iommu(b, &s390_iommu_ops, s);

    bus = BUS(b);
    qbus_set_hotplug_handler(bus, OBJECT(dev));
    phb->bus = b;

    s->bus = S390_PCI_BUS(qbus_new(TYPE_S390_PCI_BUS, dev, NULL));
    qbus_set_hotplug_handler(BUS(s->bus), OBJECT(dev));

    s->iommu_table = g_hash_table_new_full(g_int64_hash, g_int64_equal,
                                           NULL, g_free);
    s->zpci_table = g_hash_table_new_full(g_int_hash, g_int_equal, NULL, NULL);
    s->bus_no = 0;
    s->next_sim_grp = ZPCI_SIM_GRP_START;
    QTAILQ_INIT(&s->pending_sei);
    QTAILQ_INIT(&s->zpci_devs);
    QTAILQ_INIT(&s->zpci_dma_limit);
    QTAILQ_INIT(&s->zpci_groups);

    s390_pci_init_default_group();
    css_register_io_adapters(CSS_IO_ADAPTER_PCI, true, false,
                             S390_ADAPTER_SUPPRESSIBLE, errp);
}

static void s390_pcihost_unrealize(DeviceState *dev)
{
    S390PCIGroup *group;
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);

    while (!QTAILQ_EMPTY(&s->zpci_groups)) {
        group = QTAILQ_FIRST(&s->zpci_groups);
        QTAILQ_REMOVE(&s->zpci_groups, group, link);
    }
}

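/*
 * MSI-X setup for a plugged PCI device: the capability is read from the
 * device's config space and the table/PBA locations are recorded, then a
 * notification region backed by s390_msi_ctrl_ops is mapped into the
 * device's IOMMU region at the group's MSI address (zpci_group.msia), so
 * guest MSI-X writes are funneled into s390_msi_ctrl_write() above.
 */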
static int s390_pci_msix_init(S390PCIBusDevice *pbdev)
{
    char *name;
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;

    pos = pci_find_capability(pbdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return -1;
    }

    ctrl = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_FLAGS,
                                       pci_config_size(pbdev->pdev),
                                       sizeof(ctrl));
    table = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_TABLE,
                                        pci_config_size(pbdev->pdev),
                                        sizeof(table));
    pba = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_PBA,
                                      pci_config_size(pbdev->pdev),
                                      sizeof(pba));

    pbdev->msix.table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    name = g_strdup_printf("msix-s390-%04x", pbdev->uid);
    memory_region_init_io(&pbdev->msix_notify_mr, OBJECT(pbdev),
                          &s390_msi_ctrl_ops, pbdev, name, TARGET_PAGE_SIZE);
    memory_region_add_subregion(&pbdev->iommu->mr,
                                pbdev->pci_group->zpci_group.msia,
                                &pbdev->msix_notify_mr);
    g_free(name);

    return 0;
}

static void s390_pci_msix_free(S390PCIBusDevice *pbdev)
{
    if (pbdev->msix.entries == 0) {
        return;
    }

    memory_region_del_subregion(&pbdev->iommu->mr, &pbdev->msix_notify_mr);
    object_unparent(OBJECT(&pbdev->msix_notify_mr));
}

static S390PCIBusDevice *s390_pci_device_new(S390pciState *s,
                                             const char *target, Error **errp)
{
    Error *local_err = NULL;
    DeviceState *dev;

    dev = qdev_try_new(TYPE_S390_PCI_DEVICE);
    if (!dev) {
        error_setg(errp, "zPCI device could not be created");
        return NULL;
    }

    if (!object_property_set_str(OBJECT(dev), "target", target, &local_err)) {
        object_unparent(OBJECT(dev));
        error_propagate_prepend(errp, local_err,
                                "zPCI device could not be created: ");
        return NULL;
    }
    if (!qdev_realize_and_unref(dev, BUS(s->bus), &local_err)) {
        object_unparent(OBJECT(dev));
        error_propagate_prepend(errp, local_err,
                                "zPCI device could not be created: ");
        return NULL;
    }

    return S390_PCI_DEVICE(dev);
}

static bool s390_pci_alloc_idx(S390pciState *s, S390PCIBusDevice *pbdev)
{
    uint32_t idx;

    idx = s->next_idx;
    while (s390_pci_find_dev_by_idx(s, idx)) {
        idx = (idx + 1) & FH_MASK_INDEX;
        if (idx == s->next_idx) {
            return false;
        }
    }

    pbdev->idx = idx;
    return true;
}

static void s390_pcihost_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);

    if (!s390_has_feat(S390_FEAT_ZPCI)) {
        warn_report("Plugging a PCI/zPCI device without the 'zpci' CPU "
                    "feature enabled; the guest will not be able to see/use "
                    "this device");
    }

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pdev = PCI_DEVICE(dev);

        if (pdev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
            error_setg(errp, "multifunction not supported in s390");
            return;
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev);

        if (!s390_pci_alloc_idx(s, pbdev)) {
            error_setg(errp, "no slot for plugging zpci device");
            return;
        }
    }
}

static void s390_pci_update_subordinate(PCIDevice *dev, uint32_t nr)
{
    uint32_t old_nr;

    pci_default_write_config(dev, PCI_SUBORDINATE_BUS, nr, 1);
    while (!pci_bus_is_root(pci_get_bus(dev))) {
        dev = pci_get_bus(dev)->parent_dev;

        old_nr = pci_default_read_config(dev, PCI_SUBORDINATE_BUS, 1);
        if (old_nr < nr) {
            pci_default_write_config(dev, PCI_SUBORDINATE_BUS, nr, 1);
        }
    }
}

static int s390_pci_interp_plug(S390pciState *s, S390PCIBusDevice *pbdev)
{
    uint32_t idx, fh;

    if (!s390_pci_get_host_fh(pbdev, &fh)) {
        return -EPERM;
    }

    /*
     * The host device is already in an enabled state, but we always present
     * the initial device state to the guest as disabled (ZPCI_FS_DISABLED).
     * Therefore, mask off the enable bit from the passthrough handle until
     * the guest issues a CLP SET PCI FN later to enable the device.
     */
    pbdev->fh = fh & ~FH_MASK_ENABLE;

    /* Next, see if the idx is already in-use */
    idx = pbdev->fh & FH_MASK_INDEX;
    if (pbdev->idx != idx) {
        if (s390_pci_find_dev_by_idx(s, idx)) {
            return -EINVAL;
        }
        /*
         * Update the idx entry with the passed through idx
         * If the relinquished idx is lower than next_idx, use it
         * to replace next_idx
         */
        g_hash_table_remove(s->zpci_table, &pbdev->idx);
        if (idx < s->next_idx) {
            s->next_idx = idx;
        }
        pbdev->idx = idx;
        g_hash_table_insert(s->zpci_table, &pbdev->idx, pbdev);
    }

    return 0;
}

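/*
 * Plug handling distinguishes three cases: PCI bridges (which get bus
 * numbers assigned and their secondary bus wired up for hotplug and
 * IOMMU handling), conventional PCI devices (which are paired with a
 * zPCI companion device, auto-created with an "auto_XX:XX.X" target
 * derived from the PCI address if none was given), and the zPCI
 * companion devices themselves (which consume the index reserved at
 * pre-plug time).
 */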
static void s390_pcihost_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                              Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    PCIDevice *pdev = NULL;
    S390PCIBusDevice *pbdev = NULL;
    int rc;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
        PCIBridge *pb = PCI_BRIDGE(dev);

        pdev = PCI_DEVICE(dev);
        pci_bridge_map_irq(pb, dev->id, s390_pci_map_irq);
        pci_setup_iommu(&pb->sec_bus, &s390_iommu_ops, s);

        qbus_set_hotplug_handler(BUS(&pb->sec_bus), OBJECT(s));

        if (dev->hotplugged) {
            pci_default_write_config(pdev, PCI_PRIMARY_BUS,
                                     pci_dev_bus_num(pdev), 1);
            s->bus_no += 1;
            pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1);

            s390_pci_update_subordinate(pdev, s->bus_no);
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        pdev = PCI_DEVICE(dev);

        if (!dev->id) {
            /* In the case the PCI device does not define an id */
            /* we generate one based on the PCI address */
            dev->id = g_strdup_printf("auto_%02x:%02x.%01x",
                                      pci_dev_bus_num(pdev),
                                      PCI_SLOT(pdev->devfn),
                                      PCI_FUNC(pdev->devfn));
        }

        pbdev = s390_pci_find_dev_by_target(s, dev->id);
        if (!pbdev) {
            pbdev = s390_pci_device_new(s, dev->id, errp);
            if (!pbdev) {
                return;
            }
        }

        pbdev->pdev = pdev;
        pbdev->iommu = s390_pci_get_iommu(s, pci_get_bus(pdev), pdev->devfn);
        pbdev->iommu->pbdev = pbdev;
        pbdev->state = ZPCI_FS_DISABLED;
        set_pbdev_info(pbdev);

        if (object_dynamic_cast(OBJECT(dev), "vfio-pci")) {
            /*
             * By default, interpretation is always requested; if the available
             * facilities indicate it is not available, fallback to the
             * interception model.
             */
            if (pbdev->interp) {
                if (s390_pci_kvm_interp_allowed()) {
                    rc = s390_pci_interp_plug(s, pbdev);
                    if (rc) {
                        error_setg(errp, "Plug failed for zPCI device in "
                                   "interpretation mode: %d", rc);
                        return;
                    }
                } else {
                    trace_s390_pcihost("zPCI interpretation missing");
                    pbdev->interp = false;
                    pbdev->forwarding_assist = false;
                }
            }
            pbdev->iommu->dma_limit = s390_pci_start_dma_count(s, pbdev);
            /* Fill in CLP information passed via the vfio region */
            s390_pci_get_clp_info(pbdev);
            if (!pbdev->interp) {
                /* Do vfio passthrough but intercept for I/O */
                pbdev->fh |= FH_SHM_VFIO;
                pbdev->forwarding_assist = false;
            }
            /* Register shutdown notifier and reset callback for ISM devices */
            if (pbdev->pft == ZPCI_PFT_ISM) {
                pbdev->shutdown_notifier.notify = s390_pci_shutdown_notifier;
                qemu_register_shutdown_notifier(&pbdev->shutdown_notifier);
                qemu_register_reset(s390_pci_reset_cb, pbdev);
            }
        } else {
            pbdev->fh |= FH_SHM_EMUL;
            /* Always intercept emulated devices */
            pbdev->interp = false;
            pbdev->forwarding_assist = false;
        }

        if (s390_pci_msix_init(pbdev) && !pbdev->interp) {
            error_setg(errp, "MSI-X support is mandatory "
                       "in the S390 architecture");
            return;
        }

        if (dev->hotplugged) {
            s390_pci_generate_plug_event(HP_EVENT_TO_CONFIGURED,
                                         pbdev->fh, pbdev->fid);
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);

        /* the allocated idx is actually getting used */
        s->next_idx = (pbdev->idx + 1) & FH_MASK_INDEX;
        pbdev->fh = pbdev->idx;
        QTAILQ_INSERT_TAIL(&s->zpci_devs, pbdev, link);
        g_hash_table_insert(s->zpci_table, &pbdev->idx, pbdev);
    } else {
        g_assert_not_reached();
    }
}

static void s390_pcihost_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    S390PCIBusDevice *pbdev = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        PCIBus *bus;
        int32_t devfn;

        pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev));
        g_assert(pbdev);

        s390_pci_generate_plug_event(HP_EVENT_STANDBY_TO_RESERVED,
                                     pbdev->fh, pbdev->fid);
        bus = pci_get_bus(pci_dev);
        devfn = pci_dev->devfn;
        qdev_unrealize(dev);

        s390_pci_msix_free(pbdev);
        s390_pci_iommu_free(s, bus, devfn);
        pbdev->pdev = NULL;
        pbdev->state = ZPCI_FS_RESERVED;
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);
        pbdev->fid = 0;
        QTAILQ_REMOVE(&s->zpci_devs, pbdev, link);
        g_hash_table_remove(s->zpci_table, &pbdev->idx);
        if (pbdev->iommu->dma_limit) {
            s390_pci_end_dma_count(s, pbdev->iommu->dma_limit);
        }
        qdev_unrealize(dev);
    }
}

static void s390_pcihost_unplug_request(HotplugHandler *hotplug_dev,
                                        DeviceState *dev,
                                        Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    S390PCIBusDevice *pbdev;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
        error_setg(errp, "PCI bridge hot unplug currently not supported");
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        /*
         * Redirect the unplug request to the zPCI device and remember that
         * we've checked the PCI device already (to prevent endless recursion).
         */
        pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev));
        g_assert(pbdev);
        pbdev->pci_unplug_request_processed = true;
        qdev_unplug(DEVICE(pbdev), errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);

        /*
         * If unplug was initially requested for the zPCI device, we
         * first have to redirect to the PCI device, which will in return
         * redirect back to us after performing its checks (if the request
         * is not blocked, e.g. because it's a PCI bridge).
         */
        if (pbdev->pdev && !pbdev->pci_unplug_request_processed) {
            qdev_unplug(DEVICE(pbdev->pdev), errp);
            return;
        }
        pbdev->pci_unplug_request_processed = false;

        switch (pbdev->state) {
        case ZPCI_FS_STANDBY:
        case ZPCI_FS_RESERVED:
            s390_pci_perform_unplug(pbdev);
            break;
        default:
            /*
             * Allow to send multiple requests, e.g. if the guest crashed
             * before releasing the device, we would not be able to send
             * another request to the same VM (e.g. fresh OS).
             */
            pbdev->unplug_requested = true;
            s390_pci_generate_plug_event(HP_EVENT_DECONFIGURE_REQUEST,
                                         pbdev->fh, pbdev->fid);
        }
    } else {
        g_assert_not_reached();
    }
}

static void s390_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev,
                                      void *opaque)
{
    S390pciState *s = opaque;
    PCIBus *sec_bus = NULL;

    if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
         PCI_HEADER_TYPE_BRIDGE)) {
        return;
    }

    (s->bus_no)++;
    pci_default_write_config(pdev, PCI_PRIMARY_BUS, pci_dev_bus_num(pdev), 1);
    pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1);
    pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1);

    sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
    if (!sec_bus) {
        return;
    }

    /* Assign numbers to all child bridges. The last is the highest number. */
    pci_for_each_device_under_bus(sec_bus, s390_pci_enumerate_bridge, s);
    pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1);
}

static void s390_pcihost_reset(DeviceState *dev)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);
    PCIBus *bus = s->parent_obj.bus;
    S390PCIBusDevice *pbdev, *next;

    /* Process all pending unplug requests */
    QTAILQ_FOREACH_SAFE(pbdev, &s->zpci_devs, link, next) {
        if (pbdev->unplug_requested) {
            if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
                /* Interpreted devices were using interrupt forwarding */
                s390_pci_kvm_aif_disable(pbdev);
            } else if (pbdev->summary_ind) {
                pci_dereg_irqs(pbdev);
            }
            if (pbdev->iommu->enabled) {
                pci_dereg_ioat(pbdev->iommu);
            }
            pbdev->state = ZPCI_FS_STANDBY;
            s390_pci_perform_unplug(pbdev);
        }
    }

    /*
     * When resetting a PCI bridge, the assigned numbers are set to 0. So
     * on every system reset, we also have to reassign numbers.
     */
    s->bus_no = 0;
    pci_for_each_device_under_bus(bus, s390_pci_enumerate_bridge, s);
}

static void s390_pcihost_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    dc->reset = s390_pcihost_reset;
    dc->realize = s390_pcihost_realize;
    dc->unrealize = s390_pcihost_unrealize;
    hc->pre_plug = s390_pcihost_pre_plug;
    hc->plug = s390_pcihost_plug;
    hc->unplug_request = s390_pcihost_unplug_request;
    hc->unplug = s390_pcihost_unplug;
    msi_nonbroken = true;
}

static const TypeInfo s390_pcihost_info = {
    .name = TYPE_S390_PCI_HOST_BRIDGE,
    .parent = TYPE_PCI_HOST_BRIDGE,
    .instance_size = sizeof(S390pciState),
    .class_init = s390_pcihost_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

static const TypeInfo s390_pcibus_info = {
    .name = TYPE_S390_PCI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(S390PCIBus),
};

static uint16_t s390_pci_generate_uid(S390pciState *s)
{
    uint16_t uid = 0;

    do {
        uid++;
        if (!s390_pci_find_dev_by_uid(s, uid)) {
            return uid;
        }
    } while (uid < ZPCI_MAX_UID);

    return UID_UNDEFINED;
}

static uint32_t s390_pci_generate_fid(S390pciState *s, Error **errp)
{
    uint32_t fid = 0;

    do {
        if (!s390_pci_find_dev_by_fid(s, fid)) {
            return fid;
        }
    } while (fid++ != ZPCI_MAX_FID);

    error_setg(errp, "no free fid could be found");
    return 0;
}

static void s390_pci_device_realize(DeviceState *dev, Error **errp)
{
    S390PCIBusDevice *zpci = S390_PCI_DEVICE(dev);
    S390pciState *s = s390_get_phb();

    if (!zpci->target) {
        error_setg(errp, "target must be defined");
        return;
    }

    if (s390_pci_find_dev_by_target(s, zpci->target)) {
        error_setg(errp, "target %s already has an associated zpci device",
                   zpci->target);
        return;
    }

    if (zpci->uid == UID_UNDEFINED) {
        zpci->uid = s390_pci_generate_uid(s);
        if (!zpci->uid) {
            error_setg(errp, "no free uid could be found");
            return;
        }
    } else if (s390_pci_find_dev_by_uid(s, zpci->uid)) {
        error_setg(errp, "uid %u already in use", zpci->uid);
        return;
    }

    if (!zpci->fid_defined) {
        Error *local_error = NULL;

        zpci->fid = s390_pci_generate_fid(s, &local_error);
        if (local_error) {
            error_propagate(errp, local_error);
            return;
        }
    } else if (s390_pci_find_dev_by_fid(s, zpci->fid)) {
        error_setg(errp, "fid %u already in use", zpci->fid);
        return;
    }

    zpci->state = ZPCI_FS_RESERVED;
    zpci->fmb.format = ZPCI_FMB_FORMAT;
}

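/*
 * Per-device reset: reserved devices are left untouched, standby devices
 * fall through without a state change, and anything else is forced back
 * to the disabled state with its enable bit cleared.  Interrupt
 * forwarding or registered IRQs and any guest IOAT registration are torn
 * down, and the function measurement block timer is released.
 */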
static void s390_pci_device_reset(DeviceState *dev)
{
    S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev);

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        return;
    case ZPCI_FS_STANDBY:
        break;
    default:
        pbdev->fh &= ~FH_MASK_ENABLE;
        pbdev->state = ZPCI_FS_DISABLED;
        break;
    }

    if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
        /* Interpreted devices were using interrupt forwarding */
        s390_pci_kvm_aif_disable(pbdev);
    } else if (pbdev->summary_ind) {
        pci_dereg_irqs(pbdev);
    }
    if (pbdev->iommu->enabled) {
        pci_dereg_ioat(pbdev->iommu);
    }

    fmb_timer_free(pbdev);
}

static void s390_pci_get_fid(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    Property *prop = opaque;
    uint32_t *ptr = object_field_prop_ptr(obj, prop);

    visit_type_uint32(v, name, ptr, errp);
}

static void s390_pci_set_fid(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    S390PCIBusDevice *zpci = S390_PCI_DEVICE(obj);
    Property *prop = opaque;
    uint32_t *ptr = object_field_prop_ptr(obj, prop);

    if (!visit_type_uint32(v, name, ptr, errp)) {
        return;
    }
    zpci->fid_defined = true;
}

static const PropertyInfo s390_pci_fid_propinfo = {
    .name = "zpci_fid",
    .get = s390_pci_get_fid,
    .set = s390_pci_set_fid,
};

#define DEFINE_PROP_S390_PCI_FID(_n, _s, _f) \
    DEFINE_PROP(_n, _s, _f, s390_pci_fid_propinfo, uint32_t)

static Property s390_pci_device_properties[] = {
    DEFINE_PROP_UINT16("uid", S390PCIBusDevice, uid, UID_UNDEFINED),
    DEFINE_PROP_S390_PCI_FID("fid", S390PCIBusDevice, fid),
    DEFINE_PROP_STRING("target", S390PCIBusDevice, target),
    DEFINE_PROP_BOOL("interpret", S390PCIBusDevice, interp, true),
    DEFINE_PROP_BOOL("forwarding-assist", S390PCIBusDevice, forwarding_assist,
                     true),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription s390_pci_device_vmstate = {
    .name = TYPE_S390_PCI_DEVICE,
    /*
     * TODO: add state handling here, so migration works at least with
     * emulated pci devices on s390x
     */
    .unmigratable = 1,
};

static void s390_pci_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "zpci device";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->reset = s390_pci_device_reset;
    dc->bus_type = TYPE_S390_PCI_BUS;
    dc->realize = s390_pci_device_realize;
    device_class_set_props(dc, s390_pci_device_properties);
    dc->vmsd = &s390_pci_device_vmstate;
}

static const TypeInfo s390_pci_device_info = {
    .name = TYPE_S390_PCI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(S390PCIBusDevice),
    .class_init = s390_pci_device_class_init,
};

static const TypeInfo s390_pci_iommu_info = {
    .name = TYPE_S390_PCI_IOMMU,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(S390PCIIOMMU),
};

static void s390_iommu_memory_region_class_init(ObjectClass *klass, void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = s390_translate_iommu;
    imrc->replay = s390_pci_iommu_replay;
}

static const TypeInfo s390_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_S390_IOMMU_MEMORY_REGION,
    .class_init = s390_iommu_memory_region_class_init,
};

static void s390_pci_register_types(void)
{
    type_register_static(&s390_pcihost_info);
    type_register_static(&s390_pcibus_info);
    type_register_static(&s390_pci_device_info);
    type_register_static(&s390_pci_iommu_info);
    type_register_static(&s390_iommu_memory_region_info);
}

type_init(s390_pci_register_types)