1 /* 2 * s390 PCI BUS 3 * 4 * Copyright 2014 IBM Corp. 5 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com> 6 * Hong Bo Li <lihbbj@cn.ibm.com> 7 * Yi Min Zhao <zyimin@cn.ibm.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2 or (at 10 * your option) any later version. See the COPYING file in the top-level 11 * directory. 12 */ 13 14 #include "qemu/osdep.h" 15 #include "qapi/error.h" 16 #include "qapi/visitor.h" 17 #include "exec/target_page.h" 18 #include "hw/s390x/s390-pci-bus.h" 19 #include "hw/s390x/s390-pci-inst.h" 20 #include "hw/s390x/s390-pci-kvm.h" 21 #include "hw/s390x/s390-pci-vfio.h" 22 #include "hw/s390x/s390-virtio-ccw.h" 23 #include "hw/boards.h" 24 #include "hw/pci/pci_bus.h" 25 #include "hw/qdev-properties.h" 26 #include "hw/pci/pci_bridge.h" 27 #include "hw/pci/msi.h" 28 #include "qemu/error-report.h" 29 #include "qemu/module.h" 30 #include "system/reset.h" 31 #include "system/runstate.h" 32 33 #include "trace.h" 34 35 S390pciState *s390_get_phb(void) 36 { 37 static S390pciState *phb; 38 39 if (!phb) { 40 phb = S390_PCI_HOST_BRIDGE( 41 object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL)); 42 assert(phb != NULL); 43 } 44 45 return phb; 46 } 47 48 int pci_chsc_sei_nt2_get_event(void *res) 49 { 50 ChscSeiNt2Res *nt2_res = (ChscSeiNt2Res *)res; 51 PciCcdfAvail *accdf; 52 PciCcdfErr *eccdf; 53 int rc = 1; 54 SeiContainer *sei_cont; 55 S390pciState *s = s390_get_phb(); 56 57 sei_cont = QTAILQ_FIRST(&s->pending_sei); 58 if (sei_cont) { 59 QTAILQ_REMOVE(&s->pending_sei, sei_cont, link); 60 nt2_res->nt = 2; 61 nt2_res->cc = sei_cont->cc; 62 nt2_res->length = cpu_to_be16(sizeof(ChscSeiNt2Res)); 63 switch (sei_cont->cc) { 64 case 1: /* error event */ 65 eccdf = (PciCcdfErr *)nt2_res->ccdf; 66 eccdf->fid = cpu_to_be32(sei_cont->fid); 67 eccdf->fh = cpu_to_be32(sei_cont->fh); 68 eccdf->e = cpu_to_be32(sei_cont->e); 69 eccdf->faddr = cpu_to_be64(sei_cont->faddr); 70 eccdf->pec = cpu_to_be16(sei_cont->pec); 71 break; 72 case 2: /* availability event */ 73 accdf = (PciCcdfAvail *)nt2_res->ccdf; 74 accdf->fid = cpu_to_be32(sei_cont->fid); 75 accdf->fh = cpu_to_be32(sei_cont->fh); 76 accdf->pec = cpu_to_be16(sei_cont->pec); 77 break; 78 default: 79 abort(); 80 } 81 g_free(sei_cont); 82 rc = 0; 83 } 84 85 return rc; 86 } 87 88 int pci_chsc_sei_nt2_have_event(void) 89 { 90 S390pciState *s = s390_get_phb(); 91 92 return !QTAILQ_EMPTY(&s->pending_sei); 93 } 94 95 S390PCIBusDevice *s390_pci_find_next_avail_dev(S390pciState *s, 96 S390PCIBusDevice *pbdev) 97 { 98 S390PCIBusDevice *ret = pbdev ? QTAILQ_NEXT(pbdev, link) : 99 QTAILQ_FIRST(&s->zpci_devs); 100 101 while (ret && ret->state == ZPCI_FS_RESERVED) { 102 ret = QTAILQ_NEXT(ret, link); 103 } 104 105 return ret; 106 } 107 108 S390PCIBusDevice *s390_pci_find_dev_by_fid(S390pciState *s, uint32_t fid) 109 { 110 S390PCIBusDevice *pbdev; 111 112 QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) { 113 if (pbdev->fid == fid) { 114 return pbdev; 115 } 116 } 117 118 return NULL; 119 } 120 121 void s390_pci_sclp_configure(SCCB *sccb) 122 { 123 IoaCfgSccb *psccb = (IoaCfgSccb *)sccb; 124 S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(), 125 be32_to_cpu(psccb->aid)); 126 uint16_t rc; 127 128 if (!pbdev) { 129 trace_s390_pci_sclp_nodev("configure", be32_to_cpu(psccb->aid)); 130 rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED; 131 goto out; 132 } 133 134 switch (pbdev->state) { 135 case ZPCI_FS_RESERVED: 136 rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE; 137 break; 138 case ZPCI_FS_STANDBY: 139 pbdev->state = ZPCI_FS_DISABLED; 140 rc = SCLP_RC_NORMAL_COMPLETION; 141 break; 142 default: 143 rc = SCLP_RC_NO_ACTION_REQUIRED; 144 } 145 out: 146 psccb->header.response_code = cpu_to_be16(rc); 147 } 148 149 static void s390_pci_shutdown_notifier(Notifier *n, void *opaque) 150 { 151 S390PCIBusDevice *pbdev = container_of(n, S390PCIBusDevice, 152 shutdown_notifier); 153 154 pci_device_reset(pbdev->pdev); 155 } 156 157 static void s390_pci_perform_unplug(S390PCIBusDevice *pbdev) 158 { 159 HotplugHandler *hotplug_ctrl; 160 161 if (pbdev->pft == ZPCI_PFT_ISM) { 162 notifier_remove(&pbdev->shutdown_notifier); 163 } 164 165 /* Unplug the PCI device */ 166 if (pbdev->pdev) { 167 DeviceState *pdev = DEVICE(pbdev->pdev); 168 169 hotplug_ctrl = qdev_get_hotplug_handler(pdev); 170 hotplug_handler_unplug(hotplug_ctrl, pdev, &error_abort); 171 object_unparent(OBJECT(pdev)); 172 } 173 174 /* Unplug the zPCI device */ 175 hotplug_ctrl = qdev_get_hotplug_handler(DEVICE(pbdev)); 176 hotplug_handler_unplug(hotplug_ctrl, DEVICE(pbdev), &error_abort); 177 object_unparent(OBJECT(pbdev)); 178 } 179 180 void s390_pci_sclp_deconfigure(SCCB *sccb) 181 { 182 IoaCfgSccb *psccb = (IoaCfgSccb *)sccb; 183 S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(), 184 be32_to_cpu(psccb->aid)); 185 uint16_t rc; 186 187 if (!pbdev) { 188 trace_s390_pci_sclp_nodev("deconfigure", be32_to_cpu(psccb->aid)); 189 rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED; 190 goto out; 191 } 192 193 switch (pbdev->state) { 194 case ZPCI_FS_RESERVED: 195 rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE; 196 break; 197 case ZPCI_FS_STANDBY: 198 rc = SCLP_RC_NO_ACTION_REQUIRED; 199 break; 200 default: 201 if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) { 202 /* Interpreted devices were using interrupt forwarding */ 203 s390_pci_kvm_aif_disable(pbdev); 204 } else if (pbdev->summary_ind) { 205 pci_dereg_irqs(pbdev); 206 } 207 if (pbdev->iommu->enabled) { 208 pci_dereg_ioat(pbdev->iommu); 209 } 210 pbdev->state = ZPCI_FS_STANDBY; 211 rc = SCLP_RC_NORMAL_COMPLETION; 212 213 if (pbdev->unplug_requested) { 214 s390_pci_perform_unplug(pbdev); 215 } 216 } 217 out: 218 psccb->header.response_code = cpu_to_be16(rc); 219 } 220 221 static S390PCIBusDevice *s390_pci_find_dev_by_uid(S390pciState *s, uint16_t uid) 222 { 223 S390PCIBusDevice *pbdev; 224 225 QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) { 226 if (pbdev->uid == uid) { 227 return pbdev; 228 } 229 } 230 231 return NULL; 232 } 233 234 S390PCIBusDevice *s390_pci_find_dev_by_target(S390pciState *s, 235 const char *target) 236 { 237 S390PCIBusDevice *pbdev; 238 239 if (!target) { 240 return NULL; 241 } 242 243 QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) { 244 if (!strcmp(pbdev->target, target)) { 245 return pbdev; 246 } 247 } 248 249 return NULL; 250 } 251 252 static S390PCIBusDevice *s390_pci_find_dev_by_pci(S390pciState *s, 253 PCIDevice *pci_dev) 254 { 255 S390PCIBusDevice *pbdev; 256 257 if (!pci_dev) { 258 return NULL; 259 } 260 261 QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) { 262 if (pbdev->pdev == pci_dev) { 263 return pbdev; 264 } 265 } 266 267 return NULL; 268 } 269 270 S390PCIBusDevice *s390_pci_find_dev_by_idx(S390pciState *s, uint32_t idx) 271 { 272 return g_hash_table_lookup(s->zpci_table, &idx); 273 } 274 275 S390PCIBusDevice *s390_pci_find_dev_by_fh(S390pciState *s, uint32_t fh) 276 { 277 uint32_t idx = FH_MASK_INDEX & fh; 278 S390PCIBusDevice *pbdev = s390_pci_find_dev_by_idx(s, idx); 279 280 if (pbdev && pbdev->fh == fh) { 281 return pbdev; 282 } 283 284 return NULL; 285 } 286 287 static void s390_pci_generate_event(uint8_t cc, uint16_t pec, uint32_t fh, 288 uint32_t fid, uint64_t faddr, uint32_t e) 289 { 290 SeiContainer *sei_cont; 291 S390pciState *s = s390_get_phb(); 292 293 sei_cont = g_new0(SeiContainer, 1); 294 sei_cont->fh = fh; 295 sei_cont->fid = fid; 296 sei_cont->cc = cc; 297 sei_cont->pec = pec; 298 sei_cont->faddr = faddr; 299 sei_cont->e = e; 300 301 QTAILQ_INSERT_TAIL(&s->pending_sei, sei_cont, link); 302 css_generate_css_crws(0); 303 } 304 305 static void s390_pci_generate_plug_event(uint16_t pec, uint32_t fh, 306 uint32_t fid) 307 { 308 s390_pci_generate_event(2, pec, fh, fid, 0, 0); 309 } 310 311 void s390_pci_generate_error_event(uint16_t pec, uint32_t fh, uint32_t fid, 312 uint64_t faddr, uint32_t e) 313 { 314 s390_pci_generate_event(1, pec, fh, fid, faddr, e); 315 } 316 317 static void s390_pci_set_irq(void *opaque, int irq, int level) 318 { 319 /* nothing to do */ 320 } 321 322 static int s390_pci_map_irq(PCIDevice *pci_dev, int irq_num) 323 { 324 /* nothing to do */ 325 return 0; 326 } 327 328 static uint64_t s390_pci_get_table_origin(uint64_t iota) 329 { 330 return iota & ~ZPCI_IOTA_RTTO_FLAG; 331 } 332 333 static unsigned int calc_rtx(dma_addr_t ptr) 334 { 335 return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK; 336 } 337 338 static unsigned int calc_sx(dma_addr_t ptr) 339 { 340 return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK; 341 } 342 343 static unsigned int calc_px(dma_addr_t ptr) 344 { 345 return ((unsigned long) ptr >> TARGET_PAGE_BITS) & ZPCI_PT_MASK; 346 } 347 348 static uint64_t get_rt_sto(uint64_t entry) 349 { 350 return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX) 351 ? (entry & ZPCI_RTE_ADDR_MASK) 352 : 0; 353 } 354 355 static uint64_t get_st_pto(uint64_t entry) 356 { 357 return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX) 358 ? (entry & ZPCI_STE_ADDR_MASK) 359 : 0; 360 } 361 362 static bool rt_entry_isvalid(uint64_t entry) 363 { 364 return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID; 365 } 366 367 static bool pt_entry_isvalid(uint64_t entry) 368 { 369 return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID; 370 } 371 372 static bool entry_isprotected(uint64_t entry) 373 { 374 return (entry & ZPCI_TABLE_PROT_MASK) == ZPCI_TABLE_PROTECTED; 375 } 376 377 /* ett is expected table type, -1 page table, 0 segment table, 1 region table */ 378 static uint64_t get_table_index(uint64_t iova, int8_t ett) 379 { 380 switch (ett) { 381 case ZPCI_ETT_PT: 382 return calc_px(iova); 383 case ZPCI_ETT_ST: 384 return calc_sx(iova); 385 case ZPCI_ETT_RT: 386 return calc_rtx(iova); 387 default: 388 g_assert_not_reached(); 389 } 390 } 391 392 static bool entry_isvalid(uint64_t entry, int8_t ett) 393 { 394 switch (ett) { 395 case ZPCI_ETT_PT: 396 return pt_entry_isvalid(entry); 397 case ZPCI_ETT_ST: 398 case ZPCI_ETT_RT: 399 return rt_entry_isvalid(entry); 400 default: 401 g_assert_not_reached(); 402 } 403 } 404 405 /* Return true if address translation is done */ 406 static bool translate_iscomplete(uint64_t entry, int8_t ett) 407 { 408 switch (ett) { 409 case ZPCI_ETT_ST: 410 return (entry & ZPCI_TABLE_FC) ? true : false; 411 case ZPCI_ETT_RT: 412 return false; 413 case ZPCI_ETT_PT: 414 return true; 415 default: 416 g_assert_not_reached(); 417 } 418 } 419 420 static uint64_t get_frame_size(int8_t ett) 421 { 422 switch (ett) { 423 case ZPCI_ETT_PT: 424 return 1ULL << 12; 425 case ZPCI_ETT_ST: 426 return 1ULL << 20; 427 case ZPCI_ETT_RT: 428 return 1ULL << 31; 429 default: 430 g_assert_not_reached(); 431 } 432 } 433 434 static uint64_t get_next_table_origin(uint64_t entry, int8_t ett) 435 { 436 switch (ett) { 437 case ZPCI_ETT_PT: 438 return entry & ZPCI_PTE_ADDR_MASK; 439 case ZPCI_ETT_ST: 440 return get_st_pto(entry); 441 case ZPCI_ETT_RT: 442 return get_rt_sto(entry); 443 default: 444 g_assert_not_reached(); 445 } 446 } 447 448 /** 449 * table_translate: do translation within one table and return the following 450 * table origin 451 * 452 * @entry: the entry being translated, the result is stored in this. 453 * @to: the address of table origin. 454 * @ett: expected table type, 1 region table, 0 segment table and -1 page table. 455 * @error: error code 456 */ 457 static uint64_t table_translate(S390IOTLBEntry *entry, uint64_t to, int8_t ett, 458 uint16_t *error) 459 { 460 uint64_t tx, te, nto = 0; 461 uint16_t err = 0; 462 463 tx = get_table_index(entry->iova, ett); 464 te = address_space_ldq(&address_space_memory, to + tx * sizeof(uint64_t), 465 MEMTXATTRS_UNSPECIFIED, NULL); 466 467 if (!te) { 468 err = ERR_EVENT_INVALTE; 469 goto out; 470 } 471 472 if (!entry_isvalid(te, ett)) { 473 entry->perm &= IOMMU_NONE; 474 goto out; 475 } 476 477 if (ett == ZPCI_ETT_RT && ((te & ZPCI_TABLE_LEN_RTX) != ZPCI_TABLE_LEN_RTX 478 || te & ZPCI_TABLE_OFFSET_MASK)) { 479 err = ERR_EVENT_INVALTL; 480 goto out; 481 } 482 483 nto = get_next_table_origin(te, ett); 484 if (!nto) { 485 err = ERR_EVENT_TT; 486 goto out; 487 } 488 489 if (entry_isprotected(te)) { 490 entry->perm &= IOMMU_RO; 491 } else { 492 entry->perm &= IOMMU_RW; 493 } 494 495 if (translate_iscomplete(te, ett)) { 496 switch (ett) { 497 case ZPCI_ETT_PT: 498 entry->translated_addr = te & ZPCI_PTE_ADDR_MASK; 499 break; 500 case ZPCI_ETT_ST: 501 entry->translated_addr = (te & ZPCI_SFAA_MASK) | 502 (entry->iova & ~ZPCI_SFAA_MASK); 503 break; 504 } 505 nto = 0; 506 } 507 out: 508 if (err) { 509 entry->perm = IOMMU_NONE; 510 *error = err; 511 } 512 entry->len = get_frame_size(ett); 513 return nto; 514 } 515 516 uint16_t s390_guest_io_table_walk(uint64_t g_iota, hwaddr addr, 517 S390IOTLBEntry *entry) 518 { 519 uint64_t to = s390_pci_get_table_origin(g_iota); 520 int8_t ett = 1; 521 uint16_t error = 0; 522 523 entry->iova = addr & TARGET_PAGE_MASK; 524 entry->translated_addr = 0; 525 entry->perm = IOMMU_RW; 526 527 if (entry_isprotected(g_iota)) { 528 entry->perm &= IOMMU_RO; 529 } 530 531 while (to) { 532 to = table_translate(entry, to, ett--, &error); 533 } 534 535 return error; 536 } 537 538 static IOMMUTLBEntry s390_translate_iommu(IOMMUMemoryRegion *mr, hwaddr addr, 539 IOMMUAccessFlags flag, int iommu_idx) 540 { 541 S390PCIIOMMU *iommu = container_of(mr, S390PCIIOMMU, iommu_mr); 542 S390IOTLBEntry *entry; 543 uint64_t iova = addr & TARGET_PAGE_MASK; 544 uint16_t error = 0; 545 IOMMUTLBEntry ret = { 546 .target_as = &address_space_memory, 547 .iova = 0, 548 .translated_addr = 0, 549 .addr_mask = ~(hwaddr)0, 550 .perm = IOMMU_NONE, 551 }; 552 553 switch (iommu->pbdev->state) { 554 case ZPCI_FS_ENABLED: 555 case ZPCI_FS_BLOCKED: 556 if (!iommu->enabled) { 557 return ret; 558 } 559 break; 560 default: 561 return ret; 562 } 563 564 trace_s390_pci_iommu_xlate(addr); 565 566 if (addr < iommu->pba || addr > iommu->pal) { 567 error = ERR_EVENT_OORANGE; 568 goto err; 569 } 570 571 entry = g_hash_table_lookup(iommu->iotlb, &iova); 572 if (entry) { 573 ret.iova = entry->iova; 574 ret.translated_addr = entry->translated_addr; 575 ret.addr_mask = entry->len - 1; 576 ret.perm = entry->perm; 577 } else { 578 ret.iova = iova; 579 ret.addr_mask = ~TARGET_PAGE_MASK; 580 ret.perm = IOMMU_NONE; 581 } 582 583 if (flag != IOMMU_NONE && !(flag & ret.perm)) { 584 error = ERR_EVENT_TPROTE; 585 } 586 err: 587 if (error) { 588 iommu->pbdev->state = ZPCI_FS_ERROR; 589 s390_pci_generate_error_event(error, iommu->pbdev->fh, 590 iommu->pbdev->fid, addr, 0); 591 } 592 return ret; 593 } 594 595 static void s390_pci_iommu_replay(IOMMUMemoryRegion *iommu, 596 IOMMUNotifier *notifier) 597 { 598 /* It's impossible to plug a pci device on s390x that already has iommu 599 * mappings which need to be replayed, that is due to the "one iommu per 600 * zpci device" construct. But when we support migration of vfio-pci 601 * devices in future, we need to revisit this. 602 */ 603 } 604 605 static S390PCIIOMMU *s390_pci_get_iommu(S390pciState *s, PCIBus *bus, 606 int devfn) 607 { 608 uint64_t key = (uintptr_t)bus; 609 S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key); 610 S390PCIIOMMU *iommu; 611 612 if (!table) { 613 table = g_new0(S390PCIIOMMUTable, 1); 614 table->key = key; 615 g_hash_table_insert(s->iommu_table, &table->key, table); 616 } 617 618 iommu = table->iommu[PCI_SLOT(devfn)]; 619 if (!iommu) { 620 iommu = S390_PCI_IOMMU(object_new(TYPE_S390_PCI_IOMMU)); 621 622 char *mr_name = g_strdup_printf("iommu-root-%02x:%02x.%01x", 623 pci_bus_num(bus), 624 PCI_SLOT(devfn), 625 PCI_FUNC(devfn)); 626 char *as_name = g_strdup_printf("iommu-pci-%02x:%02x.%01x", 627 pci_bus_num(bus), 628 PCI_SLOT(devfn), 629 PCI_FUNC(devfn)); 630 memory_region_init(&iommu->mr, OBJECT(iommu), mr_name, UINT64_MAX); 631 address_space_init(&iommu->as, &iommu->mr, as_name); 632 iommu->iotlb = g_hash_table_new_full(g_int64_hash, g_int64_equal, 633 NULL, g_free); 634 table->iommu[PCI_SLOT(devfn)] = iommu; 635 636 g_free(mr_name); 637 g_free(as_name); 638 } 639 640 return iommu; 641 } 642 643 static AddressSpace *s390_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn) 644 { 645 S390pciState *s = opaque; 646 S390PCIIOMMU *iommu = s390_pci_get_iommu(s, bus, devfn); 647 648 return &iommu->as; 649 } 650 651 static const PCIIOMMUOps s390_iommu_ops = { 652 .get_address_space = s390_pci_dma_iommu, 653 }; 654 655 static uint8_t set_ind_atomic(uint64_t ind_loc, uint8_t to_be_set) 656 { 657 uint8_t expected, actual; 658 hwaddr len = 1; 659 /* avoid multiple fetches */ 660 uint8_t volatile *ind_addr; 661 662 ind_addr = cpu_physical_memory_map(ind_loc, &len, true); 663 if (!ind_addr) { 664 s390_pci_generate_error_event(ERR_EVENT_AIRERR, 0, 0, 0, 0); 665 return -1; 666 } 667 actual = *ind_addr; 668 do { 669 expected = actual; 670 actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set); 671 } while (actual != expected); 672 cpu_physical_memory_unmap((void *)ind_addr, len, 1, len); 673 674 return actual; 675 } 676 677 static void s390_msi_ctrl_write(void *opaque, hwaddr addr, uint64_t data, 678 unsigned int size) 679 { 680 S390PCIBusDevice *pbdev = opaque; 681 uint32_t vec = data & ZPCI_MSI_VEC_MASK; 682 uint64_t ind_bit; 683 uint32_t sum_bit; 684 685 assert(pbdev); 686 687 trace_s390_pci_msi_ctrl_write(data, pbdev->idx, vec); 688 689 if (pbdev->state != ZPCI_FS_ENABLED) { 690 return; 691 } 692 693 ind_bit = pbdev->routes.adapter.ind_offset; 694 sum_bit = pbdev->routes.adapter.summary_offset; 695 696 set_ind_atomic(pbdev->routes.adapter.ind_addr + (ind_bit + vec) / 8, 697 0x80 >> ((ind_bit + vec) % 8)); 698 if (!set_ind_atomic(pbdev->routes.adapter.summary_addr + sum_bit / 8, 699 0x80 >> (sum_bit % 8))) { 700 css_adapter_interrupt(CSS_IO_ADAPTER_PCI, pbdev->isc); 701 } 702 } 703 704 static uint64_t s390_msi_ctrl_read(void *opaque, hwaddr addr, unsigned size) 705 { 706 return 0xffffffff; 707 } 708 709 static const MemoryRegionOps s390_msi_ctrl_ops = { 710 .write = s390_msi_ctrl_write, 711 .read = s390_msi_ctrl_read, 712 .endianness = DEVICE_LITTLE_ENDIAN, 713 }; 714 715 void s390_pci_iommu_enable(S390PCIIOMMU *iommu) 716 { 717 /* 718 * The iommu region is initialized against a 0-mapped address space, 719 * so the smallest IOMMU region we can define runs from 0 to the end 720 * of the PCI address space. 721 */ 722 char *name = g_strdup_printf("iommu-s390-%04x", iommu->pbdev->uid); 723 memory_region_init_iommu(&iommu->iommu_mr, sizeof(iommu->iommu_mr), 724 TYPE_S390_IOMMU_MEMORY_REGION, OBJECT(&iommu->mr), 725 name, iommu->pal + 1); 726 iommu->enabled = true; 727 memory_region_add_subregion(&iommu->mr, 0, MEMORY_REGION(&iommu->iommu_mr)); 728 g_free(name); 729 } 730 731 void s390_pci_iommu_direct_map_enable(S390PCIIOMMU *iommu) 732 { 733 MachineState *ms = MACHINE(qdev_get_machine()); 734 S390CcwMachineState *s390ms = S390_CCW_MACHINE(ms); 735 736 /* 737 * For direct-mapping we must map the entire guest address space. Rather 738 * than using an iommu, create a memory region alias that maps GPA X to 739 * IOVA X + SDMA. VFIO will handle pinning via its memory listener. 740 */ 741 g_autofree char *name = g_strdup_printf("iommu-dm-s390-%04x", 742 iommu->pbdev->uid); 743 744 iommu->dm_mr = g_malloc0(sizeof(*iommu->dm_mr)); 745 memory_region_init_alias(iommu->dm_mr, OBJECT(&iommu->mr), name, 746 get_system_memory(), 0, 747 s390_get_memory_limit(s390ms)); 748 iommu->enabled = true; 749 memory_region_add_subregion(&iommu->mr, iommu->pbdev->zpci_fn.sdma, 750 iommu->dm_mr); 751 } 752 753 void s390_pci_iommu_disable(S390PCIIOMMU *iommu) 754 { 755 iommu->enabled = false; 756 g_hash_table_remove_all(iommu->iotlb); 757 if (iommu->dm_mr) { 758 memory_region_del_subregion(&iommu->mr, iommu->dm_mr); 759 object_unparent(OBJECT(iommu->dm_mr)); 760 g_free(iommu->dm_mr); 761 iommu->dm_mr = NULL; 762 } else { 763 memory_region_del_subregion(&iommu->mr, 764 MEMORY_REGION(&iommu->iommu_mr)); 765 object_unparent(OBJECT(&iommu->iommu_mr)); 766 } 767 } 768 769 static void s390_pci_iommu_free(S390pciState *s, PCIBus *bus, int32_t devfn) 770 { 771 uint64_t key = (uintptr_t)bus; 772 S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key); 773 S390PCIIOMMU *iommu = table ? table->iommu[PCI_SLOT(devfn)] : NULL; 774 775 if (!table || !iommu) { 776 return; 777 } 778 779 table->iommu[PCI_SLOT(devfn)] = NULL; 780 g_hash_table_destroy(iommu->iotlb); 781 /* 782 * An attached PCI device may have memory listeners, eg. VFIO PCI. 783 * The associated subregion will already have been unmapped in 784 * s390_pci_iommu_disable in response to the guest deconfigure request. 785 * Remove the listeners now before destroying the address space. 786 */ 787 address_space_remove_listeners(&iommu->as); 788 address_space_destroy(&iommu->as); 789 object_unparent(OBJECT(&iommu->mr)); 790 object_unparent(OBJECT(iommu)); 791 object_unref(OBJECT(iommu)); 792 } 793 794 S390PCIGroup *s390_group_create(int id, int host_id) 795 { 796 S390PCIGroup *group; 797 S390pciState *s = s390_get_phb(); 798 799 group = g_new0(S390PCIGroup, 1); 800 group->id = id; 801 group->host_id = host_id; 802 QTAILQ_INSERT_TAIL(&s->zpci_groups, group, link); 803 return group; 804 } 805 806 S390PCIGroup *s390_group_find(int id) 807 { 808 S390PCIGroup *group; 809 S390pciState *s = s390_get_phb(); 810 811 QTAILQ_FOREACH(group, &s->zpci_groups, link) { 812 if (group->id == id) { 813 return group; 814 } 815 } 816 return NULL; 817 } 818 819 S390PCIGroup *s390_group_find_host_sim(int host_id) 820 { 821 S390PCIGroup *group; 822 S390pciState *s = s390_get_phb(); 823 824 QTAILQ_FOREACH(group, &s->zpci_groups, link) { 825 if (group->id >= ZPCI_SIM_GRP_START && group->host_id == host_id) { 826 return group; 827 } 828 } 829 return NULL; 830 } 831 832 static void s390_pci_init_default_group(void) 833 { 834 S390PCIGroup *group; 835 ClpRspQueryPciGrp *resgrp; 836 837 group = s390_group_create(ZPCI_DEFAULT_FN_GRP, ZPCI_DEFAULT_FN_GRP); 838 resgrp = &group->zpci_group; 839 resgrp->fr = 1; 840 resgrp->dasm = 0; 841 resgrp->msia = ZPCI_MSI_ADDR; 842 resgrp->mui = DEFAULT_MUI; 843 resgrp->i = 128; 844 resgrp->maxstbl = 128; 845 resgrp->version = 0; 846 resgrp->dtsm = ZPCI_DTSM; 847 } 848 849 static void set_pbdev_info(S390PCIBusDevice *pbdev) 850 { 851 pbdev->zpci_fn.sdma = ZPCI_SDMA_ADDR; 852 pbdev->zpci_fn.edma = ZPCI_EDMA_ADDR; 853 pbdev->zpci_fn.pchid = 0; 854 pbdev->zpci_fn.pfgid = ZPCI_DEFAULT_FN_GRP; 855 pbdev->zpci_fn.fid = pbdev->fid; 856 pbdev->zpci_fn.uid = pbdev->uid; 857 pbdev->pci_group = s390_group_find(ZPCI_DEFAULT_FN_GRP); 858 } 859 860 static void s390_pcihost_realize(DeviceState *dev, Error **errp) 861 { 862 PCIBus *b; 863 BusState *bus; 864 PCIHostState *phb = PCI_HOST_BRIDGE(dev); 865 S390pciState *s = S390_PCI_HOST_BRIDGE(dev); 866 867 trace_s390_pcihost("realize"); 868 869 b = pci_register_root_bus(dev, NULL, s390_pci_set_irq, s390_pci_map_irq, 870 NULL, get_system_memory(), get_system_io(), 0, 871 64, TYPE_PCI_BUS); 872 pci_setup_iommu(b, &s390_iommu_ops, s); 873 874 bus = BUS(b); 875 qbus_set_hotplug_handler(bus, OBJECT(dev)); 876 phb->bus = b; 877 878 s->bus = S390_PCI_BUS(qbus_new(TYPE_S390_PCI_BUS, dev, NULL)); 879 qbus_set_hotplug_handler(BUS(s->bus), OBJECT(dev)); 880 881 s->iommu_table = g_hash_table_new_full(g_int64_hash, g_int64_equal, 882 NULL, g_free); 883 s->zpci_table = g_hash_table_new_full(g_int_hash, g_int_equal, NULL, NULL); 884 s->bus_no = 0; 885 s->next_sim_grp = ZPCI_SIM_GRP_START; 886 QTAILQ_INIT(&s->pending_sei); 887 QTAILQ_INIT(&s->zpci_devs); 888 QTAILQ_INIT(&s->zpci_dma_limit); 889 QTAILQ_INIT(&s->zpci_groups); 890 891 s390_pci_init_default_group(); 892 css_register_io_adapters(CSS_IO_ADAPTER_PCI, true, false, 893 S390_ADAPTER_SUPPRESSIBLE, errp); 894 } 895 896 static void s390_pcihost_unrealize(DeviceState *dev) 897 { 898 S390PCIGroup *group; 899 S390pciState *s = S390_PCI_HOST_BRIDGE(dev); 900 901 while (!QTAILQ_EMPTY(&s->zpci_groups)) { 902 group = QTAILQ_FIRST(&s->zpci_groups); 903 QTAILQ_REMOVE(&s->zpci_groups, group, link); 904 } 905 } 906 907 static int s390_pci_msix_init(S390PCIBusDevice *pbdev) 908 { 909 char *name; 910 uint8_t pos; 911 uint16_t ctrl; 912 uint32_t table, pba; 913 914 pos = pci_find_capability(pbdev->pdev, PCI_CAP_ID_MSIX); 915 if (!pos) { 916 return -1; 917 } 918 919 ctrl = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_FLAGS, 920 pci_config_size(pbdev->pdev), sizeof(ctrl)); 921 table = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_TABLE, 922 pci_config_size(pbdev->pdev), sizeof(table)); 923 pba = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_PBA, 924 pci_config_size(pbdev->pdev), sizeof(pba)); 925 926 pbdev->msix.table_bar = table & PCI_MSIX_FLAGS_BIRMASK; 927 pbdev->msix.table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK; 928 pbdev->msix.pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK; 929 pbdev->msix.pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK; 930 pbdev->msix.entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; 931 932 name = g_strdup_printf("msix-s390-%04x", pbdev->uid); 933 memory_region_init_io(&pbdev->msix_notify_mr, OBJECT(pbdev), 934 &s390_msi_ctrl_ops, pbdev, name, TARGET_PAGE_SIZE); 935 memory_region_add_subregion(&pbdev->iommu->mr, 936 pbdev->pci_group->zpci_group.msia, 937 &pbdev->msix_notify_mr); 938 g_free(name); 939 940 return 0; 941 } 942 943 static void s390_pci_msix_free(S390PCIBusDevice *pbdev) 944 { 945 if (pbdev->msix.entries == 0) { 946 return; 947 } 948 949 memory_region_del_subregion(&pbdev->iommu->mr, &pbdev->msix_notify_mr); 950 object_unparent(OBJECT(&pbdev->msix_notify_mr)); 951 } 952 953 static S390PCIBusDevice *s390_pci_device_new(S390pciState *s, 954 const char *target, Error **errp) 955 { 956 Error *local_err = NULL; 957 DeviceState *dev; 958 959 dev = qdev_try_new(TYPE_S390_PCI_DEVICE); 960 if (!dev) { 961 error_setg(errp, "zPCI device could not be created"); 962 return NULL; 963 } 964 965 if (!object_property_set_str(OBJECT(dev), "target", target, &local_err)) { 966 object_unparent(OBJECT(dev)); 967 error_propagate_prepend(errp, local_err, 968 "zPCI device could not be created: "); 969 return NULL; 970 } 971 if (!qdev_realize_and_unref(dev, BUS(s->bus), &local_err)) { 972 object_unparent(OBJECT(dev)); 973 error_propagate_prepend(errp, local_err, 974 "zPCI device could not be created: "); 975 return NULL; 976 } 977 978 return S390_PCI_DEVICE(dev); 979 } 980 981 static bool s390_pci_alloc_idx(S390pciState *s, S390PCIBusDevice *pbdev) 982 { 983 uint32_t idx; 984 985 idx = s->next_idx; 986 while (s390_pci_find_dev_by_idx(s, idx)) { 987 idx = (idx + 1) & FH_MASK_INDEX; 988 if (idx == s->next_idx) { 989 return false; 990 } 991 } 992 993 pbdev->idx = idx; 994 return true; 995 } 996 997 static void s390_pcihost_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 998 Error **errp) 999 { 1000 S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev); 1001 1002 if (!s390_has_feat(S390_FEAT_ZPCI)) { 1003 warn_report("Plugging a PCI/zPCI device without the 'zpci' CPU " 1004 "feature enabled; the guest will not be able to see/use " 1005 "this device"); 1006 } 1007 1008 if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) { 1009 S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev); 1010 1011 if (!s390_pci_alloc_idx(s, pbdev)) { 1012 error_setg(errp, "no slot for plugging zpci device"); 1013 return; 1014 } 1015 } 1016 } 1017 1018 static void s390_pci_update_subordinate(PCIDevice *dev, uint32_t nr) 1019 { 1020 uint32_t old_nr; 1021 1022 pci_default_write_config(dev, PCI_SUBORDINATE_BUS, nr, 1); 1023 while (!pci_bus_is_root(pci_get_bus(dev))) { 1024 dev = pci_get_bus(dev)->parent_dev; 1025 1026 old_nr = pci_default_read_config(dev, PCI_SUBORDINATE_BUS, 1); 1027 if (old_nr < nr) { 1028 pci_default_write_config(dev, PCI_SUBORDINATE_BUS, nr, 1); 1029 } 1030 } 1031 } 1032 1033 static int s390_pci_interp_plug(S390pciState *s, S390PCIBusDevice *pbdev) 1034 { 1035 uint32_t idx, fh; 1036 1037 if (!s390_pci_get_host_fh(pbdev, &fh)) { 1038 return -EPERM; 1039 } 1040 1041 /* 1042 * The host device is already in an enabled state, but we always present 1043 * the initial device state to the guest as disabled (ZPCI_FS_DISABLED). 1044 * Therefore, mask off the enable bit from the passthrough handle until 1045 * the guest issues a CLP SET PCI FN later to enable the device. 1046 */ 1047 pbdev->fh = fh & ~FH_MASK_ENABLE; 1048 1049 /* Next, see if the idx is already in-use */ 1050 idx = pbdev->fh & FH_MASK_INDEX; 1051 if (pbdev->idx != idx) { 1052 if (s390_pci_find_dev_by_idx(s, idx)) { 1053 return -EINVAL; 1054 } 1055 /* 1056 * Update the idx entry with the passed through idx 1057 * If the relinquished idx is lower than next_idx, use it 1058 * to replace next_idx 1059 */ 1060 g_hash_table_remove(s->zpci_table, &pbdev->idx); 1061 if (idx < s->next_idx) { 1062 s->next_idx = idx; 1063 } 1064 pbdev->idx = idx; 1065 g_hash_table_insert(s->zpci_table, &pbdev->idx, pbdev); 1066 } 1067 1068 return 0; 1069 } 1070 1071 static void s390_pcihost_plug(HotplugHandler *hotplug_dev, DeviceState *dev, 1072 Error **errp) 1073 { 1074 S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev); 1075 PCIDevice *pdev = NULL; 1076 S390PCIBusDevice *pbdev = NULL; 1077 int rc; 1078 1079 if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) { 1080 PCIBridge *pb = PCI_BRIDGE(dev); 1081 1082 pdev = PCI_DEVICE(dev); 1083 pci_bridge_map_irq(pb, dev->id, s390_pci_map_irq); 1084 pci_setup_iommu(&pb->sec_bus, &s390_iommu_ops, s); 1085 1086 qbus_set_hotplug_handler(BUS(&pb->sec_bus), OBJECT(s)); 1087 1088 if (dev->hotplugged) { 1089 pci_default_write_config(pdev, PCI_PRIMARY_BUS, 1090 pci_dev_bus_num(pdev), 1); 1091 s->bus_no += 1; 1092 pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1); 1093 1094 s390_pci_update_subordinate(pdev, s->bus_no); 1095 } 1096 } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { 1097 pdev = PCI_DEVICE(dev); 1098 1099 /* 1100 * Multifunction is not supported due to the lack of CLP. However, 1101 * do not check for multifunction capability for SR-IOV devices because 1102 * SR-IOV devices automatically add the multifunction capability whether 1103 * the user intends to use the functions other than the PF. 1104 */ 1105 if (pdev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION && 1106 !pdev->exp.sriov_cap) { 1107 error_setg(errp, "multifunction not supported in s390"); 1108 return; 1109 } 1110 1111 if (!dev->id) { 1112 /* In the case the PCI device does not define an id */ 1113 /* we generate one based on the PCI address */ 1114 dev->id = g_strdup_printf("auto_%02x:%02x.%01x", 1115 pci_dev_bus_num(pdev), 1116 PCI_SLOT(pdev->devfn), 1117 PCI_FUNC(pdev->devfn)); 1118 } 1119 1120 pbdev = s390_pci_find_dev_by_target(s, dev->id); 1121 if (!pbdev) { 1122 /* 1123 * VFs are automatically created by PF, and creating zpci for them 1124 * will result in unexpected usage of fids. Currently QEMU does not 1125 * support multifunction for s390x so we don't need zpci for VFs 1126 * anyway. 1127 */ 1128 if (pci_is_vf(pdev)) { 1129 return; 1130 } 1131 1132 pbdev = s390_pci_device_new(s, dev->id, errp); 1133 if (!pbdev) { 1134 return; 1135 } 1136 } 1137 1138 pbdev->pdev = pdev; 1139 pbdev->iommu = s390_pci_get_iommu(s, pci_get_bus(pdev), pdev->devfn); 1140 pbdev->iommu->pbdev = pbdev; 1141 pbdev->state = ZPCI_FS_DISABLED; 1142 set_pbdev_info(pbdev); 1143 1144 if (object_dynamic_cast(OBJECT(dev), "vfio-pci")) { 1145 /* 1146 * By default, interpretation is always requested; if the available 1147 * facilities indicate it is not available, fallback to the 1148 * interception model. 1149 */ 1150 if (pbdev->interp) { 1151 if (s390_pci_kvm_interp_allowed()) { 1152 rc = s390_pci_interp_plug(s, pbdev); 1153 if (rc) { 1154 error_setg(errp, "Plug failed for zPCI device in " 1155 "interpretation mode: %d", rc); 1156 return; 1157 } 1158 } else { 1159 trace_s390_pcihost("zPCI interpretation missing"); 1160 pbdev->interp = false; 1161 pbdev->forwarding_assist = false; 1162 } 1163 } 1164 pbdev->iommu->dma_limit = s390_pci_start_dma_count(s, pbdev); 1165 /* Fill in CLP information passed via the vfio region */ 1166 s390_pci_get_clp_info(pbdev); 1167 if (!pbdev->interp) { 1168 /* Do vfio passthrough but intercept for I/O */ 1169 pbdev->fh |= FH_SHM_VFIO; 1170 pbdev->forwarding_assist = false; 1171 } 1172 /* Register shutdown notifier and reset callback for ISM devices */ 1173 if (pbdev->pft == ZPCI_PFT_ISM) { 1174 pbdev->shutdown_notifier.notify = s390_pci_shutdown_notifier; 1175 qemu_register_shutdown_notifier(&pbdev->shutdown_notifier); 1176 } 1177 } else { 1178 pbdev->fh |= FH_SHM_EMUL; 1179 /* Always intercept emulated devices */ 1180 pbdev->interp = false; 1181 pbdev->forwarding_assist = false; 1182 pbdev->rtr_avail = false; 1183 } 1184 1185 if (s390_pci_msix_init(pbdev) && !pbdev->interp) { 1186 error_setg(errp, "MSI-X support is mandatory " 1187 "in the S390 architecture"); 1188 return; 1189 } 1190 1191 if (dev->hotplugged) { 1192 s390_pci_generate_plug_event(HP_EVENT_TO_CONFIGURED , 1193 pbdev->fh, pbdev->fid); 1194 } 1195 } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) { 1196 pbdev = S390_PCI_DEVICE(dev); 1197 1198 /* the allocated idx is actually getting used */ 1199 s->next_idx = (pbdev->idx + 1) & FH_MASK_INDEX; 1200 pbdev->fh = pbdev->idx; 1201 QTAILQ_INSERT_TAIL(&s->zpci_devs, pbdev, link); 1202 g_hash_table_insert(s->zpci_table, &pbdev->idx, pbdev); 1203 } else { 1204 g_assert_not_reached(); 1205 } 1206 } 1207 1208 static void s390_pcihost_unplug(HotplugHandler *hotplug_dev, DeviceState *dev, 1209 Error **errp) 1210 { 1211 S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev); 1212 S390PCIBusDevice *pbdev = NULL; 1213 1214 if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { 1215 PCIDevice *pci_dev = PCI_DEVICE(dev); 1216 PCIBus *bus; 1217 int32_t devfn; 1218 1219 pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev)); 1220 if (!pbdev) { 1221 g_assert(pci_is_vf(pci_dev)); 1222 return; 1223 } 1224 1225 s390_pci_generate_plug_event(HP_EVENT_STANDBY_TO_RESERVED, 1226 pbdev->fh, pbdev->fid); 1227 bus = pci_get_bus(pci_dev); 1228 devfn = pci_dev->devfn; 1229 qdev_unrealize(dev); 1230 1231 s390_pci_msix_free(pbdev); 1232 s390_pci_iommu_free(s, bus, devfn); 1233 pbdev->pdev = NULL; 1234 pbdev->state = ZPCI_FS_RESERVED; 1235 } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) { 1236 pbdev = S390_PCI_DEVICE(dev); 1237 pbdev->fid = 0; 1238 QTAILQ_REMOVE(&s->zpci_devs, pbdev, link); 1239 g_hash_table_remove(s->zpci_table, &pbdev->idx); 1240 if (pbdev->iommu->dma_limit) { 1241 s390_pci_end_dma_count(s, pbdev->iommu->dma_limit); 1242 } 1243 qdev_unrealize(dev); 1244 } 1245 } 1246 1247 static void s390_pcihost_unplug_request(HotplugHandler *hotplug_dev, 1248 DeviceState *dev, 1249 Error **errp) 1250 { 1251 S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev); 1252 S390PCIBusDevice *pbdev; 1253 1254 if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) { 1255 error_setg(errp, "PCI bridge hot unplug currently not supported"); 1256 } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { 1257 /* 1258 * Redirect the unplug request to the zPCI device and remember that 1259 * we've checked the PCI device already (to prevent endless recursion). 1260 */ 1261 pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev)); 1262 if (!pbdev) { 1263 g_assert(pci_is_vf(PCI_DEVICE(dev))); 1264 return; 1265 } 1266 1267 pbdev->pci_unplug_request_processed = true; 1268 qdev_unplug(DEVICE(pbdev), errp); 1269 } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) { 1270 pbdev = S390_PCI_DEVICE(dev); 1271 1272 /* 1273 * If unplug was initially requested for the zPCI device, we 1274 * first have to redirect to the PCI device, which will in return 1275 * redirect back to us after performing its checks (if the request 1276 * is not blocked, e.g. because it's a PCI bridge). 1277 */ 1278 if (pbdev->pdev && !pbdev->pci_unplug_request_processed) { 1279 qdev_unplug(DEVICE(pbdev->pdev), errp); 1280 return; 1281 } 1282 pbdev->pci_unplug_request_processed = false; 1283 1284 switch (pbdev->state) { 1285 case ZPCI_FS_STANDBY: 1286 case ZPCI_FS_RESERVED: 1287 s390_pci_perform_unplug(pbdev); 1288 break; 1289 default: 1290 /* 1291 * Allow to send multiple requests, e.g. if the guest crashed 1292 * before releasing the device, we would not be able to send 1293 * another request to the same VM (e.g. fresh OS). 1294 */ 1295 pbdev->unplug_requested = true; 1296 s390_pci_generate_plug_event(HP_EVENT_DECONFIGURE_REQUEST, 1297 pbdev->fh, pbdev->fid); 1298 } 1299 } else { 1300 g_assert_not_reached(); 1301 } 1302 } 1303 1304 static void s390_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev, 1305 void *opaque) 1306 { 1307 S390pciState *s = opaque; 1308 PCIBus *sec_bus = NULL; 1309 1310 if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) != 1311 PCI_HEADER_TYPE_BRIDGE)) { 1312 return; 1313 } 1314 1315 (s->bus_no)++; 1316 pci_default_write_config(pdev, PCI_PRIMARY_BUS, pci_dev_bus_num(pdev), 1); 1317 pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1); 1318 pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1); 1319 1320 sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev)); 1321 if (!sec_bus) { 1322 return; 1323 } 1324 1325 /* Assign numbers to all child bridges. The last is the highest number. */ 1326 pci_for_each_device_under_bus(sec_bus, s390_pci_enumerate_bridge, s); 1327 pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1); 1328 } 1329 1330 void s390_pci_ism_reset(void) 1331 { 1332 S390pciState *s = s390_get_phb(); 1333 1334 S390PCIBusDevice *pbdev, *next; 1335 1336 /* Trigger reset event for each passthrough ISM device currently in-use */ 1337 QTAILQ_FOREACH_SAFE(pbdev, &s->zpci_devs, link, next) { 1338 if (pbdev->interp && pbdev->pft == ZPCI_PFT_ISM && 1339 pbdev->fh & FH_MASK_ENABLE) { 1340 s390_pci_kvm_aif_disable(pbdev); 1341 1342 pci_device_reset(pbdev->pdev); 1343 } 1344 } 1345 } 1346 1347 static void s390_pcihost_reset(DeviceState *dev) 1348 { 1349 S390pciState *s = S390_PCI_HOST_BRIDGE(dev); 1350 PCIBus *bus = s->parent_obj.bus; 1351 S390PCIBusDevice *pbdev, *next; 1352 1353 /* Process all pending unplug requests */ 1354 QTAILQ_FOREACH_SAFE(pbdev, &s->zpci_devs, link, next) { 1355 if (pbdev->unplug_requested) { 1356 if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) { 1357 /* Interpreted devices were using interrupt forwarding */ 1358 s390_pci_kvm_aif_disable(pbdev); 1359 } else if (pbdev->summary_ind) { 1360 pci_dereg_irqs(pbdev); 1361 } 1362 if (pbdev->iommu->enabled) { 1363 pci_dereg_ioat(pbdev->iommu); 1364 } 1365 pbdev->state = ZPCI_FS_STANDBY; 1366 s390_pci_perform_unplug(pbdev); 1367 } 1368 } 1369 1370 /* 1371 * When resetting a PCI bridge, the assigned numbers are set to 0. So 1372 * on every system reset, we also have to reassign numbers. 1373 */ 1374 s->bus_no = 0; 1375 pci_for_each_device_under_bus(bus, s390_pci_enumerate_bridge, s); 1376 } 1377 1378 static void s390_pcihost_class_init(ObjectClass *klass, const void *data) 1379 { 1380 DeviceClass *dc = DEVICE_CLASS(klass); 1381 HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); 1382 1383 device_class_set_legacy_reset(dc, s390_pcihost_reset); 1384 dc->realize = s390_pcihost_realize; 1385 dc->unrealize = s390_pcihost_unrealize; 1386 hc->pre_plug = s390_pcihost_pre_plug; 1387 hc->plug = s390_pcihost_plug; 1388 hc->unplug_request = s390_pcihost_unplug_request; 1389 hc->unplug = s390_pcihost_unplug; 1390 msi_nonbroken = true; 1391 } 1392 1393 static const TypeInfo s390_pcihost_info = { 1394 .name = TYPE_S390_PCI_HOST_BRIDGE, 1395 .parent = TYPE_PCI_HOST_BRIDGE, 1396 .instance_size = sizeof(S390pciState), 1397 .class_init = s390_pcihost_class_init, 1398 .interfaces = (const InterfaceInfo[]) { 1399 { TYPE_HOTPLUG_HANDLER }, 1400 { } 1401 } 1402 }; 1403 1404 static const TypeInfo s390_pcibus_info = { 1405 .name = TYPE_S390_PCI_BUS, 1406 .parent = TYPE_BUS, 1407 .instance_size = sizeof(S390PCIBus), 1408 }; 1409 1410 static uint16_t s390_pci_generate_uid(S390pciState *s) 1411 { 1412 uint16_t uid = 0; 1413 1414 do { 1415 uid++; 1416 if (!s390_pci_find_dev_by_uid(s, uid)) { 1417 return uid; 1418 } 1419 } while (uid < ZPCI_MAX_UID); 1420 1421 return UID_UNDEFINED; 1422 } 1423 1424 static uint32_t s390_pci_generate_fid(S390pciState *s, Error **errp) 1425 { 1426 uint32_t fid = 0; 1427 1428 do { 1429 if (!s390_pci_find_dev_by_fid(s, fid)) { 1430 return fid; 1431 } 1432 } while (fid++ != ZPCI_MAX_FID); 1433 1434 error_setg(errp, "no free fid could be found"); 1435 return 0; 1436 } 1437 1438 static void s390_pci_device_realize(DeviceState *dev, Error **errp) 1439 { 1440 S390PCIBusDevice *zpci = S390_PCI_DEVICE(dev); 1441 S390pciState *s = s390_get_phb(); 1442 1443 if (!zpci->target) { 1444 error_setg(errp, "target must be defined"); 1445 return; 1446 } 1447 1448 if (s390_pci_find_dev_by_target(s, zpci->target)) { 1449 error_setg(errp, "target %s already has an associated zpci device", 1450 zpci->target); 1451 return; 1452 } 1453 1454 if (zpci->uid == UID_UNDEFINED) { 1455 zpci->uid = s390_pci_generate_uid(s); 1456 if (!zpci->uid) { 1457 error_setg(errp, "no free uid could be found"); 1458 return; 1459 } 1460 } else if (s390_pci_find_dev_by_uid(s, zpci->uid)) { 1461 error_setg(errp, "uid %u already in use", zpci->uid); 1462 return; 1463 } 1464 1465 if (!zpci->fid_defined) { 1466 Error *local_error = NULL; 1467 1468 zpci->fid = s390_pci_generate_fid(s, &local_error); 1469 if (local_error) { 1470 error_propagate(errp, local_error); 1471 return; 1472 } 1473 } else if (s390_pci_find_dev_by_fid(s, zpci->fid)) { 1474 error_setg(errp, "fid %u already in use", zpci->fid); 1475 return; 1476 } 1477 1478 zpci->state = ZPCI_FS_RESERVED; 1479 zpci->fmb.format = ZPCI_FMB_FORMAT; 1480 } 1481 1482 static void s390_pci_device_reset(DeviceState *dev) 1483 { 1484 S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev); 1485 1486 switch (pbdev->state) { 1487 case ZPCI_FS_RESERVED: 1488 return; 1489 case ZPCI_FS_STANDBY: 1490 break; 1491 default: 1492 pbdev->fh &= ~FH_MASK_ENABLE; 1493 pbdev->state = ZPCI_FS_DISABLED; 1494 break; 1495 } 1496 1497 if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) { 1498 /* Interpreted devices were using interrupt forwarding */ 1499 s390_pci_kvm_aif_disable(pbdev); 1500 } else if (pbdev->summary_ind) { 1501 pci_dereg_irqs(pbdev); 1502 } 1503 if (pbdev->iommu->enabled) { 1504 pci_dereg_ioat(pbdev->iommu); 1505 } 1506 1507 fmb_timer_free(pbdev); 1508 } 1509 1510 static void s390_pci_get_fid(Object *obj, Visitor *v, const char *name, 1511 void *opaque, Error **errp) 1512 { 1513 const Property *prop = opaque; 1514 uint32_t *ptr = object_field_prop_ptr(obj, prop); 1515 1516 visit_type_uint32(v, name, ptr, errp); 1517 } 1518 1519 static void s390_pci_set_fid(Object *obj, Visitor *v, const char *name, 1520 void *opaque, Error **errp) 1521 { 1522 S390PCIBusDevice *zpci = S390_PCI_DEVICE(obj); 1523 const Property *prop = opaque; 1524 uint32_t *ptr = object_field_prop_ptr(obj, prop); 1525 1526 if (!visit_type_uint32(v, name, ptr, errp)) { 1527 return; 1528 } 1529 zpci->fid_defined = true; 1530 } 1531 1532 static const PropertyInfo s390_pci_fid_propinfo = { 1533 .type = "uint32", 1534 .description = "zpci_fid", 1535 .get = s390_pci_get_fid, 1536 .set = s390_pci_set_fid, 1537 }; 1538 1539 #define DEFINE_PROP_S390_PCI_FID(_n, _s, _f) \ 1540 DEFINE_PROP(_n, _s, _f, s390_pci_fid_propinfo, uint32_t) 1541 1542 static const Property s390_pci_device_properties[] = { 1543 DEFINE_PROP_UINT16("uid", S390PCIBusDevice, uid, UID_UNDEFINED), 1544 DEFINE_PROP_S390_PCI_FID("fid", S390PCIBusDevice, fid), 1545 DEFINE_PROP_STRING("target", S390PCIBusDevice, target), 1546 DEFINE_PROP_BOOL("interpret", S390PCIBusDevice, interp, true), 1547 DEFINE_PROP_BOOL("forwarding-assist", S390PCIBusDevice, forwarding_assist, 1548 true), 1549 DEFINE_PROP_BOOL("relaxed-translation", S390PCIBusDevice, rtr_avail, 1550 true), 1551 }; 1552 1553 static const VMStateDescription s390_pci_device_vmstate = { 1554 .name = TYPE_S390_PCI_DEVICE, 1555 /* 1556 * TODO: add state handling here, so migration works at least with 1557 * emulated pci devices on s390x 1558 */ 1559 .unmigratable = 1, 1560 }; 1561 1562 static void s390_pci_device_class_init(ObjectClass *klass, const void *data) 1563 { 1564 DeviceClass *dc = DEVICE_CLASS(klass); 1565 1566 dc->desc = "zpci device"; 1567 set_bit(DEVICE_CATEGORY_MISC, dc->categories); 1568 device_class_set_legacy_reset(dc, s390_pci_device_reset); 1569 dc->bus_type = TYPE_S390_PCI_BUS; 1570 dc->realize = s390_pci_device_realize; 1571 device_class_set_props(dc, s390_pci_device_properties); 1572 dc->vmsd = &s390_pci_device_vmstate; 1573 } 1574 1575 static const TypeInfo s390_pci_device_info = { 1576 .name = TYPE_S390_PCI_DEVICE, 1577 .parent = TYPE_DEVICE, 1578 .instance_size = sizeof(S390PCIBusDevice), 1579 .class_init = s390_pci_device_class_init, 1580 }; 1581 1582 static const TypeInfo s390_pci_iommu_info = { 1583 .name = TYPE_S390_PCI_IOMMU, 1584 .parent = TYPE_OBJECT, 1585 .instance_size = sizeof(S390PCIIOMMU), 1586 }; 1587 1588 static void s390_iommu_memory_region_class_init(ObjectClass *klass, 1589 const void *data) 1590 { 1591 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); 1592 1593 imrc->translate = s390_translate_iommu; 1594 imrc->replay = s390_pci_iommu_replay; 1595 } 1596 1597 static const TypeInfo s390_iommu_memory_region_info = { 1598 .parent = TYPE_IOMMU_MEMORY_REGION, 1599 .name = TYPE_S390_IOMMU_MEMORY_REGION, 1600 .class_init = s390_iommu_memory_region_class_init, 1601 }; 1602 1603 static void s390_pci_register_types(void) 1604 { 1605 type_register_static(&s390_pcihost_info); 1606 type_register_static(&s390_pcibus_info); 1607 type_register_static(&s390_pci_device_info); 1608 type_register_static(&s390_pci_iommu_info); 1609 type_register_static(&s390_iommu_memory_region_info); 1610 } 1611 1612 type_init(s390_pci_register_types) 1613