/*
 * s390 PCI BUS
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-kvm.h"
#include "hw/s390x/s390-pci-vfio.h"
#include "hw/s390x/s390-virtio-ccw.h"
#include "hw/boards.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/msi.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "system/reset.h"
#include "system/runstate.h"

#include "trace.h"

S390pciState *s390_get_phb(void)
{
    static S390pciState *phb;

    if (!phb) {
        phb = S390_PCI_HOST_BRIDGE(
            object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL));
        assert(phb != NULL);
    }

    return phb;
}

int pci_chsc_sei_nt2_get_event(void *res)
{
    ChscSeiNt2Res *nt2_res = (ChscSeiNt2Res *)res;
    PciCcdfAvail *accdf;
    PciCcdfErr *eccdf;
    int rc = 1;
    SeiContainer *sei_cont;
    S390pciState *s = s390_get_phb();

    sei_cont = QTAILQ_FIRST(&s->pending_sei);
    if (sei_cont) {
        QTAILQ_REMOVE(&s->pending_sei, sei_cont, link);
        nt2_res->nt = 2;
        nt2_res->cc = sei_cont->cc;
        nt2_res->length = cpu_to_be16(sizeof(ChscSeiNt2Res));
        switch (sei_cont->cc) {
        case 1: /* error event */
            eccdf = (PciCcdfErr *)nt2_res->ccdf;
            eccdf->fid = cpu_to_be32(sei_cont->fid);
            eccdf->fh = cpu_to_be32(sei_cont->fh);
            eccdf->e = cpu_to_be32(sei_cont->e);
            eccdf->faddr = cpu_to_be64(sei_cont->faddr);
            eccdf->pec = cpu_to_be16(sei_cont->pec);
            break;
        case 2: /* availability event */
            accdf = (PciCcdfAvail *)nt2_res->ccdf;
            accdf->fid = cpu_to_be32(sei_cont->fid);
            accdf->fh = cpu_to_be32(sei_cont->fh);
            accdf->pec = cpu_to_be16(sei_cont->pec);
            break;
        default:
            abort();
        }
        g_free(sei_cont);
        rc = 0;
    }

    return rc;
}

int pci_chsc_sei_nt2_have_event(void)
{
    S390pciState *s = s390_get_phb();

    return !QTAILQ_EMPTY(&s->pending_sei);
}

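/*
 * Return the next zPCI device after @pbdev (or the first one when @pbdev is
 * NULL), skipping devices that are in the reserved state.
 */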
S390PCIBusDevice *s390_pci_find_next_avail_dev(S390pciState *s,
                                               S390PCIBusDevice *pbdev)
{
    S390PCIBusDevice *ret = pbdev ? QTAILQ_NEXT(pbdev, link) :
        QTAILQ_FIRST(&s->zpci_devs);

    while (ret && ret->state == ZPCI_FS_RESERVED) {
        ret = QTAILQ_NEXT(ret, link);
    }

    return ret;
}

S390PCIBusDevice *s390_pci_find_dev_by_fid(S390pciState *s, uint32_t fid)
{
    S390PCIBusDevice *pbdev;

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (pbdev->fid == fid) {
            return pbdev;
        }
    }

    return NULL;
}

void s390_pci_sclp_configure(SCCB *sccb)
{
    IoaCfgSccb *psccb = (IoaCfgSccb *)sccb;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(),
                                                       be32_to_cpu(psccb->aid));
    uint16_t rc;

    if (!pbdev) {
        trace_s390_pci_sclp_nodev("configure", be32_to_cpu(psccb->aid));
        rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED;
        goto out;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE;
        break;
    case ZPCI_FS_STANDBY:
        pbdev->state = ZPCI_FS_DISABLED;
        rc = SCLP_RC_NORMAL_COMPLETION;
        break;
    default:
        rc = SCLP_RC_NO_ACTION_REQUIRED;
    }
out:
    psccb->header.response_code = cpu_to_be16(rc);
}

static void s390_pci_shutdown_notifier(Notifier *n, void *opaque)
{
    S390PCIBusDevice *pbdev = container_of(n, S390PCIBusDevice,
                                           shutdown_notifier);

    pci_device_reset(pbdev->pdev);
}

static void s390_pci_perform_unplug(S390PCIBusDevice *pbdev)
{
    HotplugHandler *hotplug_ctrl;

    if (pbdev->pft == ZPCI_PFT_ISM) {
        notifier_remove(&pbdev->shutdown_notifier);
    }

    /* Unplug the PCI device */
    if (pbdev->pdev) {
        DeviceState *pdev = DEVICE(pbdev->pdev);

        hotplug_ctrl = qdev_get_hotplug_handler(pdev);
        hotplug_handler_unplug(hotplug_ctrl, pdev, &error_abort);
        object_unparent(OBJECT(pdev));
    }

    /* Unplug the zPCI device */
    hotplug_ctrl = qdev_get_hotplug_handler(DEVICE(pbdev));
    hotplug_handler_unplug(hotplug_ctrl, DEVICE(pbdev), &error_abort);
    object_unparent(OBJECT(pbdev));
}

void s390_pci_sclp_deconfigure(SCCB *sccb)
{
    IoaCfgSccb *psccb = (IoaCfgSccb *)sccb;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(),
                                                       be32_to_cpu(psccb->aid));
    uint16_t rc;

    if (!pbdev) {
        trace_s390_pci_sclp_nodev("deconfigure", be32_to_cpu(psccb->aid));
        rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED;
        goto out;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE;
        break;
    case ZPCI_FS_STANDBY:
        rc = SCLP_RC_NO_ACTION_REQUIRED;
        break;
    default:
        if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
            /* Interpreted devices were using interrupt forwarding */
            s390_pci_kvm_aif_disable(pbdev);
        } else if (pbdev->summary_ind) {
            pci_dereg_irqs(pbdev);
        }
        if (pbdev->iommu->enabled) {
            pci_dereg_ioat(pbdev->iommu);
        }
        pbdev->state = ZPCI_FS_STANDBY;
        rc = SCLP_RC_NORMAL_COMPLETION;

        if (pbdev->unplug_requested) {
            s390_pci_perform_unplug(pbdev);
        }
    }
out:
    psccb->header.response_code = cpu_to_be16(rc);
}

static S390PCIBusDevice *s390_pci_find_dev_by_uid(S390pciState *s, uint16_t uid)
{
    S390PCIBusDevice *pbdev;

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (pbdev->uid == uid) {
            return pbdev;
        }
    }

    return NULL;
}

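/*
 * Look up a zPCI device by its "target" property, i.e. the id of the PCI
 * device it is associated with.
 */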
S390PCIBusDevice *s390_pci_find_dev_by_target(S390pciState *s,
                                              const char *target)
{
    S390PCIBusDevice *pbdev;

    if (!target) {
        return NULL;
    }

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (!strcmp(pbdev->target, target)) {
            return pbdev;
        }
    }

    return NULL;
}

static S390PCIBusDevice *s390_pci_find_dev_by_pci(S390pciState *s,
                                                  PCIDevice *pci_dev)
{
    S390PCIBusDevice *pbdev;

    if (!pci_dev) {
        return NULL;
    }

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (pbdev->pdev == pci_dev) {
            return pbdev;
        }
    }

    return NULL;
}

S390PCIBusDevice *s390_pci_find_dev_by_idx(S390pciState *s, uint32_t idx)
{
    return g_hash_table_lookup(s->zpci_table, &idx);
}

S390PCIBusDevice *s390_pci_find_dev_by_fh(S390pciState *s, uint32_t fh)
{
    uint32_t idx = FH_MASK_INDEX & fh;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_idx(s, idx);

    if (pbdev && pbdev->fh == fh) {
        return pbdev;
    }

    return NULL;
}

static void s390_pci_generate_event(uint8_t cc, uint16_t pec, uint32_t fh,
                                    uint32_t fid, uint64_t faddr, uint32_t e)
{
    SeiContainer *sei_cont;
    S390pciState *s = s390_get_phb();

    sei_cont = g_new0(SeiContainer, 1);
    sei_cont->fh = fh;
    sei_cont->fid = fid;
    sei_cont->cc = cc;
    sei_cont->pec = pec;
    sei_cont->faddr = faddr;
    sei_cont->e = e;

    QTAILQ_INSERT_TAIL(&s->pending_sei, sei_cont, link);
    css_generate_css_crws(0);
}

static void s390_pci_generate_plug_event(uint16_t pec, uint32_t fh,
                                         uint32_t fid)
{
    s390_pci_generate_event(2, pec, fh, fid, 0, 0);
}

void s390_pci_generate_error_event(uint16_t pec, uint32_t fh, uint32_t fid,
                                   uint64_t faddr, uint32_t e)
{
    s390_pci_generate_event(1, pec, fh, fid, faddr, e);
}

static void s390_pci_set_irq(void *opaque, int irq, int level)
{
    /* nothing to do */
}

static int s390_pci_map_irq(PCIDevice *pci_dev, int irq_num)
{
    /* nothing to do */
    return 0;
}

static uint64_t s390_pci_get_table_origin(uint64_t iota)
{
    return iota & ~ZPCI_IOTA_RTTO_FLAG;
}

static unsigned int calc_rtx(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
}

static unsigned int calc_sx(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
}

static unsigned int calc_px(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> TARGET_PAGE_BITS) & ZPCI_PT_MASK;
}

static uint64_t get_rt_sto(uint64_t entry)
{
    return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
                ? (entry & ZPCI_RTE_ADDR_MASK)
                : 0;
}

static uint64_t get_st_pto(uint64_t entry)
{
    return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
                ? (entry & ZPCI_STE_ADDR_MASK)
                : 0;
}

static bool rt_entry_isvalid(uint64_t entry)
{
    return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
}

static bool pt_entry_isvalid(uint64_t entry)
{
    return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}

static bool entry_isprotected(uint64_t entry)
{
    return (entry & ZPCI_TABLE_PROT_MASK) == ZPCI_TABLE_PROTECTED;
}

/* ett is expected table type, -1 page table, 0 segment table, 1 region table */
static uint64_t get_table_index(uint64_t iova, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return calc_px(iova);
    case ZPCI_ETT_ST:
        return calc_sx(iova);
    case ZPCI_ETT_RT:
        return calc_rtx(iova);
    }

    return -1;
}

static bool entry_isvalid(uint64_t entry, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return pt_entry_isvalid(entry);
    case ZPCI_ETT_ST:
    case ZPCI_ETT_RT:
        return rt_entry_isvalid(entry);
    }

    return false;
}

/* Return true if address translation is done */
static bool translate_iscomplete(uint64_t entry, int8_t ett)
{
    switch (ett) {
    case 0:
        return (entry & ZPCI_TABLE_FC) ? true : false;
    case 1:
        return false;
    }

    return true;
}

static uint64_t get_frame_size(int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return 1ULL << 12;
    case ZPCI_ETT_ST:
        return 1ULL << 20;
    case ZPCI_ETT_RT:
        return 1ULL << 31;
    }

    return 0;
}

static uint64_t get_next_table_origin(uint64_t entry, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return entry & ZPCI_PTE_ADDR_MASK;
    case ZPCI_ETT_ST:
        return get_st_pto(entry);
    case ZPCI_ETT_RT:
        return get_rt_sto(entry);
    }

    return 0;
}

/**
 * table_translate: do translation within one table and return the following
 *                  table origin
 *
 * @entry: the entry being translated, the result is stored in this.
 * @to: the address of table origin.
 * @ett: expected table type, 1 region table, 0 segment table and -1 page table.
 * @error: error code
 */
static uint64_t table_translate(S390IOTLBEntry *entry, uint64_t to, int8_t ett,
                                uint16_t *error)
{
    uint64_t tx, te, nto = 0;
    uint16_t err = 0;

    tx = get_table_index(entry->iova, ett);
    te = address_space_ldq(&address_space_memory, to + tx * sizeof(uint64_t),
                           MEMTXATTRS_UNSPECIFIED, NULL);

    if (!te) {
        err = ERR_EVENT_INVALTE;
        goto out;
    }

    if (!entry_isvalid(te, ett)) {
        entry->perm &= IOMMU_NONE;
        goto out;
    }

    if (ett == ZPCI_ETT_RT && ((te & ZPCI_TABLE_LEN_RTX) != ZPCI_TABLE_LEN_RTX
                               || te & ZPCI_TABLE_OFFSET_MASK)) {
        err = ERR_EVENT_INVALTL;
        goto out;
    }

    nto = get_next_table_origin(te, ett);
    if (!nto) {
        err = ERR_EVENT_TT;
        goto out;
    }

    if (entry_isprotected(te)) {
        entry->perm &= IOMMU_RO;
    } else {
        entry->perm &= IOMMU_RW;
    }

    if (translate_iscomplete(te, ett)) {
        switch (ett) {
        case ZPCI_ETT_PT:
            entry->translated_addr = te & ZPCI_PTE_ADDR_MASK;
            break;
        case ZPCI_ETT_ST:
            entry->translated_addr = (te & ZPCI_SFAA_MASK) |
                (entry->iova & ~ZPCI_SFAA_MASK);
            break;
        }
        nto = 0;
    }
out:
    if (err) {
        entry->perm = IOMMU_NONE;
        *error = err;
    }
    entry->len = get_frame_size(ett);
    return nto;
}

uint16_t s390_guest_io_table_walk(uint64_t g_iota, hwaddr addr,
                                  S390IOTLBEntry *entry)
{
    uint64_t to = s390_pci_get_table_origin(g_iota);
    int8_t ett = 1;
    uint16_t error = 0;

    entry->iova = addr & TARGET_PAGE_MASK;
    entry->translated_addr = 0;
    entry->perm = IOMMU_RW;

    if (entry_isprotected(g_iota)) {
        entry->perm &= IOMMU_RO;
    }

    while (to) {
        to = table_translate(entry, to, ett--, &error);
    }

    return error;
}

static IOMMUTLBEntry s390_translate_iommu(IOMMUMemoryRegion *mr, hwaddr addr,
                                          IOMMUAccessFlags flag, int iommu_idx)
{
    S390PCIIOMMU *iommu = container_of(mr, S390PCIIOMMU, iommu_mr);
    S390IOTLBEntry *entry;
    uint64_t iova = addr & TARGET_PAGE_MASK;
    uint16_t error = 0;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = 0,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    switch (iommu->pbdev->state) {
    case ZPCI_FS_ENABLED:
    case ZPCI_FS_BLOCKED:
        if (!iommu->enabled) {
            return ret;
        }
        break;
    default:
        return ret;
    }

    trace_s390_pci_iommu_xlate(addr);

    if (addr < iommu->pba || addr > iommu->pal) {
        error = ERR_EVENT_OORANGE;
        goto err;
    }

    entry = g_hash_table_lookup(iommu->iotlb, &iova);
    if (entry) {
        ret.iova = entry->iova;
        ret.translated_addr = entry->translated_addr;
        ret.addr_mask = entry->len - 1;
        ret.perm = entry->perm;
    } else {
        ret.iova = iova;
        ret.addr_mask = ~TARGET_PAGE_MASK;
        ret.perm = IOMMU_NONE;
    }

    if (flag != IOMMU_NONE && !(flag & ret.perm)) {
        error = ERR_EVENT_TPROTE;
    }
err:
    if (error) {
        iommu->pbdev->state = ZPCI_FS_ERROR;
        s390_pci_generate_error_event(error, iommu->pbdev->fh,
                                      iommu->pbdev->fid, addr, 0);
    }
    return ret;
}

static void s390_pci_iommu_replay(IOMMUMemoryRegion *iommu,
                                  IOMMUNotifier *notifier)
{
    /* It's impossible to plug a pci device on s390x that already has iommu
     * mappings which need to be replayed, that is due to the "one iommu per
     * zpci device" construct. But when we support migration of vfio-pci
     * devices in future, we need to revisit this.
     */
}

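/*
 * Return the IOMMU state for the given bus and slot, lazily creating it
 * (root memory region, address space and IOTLB) on first use.
 */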
static S390PCIIOMMU *s390_pci_get_iommu(S390pciState *s, PCIBus *bus,
                                        int devfn)
{
    uint64_t key = (uintptr_t)bus;
    S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key);
    S390PCIIOMMU *iommu;

    if (!table) {
        table = g_new0(S390PCIIOMMUTable, 1);
        table->key = key;
        g_hash_table_insert(s->iommu_table, &table->key, table);
    }

    iommu = table->iommu[PCI_SLOT(devfn)];
    if (!iommu) {
        iommu = S390_PCI_IOMMU(object_new(TYPE_S390_PCI_IOMMU));

        char *mr_name = g_strdup_printf("iommu-root-%02x:%02x.%01x",
                                        pci_bus_num(bus),
                                        PCI_SLOT(devfn),
                                        PCI_FUNC(devfn));
        char *as_name = g_strdup_printf("iommu-pci-%02x:%02x.%01x",
                                        pci_bus_num(bus),
                                        PCI_SLOT(devfn),
                                        PCI_FUNC(devfn));
        memory_region_init(&iommu->mr, OBJECT(iommu), mr_name, UINT64_MAX);
        address_space_init(&iommu->as, &iommu->mr, as_name);
        iommu->iotlb = g_hash_table_new_full(g_int64_hash, g_int64_equal,
                                             NULL, g_free);
        table->iommu[PCI_SLOT(devfn)] = iommu;

        g_free(mr_name);
        g_free(as_name);
    }

    return iommu;
}

static AddressSpace *s390_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    S390pciState *s = opaque;
    S390PCIIOMMU *iommu = s390_pci_get_iommu(s, bus, devfn);

    return &iommu->as;
}

static const PCIIOMMUOps s390_iommu_ops = {
    .get_address_space = s390_pci_dma_iommu,
};

static uint8_t set_ind_atomic(uint64_t ind_loc, uint8_t to_be_set)
{
    uint8_t expected, actual;
    hwaddr len = 1;
    /* avoid multiple fetches */
    uint8_t volatile *ind_addr;

    ind_addr = cpu_physical_memory_map(ind_loc, &len, true);
    if (!ind_addr) {
        s390_pci_generate_error_event(ERR_EVENT_AIRERR, 0, 0, 0, 0);
        return -1;
    }
    actual = *ind_addr;
    do {
        expected = actual;
        actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set);
    } while (actual != expected);
    cpu_physical_memory_unmap((void *)ind_addr, len, 1, len);

    return actual;
}

static void s390_msi_ctrl_write(void *opaque, hwaddr addr, uint64_t data,
                                unsigned int size)
{
    S390PCIBusDevice *pbdev = opaque;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;
    uint64_t ind_bit;
    uint32_t sum_bit;

    assert(pbdev);

    trace_s390_pci_msi_ctrl_write(data, pbdev->idx, vec);

    if (pbdev->state != ZPCI_FS_ENABLED) {
        return;
    }

    ind_bit = pbdev->routes.adapter.ind_offset;
    sum_bit = pbdev->routes.adapter.summary_offset;

    set_ind_atomic(pbdev->routes.adapter.ind_addr + (ind_bit + vec) / 8,
                   0x80 >> ((ind_bit + vec) % 8));
    if (!set_ind_atomic(pbdev->routes.adapter.summary_addr + sum_bit / 8,
                        0x80 >> (sum_bit % 8))) {
        css_adapter_interrupt(CSS_IO_ADAPTER_PCI, pbdev->isc);
    }
}

static uint64_t s390_msi_ctrl_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0xffffffff;
}

static const MemoryRegionOps s390_msi_ctrl_ops = {
    .write = s390_msi_ctrl_write,
    .read = s390_msi_ctrl_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

void s390_pci_iommu_enable(S390PCIIOMMU *iommu)
{
    /*
     * The iommu region is initialized against a 0-mapped address space,
     * so the smallest IOMMU region we can define runs from 0 to the end
     * of the PCI address space.
     */
    char *name = g_strdup_printf("iommu-s390-%04x", iommu->pbdev->uid);
    memory_region_init_iommu(&iommu->iommu_mr, sizeof(iommu->iommu_mr),
                             TYPE_S390_IOMMU_MEMORY_REGION, OBJECT(&iommu->mr),
                             name, iommu->pal + 1);
    iommu->enabled = true;
    memory_region_add_subregion(&iommu->mr, 0, MEMORY_REGION(&iommu->iommu_mr));
    g_free(name);
}

void s390_pci_iommu_direct_map_enable(S390PCIIOMMU *iommu)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    S390CcwMachineState *s390ms = S390_CCW_MACHINE(ms);

    /*
     * For direct-mapping we must map the entire guest address space. Rather
     * than using an iommu, create a memory region alias that maps GPA X to
     * IOVA X + SDMA. VFIO will handle pinning via its memory listener.
     */
    g_autofree char *name = g_strdup_printf("iommu-dm-s390-%04x",
                                            iommu->pbdev->uid);

    iommu->dm_mr = g_malloc0(sizeof(*iommu->dm_mr));
    memory_region_init_alias(iommu->dm_mr, OBJECT(&iommu->mr), name,
                             get_system_memory(), 0,
                             s390_get_memory_limit(s390ms));
    iommu->enabled = true;
    memory_region_add_subregion(&iommu->mr, iommu->pbdev->zpci_fn.sdma,
                                iommu->dm_mr);
}

void s390_pci_iommu_disable(S390PCIIOMMU *iommu)
{
    iommu->enabled = false;
    g_hash_table_remove_all(iommu->iotlb);
    if (iommu->dm_mr) {
        memory_region_del_subregion(&iommu->mr, iommu->dm_mr);
        object_unparent(OBJECT(iommu->dm_mr));
        g_free(iommu->dm_mr);
        iommu->dm_mr = NULL;
    } else {
        memory_region_del_subregion(&iommu->mr,
                                    MEMORY_REGION(&iommu->iommu_mr));
        object_unparent(OBJECT(&iommu->iommu_mr));
    }
}

static void s390_pci_iommu_free(S390pciState *s, PCIBus *bus, int32_t devfn)
{
    uint64_t key = (uintptr_t)bus;
    S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key);
    S390PCIIOMMU *iommu = table ? table->iommu[PCI_SLOT(devfn)] : NULL;

    if (!table || !iommu) {
        return;
    }

    table->iommu[PCI_SLOT(devfn)] = NULL;
    g_hash_table_destroy(iommu->iotlb);
    /*
     * An attached PCI device may have memory listeners, eg. VFIO PCI.
     * The associated subregion will already have been unmapped in
     * s390_pci_iommu_disable in response to the guest deconfigure request.
     * Remove the listeners now before destroying the address space.
     */
    address_space_remove_listeners(&iommu->as);
    address_space_destroy(&iommu->as);
    object_unparent(OBJECT(&iommu->mr));
    object_unparent(OBJECT(iommu));
    object_unref(OBJECT(iommu));
}

S390PCIGroup *s390_group_create(int id, int host_id)
{
    S390PCIGroup *group;
    S390pciState *s = s390_get_phb();

    group = g_new0(S390PCIGroup, 1);
    group->id = id;
    group->host_id = host_id;
    QTAILQ_INSERT_TAIL(&s->zpci_groups, group, link);
    return group;
}

S390PCIGroup *s390_group_find(int id)
{
    S390PCIGroup *group;
    S390pciState *s = s390_get_phb();

    QTAILQ_FOREACH(group, &s->zpci_groups, link) {
        if (group->id == id) {
            return group;
        }
    }
    return NULL;
}

S390PCIGroup *s390_group_find_host_sim(int host_id)
{
    S390PCIGroup *group;
    S390pciState *s = s390_get_phb();

    QTAILQ_FOREACH(group, &s->zpci_groups, link) {
        if (group->id >= ZPCI_SIM_GRP_START && group->host_id == host_id) {
            return group;
        }
    }
    return NULL;
}

static void s390_pci_init_default_group(void)
{
    S390PCIGroup *group;
    ClpRspQueryPciGrp *resgrp;

    group = s390_group_create(ZPCI_DEFAULT_FN_GRP, ZPCI_DEFAULT_FN_GRP);
    resgrp = &group->zpci_group;
    resgrp->fr = 1;
    resgrp->dasm = 0;
    resgrp->msia = ZPCI_MSI_ADDR;
    resgrp->mui = DEFAULT_MUI;
    resgrp->i = 128;
    resgrp->maxstbl = 128;
    resgrp->version = 0;
    resgrp->dtsm = ZPCI_DTSM;
}

static void set_pbdev_info(S390PCIBusDevice *pbdev)
{
    pbdev->zpci_fn.sdma = ZPCI_SDMA_ADDR;
    pbdev->zpci_fn.edma = ZPCI_EDMA_ADDR;
    pbdev->zpci_fn.pchid = 0;
    pbdev->zpci_fn.pfgid = ZPCI_DEFAULT_FN_GRP;
    pbdev->zpci_fn.fid = pbdev->fid;
    pbdev->zpci_fn.uid = pbdev->uid;
    pbdev->pci_group = s390_group_find(ZPCI_DEFAULT_FN_GRP);
}

static void s390_pcihost_realize(DeviceState *dev, Error **errp)
{
    PCIBus *b;
    BusState *bus;
    PCIHostState *phb = PCI_HOST_BRIDGE(dev);
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);

    trace_s390_pcihost("realize");

    b = pci_register_root_bus(dev, NULL, s390_pci_set_irq, s390_pci_map_irq,
                              NULL, get_system_memory(), get_system_io(), 0,
                              64, TYPE_PCI_BUS);
    pci_setup_iommu(b, &s390_iommu_ops, s);

    bus = BUS(b);
    qbus_set_hotplug_handler(bus, OBJECT(dev));
    phb->bus = b;

    s->bus = S390_PCI_BUS(qbus_new(TYPE_S390_PCI_BUS, dev, NULL));
    qbus_set_hotplug_handler(BUS(s->bus), OBJECT(dev));

    s->iommu_table = g_hash_table_new_full(g_int64_hash, g_int64_equal,
                                           NULL, g_free);
    s->zpci_table = g_hash_table_new_full(g_int_hash, g_int_equal, NULL, NULL);
    s->bus_no = 0;
    s->next_sim_grp = ZPCI_SIM_GRP_START;
    QTAILQ_INIT(&s->pending_sei);
    QTAILQ_INIT(&s->zpci_devs);
    QTAILQ_INIT(&s->zpci_dma_limit);
    QTAILQ_INIT(&s->zpci_groups);

    s390_pci_init_default_group();
    css_register_io_adapters(CSS_IO_ADAPTER_PCI, true, false,
                             S390_ADAPTER_SUPPRESSIBLE, errp);
}

static void s390_pcihost_unrealize(DeviceState *dev)
{
    S390PCIGroup *group;
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);

    while (!QTAILQ_EMPTY(&s->zpci_groups)) {
        group = QTAILQ_FIRST(&s->zpci_groups);
        QTAILQ_REMOVE(&s->zpci_groups, group, link);
    }
}

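/*
 * Read the MSI-X capability of the associated PCI device and map the MSI
 * notification region into the device's IOMMU region at the function
 * group's MSI address. Returns -1 if the device has no MSI-X capability.
 */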
static int s390_pci_msix_init(S390PCIBusDevice *pbdev)
{
    char *name;
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;

    pos = pci_find_capability(pbdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return -1;
    }

    ctrl = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_FLAGS,
                                       pci_config_size(pbdev->pdev),
                                       sizeof(ctrl));
    table = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_TABLE,
                                        pci_config_size(pbdev->pdev),
                                        sizeof(table));
    pba = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_PBA,
                                      pci_config_size(pbdev->pdev),
                                      sizeof(pba));

    pbdev->msix.table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    name = g_strdup_printf("msix-s390-%04x", pbdev->uid);
    memory_region_init_io(&pbdev->msix_notify_mr, OBJECT(pbdev),
                          &s390_msi_ctrl_ops, pbdev, name, TARGET_PAGE_SIZE);
    memory_region_add_subregion(&pbdev->iommu->mr,
                                pbdev->pci_group->zpci_group.msia,
                                &pbdev->msix_notify_mr);
    g_free(name);

    return 0;
}

static void s390_pci_msix_free(S390PCIBusDevice *pbdev)
{
    if (pbdev->msix.entries == 0) {
        return;
    }

    memory_region_del_subregion(&pbdev->iommu->mr, &pbdev->msix_notify_mr);
    object_unparent(OBJECT(&pbdev->msix_notify_mr));
}

static S390PCIBusDevice *s390_pci_device_new(S390pciState *s,
                                             const char *target, Error **errp)
{
    Error *local_err = NULL;
    DeviceState *dev;

    dev = qdev_try_new(TYPE_S390_PCI_DEVICE);
    if (!dev) {
        error_setg(errp, "zPCI device could not be created");
        return NULL;
    }

    if (!object_property_set_str(OBJECT(dev), "target", target, &local_err)) {
        object_unparent(OBJECT(dev));
        error_propagate_prepend(errp, local_err,
                                "zPCI device could not be created: ");
        return NULL;
    }
    if (!qdev_realize_and_unref(dev, BUS(s->bus), &local_err)) {
        object_unparent(OBJECT(dev));
        error_propagate_prepend(errp, local_err,
                                "zPCI device could not be created: ");
        return NULL;
    }

    return S390_PCI_DEVICE(dev);
}

static bool s390_pci_alloc_idx(S390pciState *s, S390PCIBusDevice *pbdev)
{
    uint32_t idx;

    idx = s->next_idx;
    while (s390_pci_find_dev_by_idx(s, idx)) {
        idx = (idx + 1) & FH_MASK_INDEX;
        if (idx == s->next_idx) {
            return false;
        }
    }

    pbdev->idx = idx;
    return true;
}

static void s390_pcihost_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);

    if (!s390_has_feat(S390_FEAT_ZPCI)) {
        warn_report("Plugging a PCI/zPCI device without the 'zpci' CPU "
                    "feature enabled; the guest will not be able to see/use "
                    "this device");
    }

    if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev);

        if (!s390_pci_alloc_idx(s, pbdev)) {
            error_setg(errp, "no slot for plugging zpci device");
            return;
        }
    }
}

static void s390_pci_update_subordinate(PCIDevice *dev, uint32_t nr)
{
    uint32_t old_nr;

    pci_default_write_config(dev, PCI_SUBORDINATE_BUS, nr, 1);
    while (!pci_bus_is_root(pci_get_bus(dev))) {
        dev = pci_get_bus(dev)->parent_dev;

        old_nr = pci_default_read_config(dev, PCI_SUBORDINATE_BUS, 1);
        if (old_nr < nr) {
            pci_default_write_config(dev, PCI_SUBORDINATE_BUS, nr, 1);
        }
    }
}

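/*
 * Prepare a zPCI device for interpretive execution: fetch the host function
 * handle (masking off the enable bit until the guest enables the function via
 * CLP SET PCI FN) and adopt the host-provided function index, releasing the
 * previously allocated one if they differ.
 */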
static int s390_pci_interp_plug(S390pciState *s, S390PCIBusDevice *pbdev)
{
    uint32_t idx, fh;

    if (!s390_pci_get_host_fh(pbdev, &fh)) {
        return -EPERM;
    }

    /*
     * The host device is already in an enabled state, but we always present
     * the initial device state to the guest as disabled (ZPCI_FS_DISABLED).
     * Therefore, mask off the enable bit from the passthrough handle until
     * the guest issues a CLP SET PCI FN later to enable the device.
     */
    pbdev->fh = fh & ~FH_MASK_ENABLE;

    /* Next, see if the idx is already in-use */
    idx = pbdev->fh & FH_MASK_INDEX;
    if (pbdev->idx != idx) {
        if (s390_pci_find_dev_by_idx(s, idx)) {
            return -EINVAL;
        }
        /*
         * Update the idx entry with the passed through idx
         * If the relinquished idx is lower than next_idx, use it
         * to replace next_idx
         */
        g_hash_table_remove(s->zpci_table, &pbdev->idx);
        if (idx < s->next_idx) {
            s->next_idx = idx;
        }
        pbdev->idx = idx;
        g_hash_table_insert(s->zpci_table, &pbdev->idx, pbdev);
    }

    return 0;
}

static void s390_pcihost_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                              Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    PCIDevice *pdev = NULL;
    S390PCIBusDevice *pbdev = NULL;
    int rc;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
        PCIBridge *pb = PCI_BRIDGE(dev);

        pdev = PCI_DEVICE(dev);
        pci_bridge_map_irq(pb, dev->id, s390_pci_map_irq);
        pci_setup_iommu(&pb->sec_bus, &s390_iommu_ops, s);

        qbus_set_hotplug_handler(BUS(&pb->sec_bus), OBJECT(s));

        if (dev->hotplugged) {
            pci_default_write_config(pdev, PCI_PRIMARY_BUS,
                                     pci_dev_bus_num(pdev), 1);
            s->bus_no += 1;
            pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1);

            s390_pci_update_subordinate(pdev, s->bus_no);
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        pdev = PCI_DEVICE(dev);

        /*
         * Multifunction is not supported due to the lack of CLP. However,
         * do not check for multifunction capability for SR-IOV devices because
         * SR-IOV devices automatically add the multifunction capability
         * whether the user intends to use the functions other than the PF.
         */
        if (pdev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION &&
            !pdev->exp.sriov_cap) {
            error_setg(errp, "multifunction not supported in s390");
            return;
        }

        if (!dev->id) {
            /* In the case the PCI device does not define an id */
            /* we generate one based on the PCI address */
            dev->id = g_strdup_printf("auto_%02x:%02x.%01x",
                                      pci_dev_bus_num(pdev),
                                      PCI_SLOT(pdev->devfn),
                                      PCI_FUNC(pdev->devfn));
        }

        pbdev = s390_pci_find_dev_by_target(s, dev->id);
        if (!pbdev) {
            /*
             * VFs are automatically created by PF, and creating zpci for them
             * will result in unexpected usage of fids. Currently QEMU does not
             * support multifunction for s390x so we don't need zpci for VFs
             * anyway.
             */
            if (pci_is_vf(pdev)) {
                return;
            }

            pbdev = s390_pci_device_new(s, dev->id, errp);
            if (!pbdev) {
                return;
            }
        }

        pbdev->pdev = pdev;
        pbdev->iommu = s390_pci_get_iommu(s, pci_get_bus(pdev), pdev->devfn);
        pbdev->iommu->pbdev = pbdev;
        pbdev->state = ZPCI_FS_DISABLED;
        set_pbdev_info(pbdev);

        if (object_dynamic_cast(OBJECT(dev), "vfio-pci")) {
            /*
             * By default, interpretation is always requested; if the available
             * facilities indicate it is not available, fallback to the
             * interception model.
             */
            if (pbdev->interp) {
                if (s390_pci_kvm_interp_allowed()) {
                    rc = s390_pci_interp_plug(s, pbdev);
                    if (rc) {
                        error_setg(errp, "Plug failed for zPCI device in "
                                   "interpretation mode: %d", rc);
                        return;
                    }
                } else {
                    trace_s390_pcihost("zPCI interpretation missing");
                    pbdev->interp = false;
                    pbdev->forwarding_assist = false;
                }
            }
            pbdev->iommu->dma_limit = s390_pci_start_dma_count(s, pbdev);
            /* Fill in CLP information passed via the vfio region */
            s390_pci_get_clp_info(pbdev);
            if (!pbdev->interp) {
                /* Do vfio passthrough but intercept for I/O */
                pbdev->fh |= FH_SHM_VFIO;
                pbdev->forwarding_assist = false;
            }
            /* Register shutdown notifier and reset callback for ISM devices */
            if (pbdev->pft == ZPCI_PFT_ISM) {
                pbdev->shutdown_notifier.notify = s390_pci_shutdown_notifier;
                qemu_register_shutdown_notifier(&pbdev->shutdown_notifier);
            }
        } else {
            pbdev->fh |= FH_SHM_EMUL;
            /* Always intercept emulated devices */
            pbdev->interp = false;
            pbdev->forwarding_assist = false;
            pbdev->rtr_avail = false;
        }

        if (s390_pci_msix_init(pbdev) && !pbdev->interp) {
            error_setg(errp, "MSI-X support is mandatory "
                       "in the S390 architecture");
            return;
        }

        if (dev->hotplugged) {
            s390_pci_generate_plug_event(HP_EVENT_TO_CONFIGURED,
                                         pbdev->fh, pbdev->fid);
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);

        /* the allocated idx is actually getting used */
        s->next_idx = (pbdev->idx + 1) & FH_MASK_INDEX;
        pbdev->fh = pbdev->idx;
        QTAILQ_INSERT_TAIL(&s->zpci_devs, pbdev, link);
        g_hash_table_insert(s->zpci_table, &pbdev->idx, pbdev);
    } else {
        g_assert_not_reached();
    }
}

static void s390_pcihost_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    S390PCIBusDevice *pbdev = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        PCIBus *bus;
        int32_t devfn;

        pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev));
        if (!pbdev) {
            g_assert(pci_is_vf(pci_dev));
            return;
        }

        s390_pci_generate_plug_event(HP_EVENT_STANDBY_TO_RESERVED,
                                     pbdev->fh, pbdev->fid);
        bus = pci_get_bus(pci_dev);
        devfn = pci_dev->devfn;
        qdev_unrealize(dev);

        s390_pci_msix_free(pbdev);
        s390_pci_iommu_free(s, bus, devfn);
        pbdev->pdev = NULL;
        pbdev->state = ZPCI_FS_RESERVED;
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);
        pbdev->fid = 0;
        QTAILQ_REMOVE(&s->zpci_devs, pbdev, link);
        g_hash_table_remove(s->zpci_table, &pbdev->idx);
        if (pbdev->iommu->dma_limit) {
            s390_pci_end_dma_count(s, pbdev->iommu->dma_limit);
        }
        qdev_unrealize(dev);
    }
}

static void s390_pcihost_unplug_request(HotplugHandler *hotplug_dev,
                                        DeviceState *dev,
                                        Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    S390PCIBusDevice *pbdev;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
        error_setg(errp, "PCI bridge hot unplug currently not supported");
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        /*
         * Redirect the unplug request to the zPCI device and remember that
         * we've checked the PCI device already (to prevent endless recursion).
         */
        pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev));
        if (!pbdev) {
            g_assert(pci_is_vf(PCI_DEVICE(dev)));
            return;
        }

        pbdev->pci_unplug_request_processed = true;
        qdev_unplug(DEVICE(pbdev), errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);

        /*
         * If unplug was initially requested for the zPCI device, we
         * first have to redirect to the PCI device, which will in return
         * redirect back to us after performing its checks (if the request
         * is not blocked, e.g. because it's a PCI bridge).
         */
        if (pbdev->pdev && !pbdev->pci_unplug_request_processed) {
            qdev_unplug(DEVICE(pbdev->pdev), errp);
            return;
        }
        pbdev->pci_unplug_request_processed = false;

        switch (pbdev->state) {
        case ZPCI_FS_STANDBY:
        case ZPCI_FS_RESERVED:
            s390_pci_perform_unplug(pbdev);
            break;
        default:
            /*
             * Allow to send multiple requests, e.g. if the guest crashed
             * before releasing the device, we would not be able to send
             * another request to the same VM (e.g. fresh OS).
             */
            pbdev->unplug_requested = true;
            s390_pci_generate_plug_event(HP_EVENT_DECONFIGURE_REQUEST,
                                         pbdev->fh, pbdev->fid);
        }
    } else {
        g_assert_not_reached();
    }
}

static void s390_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev,
                                      void *opaque)
{
    S390pciState *s = opaque;
    PCIBus *sec_bus = NULL;

    if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
         PCI_HEADER_TYPE_BRIDGE)) {
        return;
    }

    (s->bus_no)++;
    pci_default_write_config(pdev, PCI_PRIMARY_BUS, pci_dev_bus_num(pdev), 1);
    pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1);
    pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1);

    sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
    if (!sec_bus) {
        return;
    }

    /* Assign numbers to all child bridges. The last is the highest number. */
    pci_for_each_device_under_bus(sec_bus, s390_pci_enumerate_bridge, s);
    pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1);
}

void s390_pci_ism_reset(void)
{
    S390pciState *s = s390_get_phb();

    S390PCIBusDevice *pbdev, *next;

    /* Trigger reset event for each passthrough ISM device currently in-use */
    QTAILQ_FOREACH_SAFE(pbdev, &s->zpci_devs, link, next) {
        if (pbdev->interp && pbdev->pft == ZPCI_PFT_ISM &&
            pbdev->fh & FH_MASK_ENABLE) {
            s390_pci_kvm_aif_disable(pbdev);

            pci_device_reset(pbdev->pdev);
        }
    }
}

static void s390_pcihost_reset(DeviceState *dev)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);
    PCIBus *bus = s->parent_obj.bus;
    S390PCIBusDevice *pbdev, *next;

    /* Process all pending unplug requests */
    QTAILQ_FOREACH_SAFE(pbdev, &s->zpci_devs, link, next) {
        if (pbdev->unplug_requested) {
            if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
                /* Interpreted devices were using interrupt forwarding */
                s390_pci_kvm_aif_disable(pbdev);
            } else if (pbdev->summary_ind) {
                pci_dereg_irqs(pbdev);
            }
            if (pbdev->iommu->enabled) {
                pci_dereg_ioat(pbdev->iommu);
            }
            pbdev->state = ZPCI_FS_STANDBY;
            s390_pci_perform_unplug(pbdev);
        }
    }

    /*
     * When resetting a PCI bridge, the assigned numbers are set to 0. So
     * on every system reset, we also have to reassign numbers.
     */
    s->bus_no = 0;
    pci_for_each_device_under_bus(bus, s390_pci_enumerate_bridge, s);
}

static void s390_pcihost_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    device_class_set_legacy_reset(dc, s390_pcihost_reset);
    dc->realize = s390_pcihost_realize;
    dc->unrealize = s390_pcihost_unrealize;
    hc->pre_plug = s390_pcihost_pre_plug;
    hc->plug = s390_pcihost_plug;
    hc->unplug_request = s390_pcihost_unplug_request;
    hc->unplug = s390_pcihost_unplug;
    msi_nonbroken = true;
}

static const TypeInfo s390_pcihost_info = {
    .name          = TYPE_S390_PCI_HOST_BRIDGE,
    .parent        = TYPE_PCI_HOST_BRIDGE,
    .instance_size = sizeof(S390pciState),
    .class_init    = s390_pcihost_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

static const TypeInfo s390_pcibus_info = {
    .name = TYPE_S390_PCI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(S390PCIBus),
};

static uint16_t s390_pci_generate_uid(S390pciState *s)
{
    uint16_t uid = 0;

    do {
        uid++;
        if (!s390_pci_find_dev_by_uid(s, uid)) {
            return uid;
        }
    } while (uid < ZPCI_MAX_UID);

    return UID_UNDEFINED;
}

static uint32_t s390_pci_generate_fid(S390pciState *s, Error **errp)
{
    uint32_t fid = 0;

    do {
        if (!s390_pci_find_dev_by_fid(s, fid)) {
            return fid;
        }
    } while (fid++ != ZPCI_MAX_FID);

    error_setg(errp, "no free fid could be found");
    return 0;
}

static void s390_pci_device_realize(DeviceState *dev, Error **errp)
{
    S390PCIBusDevice *zpci = S390_PCI_DEVICE(dev);
    S390pciState *s = s390_get_phb();

    if (!zpci->target) {
        error_setg(errp, "target must be defined");
        return;
    }

    if (s390_pci_find_dev_by_target(s, zpci->target)) {
        error_setg(errp, "target %s already has an associated zpci device",
                   zpci->target);
        return;
    }

    if (zpci->uid == UID_UNDEFINED) {
        zpci->uid = s390_pci_generate_uid(s);
        if (!zpci->uid) {
            error_setg(errp, "no free uid could be found");
            return;
        }
    } else if (s390_pci_find_dev_by_uid(s, zpci->uid)) {
        error_setg(errp, "uid %u already in use", zpci->uid);
        return;
    }

    if (!zpci->fid_defined) {
        Error *local_error = NULL;

        zpci->fid = s390_pci_generate_fid(s, &local_error);
        if (local_error) {
            error_propagate(errp, local_error);
            return;
        }
    } else if (s390_pci_find_dev_by_fid(s, zpci->fid)) {
        error_setg(errp, "fid %u already in use", zpci->fid);
        return;
    }

    zpci->state = ZPCI_FS_RESERVED;
    zpci->fmb.format = ZPCI_FMB_FORMAT;
}

static void s390_pci_device_reset(DeviceState *dev)
{
    S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev);

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        return;
    case ZPCI_FS_STANDBY:
        break;
    default:
        pbdev->fh &= ~FH_MASK_ENABLE;
        pbdev->state = ZPCI_FS_DISABLED;
        break;
    }

    if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
        /* Interpreted devices were using interrupt forwarding */
        s390_pci_kvm_aif_disable(pbdev);
    } else if (pbdev->summary_ind) {
        pci_dereg_irqs(pbdev);
    }
    if (pbdev->iommu->enabled) {
        pci_dereg_ioat(pbdev->iommu);
    }

    fmb_timer_free(pbdev);
}

static void s390_pci_get_fid(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    const Property *prop = opaque;
    uint32_t *ptr = object_field_prop_ptr(obj, prop);

    visit_type_uint32(v, name, ptr, errp);
}

static void s390_pci_set_fid(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    S390PCIBusDevice *zpci = S390_PCI_DEVICE(obj);
    const Property *prop = opaque;
    uint32_t *ptr = object_field_prop_ptr(obj, prop);

    if (!visit_type_uint32(v, name, ptr, errp)) {
        return;
    }
    zpci->fid_defined = true;
}

static const PropertyInfo s390_pci_fid_propinfo = {
    .type = "uint32",
    .description = "zpci_fid",
    .get = s390_pci_get_fid,
    .set = s390_pci_set_fid,
};

#define DEFINE_PROP_S390_PCI_FID(_n, _s, _f) \
    DEFINE_PROP(_n, _s, _f, s390_pci_fid_propinfo, uint32_t)

static const Property s390_pci_device_properties[] = {
    DEFINE_PROP_UINT16("uid", S390PCIBusDevice, uid, UID_UNDEFINED),
    DEFINE_PROP_S390_PCI_FID("fid", S390PCIBusDevice, fid),
    DEFINE_PROP_STRING("target", S390PCIBusDevice, target),
    DEFINE_PROP_BOOL("interpret", S390PCIBusDevice, interp, true),
    DEFINE_PROP_BOOL("forwarding-assist", S390PCIBusDevice, forwarding_assist,
                     true),
    DEFINE_PROP_BOOL("relaxed-translation", S390PCIBusDevice, rtr_avail,
                     true),
};

static const VMStateDescription s390_pci_device_vmstate = {
    .name = TYPE_S390_PCI_DEVICE,
    /*
     * TODO: add state handling here, so migration works at least with
     * emulated pci devices on s390x
     */
    .unmigratable = 1,
};

static void s390_pci_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "zpci device";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    device_class_set_legacy_reset(dc, s390_pci_device_reset);
    dc->bus_type = TYPE_S390_PCI_BUS;
    dc->realize = s390_pci_device_realize;
    device_class_set_props(dc, s390_pci_device_properties);
    dc->vmsd = &s390_pci_device_vmstate;
}

static const TypeInfo s390_pci_device_info = {
    .name = TYPE_S390_PCI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(S390PCIBusDevice),
    .class_init = s390_pci_device_class_init,
};

static const TypeInfo s390_pci_iommu_info = {
    .name = TYPE_S390_PCI_IOMMU,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(S390PCIIOMMU),
};

static void s390_iommu_memory_region_class_init(ObjectClass *klass, void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = s390_translate_iommu;
    imrc->replay = s390_pci_iommu_replay;
}

static const TypeInfo s390_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_S390_IOMMU_MEMORY_REGION,
    .class_init = s390_iommu_memory_region_class_init,
};

static void s390_pci_register_types(void)
{
    type_register_static(&s390_pcihost_info);
    type_register_static(&s390_pcibus_info);
    type_register_static(&s390_pci_device_info);
    type_register_static(&s390_pci_iommu_info);
    type_register_static(&s390_iommu_memory_region_info);
}

type_init(s390_pci_register_types)