// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);

static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);

#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_BAR_COUNT / 2),	\
	    ZPCI_IOMAP_MAX_ENTRIES)

static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

DEFINE_STATIC_KEY_FALSE(have_mio);

static struct kmem_cache *zdev_fmb_cache;

struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	spin_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			break;
		}
	}
	spin_unlock(&zpci_list_lock);
	return zdev;
}

void zpci_remove_reserved_devices(void)
{
	struct zpci_dev *tmp, *zdev;
	enum zpci_state state;
	LIST_HEAD(remove);

	spin_lock(&zpci_list_lock);
	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
		    !clp_get_state(zdev->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED)
			list_move_tail(&zdev->entry, &remove);
	}
	spin_unlock(&zpci_list_lock);

	list_for_each_entry_safe(zdev, tmp, &remove, entry)
		zpci_remove_device(zdev);
}

static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
{
	return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_dev *) bus->sysdata)->domain;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 status;

	WARN_ON_ONCE(iota & 0x3fff);
	fib.pba = base;
	fib.pal = limit;
	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;
	return cc ? -EIO : 0;
}
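/*
 * PCI function measurement blocks (FMBs) hold per-function hardware
 * counters. The block handed to the hardware must be 16-byte aligned
 * (see the WARN_ON below); zdev_fmb_cache is created with the required
 * alignment in zpci_mem_init().
 */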
/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	atomic64_set(&zdev->allocated_pages, 0);
	atomic64_set(&zdev->mapped_pages, 0);
	atomic64_set(&zdev->unmapped_pages, 0);

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = __zpci_load(&data, req, offset);
	if (!rc) {
		data = le64_to_cpu((__force __le64) data);
		data >>= (8 - len) * 8;
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data <<= (8 - len) * 8;
	data = (__force u64) cpu_to_le64(data);
	rc = __zpci_store(data, req, offset);
	return rc;
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}

void __iomem *ioremap(unsigned long ioaddr, unsigned long size)
{
	struct vm_struct *area;
	unsigned long offset;

	if (!size)
		return NULL;

	if (!static_branch_unlikely(&have_mio))
		return (void __iomem *) ioaddr;

	offset = ioaddr & ~PAGE_MASK;
	ioaddr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	if (ioremap_page_range((unsigned long) area->addr,
			       (unsigned long) area->addr + size,
			       ioaddr, PAGE_KERNEL)) {
		vunmap(area->addr);
		return NULL;
	}
	return (void __iomem *) ((unsigned long) area->addr + offset);
}
EXPORT_SYMBOL(ioremap);

void iounmap(volatile void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		vunmap((__force void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
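/*
 * BAR access comes in two flavors: without the MIO facility, accesses
 * go through the PCI load/store instructions, keyed by an entry in the
 * global iomap table (the _fh variants below, addressed via
 * ZPCI_ADDR()/ZPCI_IDX()); with MIO, the BAR is mapped directly into
 * the kernel address space via ioremap() (the _mio variants).
 */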
/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
					unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}

static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
					 unsigned long offset,
					 unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
			      unsigned long offset, unsigned long max)
{
	if (!pci_resource_len(pdev, bar) || bar >= PCI_BAR_COUNT)
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);
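/*
 * The write-combining variants differ from pci_iomap_range_mio() only
 * in that they map the BAR's write-back MIO address (mio_wb) instead
 * of the write-through one (mio_wt); in non-MIO mode they fall back to
 * the same iomap-table path.
 */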
static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
					    unsigned long offset, unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
				 unsigned long offset, unsigned long max)
{
	if (!pci_resource_len(pdev, bar) || bar >= PCI_BAR_COUNT)
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);

void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);

static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}

static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
	iounmap(addr);
}

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		pci_iounmap_mio(pdev, addr);
	else
		pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);
	int ret;

	if (!zdev || devfn != ZPCI_DEVFN)
		ret = -ENODEV;
	else
		ret = zpci_cfg_load(zdev, where, val, size);

	return ret;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);
	int ret;

	if (!zdev || devfn != ZPCI_DEVFN)
		ret = -ENODEV;
	else
		ret = zpci_cfg_store(zdev, where, val, size);

	return ret;
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

#ifdef CONFIG_PCI_IOV
static struct resource iov_res = {
	.name	= "PCI IOV res",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
#endif

static void zpci_map_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;

		if (zpci_use_mio(zdev))
			pdev->resource[i].start =
				(resource_size_t __force) zdev->bars[i].mio_wb;
		else
			pdev->resource[i].start = (resource_size_t __force)
				pci_iomap_range_fh(pdev, i, 0, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}

#ifdef CONFIG_PCI_IOV
	i = PCI_IOV_RESOURCES;

	for (; i < PCI_SRIOV_NUM_BARS + PCI_IOV_RESOURCES; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pdev->resource[i].parent = &iov_res;
	}
#endif
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	if (zpci_use_mio(zdev))
		return;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap_fh(pdev, (void __iomem __force *)
			       pdev->resource[i].start);
	}
}
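/*
 * Allocate and free slots in the global iomap table. Each used BAR of
 * each function in non-MIO mode occupies one slot, guarded by
 * zpci_iomap_lock and tracked in zpci_iomap_bitmap.
 */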
static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	unsigned long entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
	if (entry == ZPCI_IOMAP_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
}

static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
				    unsigned long size, unsigned long flags)
{
	struct resource *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->name = zdev->res_name;

	if (request_resource(&iomem_resource, r)) {
		kfree(r);
		return NULL;
	}
	return r;
}

static int zpci_setup_bus_resources(struct zpci_dev *zdev,
				    struct list_head *resources)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->domain, ZPCI_BUS_NR);

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		if (zpci_use_mio(zdev))
			addr = (unsigned long) zdev->bars[i].mio_wb;
		else
			addr = ZPCI_ADDR(entry);
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
		pci_add_resource(resources, res);
	}

	return 0;
}

static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		if (!zdev->bars[i].size || !zdev->bars[i].res)
			continue;

		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		release_resource(zdev->bars[i].res);
		kfree(zdev->bars[i].res);
	}
}

int pcibios_add_device(struct pci_dev *pdev)
{
	struct resource *res;
	int i;

	if (pdev->is_physfn)
		pdev->no_vf_scan = 1;

	pdev->dev.groups = zpci_attr_groups;
	pdev->dev.dma_ops = &s390_pci_dma_ops;
	zpci_map_resources(pdev);

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}

void pcibios_release_device(struct pci_dev *pdev)
{
	zpci_unmap_resources(pdev);
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
	zpci_fmb_enable_device(zdev);

	return pci_enable_resources(pdev, mask);
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}
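/*
 * Hibernation callbacks: zpci_freeze() tears down the DMA translations
 * and BAR mappings of an online function and disables it;
 * zpci_restore() re-enables the function, remaps its resources and
 * re-registers the DMA translation table.
 */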
#ifdef CONFIG_HIBERNATE_CALLBACKS
static int zpci_restore(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct zpci_dev *zdev = to_zpci(pdev);
	int ret = 0;

	if (zdev->state != ZPCI_FN_STATE_ONLINE)
		goto out;

	ret = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (ret)
		goto out;

	zpci_map_resources(pdev);
	zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
			   (u64) zdev->dma_table);

out:
	return ret;
}

static int zpci_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct zpci_dev *zdev = to_zpci(pdev);

	if (zdev->state != ZPCI_FN_STATE_ONLINE)
		return 0;

	zpci_unregister_ioat(zdev, 0);
	zpci_unmap_resources(pdev);
	return clp_disable_fh(zdev);
}

struct dev_pm_ops pcibios_pm_ops = {
	.thaw_noirq = zpci_restore,
	.freeze_noirq = zpci_freeze,
	.restore_noirq = zpci_restore,
	.poweroff_noirq = zpci_freeze,
};
#endif /* CONFIG_HIBERNATE_CALLBACKS */

static int zpci_alloc_domain(struct zpci_dev *zdev)
{
	if (zpci_unique_uid) {
		zdev->domain = (u16) zdev->uid;
		if (zdev->domain >= ZPCI_NR_DEVICES)
			return 0;

		spin_lock(&zpci_domain_lock);
		if (test_bit(zdev->domain, zpci_domain)) {
			spin_unlock(&zpci_domain_lock);
			return -EEXIST;
		}
		set_bit(zdev->domain, zpci_domain);
		spin_unlock(&zpci_domain_lock);
		return 0;
	}

	spin_lock(&zpci_domain_lock);
	zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	if (zdev->domain == ZPCI_NR_DEVICES) {
		spin_unlock(&zpci_domain_lock);
		return -ENOSPC;
	}
	set_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return 0;
}

static void zpci_free_domain(struct zpci_dev *zdev)
{
	if (zdev->domain >= ZPCI_NR_DEVICES)
		return;

	spin_lock(&zpci_domain_lock);
	clear_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}

void pcibios_remove_bus(struct pci_bus *bus)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);

	zpci_exit_slot(zdev);
	zpci_cleanup_bus_resources(zdev);
	zpci_destroy_iommu(zdev);
	zpci_free_domain(zdev);

	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);

	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
	kfree(zdev);
}

static int zpci_scan_bus(struct zpci_dev *zdev)
{
	LIST_HEAD(resources);
	int ret;

	ret = zpci_setup_bus_resources(zdev, &resources);
	if (ret)
		goto error;

	zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
				      zdev, &resources);
	if (!zdev->bus) {
		ret = -EIO;
		goto error;
	}
	zdev->bus->max_bus_speed = zdev->max_bus_speed;
	pci_bus_add_devices(zdev->bus);
	return 0;

error:
	zpci_cleanup_bus_resources(zdev);
	pci_free_resource_list(&resources);
	return ret;
}

int zpci_enable_device(struct zpci_dev *zdev)
{
	int rc;

	rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (rc)
		goto out;

	rc = zpci_dma_init_device(zdev);
	if (rc)
		goto out_dma;

	zdev->state = ZPCI_FN_STATE_ONLINE;
	return 0;

out_dma:
	clp_disable_fh(zdev);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_disable_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	return clp_disable_fh(zdev);
}
EXPORT_SYMBOL_GPL(zpci_disable_device);
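/*
 * zpci_create_device() takes a detected function through its full
 * setup: domain allocation, IOMMU initialization, enabling (if the
 * function is already configured), root bus scan, addition to
 * zpci_list and hotplug slot registration. The error paths unwind
 * these steps in reverse order.
 */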
int zpci_create_device(struct zpci_dev *zdev)
{
	int rc;

	rc = zpci_alloc_domain(zdev);
	if (rc)
		goto out;

	rc = zpci_init_iommu(zdev);
	if (rc)
		goto out_free;

	mutex_init(&zdev->lock);
	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
		rc = zpci_enable_device(zdev);
		if (rc)
			goto out_destroy_iommu;
	}
	rc = zpci_scan_bus(zdev);
	if (rc)
		goto out_disable;

	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	zpci_init_slot(zdev);

	return 0;

out_disable:
	if (zdev->state == ZPCI_FN_STATE_ONLINE)
		zpci_disable_device(zdev);
out_destroy_iommu:
	zpci_destroy_iommu(zdev);
out_free:
	zpci_free_domain(zdev);
out:
	return rc;
}

void zpci_remove_device(struct zpci_dev *zdev)
{
	if (!zdev->bus)
		return;

	pci_stop_root_bus(zdev->bus);
	pci_remove_root_bus(zdev->bus);
}

int zpci_report_error(struct pci_dev *pdev,
		      struct zpci_report_error_header *report)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);

static int zpci_mem_init(void)
{
	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   __alignof__(struct zpci_fmb), 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
				   sizeof(*zpci_iomap_start), GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;

	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
	if (!zpci_iomap_bitmap)
		goto error_iomap_bitmap;

	return 0;
error_iomap_bitmap:
	kfree(zpci_iomap_start);
error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_bitmap);
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}

static unsigned int s390_pci_probe __initdata = 1;
static unsigned int s390_pci_no_mio __initdata;
unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;

char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;
	}
	if (!strcmp(str, "nomio")) {
		s390_pci_no_mio = 1;
		return NULL;
	}
	if (!strcmp(str, "force_floating")) {
		s390_pci_force_floating = 1;
		return NULL;
	}
	return str;
}

bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}
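/*
 * Base setup at boot: PCI support requires facilities 69 and 71; if
 * facility 153 is available and "nomio" was not given on the command
 * line, the MIO static key is enabled as well.
 */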
static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(69) || !test_facility(71))
		return 0;

	if (test_facility(153) && !s390_pci_no_mio) {
		static_branch_enable(&have_mio);
		ctl_set_bit(2, 5);
	}

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_scan_pci_devices();
	if (rc)
		goto out_find;

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);

void zpci_rescan(void)
{
	if (zpci_is_enabled())
		clp_rescan_pci_devices_simple();
}