1 /* 2 * Copyright IBM Corp. 2012 3 * 4 * Author(s): 5 * Jan Glauber <jang@linux.vnet.ibm.com> 6 * 7 * The System z PCI code is a rewrite from a prototype by 8 * the following people (Kudoz!): 9 * Alexander Schmidt 10 * Christoph Raisch 11 * Hannes Hering 12 * Hoang-Nam Nguyen 13 * Jan-Bernd Themann 14 * Stefan Roscher 15 * Thomas Klein 16 */ 17 18 #define COMPONENT "zPCI" 19 #define pr_fmt(fmt) COMPONENT ": " fmt 20 21 #include <linux/kernel.h> 22 #include <linux/slab.h> 23 #include <linux/err.h> 24 #include <linux/export.h> 25 #include <linux/delay.h> 26 #include <linux/irq.h> 27 #include <linux/kernel_stat.h> 28 #include <linux/seq_file.h> 29 #include <linux/pci.h> 30 #include <linux/msi.h> 31 32 #include <asm/isc.h> 33 #include <asm/airq.h> 34 #include <asm/facility.h> 35 #include <asm/pci_insn.h> 36 #include <asm/pci_clp.h> 37 #include <asm/pci_dma.h> 38 39 #define DEBUG /* enable pr_debug */ 40 41 #define SIC_IRQ_MODE_ALL 0 42 #define SIC_IRQ_MODE_SINGLE 1 43 44 #define ZPCI_NR_DMA_SPACES 1 45 #define ZPCI_MSI_VEC_BITS 6 46 #define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS 47 48 /* list of all detected zpci devices */ 49 LIST_HEAD(zpci_list); 50 EXPORT_SYMBOL_GPL(zpci_list); 51 DEFINE_MUTEX(zpci_list_lock); 52 EXPORT_SYMBOL_GPL(zpci_list_lock); 53 54 static struct pci_hp_callback_ops *hotplug_ops; 55 56 static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); 57 static DEFINE_SPINLOCK(zpci_domain_lock); 58 59 struct callback { 60 irq_handler_t handler; 61 void *data; 62 }; 63 64 struct zdev_irq_map { 65 unsigned long aibv; /* AI bit vector */ 66 int msi_vecs; /* consecutive MSI-vectors used */ 67 int __unused; 68 struct callback cb[ZPCI_NR_MSI_VECS]; /* callback handler array */ 69 spinlock_t lock; /* protect callbacks against de-reg */ 70 }; 71 72 struct intr_bucket { 73 /* amap of adapters, one bit per dev, corresponds to one irq nr */ 74 unsigned long *alloc; 75 /* AI summary bit, global page for all devices */ 76 unsigned long *aisb; 77 /* pointer to aibv and callback data in zdev */ 78 struct zdev_irq_map *imap[ZPCI_NR_DEVICES]; 79 /* protects the whole bucket struct */ 80 spinlock_t lock; 81 }; 82 83 static struct intr_bucket *bucket; 84 85 /* Adapter local summary indicator */ 86 static u8 *zpci_irq_si; 87 88 static atomic_t irq_retries = ATOMIC_INIT(0); 89 90 /* I/O Map */ 91 static DEFINE_SPINLOCK(zpci_iomap_lock); 92 static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES); 93 struct zpci_iomap_entry *zpci_iomap_start; 94 EXPORT_SYMBOL_GPL(zpci_iomap_start); 95 96 /* highest irq summary bit */ 97 static int __read_mostly aisb_max; 98 99 static struct kmem_cache *zdev_irq_cache; 100 static struct kmem_cache *zdev_fmb_cache; 101 102 static inline int irq_to_msi_nr(unsigned int irq) 103 { 104 return irq & ZPCI_MSI_MASK; 105 } 106 107 static inline int irq_to_dev_nr(unsigned int irq) 108 { 109 return irq >> ZPCI_MSI_VEC_BITS; 110 } 111 112 static inline struct zdev_irq_map *get_imap(unsigned int irq) 113 { 114 return bucket->imap[irq_to_dev_nr(irq)]; 115 } 116 117 struct zpci_dev *get_zdev(struct pci_dev *pdev) 118 { 119 return (struct zpci_dev *) pdev->sysdata; 120 } 121 122 struct zpci_dev *get_zdev_by_fid(u32 fid) 123 { 124 struct zpci_dev *tmp, *zdev = NULL; 125 126 mutex_lock(&zpci_list_lock); 127 list_for_each_entry(tmp, &zpci_list, entry) { 128 if (tmp->fid == fid) { 129 zdev = tmp; 130 break; 131 } 132 } 133 mutex_unlock(&zpci_list_lock); 134 return zdev; 135 } 136 137 bool zpci_fid_present(u32 fid) 138 { 139 return (get_zdev_by_fid(fid) != NULL) ? true : false; 140 } 141 142 static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus) 143 { 144 return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL; 145 } 146 147 int pci_domain_nr(struct pci_bus *bus) 148 { 149 return ((struct zpci_dev *) bus->sysdata)->domain; 150 } 151 EXPORT_SYMBOL_GPL(pci_domain_nr); 152 153 int pci_proc_domain(struct pci_bus *bus) 154 { 155 return pci_domain_nr(bus); 156 } 157 EXPORT_SYMBOL_GPL(pci_proc_domain); 158 159 /* Modify PCI: Register adapter interruptions */ 160 static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb, 161 u64 aibv) 162 { 163 u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT); 164 struct zpci_fib *fib; 165 int rc; 166 167 fib = (void *) get_zeroed_page(GFP_KERNEL); 168 if (!fib) 169 return -ENOMEM; 170 171 fib->isc = PCI_ISC; 172 fib->noi = zdev->irq_map->msi_vecs; 173 fib->sum = 1; /* enable summary notifications */ 174 fib->aibv = aibv; 175 fib->aibvo = 0; /* every function has its own page */ 176 fib->aisb = (u64) bucket->aisb + aisb / 8; 177 fib->aisbo = aisb & ZPCI_MSI_MASK; 178 179 rc = s390pci_mod_fc(req, fib); 180 pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi); 181 182 free_page((unsigned long) fib); 183 return rc; 184 } 185 186 struct mod_pci_args { 187 u64 base; 188 u64 limit; 189 u64 iota; 190 u64 fmb_addr; 191 }; 192 193 static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args) 194 { 195 u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn); 196 struct zpci_fib *fib; 197 int rc; 198 199 /* The FIB must be available even if it's not used */ 200 fib = (void *) get_zeroed_page(GFP_KERNEL); 201 if (!fib) 202 return -ENOMEM; 203 204 fib->pba = args->base; 205 fib->pal = args->limit; 206 fib->iota = args->iota; 207 fib->fmb_addr = args->fmb_addr; 208 209 rc = s390pci_mod_fc(req, fib); 210 free_page((unsigned long) fib); 211 return rc; 212 } 213 214 /* Modify PCI: Register I/O address translation parameters */ 215 int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas, 216 u64 base, u64 limit, u64 iota) 217 { 218 struct mod_pci_args args = { base, limit, iota, 0 }; 219 220 WARN_ON_ONCE(iota & 0x3fff); 221 args.iota |= ZPCI_IOTA_RTTO_FLAG; 222 return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args); 223 } 224 225 /* Modify PCI: Unregister I/O address translation parameters */ 226 int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas) 227 { 228 struct mod_pci_args args = { 0, 0, 0, 0 }; 229 230 return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args); 231 } 232 233 /* Modify PCI: Unregister adapter interruptions */ 234 static int zpci_unregister_airq(struct zpci_dev *zdev) 235 { 236 struct mod_pci_args args = { 0, 0, 0, 0 }; 237 238 return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args); 239 } 240 241 /* Modify PCI: Set PCI function measurement parameters */ 242 int zpci_fmb_enable_device(struct zpci_dev *zdev) 243 { 244 struct mod_pci_args args = { 0, 0, 0, 0 }; 245 246 if (zdev->fmb) 247 return -EINVAL; 248 249 zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL); 250 if (!zdev->fmb) 251 return -ENOMEM; 252 WARN_ON((u64) zdev->fmb & 0xf); 253 254 args.fmb_addr = virt_to_phys(zdev->fmb); 255 return mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args); 256 } 257 258 /* Modify PCI: Disable PCI function measurement */ 259 int zpci_fmb_disable_device(struct zpci_dev *zdev) 260 { 261 struct mod_pci_args args = { 0, 0, 0, 0 }; 262 int rc; 263 264 if (!zdev->fmb) 265 return -EINVAL; 266 267 /* Function measurement is disabled if fmb address is zero */ 268 rc = mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args); 269 270 kmem_cache_free(zdev_fmb_cache, zdev->fmb); 271 zdev->fmb = NULL; 272 return rc; 273 } 274 275 #define ZPCI_PCIAS_CFGSPC 15 276 277 static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len) 278 { 279 u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len); 280 u64 data; 281 int rc; 282 283 rc = s390pci_load(&data, req, offset); 284 if (!rc) { 285 data = data << ((8 - len) * 8); 286 data = le64_to_cpu(data); 287 *val = (u32) data; 288 } else 289 *val = 0xffffffff; 290 return rc; 291 } 292 293 static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len) 294 { 295 u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len); 296 u64 data = val; 297 int rc; 298 299 data = cpu_to_le64(data); 300 data = data >> ((8 - len) * 8); 301 rc = s390pci_store(data, req, offset); 302 return rc; 303 } 304 305 void synchronize_irq(unsigned int irq) 306 { 307 /* 308 * Not needed, the handler is protected by a lock and IRQs that occur 309 * after the handler is deleted are just NOPs. 310 */ 311 } 312 EXPORT_SYMBOL_GPL(synchronize_irq); 313 314 void enable_irq(unsigned int irq) 315 { 316 struct msi_desc *msi = irq_get_msi_desc(irq); 317 318 zpci_msi_set_mask_bits(msi, 1, 0); 319 } 320 EXPORT_SYMBOL_GPL(enable_irq); 321 322 void disable_irq(unsigned int irq) 323 { 324 struct msi_desc *msi = irq_get_msi_desc(irq); 325 326 zpci_msi_set_mask_bits(msi, 1, 1); 327 } 328 EXPORT_SYMBOL_GPL(disable_irq); 329 330 void disable_irq_nosync(unsigned int irq) 331 { 332 disable_irq(irq); 333 } 334 EXPORT_SYMBOL_GPL(disable_irq_nosync); 335 336 unsigned long probe_irq_on(void) 337 { 338 return 0; 339 } 340 EXPORT_SYMBOL_GPL(probe_irq_on); 341 342 int probe_irq_off(unsigned long val) 343 { 344 return 0; 345 } 346 EXPORT_SYMBOL_GPL(probe_irq_off); 347 348 unsigned int probe_irq_mask(unsigned long val) 349 { 350 return val; 351 } 352 EXPORT_SYMBOL_GPL(probe_irq_mask); 353 354 void pcibios_fixup_bus(struct pci_bus *bus) 355 { 356 } 357 358 resource_size_t pcibios_align_resource(void *data, const struct resource *res, 359 resource_size_t size, 360 resource_size_t align) 361 { 362 return 0; 363 } 364 365 /* combine single writes by using store-block insn */ 366 void __iowrite64_copy(void __iomem *to, const void *from, size_t count) 367 { 368 zpci_memcpy_toio(to, from, count); 369 } 370 371 /* Create a virtual mapping cookie for a PCI BAR */ 372 void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max) 373 { 374 struct zpci_dev *zdev = get_zdev(pdev); 375 u64 addr; 376 int idx; 377 378 if ((bar & 7) != bar) 379 return NULL; 380 381 idx = zdev->bars[bar].map_idx; 382 spin_lock(&zpci_iomap_lock); 383 zpci_iomap_start[idx].fh = zdev->fh; 384 zpci_iomap_start[idx].bar = bar; 385 spin_unlock(&zpci_iomap_lock); 386 387 addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48); 388 return (void __iomem *) addr; 389 } 390 EXPORT_SYMBOL_GPL(pci_iomap); 391 392 void pci_iounmap(struct pci_dev *pdev, void __iomem *addr) 393 { 394 unsigned int idx; 395 396 idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48; 397 spin_lock(&zpci_iomap_lock); 398 zpci_iomap_start[idx].fh = 0; 399 zpci_iomap_start[idx].bar = 0; 400 spin_unlock(&zpci_iomap_lock); 401 } 402 EXPORT_SYMBOL_GPL(pci_iounmap); 403 404 static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, 405 int size, u32 *val) 406 { 407 struct zpci_dev *zdev = get_zdev_by_bus(bus); 408 int ret; 409 410 if (!zdev || devfn != ZPCI_DEVFN) 411 ret = -ENODEV; 412 else 413 ret = zpci_cfg_load(zdev, where, val, size); 414 415 return ret; 416 } 417 418 static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, 419 int size, u32 val) 420 { 421 struct zpci_dev *zdev = get_zdev_by_bus(bus); 422 int ret; 423 424 if (!zdev || devfn != ZPCI_DEVFN) 425 ret = -ENODEV; 426 else 427 ret = zpci_cfg_store(zdev, where, val, size); 428 429 return ret; 430 } 431 432 static struct pci_ops pci_root_ops = { 433 .read = pci_read, 434 .write = pci_write, 435 }; 436 437 /* store the last handled bit to implement fair scheduling of devices */ 438 static DEFINE_PER_CPU(unsigned long, next_sbit); 439 440 static void zpci_irq_handler(void *dont, void *need) 441 { 442 unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit); 443 int rescan = 0, max = aisb_max; 444 struct zdev_irq_map *imap; 445 446 inc_irq_stat(IRQIO_PCI); 447 sbit = start; 448 449 scan: 450 /* find summary_bit */ 451 for_each_set_bit_left_cont(sbit, bucket->aisb, max) { 452 clear_bit(63 - (sbit & 63), bucket->aisb + (sbit >> 6)); 453 last = sbit; 454 455 /* find vector bit */ 456 imap = bucket->imap[sbit]; 457 for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) { 458 inc_irq_stat(IRQIO_MSI); 459 clear_bit(63 - mbit, &imap->aibv); 460 461 spin_lock(&imap->lock); 462 if (imap->cb[mbit].handler) 463 imap->cb[mbit].handler(mbit, 464 imap->cb[mbit].data); 465 spin_unlock(&imap->lock); 466 } 467 } 468 469 if (rescan) 470 goto out; 471 472 /* scan the skipped bits */ 473 if (start > 0) { 474 sbit = 0; 475 max = start; 476 start = 0; 477 goto scan; 478 } 479 480 /* enable interrupts again */ 481 set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); 482 483 /* check again to not lose initiative */ 484 rmb(); 485 max = aisb_max; 486 sbit = find_first_bit_left(bucket->aisb, max); 487 if (sbit != max) { 488 atomic_inc(&irq_retries); 489 rescan++; 490 goto scan; 491 } 492 out: 493 /* store next device bit to scan */ 494 __get_cpu_var(next_sbit) = (++last >= aisb_max) ? 0 : last; 495 } 496 497 /* msi_vecs - number of requested interrupts, 0 place function to error state */ 498 static int zpci_setup_msi(struct pci_dev *pdev, int msi_vecs) 499 { 500 struct zpci_dev *zdev = get_zdev(pdev); 501 unsigned int aisb, msi_nr; 502 struct msi_desc *msi; 503 int rc; 504 505 /* store the number of used MSI vectors */ 506 zdev->irq_map->msi_vecs = min(msi_vecs, ZPCI_NR_MSI_VECS); 507 508 spin_lock(&bucket->lock); 509 aisb = find_first_zero_bit(bucket->alloc, PAGE_SIZE); 510 /* alloc map exhausted? */ 511 if (aisb == PAGE_SIZE) { 512 spin_unlock(&bucket->lock); 513 return -EIO; 514 } 515 set_bit(aisb, bucket->alloc); 516 spin_unlock(&bucket->lock); 517 518 zdev->aisb = aisb; 519 if (aisb + 1 > aisb_max) 520 aisb_max = aisb + 1; 521 522 /* wire up IRQ shortcut pointer */ 523 bucket->imap[zdev->aisb] = zdev->irq_map; 524 pr_debug("%s: imap[%u] linked to %p\n", __func__, zdev->aisb, zdev->irq_map); 525 526 /* TODO: irq number 0 wont be found if we return less than requested MSIs. 527 * ignore it for now and fix in common code. 528 */ 529 msi_nr = aisb << ZPCI_MSI_VEC_BITS; 530 531 list_for_each_entry(msi, &pdev->msi_list, list) { 532 rc = zpci_setup_msi_irq(zdev, msi, msi_nr, 533 aisb << ZPCI_MSI_VEC_BITS); 534 if (rc) 535 return rc; 536 msi_nr++; 537 } 538 539 rc = zpci_register_airq(zdev, aisb, (u64) &zdev->irq_map->aibv); 540 if (rc) { 541 clear_bit(aisb, bucket->alloc); 542 dev_err(&pdev->dev, "register MSI failed with: %d\n", rc); 543 return rc; 544 } 545 return (zdev->irq_map->msi_vecs == msi_vecs) ? 546 0 : zdev->irq_map->msi_vecs; 547 } 548 549 static void zpci_teardown_msi(struct pci_dev *pdev) 550 { 551 struct zpci_dev *zdev = get_zdev(pdev); 552 struct msi_desc *msi; 553 int aisb, rc; 554 555 rc = zpci_unregister_airq(zdev); 556 if (rc) { 557 dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc); 558 return; 559 } 560 561 msi = list_first_entry(&pdev->msi_list, struct msi_desc, list); 562 aisb = irq_to_dev_nr(msi->irq); 563 564 list_for_each_entry(msi, &pdev->msi_list, list) 565 zpci_teardown_msi_irq(zdev, msi); 566 567 clear_bit(aisb, bucket->alloc); 568 if (aisb + 1 == aisb_max) 569 aisb_max--; 570 } 571 572 int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) 573 { 574 pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec); 575 if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI) 576 return -EINVAL; 577 return zpci_setup_msi(pdev, nvec); 578 } 579 580 void arch_teardown_msi_irqs(struct pci_dev *pdev) 581 { 582 pr_info("%s: on pdev: %p\n", __func__, pdev); 583 zpci_teardown_msi(pdev); 584 } 585 586 static void zpci_map_resources(struct zpci_dev *zdev) 587 { 588 struct pci_dev *pdev = zdev->pdev; 589 resource_size_t len; 590 int i; 591 592 for (i = 0; i < PCI_BAR_COUNT; i++) { 593 len = pci_resource_len(pdev, i); 594 if (!len) 595 continue; 596 pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0); 597 pdev->resource[i].end = pdev->resource[i].start + len - 1; 598 pr_debug("BAR%i: -> start: %Lx end: %Lx\n", 599 i, pdev->resource[i].start, pdev->resource[i].end); 600 } 601 }; 602 603 struct zpci_dev *zpci_alloc_device(void) 604 { 605 struct zpci_dev *zdev; 606 607 /* Alloc memory for our private pci device data */ 608 zdev = kzalloc(sizeof(*zdev), GFP_KERNEL); 609 if (!zdev) 610 return ERR_PTR(-ENOMEM); 611 612 /* Alloc aibv & callback space */ 613 zdev->irq_map = kmem_cache_zalloc(zdev_irq_cache, GFP_KERNEL); 614 if (!zdev->irq_map) 615 goto error; 616 WARN_ON((u64) zdev->irq_map & 0xff); 617 return zdev; 618 619 error: 620 kfree(zdev); 621 return ERR_PTR(-ENOMEM); 622 } 623 624 void zpci_free_device(struct zpci_dev *zdev) 625 { 626 kmem_cache_free(zdev_irq_cache, zdev->irq_map); 627 kfree(zdev); 628 } 629 630 /* 631 * Too late for any s390 specific setup, since interrupts must be set up 632 * already which requires DMA setup too and the pci scan will access the 633 * config space, which only works if the function handle is enabled. 634 */ 635 int pcibios_enable_device(struct pci_dev *pdev, int mask) 636 { 637 struct resource *res; 638 u16 cmd; 639 int i; 640 641 pci_read_config_word(pdev, PCI_COMMAND, &cmd); 642 643 for (i = 0; i < PCI_BAR_COUNT; i++) { 644 res = &pdev->resource[i]; 645 646 if (res->flags & IORESOURCE_IO) 647 return -EINVAL; 648 649 if (res->flags & IORESOURCE_MEM) 650 cmd |= PCI_COMMAND_MEMORY; 651 } 652 pci_write_config_word(pdev, PCI_COMMAND, cmd); 653 return 0; 654 } 655 656 int pcibios_add_platform_entries(struct pci_dev *pdev) 657 { 658 return zpci_sysfs_add_device(&pdev->dev); 659 } 660 661 int zpci_request_irq(unsigned int irq, irq_handler_t handler, void *data) 662 { 663 int msi_nr = irq_to_msi_nr(irq); 664 struct zdev_irq_map *imap; 665 struct msi_desc *msi; 666 667 msi = irq_get_msi_desc(irq); 668 if (!msi) 669 return -EIO; 670 671 imap = get_imap(irq); 672 spin_lock_init(&imap->lock); 673 674 pr_debug("%s: register handler for IRQ:MSI %d:%d\n", __func__, irq >> 6, msi_nr); 675 imap->cb[msi_nr].handler = handler; 676 imap->cb[msi_nr].data = data; 677 678 /* 679 * The generic MSI code returns with the interrupt disabled on the 680 * card, using the MSI mask bits. Firmware doesn't appear to unmask 681 * at that level, so we do it here by hand. 682 */ 683 zpci_msi_set_mask_bits(msi, 1, 0); 684 return 0; 685 } 686 687 void zpci_free_irq(unsigned int irq) 688 { 689 struct zdev_irq_map *imap = get_imap(irq); 690 int msi_nr = irq_to_msi_nr(irq); 691 unsigned long flags; 692 693 pr_debug("%s: for irq: %d\n", __func__, irq); 694 695 spin_lock_irqsave(&imap->lock, flags); 696 imap->cb[msi_nr].handler = NULL; 697 imap->cb[msi_nr].data = NULL; 698 spin_unlock_irqrestore(&imap->lock, flags); 699 } 700 701 int request_irq(unsigned int irq, irq_handler_t handler, 702 unsigned long irqflags, const char *devname, void *dev_id) 703 { 704 pr_debug("%s: irq: %d handler: %p flags: %lx dev: %s\n", 705 __func__, irq, handler, irqflags, devname); 706 707 return zpci_request_irq(irq, handler, dev_id); 708 } 709 EXPORT_SYMBOL_GPL(request_irq); 710 711 void free_irq(unsigned int irq, void *dev_id) 712 { 713 zpci_free_irq(irq); 714 } 715 EXPORT_SYMBOL_GPL(free_irq); 716 717 static int __init zpci_irq_init(void) 718 { 719 int cpu, rc; 720 721 bucket = kzalloc(sizeof(*bucket), GFP_KERNEL); 722 if (!bucket) 723 return -ENOMEM; 724 725 bucket->aisb = (unsigned long *) get_zeroed_page(GFP_KERNEL); 726 if (!bucket->aisb) { 727 rc = -ENOMEM; 728 goto out_aisb; 729 } 730 731 bucket->alloc = (unsigned long *) get_zeroed_page(GFP_KERNEL); 732 if (!bucket->alloc) { 733 rc = -ENOMEM; 734 goto out_alloc; 735 } 736 737 isc_register(PCI_ISC); 738 zpci_irq_si = s390_register_adapter_interrupt(&zpci_irq_handler, NULL, PCI_ISC); 739 if (IS_ERR(zpci_irq_si)) { 740 rc = PTR_ERR(zpci_irq_si); 741 zpci_irq_si = NULL; 742 goto out_ai; 743 } 744 745 for_each_online_cpu(cpu) 746 per_cpu(next_sbit, cpu) = 0; 747 748 spin_lock_init(&bucket->lock); 749 /* set summary to 1 to be called every time for the ISC */ 750 *zpci_irq_si = 1; 751 set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); 752 return 0; 753 754 out_ai: 755 isc_unregister(PCI_ISC); 756 free_page((unsigned long) bucket->alloc); 757 out_alloc: 758 free_page((unsigned long) bucket->aisb); 759 out_aisb: 760 kfree(bucket); 761 return rc; 762 } 763 764 static void zpci_irq_exit(void) 765 { 766 free_page((unsigned long) bucket->alloc); 767 free_page((unsigned long) bucket->aisb); 768 s390_unregister_adapter_interrupt(zpci_irq_si, PCI_ISC); 769 isc_unregister(PCI_ISC); 770 kfree(bucket); 771 } 772 773 void zpci_debug_info(struct zpci_dev *zdev, struct seq_file *m) 774 { 775 if (!zdev) 776 return; 777 778 seq_printf(m, "global irq retries: %u\n", atomic_read(&irq_retries)); 779 seq_printf(m, "aibv[0]:%016lx aibv[1]:%016lx aisb:%016lx\n", 780 get_imap(0)->aibv, get_imap(1)->aibv, *bucket->aisb); 781 } 782 783 static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size, 784 unsigned long flags, int domain) 785 { 786 struct resource *r; 787 char *name; 788 int rc; 789 790 r = kzalloc(sizeof(*r), GFP_KERNEL); 791 if (!r) 792 return ERR_PTR(-ENOMEM); 793 r->start = start; 794 r->end = r->start + size - 1; 795 r->flags = flags; 796 r->parent = &iomem_resource; 797 name = kmalloc(18, GFP_KERNEL); 798 if (!name) { 799 kfree(r); 800 return ERR_PTR(-ENOMEM); 801 } 802 sprintf(name, "PCI Bus: %04x:%02x", domain, ZPCI_BUS_NR); 803 r->name = name; 804 805 rc = request_resource(&iomem_resource, r); 806 if (rc) 807 pr_debug("request resource %pR failed\n", r); 808 return r; 809 } 810 811 static int zpci_alloc_iomap(struct zpci_dev *zdev) 812 { 813 int entry; 814 815 spin_lock(&zpci_iomap_lock); 816 entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES); 817 if (entry == ZPCI_IOMAP_MAX_ENTRIES) { 818 spin_unlock(&zpci_iomap_lock); 819 return -ENOSPC; 820 } 821 set_bit(entry, zpci_iomap); 822 spin_unlock(&zpci_iomap_lock); 823 return entry; 824 } 825 826 static void zpci_free_iomap(struct zpci_dev *zdev, int entry) 827 { 828 spin_lock(&zpci_iomap_lock); 829 memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry)); 830 clear_bit(entry, zpci_iomap); 831 spin_unlock(&zpci_iomap_lock); 832 } 833 834 int pcibios_add_device(struct pci_dev *pdev) 835 { 836 struct zpci_dev *zdev = get_zdev(pdev); 837 838 zdev->pdev = pdev; 839 zpci_debug_init_device(zdev); 840 zpci_fmb_enable_device(zdev); 841 zpci_map_resources(zdev); 842 843 return 0; 844 } 845 846 static int zpci_scan_bus(struct zpci_dev *zdev) 847 { 848 struct resource *res; 849 LIST_HEAD(resources); 850 int i; 851 852 /* allocate mapping entry for each used bar */ 853 for (i = 0; i < PCI_BAR_COUNT; i++) { 854 unsigned long addr, size, flags; 855 int entry; 856 857 if (!zdev->bars[i].size) 858 continue; 859 entry = zpci_alloc_iomap(zdev); 860 if (entry < 0) 861 return entry; 862 zdev->bars[i].map_idx = entry; 863 864 /* only MMIO is supported */ 865 flags = IORESOURCE_MEM; 866 if (zdev->bars[i].val & 8) 867 flags |= IORESOURCE_PREFETCH; 868 if (zdev->bars[i].val & 4) 869 flags |= IORESOURCE_MEM_64; 870 871 addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48); 872 873 size = 1UL << zdev->bars[i].size; 874 875 res = zpci_alloc_bus_resource(addr, size, flags, zdev->domain); 876 if (IS_ERR(res)) { 877 zpci_free_iomap(zdev, entry); 878 return PTR_ERR(res); 879 } 880 pci_add_resource(&resources, res); 881 } 882 883 zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops, 884 zdev, &resources); 885 if (!zdev->bus) 886 return -EIO; 887 888 zdev->bus->max_bus_speed = zdev->max_bus_speed; 889 return 0; 890 } 891 892 static int zpci_alloc_domain(struct zpci_dev *zdev) 893 { 894 spin_lock(&zpci_domain_lock); 895 zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES); 896 if (zdev->domain == ZPCI_NR_DEVICES) { 897 spin_unlock(&zpci_domain_lock); 898 return -ENOSPC; 899 } 900 set_bit(zdev->domain, zpci_domain); 901 spin_unlock(&zpci_domain_lock); 902 return 0; 903 } 904 905 static void zpci_free_domain(struct zpci_dev *zdev) 906 { 907 spin_lock(&zpci_domain_lock); 908 clear_bit(zdev->domain, zpci_domain); 909 spin_unlock(&zpci_domain_lock); 910 } 911 912 int zpci_enable_device(struct zpci_dev *zdev) 913 { 914 int rc; 915 916 rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES); 917 if (rc) 918 goto out; 919 pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid); 920 921 rc = zpci_dma_init_device(zdev); 922 if (rc) 923 goto out_dma; 924 return 0; 925 926 out_dma: 927 clp_disable_fh(zdev); 928 out: 929 return rc; 930 } 931 EXPORT_SYMBOL_GPL(zpci_enable_device); 932 933 int zpci_disable_device(struct zpci_dev *zdev) 934 { 935 zpci_dma_exit_device(zdev); 936 return clp_disable_fh(zdev); 937 } 938 EXPORT_SYMBOL_GPL(zpci_disable_device); 939 940 int zpci_create_device(struct zpci_dev *zdev) 941 { 942 int rc; 943 944 rc = zpci_alloc_domain(zdev); 945 if (rc) 946 goto out; 947 948 if (zdev->state == ZPCI_FN_STATE_CONFIGURED) { 949 rc = zpci_enable_device(zdev); 950 if (rc) 951 goto out_free; 952 953 zdev->state = ZPCI_FN_STATE_ONLINE; 954 } 955 rc = zpci_scan_bus(zdev); 956 if (rc) 957 goto out_disable; 958 959 mutex_lock(&zpci_list_lock); 960 list_add_tail(&zdev->entry, &zpci_list); 961 if (hotplug_ops) 962 hotplug_ops->create_slot(zdev); 963 mutex_unlock(&zpci_list_lock); 964 965 return 0; 966 967 out_disable: 968 if (zdev->state == ZPCI_FN_STATE_ONLINE) 969 zpci_disable_device(zdev); 970 out_free: 971 zpci_free_domain(zdev); 972 out: 973 return rc; 974 } 975 976 void zpci_stop_device(struct zpci_dev *zdev) 977 { 978 zpci_dma_exit_device(zdev); 979 /* 980 * Note: SCLP disables fh via set-pci-fn so don't 981 * do that here. 982 */ 983 } 984 EXPORT_SYMBOL_GPL(zpci_stop_device); 985 986 int zpci_scan_device(struct zpci_dev *zdev) 987 { 988 zdev->pdev = pci_scan_single_device(zdev->bus, ZPCI_DEVFN); 989 if (!zdev->pdev) { 990 pr_err("pci_scan_single_device failed for fid: 0x%x\n", 991 zdev->fid); 992 goto out; 993 } 994 995 pci_bus_add_devices(zdev->bus); 996 997 return 0; 998 out: 999 zpci_dma_exit_device(zdev); 1000 clp_disable_fh(zdev); 1001 return -EIO; 1002 } 1003 EXPORT_SYMBOL_GPL(zpci_scan_device); 1004 1005 static inline int barsize(u8 size) 1006 { 1007 return (size) ? (1 << size) >> 10 : 0; 1008 } 1009 1010 static int zpci_mem_init(void) 1011 { 1012 zdev_irq_cache = kmem_cache_create("PCI_IRQ_cache", sizeof(struct zdev_irq_map), 1013 L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL); 1014 if (!zdev_irq_cache) 1015 goto error_zdev; 1016 1017 zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb), 1018 16, 0, NULL); 1019 if (!zdev_fmb_cache) 1020 goto error_fmb; 1021 1022 /* TODO: use realloc */ 1023 zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start), 1024 GFP_KERNEL); 1025 if (!zpci_iomap_start) 1026 goto error_iomap; 1027 return 0; 1028 1029 error_iomap: 1030 kmem_cache_destroy(zdev_fmb_cache); 1031 error_fmb: 1032 kmem_cache_destroy(zdev_irq_cache); 1033 error_zdev: 1034 return -ENOMEM; 1035 } 1036 1037 static void zpci_mem_exit(void) 1038 { 1039 kfree(zpci_iomap_start); 1040 kmem_cache_destroy(zdev_irq_cache); 1041 kmem_cache_destroy(zdev_fmb_cache); 1042 } 1043 1044 void zpci_register_hp_ops(struct pci_hp_callback_ops *ops) 1045 { 1046 mutex_lock(&zpci_list_lock); 1047 hotplug_ops = ops; 1048 mutex_unlock(&zpci_list_lock); 1049 } 1050 EXPORT_SYMBOL_GPL(zpci_register_hp_ops); 1051 1052 void zpci_deregister_hp_ops(void) 1053 { 1054 mutex_lock(&zpci_list_lock); 1055 hotplug_ops = NULL; 1056 mutex_unlock(&zpci_list_lock); 1057 } 1058 EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops); 1059 1060 unsigned int s390_pci_probe; 1061 EXPORT_SYMBOL_GPL(s390_pci_probe); 1062 1063 char * __init pcibios_setup(char *str) 1064 { 1065 if (!strcmp(str, "on")) { 1066 s390_pci_probe = 1; 1067 return NULL; 1068 } 1069 return str; 1070 } 1071 1072 static int __init pci_base_init(void) 1073 { 1074 int rc; 1075 1076 if (!s390_pci_probe) 1077 return 0; 1078 1079 if (!test_facility(2) || !test_facility(69) 1080 || !test_facility(71) || !test_facility(72)) 1081 return 0; 1082 1083 pr_info("Probing PCI hardware: PCI:%d SID:%d AEN:%d\n", 1084 test_facility(69), test_facility(70), 1085 test_facility(71)); 1086 1087 rc = zpci_debug_init(); 1088 if (rc) 1089 return rc; 1090 1091 rc = zpci_mem_init(); 1092 if (rc) 1093 goto out_mem; 1094 1095 rc = zpci_msihash_init(); 1096 if (rc) 1097 goto out_hash; 1098 1099 rc = zpci_irq_init(); 1100 if (rc) 1101 goto out_irq; 1102 1103 rc = zpci_dma_init(); 1104 if (rc) 1105 goto out_dma; 1106 1107 rc = clp_find_pci_devices(); 1108 if (rc) 1109 goto out_find; 1110 1111 return 0; 1112 1113 out_find: 1114 zpci_dma_exit(); 1115 out_dma: 1116 zpci_irq_exit(); 1117 out_irq: 1118 zpci_msihash_exit(); 1119 out_hash: 1120 zpci_mem_exit(); 1121 out_mem: 1122 zpci_debug_exit(); 1123 return rc; 1124 } 1125 subsys_initcall(pci_base_init); 1126