/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/pci.h>
#include <linux/msi.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#define DEBUG				/* enable pr_debug */

#define SIC_IRQ_MODE_ALL		0
#define SIC_IRQ_MODE_SINGLE		1

#define ZPCI_NR_DMA_SPACES		1
#define ZPCI_MSI_VEC_BITS		6
#define ZPCI_NR_DEVICES			CONFIG_PCI_NR_FUNCTIONS

/* list of all detected zpci devices */
LIST_HEAD(zpci_list);
EXPORT_SYMBOL_GPL(zpci_list);
DEFINE_MUTEX(zpci_list_lock);
EXPORT_SYMBOL_GPL(zpci_list_lock);

struct pci_hp_callback_ops hotplug_ops;
EXPORT_SYMBOL_GPL(hotplug_ops);

static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);

struct callback {
	irq_handler_t	handler;
	void		*data;
};

struct zdev_irq_map {
	unsigned long	aibv;		/* AI bit vector */
	int		msi_vecs;	/* consecutive MSI-vectors used */
	int		__unused;
	struct callback	cb[ZPCI_NR_MSI_VECS]; /* callback handler array */
	spinlock_t	lock;		/* protect callbacks against de-reg */
};

struct intr_bucket {
	/* allocation map of adapters, one bit per device; each bit corresponds to one irq nr */
	unsigned long	*alloc;
	/* AI summary bit, global page for all devices */
	unsigned long	*aisb;
	/* pointer to aibv and callback data in zdev */
	struct zdev_irq_map *imap[ZPCI_NR_DEVICES];
	/* protects the whole bucket struct */
	spinlock_t	lock;
};

static struct intr_bucket *bucket;

/* Adapter local summary indicator */
static u8 *zpci_irq_si;

static atomic_t irq_retries = ATOMIC_INIT(0);

/* I/O Map */
static DEFINE_SPINLOCK(zpci_iomap_lock);
static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

/* highest irq summary bit */
static int __read_mostly aisb_max;

static struct kmem_cache *zdev_irq_cache;
static struct kmem_cache *zdev_fmb_cache;

debug_info_t *pci_debug_msg_id;
debug_info_t *pci_debug_err_id;

static inline int irq_to_msi_nr(unsigned int irq)
{
	return irq & ZPCI_MSI_MASK;
}

static inline int irq_to_dev_nr(unsigned int irq)
{
	return irq >> ZPCI_MSI_VEC_BITS;
}

static inline struct zdev_irq_map *get_imap(unsigned int irq)
{
	return bucket->imap[irq_to_dev_nr(irq)];
}
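
/*
 * Virtual IRQ numbers encode both the owning function and the vector:
 * the adapter summary bit number (aisb) lives in the bits above
 * ZPCI_MSI_VEC_BITS and the MSI vector number in the low
 * ZPCI_MSI_VEC_BITS bits, i.e.
 * irq = (aisb << ZPCI_MSI_VEC_BITS) | msi_nr (see zpci_setup_msi()).
 */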
struct zpci_dev *get_zdev(struct pci_dev *pdev)
{
	return (struct zpci_dev *) pdev->sysdata;
}

struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	mutex_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			break;
		}
	}
	mutex_unlock(&zpci_list_lock);
	return zdev;
}

bool zpci_fid_present(u32 fid)
{
	return (get_zdev_by_fid(fid) != NULL) ? true : false;
}

static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
{
	return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_dev *) bus->sysdata)->domain;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Modify PCI: Register adapter interruptions */
static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
			      u64 aibv)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
	struct zpci_fib *fib;
	int rc;

	fib = (void *) get_zeroed_page(GFP_KERNEL);
	if (!fib)
		return -ENOMEM;

	fib->isc = PCI_ISC;
	fib->noi = zdev->irq_map->msi_vecs;
	fib->sum = 1;		/* enable summary notifications */
	fib->aibv = aibv;
	fib->aibvo = 0;		/* every function has its own page */
	fib->aisb = (u64) bucket->aisb + aisb / 8;
	fib->aisbo = aisb & ZPCI_MSI_MASK;

	rc = mpcifc_instr(req, fib);
	pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);

	free_page((unsigned long) fib);
	return rc;
}

struct mod_pci_args {
	u64 base;
	u64 limit;
	u64 iota;
	u64 fmb_addr;
};

static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
	struct zpci_fib *fib;
	int rc;

	/* The FIB must be available even if it's not used */
	fib = (void *) get_zeroed_page(GFP_KERNEL);
	if (!fib)
		return -ENOMEM;

	fib->pba = args->base;
	fib->pal = args->limit;
	fib->iota = args->iota;
	fib->fmb_addr = args->fmb_addr;

	rc = mpcifc_instr(req, fib);
	free_page((unsigned long) fib);
	return rc;
}

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	struct mod_pci_args args = { base, limit, iota, 0 };

	WARN_ON_ONCE(iota & 0x3fff);
	args.iota |= ZPCI_IOTA_RTTO_FLAG;
	return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args);
}

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args);
}

/* Modify PCI: Unregister adapter interruptions */
static int zpci_unregister_airq(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args);
}

/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	if (zdev->fmb)
		return -EINVAL;

	zdev->fmb = kmem_cache_alloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	memset(zdev->fmb, 0, sizeof(*zdev->fmb));
	WARN_ON((u64) zdev->fmb & 0xf);

	args.fmb_addr = virt_to_phys(zdev->fmb);
	return mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);
}
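
/*
 * Note: the function measurement block handed to the hardware must be
 * 16-byte aligned; the WARN_ON in zpci_fmb_enable_device() relies on
 * zdev_fmb_cache being created with an explicit alignment of 16 in
 * zpci_mem_init().
 */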
/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };
	int rc;

	if (!zdev->fmb)
		return -EINVAL;

	/* Function measurement is disabled if fmb address is zero */
	rc = mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);

	kmem_cache_free(zdev_fmb_cache, zdev->fmb);
	zdev->fmb = NULL;
	return rc;
}

#define ZPCI_PCIAS_CFGSPC	15

static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = pcilg_instr(&data, req, offset);
	data = data << ((8 - len) * 8);
	data = le64_to_cpu(data);
	if (!rc)
		*val = (u32) data;
	else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data = cpu_to_le64(data);
	data = data >> ((8 - len) * 8);
	rc = pcistg_instr(data, req, offset);
	return rc;
}

void synchronize_irq(unsigned int irq)
{
	/*
	 * Not needed, the handler is protected by a lock and IRQs that occur
	 * after the handler is deleted are just NOPs.
	 */
}
EXPORT_SYMBOL_GPL(synchronize_irq);

void enable_irq(unsigned int irq)
{
	struct msi_desc *msi = irq_get_msi_desc(irq);

	zpci_msi_set_mask_bits(msi, 1, 0);
}
EXPORT_SYMBOL_GPL(enable_irq);

void disable_irq(unsigned int irq)
{
	struct msi_desc *msi = irq_get_msi_desc(irq);

	zpci_msi_set_mask_bits(msi, 1, 1);
}
EXPORT_SYMBOL_GPL(disable_irq);

void disable_irq_nosync(unsigned int irq)
{
	disable_irq(irq);
}
EXPORT_SYMBOL_GPL(disable_irq_nosync);

unsigned long probe_irq_on(void)
{
	return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_on);

int probe_irq_off(unsigned long val)
{
	return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_off);

unsigned int probe_irq_mask(unsigned long val)
{
	return val;
}
EXPORT_SYMBOL_GPL(probe_irq_mask);

void pcibios_fixup_bus(struct pci_bus *bus)
{
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}

/* Create a virtual mapping cookie for a PCI BAR */
void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	u64 addr;
	int idx;

	if ((bar & 7) != bar)
		return NULL;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
	return (void __iomem *) addr;
}
EXPORT_SYMBOL_GPL(pci_iomap);

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx;

	idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
	spin_lock(&zpci_iomap_lock);
	zpci_iomap_start[idx].fh = 0;
	zpci_iomap_start[idx].bar = 0;
	spin_unlock(&zpci_iomap_lock);
}
EXPORT_SYMBOL_GPL(pci_iounmap);
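
/*
 * Note: the cookies returned by pci_iomap() are not CPU addresses. Bits
 * 48 and up select a zpci_iomap_start[] entry holding the function
 * handle and BAR; the zpci I/O helpers (see __iowrite64_copy() above)
 * decode the cookie again on access, as there is no memory-mapped PCI
 * on System z.
 */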
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);

	if (!zdev || devfn != ZPCI_DEVFN)
		return 0;
	return zpci_cfg_load(zdev, where, val, size);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);

	if (!zdev || devfn != ZPCI_DEVFN)
		return 0;
	return zpci_cfg_store(zdev, where, val, size);
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

/* store the last handled bit to implement fair scheduling of devices */
static DEFINE_PER_CPU(unsigned long, next_sbit);

static void zpci_irq_handler(void *dont, void *need)
{
	unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit);
	int rescan = 0, max = aisb_max;
	struct zdev_irq_map *imap;

	inc_irq_stat(IRQIO_PCI);
	sbit = start;

scan:
	/* find summary_bit */
	for_each_set_bit_left_cont(sbit, bucket->aisb, max) {
		clear_bit(63 - (sbit & 63), bucket->aisb + (sbit >> 6));
		last = sbit;

		/* find vector bit */
		imap = bucket->imap[sbit];
		for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) {
			inc_irq_stat(IRQIO_MSI);
			clear_bit(63 - mbit, &imap->aibv);

			spin_lock(&imap->lock);
			if (imap->cb[mbit].handler)
				imap->cb[mbit].handler(mbit,
					imap->cb[mbit].data);
			spin_unlock(&imap->lock);
		}
	}

	if (rescan)
		goto out;

	/* scan the skipped bits */
	if (start > 0) {
		sbit = 0;
		max = start;
		start = 0;
		goto scan;
	}

	/* enable interrupts again */
	sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);

	/* check again to not lose initiative */
	rmb();
	max = aisb_max;
	sbit = find_first_bit_left(bucket->aisb, max);
	if (sbit != max) {
		atomic_inc(&irq_retries);
		rescan++;
		goto scan;
	}
out:
	/* store next device bit to scan */
	__get_cpu_var(next_sbit) = (++last >= aisb_max) ? 0 : last;
}

/* msi_vecs - number of requested interrupts; 0 places the function into the error state */
static int zpci_setup_msi(struct pci_dev *pdev, int msi_vecs)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	unsigned int aisb, msi_nr;
	struct msi_desc *msi;
	int rc;

	/* store the number of used MSI vectors */
	zdev->irq_map->msi_vecs = min(msi_vecs, ZPCI_NR_MSI_VECS);

	spin_lock(&bucket->lock);
	aisb = find_first_zero_bit(bucket->alloc, PAGE_SIZE);
	/* alloc map exhausted? */
	if (aisb == PAGE_SIZE) {
		spin_unlock(&bucket->lock);
		return -EIO;
	}
	set_bit(aisb, bucket->alloc);
	spin_unlock(&bucket->lock);

	zdev->aisb = aisb;
	if (aisb + 1 > aisb_max)
		aisb_max = aisb + 1;

	/* wire up IRQ shortcut pointer */
	bucket->imap[zdev->aisb] = zdev->irq_map;
	pr_debug("%s: imap[%u] linked to %p\n", __func__, zdev->aisb, zdev->irq_map);

	/* TODO: irq number 0 won't be found if we return less than the
	 * requested MSIs. Ignore it for now and fix in common code.
	 */
	msi_nr = aisb << ZPCI_MSI_VEC_BITS;

	list_for_each_entry(msi, &pdev->msi_list, list) {
		rc = zpci_setup_msi_irq(zdev, msi, msi_nr,
					aisb << ZPCI_MSI_VEC_BITS);
		if (rc)
			return rc;
		msi_nr++;
	}

	rc = zpci_register_airq(zdev, aisb, (u64) &zdev->irq_map->aibv);
	if (rc) {
		clear_bit(aisb, bucket->alloc);
		dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
		return rc;
	}
	return (zdev->irq_map->msi_vecs == msi_vecs) ?
		0 : zdev->irq_map->msi_vecs;
}
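
/*
 * Note: a positive return value from zpci_setup_msi() reports that only
 * that many MSI vectors could be provided. Per the arch_setup_msi_irqs()
 * convention of this kernel generation, the generic MSI code may then
 * retry the allocation with the smaller number.
 */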
static void zpci_teardown_msi(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	struct msi_desc *msi;
	int aisb, rc;

	rc = zpci_unregister_airq(zdev);
	if (rc) {
		dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
		return;
	}

	msi = list_first_entry(&pdev->msi_list, struct msi_desc, list);
	aisb = irq_to_dev_nr(msi->irq);

	list_for_each_entry(msi, &pdev->msi_list, list)
		zpci_teardown_msi_irq(zdev, msi);

	clear_bit(aisb, bucket->alloc);
	if (aisb + 1 == aisb_max)
		aisb_max--;
}

int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
	if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
		return -EINVAL;
	return zpci_setup_msi(pdev, nvec);
}

void arch_teardown_msi_irqs(struct pci_dev *pdev)
{
	pr_info("%s: on pdev: %p\n", __func__, pdev);
	zpci_teardown_msi(pdev);
}

static void zpci_map_resources(struct zpci_dev *zdev)
{
	struct pci_dev *pdev = zdev->pdev;
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
		pr_debug("BAR%i: -> start: %Lx end: %Lx\n",
			 i, pdev->resource[i].start, pdev->resource[i].end);
	}
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap(pdev, (void *) pdev->resource[i].start);
	}
}

struct zpci_dev *zpci_alloc_device(void)
{
	struct zpci_dev *zdev;

	/* Alloc memory for our private pci device data */
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return ERR_PTR(-ENOMEM);

	/* Alloc aibv & callback space */
	zdev->irq_map = kmem_cache_zalloc(zdev_irq_cache, GFP_KERNEL);
	if (!zdev->irq_map)
		goto error;
	/* the aibv must be 256-byte aligned; zdev_irq_cache provides this */
	WARN_ON((u64) zdev->irq_map & 0xff);
	return zdev;

error:
	kfree(zdev);
	return ERR_PTR(-ENOMEM);
}

void zpci_free_device(struct zpci_dev *zdev)
{
	kmem_cache_free(zdev_irq_cache, zdev->irq_map);
	kfree(zdev);
}

/* Called on removal of a pci_dev, leaves the zpci and bus device */
static void zpci_remove_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);

	dev_info(&pdev->dev, "Removing device %u\n", zdev->domain);
	zdev->state = ZPCI_FN_STATE_CONFIGURED;
	zpci_dma_exit_device(zdev);
	zpci_fmb_disable_device(zdev);
	zpci_sysfs_remove_device(&pdev->dev);
	zpci_unmap_resources(pdev);
	list_del(&zdev->entry);		/* can be called from init */
	zdev->pdev = NULL;
}
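
/*
 * Runs from pci_base_init() once clp_find_pci_devices() has populated
 * zpci_list; creates a pci_dev for each function that is already in the
 * CONFIGURED state.
 */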
static void zpci_scan_devices(void)
{
	struct zpci_dev *zdev;

	mutex_lock(&zpci_list_lock);
	list_for_each_entry(zdev, &zpci_list, entry)
		if (zdev->state == ZPCI_FN_STATE_CONFIGURED)
			zpci_scan_device(zdev);
	mutex_unlock(&zpci_list_lock);
}

/*
 * Too late for any s390 specific setup, since interrupts must be set up
 * already, which requires DMA setup too, and the PCI scan will access the
 * config space, which only works if the function handle is enabled.
 */
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct resource *res;
	u16 cmd;
	int i;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		res = &pdev->resource[i];

		if (res->flags & IORESOURCE_IO)
			return -EINVAL;

		if (res->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	pci_write_config_word(pdev, PCI_COMMAND, cmd);
	return 0;
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	zpci_remove_device(pdev);
	pdev->sysdata = NULL;
}

int pcibios_add_platform_entries(struct pci_dev *pdev)
{
	return zpci_sysfs_add_device(&pdev->dev);
}

int zpci_request_irq(unsigned int irq, irq_handler_t handler, void *data)
{
	int msi_nr = irq_to_msi_nr(irq);
	struct zdev_irq_map *imap;
	struct msi_desc *msi;

	msi = irq_get_msi_desc(irq);
	if (!msi)
		return -EIO;

	imap = get_imap(irq);
	spin_lock_init(&imap->lock);

	pr_debug("%s: register handler for IRQ:MSI %d:%d\n", __func__, irq >> 6, msi_nr);
	imap->cb[msi_nr].handler = handler;
	imap->cb[msi_nr].data = data;

	/*
	 * The generic MSI code returns with the interrupt disabled on the
	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
	 * at that level, so we do it here by hand.
	 */
	zpci_msi_set_mask_bits(msi, 1, 0);
	return 0;
}

void zpci_free_irq(unsigned int irq)
{
	struct zdev_irq_map *imap = get_imap(irq);
	int msi_nr = irq_to_msi_nr(irq);
	unsigned long flags;

	pr_debug("%s: for irq: %d\n", __func__, irq);

	spin_lock_irqsave(&imap->lock, flags);
	imap->cb[msi_nr].handler = NULL;
	imap->cb[msi_nr].data = NULL;
	spin_unlock_irqrestore(&imap->lock, flags);
}

int request_irq(unsigned int irq, irq_handler_t handler,
		unsigned long irqflags, const char *devname, void *dev_id)
{
	pr_debug("%s: irq: %d handler: %p flags: %lx dev: %s\n",
		 __func__, irq, handler, irqflags, devname);

	return zpci_request_irq(irq, handler, dev_id);
}
EXPORT_SYMBOL_GPL(request_irq);

void free_irq(unsigned int irq, void *dev_id)
{
	zpci_free_irq(irq);
}
EXPORT_SYMBOL_GPL(free_irq);
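
/*
 * Note: s390 does not use the generic IRQ subsystem here; the
 * request_irq()/free_irq() definitions above provide those entry points
 * and route handlers into the per-vector callback table that
 * zpci_irq_handler() dispatches from.
 */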
static int __init zpci_irq_init(void)
{
	int cpu, rc;

	bucket = kzalloc(sizeof(*bucket), GFP_KERNEL);
	if (!bucket)
		return -ENOMEM;

	bucket->aisb = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!bucket->aisb) {
		rc = -ENOMEM;
		goto out_aisb;
	}

	bucket->alloc = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!bucket->alloc) {
		rc = -ENOMEM;
		goto out_alloc;
	}

	isc_register(PCI_ISC);
	zpci_irq_si = s390_register_adapter_interrupt(&zpci_irq_handler, NULL, PCI_ISC);
	if (IS_ERR(zpci_irq_si)) {
		rc = PTR_ERR(zpci_irq_si);
		zpci_irq_si = NULL;
		goto out_ai;
	}

	for_each_online_cpu(cpu)
		per_cpu(next_sbit, cpu) = 0;

	spin_lock_init(&bucket->lock);
	/* set summary to 1 to be called every time for the ISC */
	*zpci_irq_si = 1;
	sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
	return 0;

out_ai:
	isc_unregister(PCI_ISC);
	free_page((unsigned long) bucket->alloc);
out_alloc:
	free_page((unsigned long) bucket->aisb);
out_aisb:
	kfree(bucket);
	return rc;
}

static void zpci_irq_exit(void)
{
	free_page((unsigned long) bucket->alloc);
	free_page((unsigned long) bucket->aisb);
	s390_unregister_adapter_interrupt(zpci_irq_si, PCI_ISC);
	isc_unregister(PCI_ISC);
	kfree(bucket);
}

void zpci_debug_info(struct zpci_dev *zdev, struct seq_file *m)
{
	if (!zdev)
		return;

	seq_printf(m, "global irq retries: %u\n", atomic_read(&irq_retries));
	seq_printf(m, "aibv[0]:%016lx aibv[1]:%016lx aisb:%016lx\n",
		   get_imap(0)->aibv, get_imap(1)->aibv, *bucket->aisb);
}

static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
						unsigned long flags, int domain)
{
	struct resource *r;
	char *name;
	int rc;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return ERR_PTR(-ENOMEM);
	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->parent = &iomem_resource;
	name = kmalloc(18, GFP_KERNEL);
	if (!name) {
		kfree(r);
		return ERR_PTR(-ENOMEM);
	}
	sprintf(name, "PCI Bus: %04x:%02x", domain, ZPCI_BUS_NR);
	r->name = name;

	rc = request_resource(&iomem_resource, r);
	if (rc)
		pr_debug("request resource %pR failed\n", r);
	return r;
}

static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	int entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
	if (entry == ZPCI_IOMAP_MAX_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap);
	spin_unlock(&zpci_iomap_lock);
}

static int zpci_create_device_bus(struct zpci_dev *zdev)
{
	struct resource *res;
	LIST_HEAD(resources);
	int i;

	/* allocate mapping entry for each used bar */
	for (i = 0; i < PCI_BAR_COUNT; i++) {
		unsigned long addr, size, flags;
		int entry;

		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);

		size = 1UL << zdev->bars[i].size;

		res = zpci_alloc_bus_resource(addr, size, flags, zdev->domain);
		if (IS_ERR(res)) {
			zpci_free_iomap(zdev, entry);
			return PTR_ERR(res);
		}
		pci_add_resource(&resources, res);
	}

	zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
					zdev, &resources);
	if (!zdev->bus)
		return -EIO;

	zdev->bus->max_bus_speed = zdev->max_bus_speed;
	return 0;
}

static int zpci_alloc_domain(struct zpci_dev *zdev)
{
	spin_lock(&zpci_domain_lock);
	zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	if (zdev->domain == ZPCI_NR_DEVICES) {
		spin_unlock(&zpci_domain_lock);
		return -ENOSPC;
	}
	set_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return 0;
}

static void zpci_free_domain(struct zpci_dev *zdev)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}
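
/*
 * Each zPCI function is presented as its own PCI domain with a single
 * root bus: the bus number is always ZPCI_BUS_NR and the only valid
 * devfn is ZPCI_DEVFN (see pci_read()/pci_write()), so the domain number
 * alone identifies a function.
 */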
int zpci_enable_device(struct zpci_dev *zdev)
{
	int rc;

	rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (rc)
		goto out;
	pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid);

	rc = zpci_dma_init_device(zdev);
	if (rc)
		goto out_dma;
	return 0;

out_dma:
	clp_disable_fh(zdev);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_create_device(struct zpci_dev *zdev)
{
	int rc;

	rc = zpci_alloc_domain(zdev);
	if (rc)
		goto out;

	rc = zpci_create_device_bus(zdev);
	if (rc)
		goto out_bus;

	mutex_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	if (hotplug_ops.create_slot)
		hotplug_ops.create_slot(zdev);
	mutex_unlock(&zpci_list_lock);

	if (zdev->state == ZPCI_FN_STATE_STANDBY)
		return 0;

	rc = zpci_enable_device(zdev);
	if (rc)
		goto out_start;
	return 0;

out_start:
	mutex_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	if (hotplug_ops.remove_slot)
		hotplug_ops.remove_slot(zdev);
	mutex_unlock(&zpci_list_lock);
out_bus:
	zpci_free_domain(zdev);
out:
	return rc;
}

void zpci_stop_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	/*
	 * Note: SCLP disables fh via set-pci-fn so don't
	 * do that here.
	 */
}
EXPORT_SYMBOL_GPL(zpci_stop_device);

int zpci_scan_device(struct zpci_dev *zdev)
{
	zdev->pdev = pci_scan_single_device(zdev->bus, ZPCI_DEVFN);
	if (!zdev->pdev) {
		pr_err("pci_scan_single_device failed for fid: 0x%x\n",
		       zdev->fid);
		goto out;
	}

	zpci_debug_init_device(zdev);
	zpci_fmb_enable_device(zdev);
	zpci_map_resources(zdev);
	pci_bus_add_devices(zdev->bus);

	/* now that pdev was added to the bus mark it as used */
	zdev->state = ZPCI_FN_STATE_ONLINE;
	return 0;

out:
	zpci_dma_exit_device(zdev);
	clp_disable_fh(zdev);
	return -EIO;
}
EXPORT_SYMBOL_GPL(zpci_scan_device);
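
/* Return the BAR size in kilobytes; BAR sizes are stored as a power-of-two exponent. */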
static inline int barsize(u8 size)
{
	return (size) ? (1 << size) >> 10 : 0;
}

static int zpci_mem_init(void)
{
	zdev_irq_cache = kmem_cache_create("PCI_IRQ_cache", sizeof(struct zdev_irq_map),
				L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL);
	if (!zdev_irq_cache)
		goto error_zdev;

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
				16, 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	/* TODO: use realloc */
	zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
				   GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;
	return 0;

error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	kmem_cache_destroy(zdev_irq_cache);
error_zdev:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_irq_cache);
	kmem_cache_destroy(zdev_fmb_cache);
}

unsigned int pci_probe = 1;
EXPORT_SYMBOL_GPL(pci_probe);

char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		pci_probe = 0;
		return NULL;
	}
	return str;
}

static int __init pci_base_init(void)
{
	int rc;

	if (!pci_probe)
		return 0;

	if (!test_facility(2) || !test_facility(69)
	    || !test_facility(71) || !test_facility(72))
		return 0;

	pr_info("Probing PCI hardware: PCI:%d SID:%d AEN:%d\n",
		test_facility(69), test_facility(70),
		test_facility(71));

	rc = zpci_debug_init();
	if (rc)
		return rc;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_msihash_init();
	if (rc)
		goto out_hash;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_find_pci_devices();
	if (rc)
		goto out_find;

	zpci_scan_devices();
	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_msihash_exit();
out_hash:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
	return rc;
}
subsys_initcall(pci_base_init);