/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>

#include "powernv.h"
#include "pci.h"

struct resource_wrap {
	struct list_head	link;
	resource_size_t		size;
	resource_size_t		align;
	struct pci_dev		*dev;	/* Set if it's a device */
	struct pci_bus		*bus;	/* Set if it's a bridge */
};

static int __pe_printk(const char *level, const struct pnv_ioda_pe *pe,
		       struct va_format *vaf)
{
	char pfix[32];

	if (pe->pdev)
		strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
	else
		sprintf(pfix, "%04x:%02x ",
			pci_domain_nr(pe->pbus), pe->pbus->number);
	/* The level string must lead the format for printk to honour it */
	return printk("%spci %s: [PE# %.3d] %pV",
		      level, pfix, pe->pe_number, vaf);
}

#define define_pe_printk_level(func, kern_level)			\
static int func(const struct pnv_ioda_pe *pe, const char *fmt, ...)	\
{									\
	struct va_format vaf;						\
	va_list args;							\
	int r;								\
									\
	va_start(args, fmt);						\
									\
	vaf.fmt = fmt;							\
	vaf.va = &args;							\
									\
	r = __pe_printk(kern_level, pe, &vaf);				\
	va_end(args);							\
									\
	return r;							\
}

define_pe_printk_level(pe_err, KERN_ERR);
define_pe_printk_level(pe_warn, KERN_WARNING);
define_pe_printk_level(pe_info, KERN_INFO);
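
/* With a device-bound PE the prefix is the device name, so e.g.
 * pe_info(pe, "Associated device to PE\n") logs something like
 *
 *   pci 0002:01:00.0: [PE# 004] Associated device to PE
 *
 * (values illustrative); bus-bound PEs fall back to a "<domain>:<bus>"
 * prefix since there is no single device to name.
 */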

/* Calculate resource usage & alignment requirement of a single
 * device. This will also assign all resources within the device
 * for a given type, starting at 0 for the biggest one and then
 * assigning in decreasing order of size.
 */
static void __devinit pnv_ioda_calc_dev(struct pci_dev *dev, unsigned int flags,
					resource_size_t *size,
					resource_size_t *align)
{
	resource_size_t start;
	struct resource *r;
	int i;

	pr_devel(" -> CDR %s\n", pci_name(dev));

	*size = *align = 0;

	/* Clear the resources out and mark them all unset */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		r = &dev->resource[i];
		if (!(r->flags & flags))
			continue;
		if (r->start) {
			r->end -= r->start;
			r->start = 0;
		}
		r->flags |= IORESOURCE_UNSET;
	}

	/* We currently keep all memory resources together, we
	 * will handle prefetch & 64-bit separately in the future
	 * but for now we stick everybody in M32
	 */
	start = 0;
	for (;;) {
		resource_size_t max_size = 0;
		int max_no = -1;

		/* Find next biggest resource */
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_UNSET) ||
			    !(r->flags & flags))
				continue;
			if (resource_size(r) > max_size) {
				max_size = resource_size(r);
				max_no = i;
			}
		}
		if (max_no < 0)
			break;
		r = &dev->resource[max_no];
		if (max_size > *align)
			*align = max_size;
		*size += max_size;
		r->start = start;
		start += max_size;
		r->end = r->start + max_size - 1;
		r->flags &= ~IORESOURCE_UNSET;
		pr_devel(" -> R%d %016llx..%016llx\n",
			 max_no, r->start, r->end);
	}
	pr_devel(" <- CDR %s size=%llx align=%llx\n",
		 pci_name(dev), *size, *align);
}

/* Allocate a resource "wrap" for a given device or bridge and
 * insert it at the right position in the sorted list
 */
static void __devinit pnv_ioda_add_wrap(struct list_head *list,
					struct pci_bus *bus,
					struct pci_dev *dev,
					resource_size_t size,
					resource_size_t align)
{
	struct resource_wrap *w1, *w = kzalloc(sizeof(*w), GFP_KERNEL);

	w->size = size;
	w->align = align;
	w->dev = dev;
	w->bus = bus;

	list_for_each_entry(w1, list, link) {
		if (w1->align < align) {
			list_add_tail(&w->link, &w1->link);
			return;
		}
	}
	list_add_tail(&w->link, list);
}

/* Offset device resources of a given type */
static void __devinit pnv_ioda_offset_dev(struct pci_dev *dev,
					  unsigned int flags,
					  resource_size_t offset)
{
	struct resource *r;
	int i;

	pr_devel(" -> ODR %s [%x] +%016llx\n", pci_name(dev), flags, offset);

	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		r = &dev->resource[i];
		if (r->flags & flags) {
			dev->resource[i].start += offset;
			dev->resource[i].end += offset;
		}
	}

	pr_devel(" <- ODR %s [%x] +%016llx\n", pci_name(dev), flags, offset);
}

/* Offset bus resources (& all children) of a given type */
static void __devinit pnv_ioda_offset_bus(struct pci_bus *bus,
					  unsigned int flags,
					  resource_size_t offset)
{
	struct resource *r;
	struct pci_dev *dev;
	struct pci_bus *cbus;
	int i;

	pr_devel(" -> OBR %s [%x] +%016llx\n",
		 bus->self ? pci_name(bus->self) : "root", flags, offset);

	pci_bus_for_each_resource(bus, r, i) {
		if (r && (r->flags & flags)) {
			r->start += offset;
			r->end += offset;
		}
	}
	list_for_each_entry(dev, &bus->devices, bus_list)
		pnv_ioda_offset_dev(dev, flags, offset);
	list_for_each_entry(cbus, &bus->children, node)
		pnv_ioda_offset_bus(cbus, flags, offset);

	pr_devel(" <- OBR %s [%x]\n",
		 bus->self ? pci_name(bus->self) : "root", flags);
}
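
/* A note on ordering: pnv_ioda_add_wrap() keeps the wrap list sorted by
 * decreasing alignment, so the allocator below always places the most
 * constrained child first. For instance, inserting wraps with alignments
 * 1M, 16M and 4K (in that order) yields the list [16M, 1M, 4K], which
 * minimizes the padding lost to ALIGN() when offsets are handed out
 * front to back.
 */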

/* This is the guts of our IODA resource allocation. This is called
 * recursively for each bus in the system. It calculates all the
 * necessary size and requirements for children and assigns them
 * resources such that:
 *
 *   - Each function fits in its own contiguous set of IO/M32
 *     segments
 *
 *   - All segments behind a P2P bridge are contiguous and obey
 *     alignment constraints of those bridges
 */
static void __devinit pnv_ioda_calc_bus(struct pci_bus *bus, unsigned int flags,
					resource_size_t *size,
					resource_size_t *align)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	resource_size_t dev_size, dev_align, start;
	resource_size_t min_align, min_balign;
	struct pci_dev *cdev;
	struct pci_bus *cbus;
	struct list_head head;
	struct resource_wrap *w;
	unsigned int bres;

	*size = *align = 0;

	pr_devel("-> CBR %s [%x]\n",
		 bus->self ? pci_name(bus->self) : "root", flags);

	/* Calculate alignment requirements based on the type
	 * of resource we are working on
	 */
	if (flags & IORESOURCE_IO) {
		bres = 0;
		min_align = phb->ioda.io_segsize;
		min_balign = 0x1000;
	} else {
		bres = 1;
		min_align = phb->ioda.m32_segsize;
		min_balign = 0x100000;
	}

	/* Gather all our children resources ordered by alignment */
	INIT_LIST_HEAD(&head);

	/*  - Busses */
	list_for_each_entry(cbus, &bus->children, node) {
		pnv_ioda_calc_bus(cbus, flags, &dev_size, &dev_align);
		pnv_ioda_add_wrap(&head, cbus, NULL, dev_size, dev_align);
	}

	/*  - Devices */
	list_for_each_entry(cdev, &bus->devices, bus_list) {
		pnv_ioda_calc_dev(cdev, flags, &dev_size, &dev_align);
		/* Align them to segment size */
		if (dev_align < min_align)
			dev_align = min_align;
		pnv_ioda_add_wrap(&head, NULL, cdev, dev_size, dev_align);
	}
	if (list_empty(&head))
		goto empty;

	/* Now we can do two things: assign offsets to them within that
	 * level and get our total alignment & size requirements. The
	 * assignment algorithm is going to be uber-trivial for now, we
	 * can try to be smarter later at filling out holes.
	 */
	if (bus->self) {
		/* No offset for downstream bridges */
		start = 0;
	} else {
		/* Offset from the root */
		if (flags & IORESOURCE_IO)
			/* Don't hand out IO 0 */
			start = hose->io_resource.start + 0x1000;
		else
			start = hose->mem_resources[0].start;
	}
	while (!list_empty(&head)) {
		w = list_first_entry(&head, struct resource_wrap, link);
		list_del(&w->link);
		if (w->size) {
			if (start) {
				start = ALIGN(start, w->align);
				if (w->dev)
					pnv_ioda_offset_dev(w->dev, flags, start);
				else if (w->bus)
					pnv_ioda_offset_bus(w->bus, flags, start);
			}
			if (w->align > *align)
				*align = w->align;
		}
		start += w->size;
		kfree(w);
	}
	*size = start;

	/* Align and setup bridge resources */
	*align = max_t(resource_size_t, *align,
		       max_t(resource_size_t, min_align, min_balign));
	*size = ALIGN(*size,
		      max_t(resource_size_t, min_align, min_balign));
 empty:
	/* Only setup P2P's, not the PHB itself */
	if (bus->self) {
		struct resource *res = bus->resource[bres];

		if (WARN_ON(res == NULL))
			return;

		/*
		 * FIXME: We should probably export and call
		 * pci_bridge_check_ranges() to properly re-initialize
		 * the PCI portion of the flags here, and to detect
		 * what the bridge actually supports.
		 */
		res->start = 0;
		res->flags = (*size) ? flags : 0;
		res->end = (*size) ? (*size - 1) : 0;
	}

	pr_devel("<- CBR %s [%x] *size=%016llx *align=%016llx\n",
		 bus->self ? pci_name(bus->self) : "root", flags, *size, *align);
}

static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
{
	struct device_node *np;

	np = pci_device_to_OF_node(dev);
	if (!np)
		return NULL;
	return PCI_DN(np);
}

static void __devinit pnv_ioda_setup_pe_segments(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
	unsigned int pe, i;
	resource_size_t pos;
	struct resource io_res;
	struct resource m32_res;
	struct pci_bus_region region;
	int rc;

	/* Anything not referenced in the device-tree gets PE#0 */
	pe = pdn ? pdn->pe_number : 0;

	/* Calculate the device min/max */
	io_res.start = m32_res.start = (resource_size_t)-1;
	io_res.end = m32_res.end = 0;
	io_res.flags = IORESOURCE_IO;
	m32_res.flags = IORESOURCE_MEM;

	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *r = NULL;
		if (dev->resource[i].flags & IORESOURCE_IO)
			r = &io_res;
		if (dev->resource[i].flags & IORESOURCE_MEM)
			r = &m32_res;
		if (!r)
			continue;
		if (dev->resource[i].start < r->start)
			r->start = dev->resource[i].start;
		if (dev->resource[i].end > r->end)
			r->end = dev->resource[i].end;
	}

	/* Setup IO segments */
	if (io_res.start < io_res.end) {
		pcibios_resource_to_bus(dev, &region, &io_res);
		pos = region.start;
		i = pos / phb->ioda.io_segsize;
		while (i < phb->ioda.total_pe && pos <= region.end) {
			if (phb->ioda.io_segmap[i]) {
				pr_err("%s: Trying to use IO seg #%d which is"
				       " already used by PE# %d\n",
				       pci_name(dev), i,
				       phb->ioda.io_segmap[i]);
				/* XXX DO SOMETHING TO DISABLE DEVICE ? */
				break;
			}
			phb->ioda.io_segmap[i] = pe;
			rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe,
							 OPAL_IO_WINDOW_TYPE,
							 0, i);
			if (rc != OPAL_SUCCESS) {
				pr_err("%s: OPAL error %d setting up mapping"
				       " for IO seg# %d\n",
				       pci_name(dev), rc, i);
				/* XXX DO SOMETHING TO DISABLE DEVICE ? */
				break;
			}
			pos += phb->ioda.io_segsize;
			i++;
		}
	}
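
	/* The M32 loop below mirrors the IO one above: the segment index
	 * is just the bus address divided by the segment size. As an
	 * illustration (numbers made up, real segment sizes depend on the
	 * PHB), with io_segsize = 0x10000 a device decoding 0x20000..0x2ffff
	 * lands in segment #2, and every segment the range overlaps is
	 * bound to the device's PE via opal_pci_map_pe_mmio_window().
	 */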

	/* Setup M32 segments */
	if (m32_res.start < m32_res.end) {
		pcibios_resource_to_bus(dev, &region, &m32_res);
		pos = region.start;
		i = pos / phb->ioda.m32_segsize;
		while (i < phb->ioda.total_pe && pos <= region.end) {
			if (phb->ioda.m32_segmap[i]) {
				pr_err("%s: Trying to use M32 seg #%d which is"
				       " already used by PE# %d\n",
				       pci_name(dev), i,
				       phb->ioda.m32_segmap[i]);
				/* XXX DO SOMETHING TO DISABLE DEVICE ? */
				break;
			}
			phb->ioda.m32_segmap[i] = pe;
			rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe,
							 OPAL_M32_WINDOW_TYPE,
							 0, i);
			if (rc != OPAL_SUCCESS) {
				pr_err("%s: OPAL error %d setting up mapping"
				       " for M32 seg# %d\n",
				       pci_name(dev), rc, i);
				/* XXX DO SOMETHING TO DISABLE DEVICE ? */
				break;
			}
			pos += phb->ioda.m32_segsize;
			i++;
		}
	}
}

/* Check if a resource still fits in the total IO or M32 range
 * for a given PHB
 */
static int __devinit pnv_ioda_resource_fit(struct pci_controller *hose,
					   struct resource *r)
{
	struct resource *bounds;

	if (r->flags & IORESOURCE_IO)
		bounds = &hose->io_resource;
	else if (r->flags & IORESOURCE_MEM)
		bounds = &hose->mem_resources[0];
	else
		return 1;

	if (r->start >= bounds->start && r->end <= bounds->end)
		return 1;
	r->flags = 0;
	return 0;
}
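
/* Note that pnv_ioda_resource_fit() returns 1 when the resource fits
 * (or isn't an IO/MEM resource at all) and 0 otherwise, and that on
 * failure it also clears r->flags, so the generic PCI code will treat
 * the BAR as unassigned instead of programming a bogus value.
 */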

static void __devinit pnv_ioda_update_resources(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_bus *cbus;
	struct pci_dev *cdev;
	unsigned int i;

	/* We used to clear all device enables here. However it looks like
	 * clearing MEM enable causes Obsidian (IPR SCS) to go bonkers,
	 * and shoot fatal errors to the PHB which in turn fences itself
	 * and we can't recover from that ... yet. So for now, let's leave
	 * the enables as-is and hope for the best.
	 */

	/* Check if bus resources fit in our IO or M32 range */
	for (i = 0; bus->self && (i < 2); i++) {
		struct resource *r = bus->resource[i];
		if (r && !pnv_ioda_resource_fit(hose, r))
			pr_err("%s: Bus %d resource %d disabled, no room\n",
			       pci_name(bus->self), bus->number, i);
	}

	/* Update self if it's not a PHB */
	if (bus->self)
		pci_setup_bridge(bus);

	/* Update child devices */
	list_for_each_entry(cdev, &bus->devices, bus_list) {
		/* Check if the resource fits; if not, disable it */
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *r = &cdev->resource[i];
			if (!pnv_ioda_resource_fit(hose, r))
				pr_err("%s: Resource %d disabled, no room\n",
				       pci_name(cdev), i);
		}

		/* Assign segments */
		pnv_ioda_setup_pe_segments(cdev);

		/* Update HW BARs */
		for (i = 0; i <= PCI_ROM_RESOURCE; i++)
			pci_update_resource(cdev, i);
	}

	/* Update child busses */
	list_for_each_entry(cbus, &bus->children, node)
		pnv_ioda_update_resources(cbus);
}

static int __devinit pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
	unsigned long pe;

	do {
		pe = find_next_zero_bit(phb->ioda.pe_alloc,
					phb->ioda.total_pe, 0);
		if (pe >= phb->ioda.total_pe)
			return IODA_INVALID_PE;
	} while (test_and_set_bit(pe, phb->ioda.pe_alloc));

	phb->ioda.pe_array[pe].pe_number = pe;
	return pe;
}

static void __devinit pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
{
	WARN_ON(phb->ioda.pe_array[pe].pdev);

	memset(&phb->ioda.pe_array[pe], 0, sizeof(struct pnv_ioda_pe));
	clear_bit(pe, phb->ioda.pe_alloc);
}

/* Currently these two are only used when MSIs are enabled, this will change
 * but in the meantime, we need to protect them to avoid warnings
 */
#ifdef CONFIG_PCI_MSI
static struct pnv_ioda_pe * __devinit __pnv_ioda_get_one_pe(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pnv_ioda_get_pdn(dev);

	if (!pdn)
		return NULL;
	if (pdn->pe_number == IODA_INVALID_PE)
		return NULL;
	return &phb->ioda.pe_array[pdn->pe_number];
}

static struct pnv_ioda_pe * __devinit pnv_ioda_get_pe(struct pci_dev *dev)
{
	struct pnv_ioda_pe *pe = __pnv_ioda_get_one_pe(dev);

	while (!pe && dev->bus->self) {
		dev = dev->bus->self;
		pe = __pnv_ioda_get_one_pe(dev);
		if (pe)
			pe = pe->bus_pe;
	}
	return pe;
}
#endif /* CONFIG_PCI_MSI */
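
/* A note on the bus-number compare modes used below: judging by the
 * names, OpalPciBusAll means the RID's bus number must match exactly,
 * while OpalPciBus3Bits..OpalPciBus7Bits compare only that many top
 * bits of it. So a PE covering a 32-bus range (say 0x20..0x3f) uses
 * OpalPciBus3Bits, and rid_end = rid + (32 << 8) spans all 8192 RIDs
 * (256 devfns per bus) behind that bridge.
 */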

static int __devinit pnv_ioda_configure_pe(struct pnv_phb *phb,
					   struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	long rc, rid_end, rid;

	/* Bus validation ? */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		switch (count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			pr_err("%s: Number of subordinate busses %d"
			       " unsupported\n",
			       pci_name(pe->pbus->self), count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
		parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/* Associate PE in PELT */
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
	if (rc) {
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
		return -ENXIO;
	}
	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	/* Add to all parents' PELT-V */
	while (parent) {
		struct pci_dn *pdn = pnv_ioda_get_pdn(parent);
		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
						pe->pe_number,
						OPAL_ADD_PE_TO_DOMAIN);
			/* XXX What to do in case of error ? */
		}
		parent = parent->bus->self;
	}
	/* Setup reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = pe->pe_number;

	/* Setup one MVE on IODA1 */
	if (phb->type == PNV_PHB_IODA1) {
		pe->mve_number = pe->pe_number;
		rc = opal_pci_set_mve(phb->opal_id, pe->mve_number,
				      pe->pe_number);
		if (rc) {
			pe_err(pe, "OPAL error %ld setting up MVE %d\n",
			       rc, pe->mve_number);
			pe->mve_number = -1;
		} else {
			rc = opal_pci_set_mve_enable(phb->opal_id,
						     pe->mve_number,
						     OPAL_ENABLE_MVE);
			if (rc) {
				pe_err(pe, "OPAL error %ld enabling MVE %d\n",
				       rc, pe->mve_number);
				pe->mve_number = -1;
			}
		}
	} else if (phb->type == PNV_PHB_IODA2)
		pe->mve_number = 0;

	return 0;
}

static void __devinit pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
						 struct pnv_ioda_pe *pe)
{
	struct pnv_ioda_pe *lpe;

	list_for_each_entry(lpe, &phb->ioda.pe_list, link) {
		if (lpe->dma_weight < pe->dma_weight) {
			list_add_tail(&pe->link, &lpe->link);
			return;
		}
	}
	list_add_tail(&pe->link, &phb->ioda.pe_list);
}
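
/* Because pnv_ioda_link_pe_by_weight() keeps phb->ioda.pe_list sorted
 * by decreasing DMA weight, pnv_ioda_setup_dma() later walks the list
 * heaviest-first, so the biggest DMA users get their TCE segments
 * before the pool can run out.
 */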

static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
{
	/* This is quite simplistic. The "base" weight of a device
	 * is 10; a weight of 0 means no DMA is to be accounted for it.
	 */

	/* If it's a bridge, no DMA */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return 0;

	/* Reduce the weight of slow USB controllers */
	if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		return 3;

	/* Increase the weight of RAID (includes Obsidian) */
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
		return 15;

	/* Default */
	return 10;
}

static struct pnv_ioda_pe * __devinit pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
	struct pnv_ioda_pe *pe;
	int pe_num;

	if (!pdn) {
		pr_err("%s: Device tree node not associated properly\n",
		       pci_name(dev));
		return NULL;
	}
	if (pdn->pe_number != IODA_INVALID_PE)
		return NULL;

	/* PE#0 has been pre-set */
	if (dev->bus->number == 0)
		pe_num = 0;
	else
		pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available, disabling device\n",
			   pci_name(dev));
		return NULL;
	}

	/* NOTE: We get only one ref to the pci_dev for the pdn, not for the
	 * pointer in the PE data structure, both should be destroyed at the
	 * same time. However, this needs to be looked at more closely again
	 * once we actually start removing things (Hotplug, SR-IOV, ...)
	 *
	 * At some point we want to remove the PDN completely anyways
	 */
	pe = &phb->ioda.pe_array[pe_num];
	pci_dev_get(dev);
	pdn->pcidev = dev;
	pdn->pe_number = pe_num;
	pe->pdev = dev;
	pe->pbus = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = dev->bus->number << 8 | pdn->devfn;

	pe_info(pe, "Associated device to PE\n");

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pdn->pe_number = IODA_INVALID_PE;
		pe->pdev = NULL;
		pci_dev_put(dev);
		return NULL;
	}

	/* Assign a DMA weight to the device */
	pe->dma_weight = pnv_ioda_dma_weight(dev);
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);

	return pe;
}

static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_dn *pdn = pnv_ioda_get_pdn(dev);

		if (pdn == NULL) {
			pr_warn("%s: No device node associated with device !\n",
				pci_name(dev));
			continue;
		}
		pci_dev_get(dev);
		pdn->pcidev = dev;
		pdn->pe_number = pe->pe_number;
		pe->dma_weight += pnv_ioda_dma_weight(dev);
		if (dev->subordinate)
			pnv_ioda_setup_same_PE(dev->subordinate, pe);
	}
}

static void __devinit pnv_ioda_setup_bus_PE(struct pci_dev *dev,
					    struct pnv_ioda_pe *ppe)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_bus *bus = dev->subordinate;
	struct pnv_ioda_pe *pe;
	int pe_num;

	if (!bus) {
		pr_warning("%s: Bridge without a subordinate bus !\n",
			   pci_name(dev));
		return;
	}
	pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available, disabling bus\n",
			   pci_name(dev));
		return;
	}

	pe = &phb->ioda.pe_array[pe_num];
	ppe->bus_pe = pe;
	pe->pbus = bus;
	pe->pdev = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = bus->busn_res.start << 8;
	pe->dma_weight = 0;

	pe_info(pe, "Secondary busses %pR associated with PE\n",
		&bus->busn_res);

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pe->pbus = NULL;
		return;
	}

	/* Associate it with all child devices */
	pnv_ioda_setup_same_PE(bus, pe);

	/* Account for one DMA PE if at least one DMA-capable device exists
	 * below the bridge
	 */
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);
}

static void __devinit pnv_ioda_setup_PEs(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pnv_ioda_pe *pe;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		pe = pnv_ioda_setup_dev_PE(dev);
		if (pe == NULL)
			continue;
		/* Leaving the PCIe domain ... single PE# */
		if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
			pnv_ioda_setup_bus_PE(dev, pe);
		else if (dev->subordinate)
			pnv_ioda_setup_PEs(dev->subordinate);
	}
}

static void __devinit pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb,
						 struct pci_dev *dev)
{
	/* We delay DMA setup after we have assigned all PE# */
}

static void __devinit pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
					     struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		set_iommu_table_base(&dev->dev, &pe->tce32_table);
		if (dev->subordinate)
			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
	}
}

static void __devinit pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
						struct pnv_ioda_pe *pe,
						unsigned int base,
						unsigned int segs)
{
	struct page *tce_mem = NULL;
	const __be64 *swinvp;
	struct iommu_table *tbl;
	unsigned int i;
	int64_t rc;
	void *addr;

	/* 256M DMA window, 4K TCE pages, 8 bytes TCE */
#define TCE32_TABLE_SIZE	((0x10000000 / 0x1000) * 8)

	/* XXX FIXME: Handle 64-bit only DMA devices */
	/* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
	/* XXX FIXME: Allocate multi-level tables on PHB3 */

	/* We shouldn't already have a 32-bit DMA window associated */
	if (WARN_ON(pe->tce32_seg >= 0))
		return;

	/* Grab a 32-bit TCE table */
	pe->tce32_seg = base;
	pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
		(base << 28), ((base + segs) << 28) - 1);

	/* XXX Currently, we allocate one big contiguous table for the
	 * TCEs. We only really need one chunk per 256M of TCE space
	 * (ie per segment) but that's an optimization for later, it
	 * requires some added smarts with our get/put_tce implementation
	 */
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(TCE32_TABLE_SIZE * segs));
	if (!tce_mem) {
		pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
		goto fail;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, TCE32_TABLE_SIZE * segs);

	/* Configure HW */
	for (i = 0; i < segs; i++) {
		rc = opal_pci_map_pe_dma_window(phb->opal_id,
						pe->pe_number,
						base + i, 1,
						__pa(addr) + TCE32_TABLE_SIZE * i,
						TCE32_TABLE_SIZE, 0x1000);
		if (rc) {
			pe_err(pe, " Failed to configure 32-bit TCE table,"
			       " err %ld\n", rc);
			goto fail;
		}
	}

	/* Setup linux iommu table */
	tbl = &pe->tce32_table;
	pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
				  base << 28);

	/* OPAL variant of P7IOC SW invalidated TCEs */
	swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
	if (swinvp) {
		/* We need a couple more fields -- an address and a data
		 * to or. Since the bus is only printed out on table free
		 * errors, and on the first pass the data will be a relative
		 * bus number, print that out instead.
		 */
		tbl->it_busno = 0;
		tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE
			       | TCE_PCI_SWINV_PAIR;
	}
	iommu_init_table(tbl, phb->hose->node);

	if (pe->pdev)
		set_iommu_table_base(&pe->pdev->dev, tbl);
	else
		pnv_ioda_setup_bus_dma(pe, pe->pbus);

	return;
 fail:
	/* XXX Failure: Try to fallback to 64-bit only ? */
	if (pe->tce32_seg >= 0)
		pe->tce32_seg = -1;
	if (tce_mem)
		__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
}
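
/* The residual-segment formula in pnv_ioda_setup_dma() below rounds to
 * the nearest integer: segs = 1 + (weight * residual + tw/2) / tw. As a
 * made-up example, with total weight tw = 30, residual = 12 and a PE
 * weight of 10, the PE gets 1 + (120 + 15) / 30 = 5 of the 32-bit TCE
 * segments, capped by whatever is still remaining.
 */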

static void __devinit pnv_ioda_setup_dma(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	unsigned int residual, remaining, segs, tw, base;
	struct pnv_ioda_pe *pe;

	/* If we have more PE# than segments available, hand out one
	 * per PE until we run out and let the rest fail. If not,
	 * then we assign at least one segment per PE, plus more based
	 * on the amount of devices under that PE
	 */
	if (phb->ioda.dma_pe_count > phb->ioda.tce32_count)
		residual = 0;
	else
		residual = phb->ioda.tce32_count -
			   phb->ioda.dma_pe_count;

	pr_info("PCI: Domain %04x has %ld available 32-bit DMA segments\n",
		hose->global_number, phb->ioda.tce32_count);
	pr_info("PCI: %d PE# for a total weight of %d\n",
		phb->ioda.dma_pe_count, phb->ioda.dma_weight);

	/* Walk our PE list and configure their DMA segments, hand them
	 * out one base segment plus any residual segments based on
	 * weight
	 */
	remaining = phb->ioda.tce32_count;
	tw = phb->ioda.dma_weight;
	base = 0;
	list_for_each_entry(pe, &phb->ioda.pe_list, link) {
		if (!pe->dma_weight)
			continue;
		if (!remaining) {
			pe_warn(pe, "No DMA32 resources available\n");
			continue;
		}
		segs = 1;
		if (residual) {
			segs += ((pe->dma_weight * residual) + (tw / 2)) / tw;
			if (segs > remaining)
				segs = remaining;
		}
		pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
			pe->dma_weight, segs);
		pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
		remaining -= segs;
		base += segs;
	}
}

#ifdef CONFIG_PCI_MSI
static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
				  unsigned int hwirq, unsigned int is_64,
				  struct msi_msg *msg)
{
	struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
	unsigned int xive_num = hwirq - phb->msi_base;
	uint64_t addr64;
	uint32_t addr32, data;
	int rc;

	/* No PE assigned ? bail out ... no MSI for you ! */
	if (pe == NULL)
		return -ENXIO;

	/* Check if we have an MVE */
	if (pe->mve_number < 0)
		return -ENXIO;

	/* Assign XIVE to PE */
	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
	if (rc) {
		pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
			pci_name(dev), rc, xive_num);
		return -EIO;
	}

	if (is_64) {
		rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr64, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = addr64 >> 32;
		msg->address_lo = addr64 & 0xfffffffful;
	} else {
		rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr32, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = 0;
		msg->address_lo = addr32;
	}
	msg->data = data;

	pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
		 " address=%x_%08x data=%x PE# %d\n",
		 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
		 msg->address_hi, msg->address_lo, data, pe->pe_number);

	return 0;
}

static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
{
	unsigned int bmap_size;
	const __be32 *prop = of_get_property(phb->hose->dn,
					     "ibm,opal-msi-ranges", NULL);
	if (!prop) {
		/* BML Fallback */
		prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
	}
	if (!prop)
		return;

	phb->msi_base = be32_to_cpup(prop);
	phb->msi_count = be32_to_cpup(prop + 1);
	bmap_size = BITS_TO_LONGS(phb->msi_count) * sizeof(unsigned long);
	phb->msi_map = zalloc_maybe_bootmem(bmap_size, GFP_KERNEL);
	if (!phb->msi_map) {
		pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
		       phb->hose->global_number);
		return;
	}
	phb->msi_setup = pnv_pci_ioda_msi_setup;
	phb->msi32_support = 1;
	pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
		phb->msi_count, phb->msi_base);
}
#else
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
#endif /* CONFIG_PCI_MSI */

/* This is the starting point of our IODA specific resource
 * allocation process
 */
static void __devinit pnv_pci_ioda_fixup_phb(struct pci_controller *hose)
{
	resource_size_t size, align;
	struct pci_bus *child;

	/* Associate PEs per functions */
	pnv_ioda_setup_PEs(hose->bus);

	/* Calculate all resources */
	pnv_ioda_calc_bus(hose->bus, IORESOURCE_IO, &size, &align);
	pnv_ioda_calc_bus(hose->bus, IORESOURCE_MEM, &size, &align);

	/* Apply them to HW */
	pnv_ioda_update_resources(hose->bus);

	/* Setup DMA */
	pnv_ioda_setup_dma(hose->private_data);

	/* Configure PCI Express settings */
	list_for_each_entry(child, &hose->bus->children, node) {
		struct pci_dev *self = child->self;
		if (!self)
			continue;
		pcie_bus_configure_settings(child, self->pcie_mpss);
	}
}

/*
 * Returns the alignment for I/O or memory windows for P2P
 * bridges. That actually depends on how PEs are segmented.
 * For now, we return I/O or M32 segment size for PE sensitive
 * P2P bridges. Otherwise, the default values (4KiB for I/O,
 * 1MiB for memory) will be returned.
 *
 * The current PCI bus might be put into one PE, which was
 * created for the parent PCI bridge. In that case, we needn't
 * enlarge the alignment, which saves some resources.
 */
static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	struct pci_dev *bridge;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	int num_pci_bridges = 0;

	bridge = bus->self;
	while (bridge) {
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
			num_pci_bridges++;
			if (num_pci_bridges >= 2)
				return 1;
		}

		bridge = bridge->bus->self;
	}

	/* We need to support prefetchable memory windows later */
	if (type & IORESOURCE_MEM)
		return phb->ioda.m32_segsize;

	return phb->ioda.io_segsize;
}

/* Prevent enabling devices for which we couldn't properly
 * assign a PE
 */
static int __devinit pnv_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_dn *pdn = pnv_ioda_get_pdn(dev);

	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return -EINVAL;
	return 0;
}

static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
			       u32 devfn)
{
	return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
}

void __init pnv_pci_init_ioda1_phb(struct device_node *np)
{
	struct pci_controller *hose;
	static int primary = 1;
	struct pnv_phb *phb;
	unsigned long size, m32map_off, iomap_off, pemap_off;
	const u64 *prop64;
	u64 phb_id;
	void *aux;
	long rc;

	pr_info(" Initializing IODA OPAL PHB %s\n", np->full_name);

	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-phbid\" property !\n");
		return;
	}
	phb_id = be64_to_cpup(prop64);
	pr_debug(" PHB-ID : 0x%016llx\n", phb_id);

	phb = alloc_bootmem(sizeof(struct pnv_phb));
	if (phb) {
		memset(phb, 0, sizeof(struct pnv_phb));
		phb->hose = hose = pcibios_alloc_controller(np);
	}
	if (!phb || !phb->hose) {
		pr_err("PCI: Failed to allocate PCI controller for %s\n",
		       np->full_name);
		return;
	}

	spin_lock_init(&phb->lock);
	/* XXX Use device-tree */
	hose->first_busno = 0;
	hose->last_busno = 0xff;
	hose->private_data = phb;
	phb->opal_id = phb_id;
	phb->type = PNV_PHB_IODA1;

	/* Detect specific models for error handling */
	if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
		phb->model = PNV_PHB_MODEL_P7IOC;
	else
		phb->model = PNV_PHB_MODEL_UNKNOWN;

	/* We parse "ranges" now since we need to deduce the register base
	 * from the IO base
	 */
	pci_process_bridge_OF_ranges(phb->hose, np, primary);
	primary = 0;

	/* Magic formula from Milton */
	phb->regs = of_iomap(np, 0);
	if (phb->regs == NULL)
		pr_err(" Failed to map registers !\n");

	/* XXX This is hack-a-thon. This needs to be changed so that:
	 *  - we obtain stuff like PE# etc... from device-tree
	 *  - we properly re-allocate M32 ourselves
	 *    (the OFW one isn't very good)
	 */

	/* Initialize more IODA stuff */
	phb->ioda.total_pe = 128;

	phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
	/* OFW has already chopped off the top 64k of M32 space (MSI space) */
	phb->ioda.m32_size += 0x10000;

	phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe;
	phb->ioda.m32_pci_base = hose->mem_resources[0].start -
				 hose->pci_mem_offset;
	phb->ioda.io_size = hose->pci_io_size;
	phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
	phb->ioda.io_pci_base = 0;	/* XXX calculate this ? */

	/* Allocate aux data & arrays */
	size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
	m32map_off = size;
	size += phb->ioda.total_pe;
	iomap_off = size;
	size += phb->ioda.total_pe;
	pemap_off = size;
	size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
	aux = alloc_bootmem(size);
	memset(aux, 0, size);
	phb->ioda.pe_alloc = aux;
	phb->ioda.m32_segmap = aux + m32map_off;
	phb->ioda.io_segmap = aux + iomap_off;
	phb->ioda.pe_array = aux + pemap_off;
	set_bit(0, phb->ioda.pe_alloc);

	INIT_LIST_HEAD(&phb->ioda.pe_list);

	/* Calculate how many 32-bit TCE segments we have */
	phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;

	/* Clear unusable m64 */
	hose->mem_resources[1].flags = 0;
	hose->mem_resources[1].start = 0;
	hose->mem_resources[1].end = 0;
	hose->mem_resources[2].flags = 0;
	hose->mem_resources[2].start = 0;
	hose->mem_resources[2].end = 0;

#if 0
	rc = opal_pci_set_phb_mem_window(opal->phb_id,
					 window_type,
					 window_num,
					 starting_real_address,
					 starting_pci_address,
					 segment_size);
#endif

	pr_info(" %d PE's M32: 0x%x [segment=0x%x] IO: 0x%x [segment=0x%x]\n",
		phb->ioda.total_pe,
		phb->ioda.m32_size, phb->ioda.m32_segsize,
		phb->ioda.io_size, phb->ioda.io_segsize);

	if (phb->regs) {
		pr_devel(" BUID     = 0x%016llx\n", in_be64(phb->regs + 0x100));
		pr_devel(" PHB2_CR  = 0x%016llx\n", in_be64(phb->regs + 0x160));
		pr_devel(" IO_BAR   = 0x%016llx\n", in_be64(phb->regs + 0x170));
		pr_devel(" IO_BAMR  = 0x%016llx\n", in_be64(phb->regs + 0x178));
		pr_devel(" IO_SAR   = 0x%016llx\n", in_be64(phb->regs + 0x180));
		pr_devel(" M32_BAR  = 0x%016llx\n", in_be64(phb->regs + 0x190));
		pr_devel(" M32_BAMR = 0x%016llx\n", in_be64(phb->regs + 0x198));
		pr_devel(" M32_SAR  = 0x%016llx\n", in_be64(phb->regs + 0x1a0));
	}
	phb->hose->ops = &pnv_pci_ops;

	/* Setup RID -> PE mapping function */
	phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe;

	/* Setup TCEs */
	phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;

	/* Setup MSI support */
	pnv_pci_init_ioda_msis(phb);

	/* We set both PCI_PROBE_ONLY and PCI_REASSIGN_ALL_RSRC. This is an
	 * odd combination which essentially means that we skip all resource
	 * fixups and assignments in the generic code, and do it all
	 * ourselves here
	 */
	ppc_md.pcibios_fixup_phb = pnv_pci_ioda_fixup_phb;
	ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
	ppc_md.pcibios_window_alignment = pnv_pci_window_alignment;
	pci_add_flags(PCI_PROBE_ONLY | PCI_REASSIGN_ALL_RSRC);

	/* Reset IODA tables to a clean state */
	rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
	if (rc)
		pr_warning(" OPAL Error %ld performing IODA table reset !\n", rc);
	opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1, OPAL_MAP_PE);
}

void __init pnv_pci_init_ioda_hub(struct device_node *np)
{
	struct device_node *phbn;
	const u64 *prop64;
	u64 hub_id;

	pr_info("Probing IODA IO-Hub %s\n", np->full_name);

	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
		return;
	}
	hub_id = be64_to_cpup(prop64);
	pr_devel(" HUB-ID : 0x%016llx\n", hub_id);

	/* Initialize the child PHBs */
	for_each_child_of_node(np, phbn) {
		/* Look for IODA1 PHBs */
		if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
			pnv_pci_init_ioda1_phb(phbn);
	}
}