/*
 * Common pmac/prep/chrp pci routines. -- Cort
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/list.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>

#undef DEBUG

#ifdef DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif

unsigned long isa_io_base = 0;
unsigned long isa_mem_base = 0;
unsigned long pci_dram_offset = 0;
int pcibios_assign_bus_offset = 1;

void pcibios_make_OF_bus_map(void);

static int pci_relocate_bridge_resource(struct pci_bus *bus, int i);
static int probe_resource(struct pci_bus *parent, struct resource *pr,
			  struct resource *res, struct resource **conflict);
static void update_bridge_base(struct pci_bus *bus, int i);
static void pcibios_fixup_resources(struct pci_dev* dev);
static void fixup_broken_pcnet32(struct pci_dev* dev);
static int reparent_resources(struct resource *parent, struct resource *res);
static void fixup_cpc710_pci64(struct pci_dev* dev);
#ifdef CONFIG_PPC_OF
static u8* pci_to_OF_bus_map;
#endif

/* By default, we don't re-assign bus numbers. We do this only on
 * some pmacs
 */
int pci_assign_all_buses;

struct pci_controller* hose_head;
struct pci_controller** hose_tail = &hose_head;

static int pci_bus_count;

static void
fixup_broken_pcnet32(struct pci_dev* dev)
{
	if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
		dev->vendor = PCI_VENDOR_ID_AMD;
		pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);

static void
fixup_cpc710_pci64(struct pci_dev* dev)
{
	/* Hide the PCI64 BARs from the kernel as their content doesn't
	 * fit well in the resource management
	 */
	dev->resource[0].start = dev->resource[0].end = 0;
	dev->resource[0].flags = 0;
	dev->resource[1].start = dev->resource[1].end = 0;
	dev->resource[1].flags = 0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CPC710_PCI64, fixup_cpc710_pci64);

static void
pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller* hose = (struct pci_controller *)dev->sysdata;
	int i;
	unsigned long offset;

	if (!hose) {
		printk(KERN_ERR "No hose for PCI dev %s!\n", pci_name(dev));
		return;
	}
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		if (!res->flags)
			continue;
		if (res->end == 0xffffffff) {
			DBG("PCI:%s Resource %d [%016llx-%016llx] is unassigned\n",
			    pci_name(dev), i, (u64)res->start, (u64)res->end);
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}
		offset = 0;
		if (res->flags & IORESOURCE_MEM) {
			offset = hose->pci_mem_offset;
		} else if (res->flags & IORESOURCE_IO) {
			offset = (unsigned long) hose->io_base_virt
				- isa_io_base;
		}
		if (offset != 0) {
			res->start += offset;
			res->end += offset;
			DBG("Fixup res %d (%lx) of dev %s: %llx -> %llx\n",
			    i, res->flags, pci_name(dev),
			    (u64)res->start - offset, (u64)res->start);
		}
	}

	/* Call machine specific resource fixup */
	if (ppc_md.pcibios_fixup_resources)
		ppc_md.pcibios_fixup_resources(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);

void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
			     struct resource *res)
{
	unsigned long offset = 0;
	struct pci_controller *hose = dev->sysdata;

	if (hose && res->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - isa_io_base;
	else if (hose && res->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;
	region->start = res->start - offset;
	region->end = res->end - offset;
}
EXPORT_SYMBOL(pcibios_resource_to_bus);

void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
			     struct pci_bus_region *region)
{
	unsigned long offset = 0;
	struct pci_controller *hose = dev->sysdata;

	if (hose && res->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - isa_io_base;
	else if (hose && res->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;
	res->start = region->start + offset;
	res->end = region->end + offset;
}
EXPORT_SYMBOL(pcibios_bus_to_resource);

/*
 * We need to avoid collisions with `mirrored' VGA ports
 * and other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 *
 * Why? Because some silly external IO cards only decode
 * the low 10 bits of the IO address. The 0x00-0xff region
 * is reserved for motherboard devices that decode all 16
 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might have been mirrored at 0x0100-0x03ff..
 */
void pcibios_align_resource(void *data, struct resource *res,
			    resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;

	if (res->flags & IORESOURCE_IO) {
		resource_size_t start = res->start;

		if (size > 0x100) {
			printk(KERN_ERR "PCI: I/O Region %s/%d too large"
			       " (%lld bytes)\n", pci_name(dev),
			       dev->resource - res, (unsigned long long)size);
		}

		if (start & 0x300) {
			start = (start + 0x3ff) & ~0x3ff;
			res->start = start;
		}
	}
}
EXPORT_SYMBOL(pcibios_align_resource);

/*
 * Handle resources of PCI devices. If the world were perfect, we could
 * just allocate all the resource regions and do nothing more. It isn't.
 * On the other hand, we cannot just re-allocate all devices, as it would
 * require us to know lots of host bridge internals. So we attempt to
 * keep as much of the original configuration as possible, but tweak it
 * when it's found to be wrong.
 *
 * Known BIOS problems we have to work around:
 *	- I/O or memory regions not configured
 *	- regions configured, but not enabled in the command register
 *	- bogus I/O addresses above 64K used
 *	- expansion ROMs left enabled (this may sound harmless, but given
 *	  the fact the PCI specs explicitly allow address decoders to be
 *	  shared between expansion ROMs and other resource regions, it's
 *	  at least dangerous)
 *
 * Our solution:
 *	(1) Allocate resources for all buses behind PCI-to-PCI bridges.
 *	    This gives us fixed barriers on where we can allocate.
 *	(2) Allocate resources for all enabled devices.  If there is
 *	    a collision, just mark the resource as unallocated. Also
 *	    disable expansion ROMs during this step.
 *	(3) Try to allocate resources for disabled devices.  If the
 *	    resources were assigned correctly, everything goes well,
 *	    if they weren't, they won't disturb allocation of other
 *	    resources.
 *	(4) Assign new addresses to resources which were either
 *	    not configured at all or misconfigured.  If explicitly
 *	    requested by the user, configure expansion ROM address
 *	    as well.
 */

static void __init
pcibios_allocate_bus_resources(struct list_head *bus_list)
{
	struct pci_bus *bus;
	int i;
	struct resource *res, *pr;

	/* Depth-First Search on bus tree */
	list_for_each_entry(bus, bus_list, node) {
		for (i = 0; i < 4; ++i) {
			if ((res = bus->resource[i]) == NULL || !res->flags
			    || res->start > res->end)
				continue;
			if (bus->parent == NULL)
				pr = (res->flags & IORESOURCE_IO)?
					&ioport_resource: &iomem_resource;
			else {
				pr = pci_find_parent_resource(bus->self, res);
				if (pr == res) {
					/* this happens when the generic PCI
					 * code (wrongly) decides that this
					 * bridge is transparent -- paulus
					 */
					continue;
				}
			}

			DBG("PCI: bridge rsrc %llx..%llx (%lx), parent %p\n",
			    (u64)res->start, (u64)res->end, res->flags, pr);
			if (pr) {
				if (request_resource(pr, res) == 0)
					continue;
				/*
				 * Must be a conflict with an existing entry.
				 * Move that entry (or entries) under the
				 * bridge resource and try again.
				 */
				if (reparent_resources(pr, res) == 0)
					continue;
			}
			printk(KERN_ERR "PCI: Cannot allocate resource region "
			       "%d of PCI bridge %d\n", i, bus->number);
			if (pci_relocate_bridge_resource(bus, i))
				bus->resource[i] = NULL;
		}
		pcibios_allocate_bus_resources(&bus->children);
	}
}

/*
 * Reparent resource children of pr that conflict with res
 * under res, and make res replace those children.
 */
static int __init
reparent_resources(struct resource *parent, struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entries? */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
		    p->name, (u64)p->start, (u64)p->end, res->name);
	}
	return 0;
}

/*
 * A bridge has been allocated a range which is outside the range
 * of its parent bridge, so it needs to be moved.
 */
static int __init
pci_relocate_bridge_resource(struct pci_bus *bus, int i)
{
	struct resource *res, *pr, *conflict;
	unsigned long try, size;
	int j;
	struct pci_bus *parent = bus->parent;

	if (parent == NULL) {
		/* shouldn't ever happen */
		printk(KERN_ERR "PCI: can't move host bridge resource\n");
		return -1;
	}
	res = bus->resource[i];
	if (res == NULL)
		return -1;
	pr = NULL;
	for (j = 0; j < 4; j++) {
		struct resource *r = parent->resource[j];
		if (!r)
			continue;
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) {
			pr = r;
			break;
		}
		if (res->flags & IORESOURCE_PREFETCH)
			pr = r;
	}
	if (pr == NULL)
		return -1;
	size = res->end - res->start;
	if (pr->start > pr->end || size > pr->end - pr->start)
		return -1;
	try = pr->end;
	for (;;) {
		res->start = try - size;
		res->end = try;
		if (probe_resource(bus->parent, pr, res, &conflict) == 0)
			break;
		if (conflict->start <= pr->start + size)
			return -1;
		try = conflict->start - 1;
	}
	if (request_resource(pr, res)) {
		DBG(KERN_ERR "PCI: huh? couldn't move to %llx..%llx\n",
		    (u64)res->start, (u64)res->end);
		return -1;	/* "can't happen" */
	}
	update_bridge_base(bus, i);
	printk(KERN_INFO "PCI: bridge %d resource %d moved to %llx..%llx\n",
	       bus->number, i, (unsigned long long)res->start,
	       (unsigned long long)res->end);
	return 0;
}

static int __init
probe_resource(struct pci_bus *parent, struct resource *pr,
	       struct resource *res, struct resource **conflict)
{
	struct pci_bus *bus;
	struct pci_dev *dev;
	struct resource *r;
	int i;

	for (r = pr->child; r != NULL; r = r->sibling) {
		if (r->end >= res->start && res->end >= r->start) {
			*conflict = r;
			return 1;
		}
	}
	list_for_each_entry(bus, &parent->children, node) {
		for (i = 0; i < 4; ++i) {
			if ((r = bus->resource[i]) == NULL)
				continue;
			if (!r->flags || r->start > r->end || r == res)
				continue;
			if (pci_find_parent_resource(bus->self, r) != pr)
				continue;
			if (r->end >= res->start && res->end >= r->start) {
				*conflict = r;
				return 1;
			}
		}
	}
	list_for_each_entry(dev, &parent->devices, bus_list) {
		for (i = 0; i < 6; ++i) {
			r = &dev->resource[i];
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;
			if (pci_find_parent_resource(dev, r) != pr)
				continue;
			if (r->end >= res->start && res->end >= r->start) {
				*conflict = r;
				return 1;
			}
		}
	}
	return 0;
}

static void __init
update_bridge_base(struct pci_bus *bus, int i)
{
	struct resource *res = bus->resource[i];
	u8 io_base_lo, io_limit_lo;
	u16 mem_base, mem_limit;
	u16 cmd;
	unsigned long start, end, off;
	struct pci_dev *dev = bus->self;
	struct pci_controller *hose = dev->sysdata;

	if (!hose) {
		printk("update_bridge_base: no hose?\n");
		return;
	}
	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	pci_write_config_word(dev, PCI_COMMAND,
			      cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY));
	if (res->flags & IORESOURCE_IO) {
		off = (unsigned long) hose->io_base_virt - isa_io_base;
		start = res->start - off;
		end = res->end - off;
		io_base_lo = (start >> 8) & PCI_IO_RANGE_MASK;
		io_limit_lo = (end >> 8) & PCI_IO_RANGE_MASK;
		if (end > 0xffff)
			io_base_lo |= PCI_IO_RANGE_TYPE_32;
		else
			io_base_lo |= PCI_IO_RANGE_TYPE_16;
		pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
				      start >> 16);
		pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
				      end >> 16);
		pci_write_config_byte(dev, PCI_IO_BASE, io_base_lo);
		pci_write_config_byte(dev, PCI_IO_LIMIT, io_limit_lo);

	} else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
		   == IORESOURCE_MEM) {
		off = hose->pci_mem_offset;
		mem_base = ((res->start - off) >> 16) & PCI_MEMORY_RANGE_MASK;
		mem_limit = ((res->end - off) >> 16) & PCI_MEMORY_RANGE_MASK;
		pci_write_config_word(dev, PCI_MEMORY_BASE, mem_base);
		pci_write_config_word(dev, PCI_MEMORY_LIMIT, mem_limit);

	} else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
		   == (IORESOURCE_MEM | IORESOURCE_PREFETCH)) {
		off = hose->pci_mem_offset;
		mem_base = ((res->start - off) >> 16) & PCI_PREF_RANGE_MASK;
		mem_limit = ((res->end - off) >> 16) & PCI_PREF_RANGE_MASK;
		pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, mem_base);
		pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, mem_limit);

	} else {
		DBG(KERN_ERR "PCI: ugh, bridge %s res %d has flags=%lx\n",
		    pci_name(dev), i, res->flags);
	}
	pci_write_config_word(dev, PCI_COMMAND, cmd);
}

static inline void alloc_resource(struct pci_dev *dev, int idx)
{
	struct resource *pr, *r = &dev->resource[idx];

	DBG("PCI:%s: Resource %d: %016llx-%016llx (f=%lx)\n",
	    pci_name(dev), idx, (u64)r->start, (u64)r->end, r->flags);
	pr = pci_find_parent_resource(dev, r);
	if (!pr || request_resource(pr, r) < 0) {
		printk(KERN_ERR "PCI: Cannot allocate resource region %d"
		       " of device %s\n", idx, pci_name(dev));
		if (pr)
			DBG("PCI: parent is %p: %016llx-%016llx (f=%lx)\n",
			    pr, (u64)pr->start, (u64)pr->end, pr->flags);
		/* We'll assign a new address later */
		r->flags |= IORESOURCE_UNSET;
		r->end -= r->start;
		r->start = 0;
	}
}

static void __init
pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	u16 command;
	struct resource *r;

	for_each_pci_dev(dev) {
		pci_read_config_word(dev, PCI_COMMAND, &command);
		for (idx = 0; idx < 6; idx++) {
			r = &dev->resource[idx];
			if (r->parent)		/* Already allocated */
				continue;
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;	/* Not assigned at all */
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			if (pass == disabled)
				alloc_resource(dev, idx);
		}
		if (pass)
			continue;
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags & IORESOURCE_ROM_ENABLE) {
			/* Turn the ROM off, leave the resource region,
			 * but keep it unregistered.
			 */
			u32 reg;
			DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
			r->flags &= ~IORESOURCE_ROM_ENABLE;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			pci_write_config_dword(dev, dev->rom_base_reg,
					       reg & ~PCI_ROM_ADDRESS_ENABLE);
		}
	}
}

static void __init
pcibios_assign_resources(void)
{
	struct pci_dev *dev = NULL;
	int idx;
	struct resource *r;

	for_each_pci_dev(dev) {
		int class = dev->class >> 8;

		/* Don't touch classless devices and host bridges */
		if (!class || class == PCI_CLASS_BRIDGE_HOST)
			continue;

		for (idx = 0; idx < 6; idx++) {
			r = &dev->resource[idx];

			/*
			 * We shall assign a new address to this resource,
			 * either because the BIOS (sic) forgot to do so
			 * or because we have decided the old address was
			 * unusable for some reason.
			 */
			if ((r->flags & IORESOURCE_UNSET) && r->end &&
			    (!ppc_md.pcibios_enable_device_hook ||
			     !ppc_md.pcibios_enable_device_hook(dev, 1))) {
				r->flags &= ~IORESOURCE_UNSET;
				pci_assign_resource(dev, idx);
			}
		}

#if 0 /* don't assign ROMs */
		r = &dev->resource[PCI_ROM_RESOURCE];
		r->end -= r->start;
		r->start = 0;
		if (r->end)
			pci_assign_resource(dev, PCI_ROM_RESOURCE);
#endif
	}
}


int
pcibios_enable_resources(struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	for (idx = 0; idx < 6; idx++) {
		/* Only set up the requested stuff */
		if (!(mask & (1 << idx)))
			continue;

		r = &dev->resource[idx];
		if (r->flags & IORESOURCE_UNSET) {
			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	if (dev->resource[PCI_ROM_RESOURCE].start)
		cmd |= PCI_COMMAND_MEMORY;
	if (cmd != old_cmd) {
		printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}

static int next_controller_index;

struct pci_controller * __init
pcibios_alloc_controller(void)
{
	struct pci_controller *hose;

	hose = (struct pci_controller *)alloc_bootmem(sizeof(*hose));
	memset(hose, 0, sizeof(struct pci_controller));

	*hose_tail = hose;
	hose_tail = &hose->next;

	hose->index = next_controller_index++;

	return hose;
}

#ifdef CONFIG_PPC_OF
/*
 * Functions below are used on OpenFirmware machines.
 */
static void
make_one_node_map(struct device_node* node, u8 pci_bus)
{
	const int *bus_range;
	int len;

	if (pci_bus >= pci_bus_count)
		return;
	bus_range = of_get_property(node, "bus-range", &len);
	if (bus_range == NULL || len < 2 * sizeof(int)) {
		printk(KERN_WARNING "Can't get bus-range for %s, "
		       "assuming it starts at 0\n", node->full_name);
		pci_to_OF_bus_map[pci_bus] = 0;
	} else
		pci_to_OF_bus_map[pci_bus] = bus_range[0];

	for (node = node->child; node != 0; node = node->sibling) {
		struct pci_dev* dev;
		const unsigned int *class_code, *reg;

		class_code = of_get_property(node, "class-code", NULL);
		if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
			(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
			continue;
		reg = of_get_property(node, "reg", NULL);
		if (!reg)
			continue;
		dev = pci_get_bus_and_slot(pci_bus, ((reg[0] >> 8) & 0xff));
		if (!dev || !dev->subordinate) {
			pci_dev_put(dev);
			continue;
		}
		make_one_node_map(node, dev->subordinate->number);
		pci_dev_put(dev);
	}
}

void
pcibios_make_OF_bus_map(void)
{
	int i;
	struct pci_controller* hose;
	struct property *map_prop;
	struct device_node *dn;

	pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL);
	if (!pci_to_OF_bus_map) {
		printk(KERN_ERR "Can't allocate OF bus map !\n");
		return;
	}

	/* We fill the bus map with invalid values, that helps
	 * debugging.
	 */
	for (i = 0; i < pci_bus_count; i++)
		pci_to_OF_bus_map[i] = 0xff;

	/* For each hose, we begin searching bridges */
	for (hose = hose_head; hose; hose = hose->next) {
		struct device_node* node;
		node = (struct device_node *)hose->arch_data;
		if (!node)
			continue;
		make_one_node_map(node, hose->first_busno);
	}
	dn = of_find_node_by_path("/");
	map_prop = of_find_property(dn, "pci-OF-bus-map", NULL);
	if (map_prop) {
		BUG_ON(pci_bus_count > map_prop->length);
		memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count);
	}
	of_node_put(dn);
#ifdef DEBUG
	printk("PCI->OF bus map:\n");
	for (i = 0; i < pci_bus_count; i++) {
		if (pci_to_OF_bus_map[i] == 0xff)
			continue;
		printk("%d -> %d\n", i, pci_to_OF_bus_map[i]);
	}
#endif
}

typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);

static struct device_node*
scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data)
{
	struct device_node* sub_node;

	for (; node != 0; node = node->sibling) {
		const unsigned int *class_code;

		if (filter(node, data))
			return node;

		/* For PCI<->PCI bridges or CardBus bridges, we go down
		 * Note: some OFs create a parent node "multifunc-device" as
		 * a fake root for all functions of a multi-function device,
		 * we go down them as well.
		 */
		class_code = of_get_property(node, "class-code", NULL);
		if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
			(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
			strcmp(node->name, "multifunc-device"))
			continue;
		sub_node = scan_OF_pci_childs(node->child, filter, data);
		if (sub_node)
			return sub_node;
	}
	return NULL;
}

static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
					       unsigned int devfn)
{
	struct device_node *np = NULL;
	const u32 *reg;
	unsigned int psize;

	while ((np = of_get_next_child(parent, np)) != NULL) {
		reg = of_get_property(np, "reg", &psize);
		if (reg == NULL || psize < 4)
			continue;
		if (((reg[0] >> 8) & 0xff) == devfn)
			return np;
	}
	return NULL;
}


static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
{
	struct device_node *parent, *np;

	/* Are we a root bus ? */
	if (bus->self == NULL || bus->parent == NULL) {
		struct pci_controller *hose = pci_bus_to_hose(bus->number);
		if (hose == NULL)
			return NULL;
		return of_node_get(hose->arch_data);
	}

	/* not a root bus, we need to get our parent */
	parent = scan_OF_for_pci_bus(bus->parent);
	if (parent == NULL)
		return NULL;

	/* now iterate for children for a match */
	np = scan_OF_for_pci_dev(parent, bus->self->devfn);
	of_node_put(parent);

	return np;
}

/*
 * Scans the OF tree for a device node matching a PCI device
 */
struct device_node *
pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
{
	struct device_node *parent, *np;

	if (!have_of)
		return NULL;

	DBG("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
	parent = scan_OF_for_pci_bus(bus);
	if (parent == NULL)
		return NULL;
	DBG(" parent is %s\n", parent ? parent->full_name : "<NULL>");
	np = scan_OF_for_pci_dev(parent, devfn);
	of_node_put(parent);
	DBG(" result is %s\n", np ? np->full_name : "<NULL>");

	/* XXX most callers don't release the returned node
	 * mostly because ppc64 doesn't increase the refcount,
	 * we need to fix that.
	 */
	return np;
}
EXPORT_SYMBOL(pci_busdev_to_OF_node);

struct device_node*
pci_device_to_OF_node(struct pci_dev *dev)
{
	return pci_busdev_to_OF_node(dev->bus, dev->devfn);
}
EXPORT_SYMBOL(pci_device_to_OF_node);

/* This routine is meant to be used early during boot, when the
 * PCI bus numbers have not yet been assigned, and you need to
 * issue PCI config cycles to an OF device.
 * It could also be used to "fix" RTAS config cycles if you want
 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
 * config cycles.
 */
struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
{
	if (!have_of)
		return NULL;
	while (node) {
		struct pci_controller* hose;
		for (hose = hose_head; hose; hose = hose->next)
			if (hose->arch_data == node)
				return hose;
		node = node->parent;
	}
	return NULL;
}

static int
find_OF_pci_device_filter(struct device_node* node, void* data)
{
	return ((void *)node == data);
}

/*
 * Returns the PCI device matching a given OF node
 */
int
pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
{
	const unsigned int *reg;
	struct pci_controller* hose;
	struct pci_dev* dev = NULL;

	if (!have_of)
		return -ENODEV;
	/* Make sure it's really a PCI device */
	hose = pci_find_hose_for_OF_device(node);
	if (!hose || !hose->arch_data)
		return -ENODEV;
	if (!scan_OF_pci_childs(((struct device_node*)hose->arch_data)->child,
			find_OF_pci_device_filter, (void *)node))
		return -ENODEV;
	reg = of_get_property(node, "reg", NULL);
	if (!reg)
		return -ENODEV;
	*bus = (reg[0] >> 16) & 0xff;
	*devfn = ((reg[0] >> 8) & 0xff);

	/* Ok, here we need some tweak. If we have already renumbered
	 * all busses, we can't rely on the OF bus number any more.
	 * the pci_to_OF_bus_map is not enough as several PCI busses
	 * may match the same OF bus number.
	 */
	if (!pci_to_OF_bus_map)
		return 0;

	for_each_pci_dev(dev)
		if (pci_to_OF_bus_map[dev->bus->number] == *bus &&
		    dev->devfn == *devfn) {
			*bus = dev->bus->number;
			pci_dev_put(dev);
			return 0;
		}

	return -ENODEV;
}
EXPORT_SYMBOL(pci_device_from_OF_node);

void __init
pci_process_bridge_OF_ranges(struct pci_controller *hose,
			     struct device_node *dev, int primary)
{
	static unsigned int static_lc_ranges[256] __initdata;
	const unsigned int *dt_ranges;
	unsigned int *lc_ranges, *ranges, *prev, size;
	int rlen = 0, orig_rlen;
	int memno = 0;
	struct resource *res;
	int np, na = of_n_addr_cells(dev);
	np = na + 5;

	/* First we try to merge ranges to fix a problem with some pmacs
	 * that can have more than 3 ranges, fortunately using contiguous
	 * addresses -- BenH
	 */
	dt_ranges = of_get_property(dev, "ranges", &rlen);
	if (!dt_ranges)
		return;
	/* Sanity check, though hopefully that never happens */
	if (rlen > sizeof(static_lc_ranges)) {
		printk(KERN_WARNING "OF ranges property too large !\n");
		rlen = sizeof(static_lc_ranges);
	}
	lc_ranges = static_lc_ranges;
	memcpy(lc_ranges, dt_ranges, rlen);
	orig_rlen = rlen;

	/* Let's work on a copy of the "ranges" property instead of damaging
	 * the device-tree image in memory
	 */
	ranges = lc_ranges;
	prev = NULL;
	while ((rlen -= np * sizeof(unsigned int)) >= 0) {
		if (prev) {
			if (prev[0] == ranges[0] && prev[1] == ranges[1] &&
				(prev[2] + prev[na+4]) == ranges[2] &&
				(prev[na+2] + prev[na+4]) == ranges[na+2]) {
				prev[na+4] += ranges[na+4];
				ranges[0] = 0;
				ranges += np;
				continue;
			}
		}
		prev = ranges;
		ranges += np;
	}

	/*
	 * The ranges property is laid out as an array of elements,
	 * each of which comprises:
	 *   cells 0 - 2:	a PCI address
	 *   cells 3 or 3+4:	a CPU physical address
	 *			(size depending on dev->n_addr_cells)
	 *   cells 4+5 or 5+6:	the size of the range
	 */
	ranges = lc_ranges;
	rlen = orig_rlen;
	while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
		res = NULL;
		size = ranges[na+4];
		switch ((ranges[0] >> 24) & 0x3) {
		case 1:		/* I/O space */
			if (ranges[2] != 0)
				break;
			hose->io_base_phys = ranges[na+2];
			/* limit I/O space to 16MB */
			if (size > 0x01000000)
				size = 0x01000000;
			hose->io_base_virt = ioremap(ranges[na+2], size);
			if (primary)
				isa_io_base = (unsigned long) hose->io_base_virt;
			res = &hose->io_resource;
			res->flags = IORESOURCE_IO;
			res->start = ranges[2];
			DBG("PCI: IO 0x%llx -> 0x%llx\n",
			    (u64)res->start, (u64)res->start + size - 1);
			break;
		case 2:		/* memory space */
			memno = 0;
			if (ranges[1] == 0 && ranges[2] == 0
			    && ranges[na+4] <= (16 << 20)) {
				/* 1st 16MB, i.e. ISA memory area */
				if (primary)
					isa_mem_base = ranges[na+2];
				memno = 1;
			}
			while (memno < 3 && hose->mem_resources[memno].flags)
				++memno;
			if (memno == 0)
				hose->pci_mem_offset = ranges[na+2] - ranges[2];
			if (memno < 3) {
				res = &hose->mem_resources[memno];
				res->flags = IORESOURCE_MEM;
				if (ranges[0] & 0x40000000)
					res->flags |= IORESOURCE_PREFETCH;
				res->start = ranges[na+2];
				DBG("PCI: MEM[%d] 0x%llx -> 0x%llx\n", memno,
				    (u64)res->start, (u64)res->start + size - 1);
			}
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->end = res->start + size - 1;
			res->parent = NULL;
			res->sibling = NULL;
			res->child = NULL;
		}
		ranges += np;
	}
}

/* We create the "pci-OF-bus-map" property now so it appears in the
 * /proc device tree
 */
void __init
pci_create_OF_bus_map(void)
{
	struct property* of_prop;
	struct device_node *dn;

	of_prop = (struct property*) alloc_bootmem(sizeof(struct property) + 256);
	if (!of_prop)
		return;
	dn = of_find_node_by_path("/");
	if (dn) {
		memset(of_prop, -1, sizeof(struct property) + 256);
		of_prop->name = "pci-OF-bus-map";
		of_prop->length = 256;
		of_prop->value = &of_prop[1];
		prom_add_property(dn, of_prop);
		of_node_put(dn);
	}
}

static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev;
	struct device_node *np;

	pdev = to_pci_dev(dev);
	np = pci_device_to_OF_node(pdev);
	if (np == NULL || np->full_name == NULL)
		return 0;
	return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);

#else /* CONFIG_PPC_OF */
void pcibios_make_OF_bus_map(void)
{
}
#endif /* CONFIG_PPC_OF */

/* Add sysfs properties */
void pcibios_add_platform_entries(struct pci_dev *pdev)
{
#ifdef CONFIG_PPC_OF
	device_create_file(&pdev->dev, &dev_attr_devspec);
#endif /* CONFIG_PPC_OF */
}


#ifdef CONFIG_PPC_PMAC
/*
 * This set of routines checks for PCI<->PCI bridges that have closed
 * IO resources and have child devices. It tries to re-open an IO
 * window on them.
 *
 * This is a _temporary_ fix to workaround a problem with Apple's OF
 * closing IO windows on P2P bridges when the OF drivers of cards
 * below this bridge don't claim any IO range (typically ATI or
 * Adaptec).
 *
 * A more complete fix would be to use drivers/pci/setup-bus.c, which
 * involves a working pcibios_fixup_pbus_ranges(), some more care about
 * ordering when creating the host bus resources, and maybe a few more
 * minor tweaks
 */

/* Initialize bridges with base/limit values we have collected */
static void __init
do_update_p2p_io_resource(struct pci_bus *bus, int enable_vga)
{
	struct pci_dev *bridge = bus->self;
	struct pci_controller* hose = (struct pci_controller *)bridge->sysdata;
	u32 l;
	u16 w;
	struct resource res;

	if (bus->resource[0] == NULL)
		return;
	res = *(bus->resource[0]);

	DBG("Remapping Bus %d, bridge: %s\n", bus->number, pci_name(bridge));
	res.start -= ((unsigned long) hose->io_base_virt - isa_io_base);
	res.end -= ((unsigned long) hose->io_base_virt - isa_io_base);
	DBG(" IO window: %016llx-%016llx\n", res.start, res.end);

	/* Set up the top and bottom of the PCI I/O segment for this bus. */
	pci_read_config_dword(bridge, PCI_IO_BASE, &l);
	l &= 0xffff000f;
	l |= (res.start >> 8) & 0x00f0;
	l |= res.end & 0xf000;
	pci_write_config_dword(bridge, PCI_IO_BASE, l);

	if ((l & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		l = (res.start >> 16) | (res.end & 0xffff0000);
		pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, l);
	}

	pci_read_config_word(bridge, PCI_COMMAND, &w);
	w |= PCI_COMMAND_IO;
	pci_write_config_word(bridge, PCI_COMMAND, w);

#if 0 /* Enabling this causes XFree 4.2.0 to hang during PCI probe */
	if (enable_vga) {
		pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &w);
		w |= PCI_BRIDGE_CTL_VGA;
		pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, w);
	}
#endif
}

/* This function is pretty basic and actually quite broken for the
 * general case, it's enough for us right now though. It's supposed
 * to tell us if we need to open an IO range at all or not and what
 * size.
 */
static int __init
check_for_io_childs(struct pci_bus *bus, struct resource* res, int *found_vga)
{
	struct pci_dev *dev;
	int	i;
	int	rc = 0;

#define push_end(res, mask) do {		\
	BUG_ON((mask+1) & mask);		\
	res->end = (res->end + mask) | mask;	\
} while (0)

	list_for_each_entry(dev, &bus->devices, bus_list) {
		u16 class = dev->class >> 8;

		if (class == PCI_CLASS_DISPLAY_VGA ||
		    class == PCI_CLASS_NOT_DEFINED_VGA)
			*found_vga = 1;
		if (class >> 8 == PCI_BASE_CLASS_BRIDGE && dev->subordinate)
			rc |= check_for_io_childs(dev->subordinate, res, found_vga);
		if (class == PCI_CLASS_BRIDGE_CARDBUS)
			push_end(res, 0xfff);

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r;
			unsigned long r_size;

			if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI
			    && i >= PCI_BRIDGE_RESOURCES)
				continue;
			r = &dev->resource[i];
			r_size = r->end - r->start;
			if (r_size < 0xfff)
				r_size = 0xfff;
			if (r->flags & IORESOURCE_IO && (r_size) != 0) {
				rc = 1;
				push_end(res, r_size);
			}
		}
	}

	return rc;
}

/* Here we scan all P2P bridges of a given level that have a closed
 * IO window. Note that the test for the presence of a VGA card should
 * be improved to take into account already configured P2P bridges,
 * currently, we don't see them and might end up configuring 2 bridges
 * with VGA pass through enabled
 */
static void __init
do_fixup_p2p_level(struct pci_bus *bus)
{
	struct pci_bus *b;
	int i, parent_io;
	int has_vga = 0;

	for (parent_io = 0; parent_io < 4; parent_io++)
		if (bus->resource[parent_io]
		    && bus->resource[parent_io]->flags & IORESOURCE_IO)
			break;
	if (parent_io >= 4)
		return;

	list_for_each_entry(b, &bus->children, node) {
		struct pci_dev *d = b->self;
		struct pci_controller* hose = (struct pci_controller *)d->sysdata;
		struct resource *res = b->resource[0];
		struct resource tmp_res;
		unsigned long max;
		int found_vga = 0;

		memset(&tmp_res, 0, sizeof(tmp_res));
		tmp_res.start = bus->resource[parent_io]->start;

		/* We don't let low addresses go through that closed P2P bridge, well,
		 * that may not be necessary but I feel safer that way
		 */
		if (tmp_res.start == 0)
			tmp_res.start = 0x1000;

		if (!list_empty(&b->devices) && res && res->flags == 0 &&
		    res != bus->resource[parent_io] &&
		    (d->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
		    check_for_io_childs(b, &tmp_res, &found_vga)) {
			u8 io_base_lo;

			printk(KERN_INFO "Fixing up IO bus %s\n", b->name);

			if (found_vga) {
				if (has_vga) {
					printk(KERN_WARNING "Skipping VGA, already active"
					       " on bus segment\n");
					found_vga = 0;
				} else
					has_vga = 1;
			}
			pci_read_config_byte(d, PCI_IO_BASE, &io_base_lo);

			if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32)
				max = ((unsigned long) hose->io_base_virt
					- isa_io_base) + 0xffffffff;
			else
				max = ((unsigned long) hose->io_base_virt
					- isa_io_base) + 0xffff;

			*res = tmp_res;
			res->flags = IORESOURCE_IO;
			res->name = b->name;

			/* Find a resource in the parent where we can allocate */
			for (i = 0; i < 4; i++) {
				struct resource *r = bus->resource[i];
				if (!r)
					continue;
				if ((r->flags & IORESOURCE_IO) == 0)
					continue;
				DBG("Trying to allocate from %016llx, size %016llx from parent"
				    " res %d: %016llx -> %016llx\n",
					res->start, res->end, i, r->start, r->end);

				if (allocate_resource(r, res, res->end + 1, res->start, max,
				    res->end + 1, NULL, NULL) < 0) {
					DBG("Failed !\n");
					continue;
				}
				do_update_p2p_io_resource(b, found_vga);
				break;
			}
		}
		do_fixup_p2p_level(b);
	}
}

static void
pcibios_fixup_p2p_bridges(void)
{
	struct pci_bus *b;

	list_for_each_entry(b, &pci_root_buses, node)
		do_fixup_p2p_level(b);
}

#endif /* CONFIG_PPC_PMAC */

static int __init
pcibios_init(void)
{
	struct pci_controller *hose;
	struct pci_bus *bus;
	int next_busno;

	printk(KERN_INFO "PCI: Probing PCI hardware\n");

	/* Scan all of the recorded PCI controllers. */
	for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
		if (pci_assign_all_buses)
			hose->first_busno = next_busno;
		hose->last_busno = 0xff;
		bus = pci_scan_bus_parented(hose->parent, hose->first_busno,
					    hose->ops, hose);
		if (bus)
			pci_bus_add_devices(bus);
		hose->last_busno = bus->subordinate;
		if (pci_assign_all_buses || next_busno <= hose->last_busno)
			next_busno = hose->last_busno + pcibios_assign_bus_offset;
	}
	pci_bus_count = next_busno;

	/* OpenFirmware based machines need a map of OF bus
	 * numbers vs. kernel bus numbers since we may have to
	 * remap them.
	 */
	if (pci_assign_all_buses && have_of)
		pcibios_make_OF_bus_map();

	/* Call machine dependent fixup */
	if (ppc_md.pcibios_fixup)
		ppc_md.pcibios_fixup();

	/* Allocate and assign resources */
	pcibios_allocate_bus_resources(&pci_root_buses);
	pcibios_allocate_resources(0);
	pcibios_allocate_resources(1);
#ifdef CONFIG_PPC_PMAC
	pcibios_fixup_p2p_bridges();
#endif /* CONFIG_PPC_PMAC */
	pcibios_assign_resources();

	/* Call machine dependent post-init code */
	if (ppc_md.pcibios_after_init)
		ppc_md.pcibios_after_init();

	return 0;
}

subsys_initcall(pcibios_init);

unsigned long resource_fixup(struct pci_dev *dev, struct resource *res,
			     unsigned long start, unsigned long size)
{
	return start;
}

void __init pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
	unsigned long io_offset;
	struct resource *res;
	struct pci_dev *dev;
	int i;

	io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
	if (bus->parent == NULL) {
		/* This is a host bridge - fill in its resources */
		hose->bus = bus;

		bus->resource[0] = res = &hose->io_resource;
		if (!res->flags) {
			if (io_offset)
				printk(KERN_ERR "I/O resource not set for host"
				       " bridge %d\n", hose->index);
			res->start = 0;
			res->end = IO_SPACE_LIMIT;
			res->flags = IORESOURCE_IO;
		}
		res->start += io_offset;
		res->end += io_offset;

		for (i = 0; i < 3; ++i) {
			res = &hose->mem_resources[i];
			if (!res->flags) {
				if (i > 0)
					continue;
				printk(KERN_ERR "Memory resource not set for "
				       "host bridge %d\n", hose->index);
				res->start = hose->pci_mem_offset;
				res->end = ~0U;
				res->flags = IORESOURCE_MEM;
			}
			bus->resource[i+1] = res;
		}
	} else {
		/* This is a subordinate bridge */
		pci_read_bridge_bases(bus);

		for (i = 0; i < 4; ++i) {
			if ((res = bus->resource[i]) == NULL)
				continue;
			if (!res->flags)
				continue;
			if (io_offset && (res->flags & IORESOURCE_IO)) {
				res->start += io_offset;
				res->end += io_offset;
			} else if (hose->pci_mem_offset
				   && (res->flags & IORESOURCE_MEM)) {
				res->start += hose->pci_mem_offset;
				res->end += hose->pci_mem_offset;
			}
		}
	}

	/* Platform specific bus fixups */
	if (ppc_md.pcibios_fixup_bus)
		ppc_md.pcibios_fixup_bus(bus);

	/* Read default IRQs and fixup if necessary */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		pci_read_irq_line(dev);
		if (ppc_md.pci_irq_fixup)
			ppc_md.pci_irq_fixup(dev);
	}
}

char __init *pcibios_setup(char *str)
{
	return str;
}

/* the next one is stolen from the alpha port... */
void __init
pcibios_update_irq(struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
	/* XXX FIXME - update OF device tree node interrupt property */
}

#ifdef CONFIG_PPC_MERGE
/* XXX This is a copy of the ppc64 version. This is temporary until we start
 * merging the 2 PCI layers
 */
/*
 * Reads the interrupt pin to determine if the interrupt is used by the card.
 * If the interrupt is used, then gets the interrupt line from the
 * openfirmware and sets it in the pci_dev and pci_config line.
 */
int pci_read_irq_line(struct pci_dev *pci_dev)
{
	struct of_irq oirq;
	unsigned int virq;

	DBG("Try to map irq for %s...\n", pci_name(pci_dev));

	/* Try to get a mapping from the device-tree */
	if (of_irq_map_pci(pci_dev, &oirq)) {
		u8 line, pin;

		/* If that fails, let's fall back to what is in the config
		 * space and map that through the default controller. We
		 * also set the type to level low since that's what PCI
		 * interrupts are. If your platform does differently, then
		 * either provide a proper interrupt tree or don't use this
		 * function.
		 */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			return -1;
		if (pin == 0)
			return -1;
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff) {
			return -1;
		}
		DBG(" -> no map ! Using irq line %d from PCI config\n", line);

		virq = irq_create_mapping(NULL, line);
		if (virq != NO_IRQ)
			set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	} else {
		DBG(" -> got one, spec %d cells (0x%08x...) on %s\n",
		    oirq.size, oirq.specifier[0], oirq.controller->full_name);

		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
					     oirq.size);
	}
	if (virq == NO_IRQ) {
		DBG(" -> failed to map !\n");
		return -1;
	}
	pci_dev->irq = virq;

	return 0;
}
EXPORT_SYMBOL(pci_read_irq_line);
#endif /* CONFIG_PPC_MERGE */

int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;

	if (ppc_md.pcibios_enable_device_hook)
		if (ppc_md.pcibios_enable_device_hook(dev, 0))
			return -EINVAL;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	for (idx = 0; idx < 6; idx++) {
		r = &dev->resource[idx];
		if (r->flags & IORESOURCE_UNSET) {
			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	if (cmd != old_cmd) {
		printk("PCI: Enabling device %s (%04x -> %04x)\n",
		       pci_name(dev), old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}

struct pci_controller*
pci_bus_to_hose(int bus)
{
	struct pci_controller* hose = hose_head;

	for (; hose; hose = hose->next)
		if (bus >= hose->first_busno && bus <= hose->last_busno)
			return hose;
	return NULL;
}

void __iomem *
pci_bus_io_base(unsigned int bus)
{
	struct pci_controller *hose;

	hose = pci_bus_to_hose(bus);
	if (!hose)
		return NULL;
	return hose->io_base_virt;
}

unsigned long
pci_bus_io_base_phys(unsigned int bus)
{
	struct pci_controller *hose;

	hose = pci_bus_to_hose(bus);
	if (!hose)
		return 0;
	return hose->io_base_phys;
}

unsigned long
pci_bus_mem_base_phys(unsigned int bus)
{
	struct pci_controller *hose;

	hose = pci_bus_to_hose(bus);
	if (!hose)
		return 0;
	return hose->pci_mem_offset;
}

unsigned long
pci_resource_to_bus(struct pci_dev *pdev, struct resource *res)
{
	/* Hack alert again ! See comments in chrp_pci.c
	 */
	struct pci_controller* hose =
		(struct pci_controller *)pdev->sysdata;
	if (hose && res->flags & IORESOURCE_MEM)
		return res->start - hose->pci_mem_offset;
	/* We may want to do something with IOs here... */
	return res->start;
}


static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       resource_size_t *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (hose == 0)
		return NULL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
		*offset += hose->pci_mem_offset;
#endif
		res_bit = IORESOURCE_MEM;
	} else {
		io_offset = hose->io_base_virt - (void __iomem *)_IO_BASE;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}

/*
 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
				      pgprot_t protection,
				      enum pci_mmap_state mmap_state,
				      int write_combine)
{
	unsigned long prot = pgprot_val(protection);

	/* Write combine is always 0 on non-memory space mappings. On
	 * memory space, if the user didn't pass 1, we check for a
	 * "prefetchable" resource. This is a bit hackish, but we use
	 * this to workaround the inability of /sysfs to provide a write
	 * combine bit
	 */
	if (mmap_state != pci_mmap_mem)
		write_combine = 0;
	else if (write_combine == 0) {
		if (rp->flags & IORESOURCE_PREFETCH)
			write_combine = 1;
	}

	/* XXX would be nice to have a way to ask for write-through */
	prot |= _PAGE_NO_CACHE;
	if (write_combine)
		prot &= ~_PAGE_GUARDED;
	else
		prot |= _PAGE_GUARDED;

	return __pgprot(prot);
}

/*
 * This one is used by /dev/mem and fbdev who have no clue about the
 * PCI device, it tries to find the PCI device first and calls the
 * above routine
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t protection)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	unsigned long prot = pgprot_val(protection);
	unsigned long offset = pfn << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return __pgprot(prot);

	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;

	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot &= ~_PAGE_GUARDED;
		pci_dev_put(pdev);
	}

	DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);

	return __pgprot(prot);
}


/*
 * Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture.  The region in the process to map
 * is described by vm_start and vm_end members of VMA, the base physical
 * address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state,
			int write_combine)
{
	resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
						  vma->vm_page_prot,
						  mmap_state, write_combine);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}

/* Obsolete functions. Should be removed once the symbios driver
 * is fixed
 */
unsigned long
phys_to_bus(unsigned long pa)
{
	struct pci_controller *hose;
	int i;

	for (hose = hose_head; hose; hose = hose->next) {
		for (i = 0; i < 3; ++i) {
			if (pa >= hose->mem_resources[i].start
			    && pa <= hose->mem_resources[i].end) {
				/*
				 * XXX the hose->pci_mem_offset really
				 * only applies to mem_resources[0].
				 * We need a way to store an offset for
				 * the others.  -- paulus
				 */
				if (i == 0)
					pa -= hose->pci_mem_offset;
				return pa;
			}
		}
	}
	/* hmmm, didn't find it */
	return 0;
}

unsigned long
pci_phys_to_bus(unsigned long pa, int busnr)
{
	struct pci_controller* hose = pci_bus_to_hose(busnr);
	if (!hose)
		return pa;
	return pa - hose->pci_mem_offset;
}

unsigned long
pci_bus_to_phys(unsigned int ba, int busnr)
{
	struct pci_controller* hose = pci_bus_to_hose(busnr);
	if (!hose)
		return ba;
	return ba + hose->pci_mem_offset;
}

/* Provide information on locations of various I/O regions in physical
 * memory.  Do this on a per-card basis so that we choose the right
 * root bridge.
 * Note that the returned IO or memory base is a physical address
 */

long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
{
	struct pci_controller* hose;
	long result = -EOPNOTSUPP;

	/* Argh ! Please forgive me for that hack, but that's the
	 * simplest way to get existing XFree to not lockup on some
	 * G5 machines... So when something asks for bus 0 io base
	 * (bus 0 is HT root), we return the AGP one instead.
	 */
#ifdef CONFIG_PPC_PMAC
	if (machine_is(powermac) && machine_is_compatible("MacRISC4"))
		if (bus == 0)
			bus = 0xf0;
#endif /* CONFIG_PPC_PMAC */

	hose = pci_bus_to_hose(bus);
	if (!hose)
		return -ENODEV;

	switch (which) {
	case IOBASE_BRIDGE_NUMBER:
		return (long)hose->first_busno;
	case IOBASE_MEMORY:
		return (long)hose->pci_mem_offset;
	case IOBASE_IO:
		return (long)hose->io_base_phys;
	case IOBASE_ISA_IO:
		return (long)isa_io_base;
	case IOBASE_ISA_MEM:
		return (long)isa_mem_base;
	}

	return result;
}

void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
	resource_size_t offset = 0;

	if (hose == NULL)
		return;

	if (rsrc->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	/* We pass a fully fixed up address to userland for MMIO instead of
	 * a BAR value because X is lame and expects to be able to use that
	 * to pass to /dev/mem !
	 *
	 * That means that we'll have potentially 64 bits values where some
	 * userland apps only expect 32 (like X itself since it thinks only
	 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
	 * 32 bits CHRPs :-(
	 *
	 * Hopefully, the sysfs interface is immune to that gunk. Once X
	 * has been fixed (and the fix spread enough), we can re-enable the
	 * 2 lines below and pass down a BAR value to userland. In that case
	 * we'll also have to re-enable the matching code in
	 * __pci_mmap_make_offset().
	 *
	 * BenH.
	 */
#if 0
	else if (rsrc->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;
#endif

	*start = rsrc->start - offset;
	*end = rsrc->end - offset;
}

void __init pci_init_resource(struct resource *res, resource_size_t start,
			      resource_size_t end, int flags, char *name)
{
	res->start = start;
	res->end = end;
	res->flags = flags;
	res->name = name;
	res->parent = NULL;
	res->sibling = NULL;
	res->child = NULL;
}

unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller* hose = hose_head;

	for (; hose; hose = hose->next) {
		unsigned int size = hose->io_resource.end -
			hose->io_resource.start + 1;
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			return base + (address - hose->io_base_phys);
		}
	}
	return (unsigned int)-1;
}
EXPORT_SYMBOL(pci_address_to_pio);

/*
 * Null PCI config access functions, for the case when we can't
 * find a hose.
 */
#define NULL_PCI_OP(rw, size, type)					\
static int								\
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val)	\
{									\
	return PCIBIOS_DEVICE_NOT_FOUND;				\
}

static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		  int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static struct pci_ops null_pci_ops =
{
	null_read_config,
	null_write_config
};

/*
 * These functions are used early on before PCI scanning is done
 * and all of the pci_dev and pci_bus structures have been created.
 */
static struct pci_bus *
fake_pci_bus(struct pci_controller *hose, int busnr)
{
	static struct pci_bus bus;

	if (hose == 0) {
		hose = pci_bus_to_hose(busnr);
		if (hose == 0)
			printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
	}
	bus.number = busnr;
	bus.sysdata = hose;
	bus.ops = hose ? hose->ops : &null_pci_ops;
	return &bus;
}

#define EARLY_PCI_OP(rw, size, type)					\
int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
			       int devfn, int offset, type value)	\
{									\
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
					    devfn, offset, value);	\
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)