/* pci.c: UltraSparc PCI controller support.
 *
 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
 * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
 *
 * OF tree based PCI bus probing taken from the PowerPC port
 * with minor modifications, see there for credits.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/apb.h>

#include "pci_impl.h"
#include "kernel.h"

/* List of all PCI controllers found in the system. */
struct pci_pbm_info *pci_pbm_root = NULL;

/* Each PBM found gets a unique index. */
int pci_num_pbms = 0;

volatile int pci_poke_in_progress;
volatile int pci_poke_cpu = -1;
volatile int pci_poke_faulted;

static DEFINE_SPINLOCK(pci_poke_lock);

/* Raw PCI config space accessors.  Each access goes through a physical
 * (ASI_PHYS_BYPASS_EC_E_L) address while the pci_poke_* flags are set,
 * so that a faulting access can be recognized elsewhere and the result
 * of a failed read discarded (see pci_poke_faulted).
 */
void pci_config_read8(u8 *addr, u8 *ret)
{
	unsigned long flags;
	u8 byte;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduba [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (byte)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	if (!pci_poke_faulted)
		*ret = byte;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}

void pci_config_read16(u16 *addr, u16 *ret)
{
	unsigned long flags;
	u16 word;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduha [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (word)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	if (!pci_poke_faulted)
		*ret = word;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}

void pci_config_read32(u32 *addr, u32 *ret)
{
	unsigned long flags;
	u32 dword;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduwa [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (dword)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	if (!pci_poke_faulted)
		*ret = dword;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}

void pci_config_write8(u8 *addr, u8 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "stba %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}

void pci_config_write16(u16 *addr, u16 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "stha %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}

void pci_config_write32(u32 *addr, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "stwa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}

static int ofpci_verbose;

static int __init ofpci_debug(char *str)
{
	int val = 0;

	get_option(&str, &val);
	if (val)
		ofpci_verbose = 1;
	return 1;
}

__setup("ofpci_debug=", ofpci_debug);

static unsigned long pci_parse_of_flags(u32 addr0)
{
	unsigned long flags = 0;

	if (addr0 & 0x02000000) {
		flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
		flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
		flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
		if (addr0 & 0x40000000)
			flags |= IORESOURCE_PREFETCH
				 | PCI_BASE_ADDRESS_MEM_PREFETCH;
	} else if (addr0 & 0x01000000)
		flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
	return flags;
}

/* The of_device layer has translated all of the assigned-address properties
 * into physical address resources, we only have to figure out the register
 * mapping.
 */
static void pci_parse_of_addrs(struct platform_device *op,
			       struct device_node *node,
			       struct pci_dev *dev)
{
	struct resource *op_res;
	const u32 *addrs;
	int proplen;

	addrs = of_get_property(node, "assigned-addresses", &proplen);
	if (!addrs)
		return;
	if (ofpci_verbose)
		printk(" parse addresses (%d bytes) @ %p\n",
		       proplen, addrs);
	op_res = &op->resource[0];
	for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
		struct resource *res;
		unsigned long flags;
		int i;

		flags = pci_parse_of_flags(addrs[0]);
		if (!flags)
			continue;
		i = addrs[0] & 0xff;
		if (ofpci_verbose)
			printk(" start: %llx, end: %llx, i: %x\n",
			       op_res->start, op_res->end, i);

		if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
		} else if (i == dev->rom_base_reg) {
			res = &dev->resource[PCI_ROM_RESOURCE];
			flags |= IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
		} else {
			printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
			continue;
		}
		res->start = op_res->start;
		res->end = op_res->end;
		res->flags = flags;
		res->name = pci_name(dev);
	}
}

static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
					 struct device_node *node,
					 struct pci_bus *bus, int devfn)
{
	struct dev_archdata *sd;
	struct platform_device *op;
	struct pci_dev *dev;
	const char *type;
	u32 class;

	dev = pci_alloc_dev(bus);
	if (!dev)
		return NULL;

	sd = &dev->dev.archdata;
	sd->iommu = pbm->iommu;
	sd->stc = &pbm->stc;
	sd->host_controller = pbm;
	sd->op = op = of_find_device_by_node(node);
	sd->numa_node = pbm->numa_node;

	sd = &op->dev.archdata;
	sd->iommu = pbm->iommu;
	sd->stc = &pbm->stc;
	sd->numa_node = pbm->numa_node;

	if (!strcmp(node->name, "ebus"))
		of_propagate_archdata(op);

	type = of_get_property(node, "device_type", NULL);
	if (type == NULL)
		type = "";

	if (ofpci_verbose)
		printk(" create device, devfn: %x, type: %s\n",
		       devfn, type);

	dev->sysdata = node;
	dev->dev.parent = bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->dev.of_node = of_node_get(node);
	dev->devfn = devfn;
	dev->multifunction = 0;	/* maybe a lie? */
	set_pcie_port_type(dev);

	pci_dev_assign_slot(dev);
	dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
	dev->device = of_getintprop_default(node, "device-id", 0xffff);
	dev->subsystem_vendor =
		of_getintprop_default(node, "subsystem-vendor-id", 0);
	dev->subsystem_device =
		of_getintprop_default(node, "subsystem-id", 0);

	dev->cfg_size = pci_cfg_space_size(dev);

	/* We can't actually use the firmware value, we have
	 * to read what is in the register right now. One
	 * reason is that in the case of IDE interfaces the
	 * firmware can sample the value before the IDE
	 * interface is programmed into native mode.
	 */
	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->class = class >> 8;
	dev->revision = class & 0xff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus),
		     dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));

	if (ofpci_verbose)
		printk(" class: 0x%x device name: %s\n",
		       dev->class, pci_name(dev));

	/* I have seen IDE devices which will not respond to
	 * the bmdma simplex check reads if bus mastering is
	 * disabled.
	 */
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)
		pci_set_master(dev);

	dev->current_state = PCI_UNKNOWN;	/* unknown power state */
	dev->error_state = pci_channel_io_normal;
	dev->dma_mask = 0xffffffff;

	if (!strcmp(node->name, "pci")) {
		/* a PCI-PCI bridge */
		dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
		dev->rom_base_reg = PCI_ROM_ADDRESS1;
	} else if (!strcmp(type, "cardbus")) {
		dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
	} else {
		dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
		dev->rom_base_reg = PCI_ROM_ADDRESS;

		dev->irq = sd->op->archdata.irqs[0];
		if (dev->irq == 0xffffffff)
			dev->irq = PCI_IRQ_NONE;
	}

	pci_parse_of_addrs(sd->op, node, dev);

	if (ofpci_verbose)
		printk(" adding to system ...\n");

	pci_device_add(dev, bus);

	return dev;
}

static void apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p)
{
	u32 idx, first, last;

	first = 8;
	last = 0;
	for (idx = 0; idx < 8; idx++) {
		if ((map & (1 << idx)) != 0) {
			if (first > idx)
				first = idx;
			if (last < idx)
				last = idx;
		}
	}

	*first_p = first;
	*last_p = last;
}

/* Cook up fake bus resources for SUNW,simba PCI bridges which lack
 * a proper 'ranges' property.
 */
static void apb_fake_ranges(struct pci_dev *dev,
			    struct pci_bus *bus,
			    struct pci_pbm_info *pbm)
{
	struct pci_bus_region region;
	struct resource *res;
	u32 first, last;
	u8 map;

	pci_read_config_byte(dev, APB_IO_ADDRESS_MAP, &map);
	apb_calc_first_last(map, &first, &last);
	res = bus->resource[0];
	res->flags = IORESOURCE_IO;
	region.start = (first << 21);
	region.end = (last << 21) + ((1 << 21) - 1);
	pcibios_bus_to_resource(dev->bus, res, &region);

	pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map);
	apb_calc_first_last(map, &first, &last);
	res = bus->resource[1];
	res->flags = IORESOURCE_MEM;
	region.start = (first << 29);
	region.end = (last << 29) + ((1 << 29) - 1);
	pcibios_bus_to_resource(dev->bus, res, &region);
}

static void pci_of_scan_bus(struct pci_pbm_info *pbm,
			    struct device_node *node,
			    struct pci_bus *bus);

#define GET_64BIT(prop, i)	((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])

static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
			       struct device_node *node,
			       struct pci_dev *dev)
{
	struct pci_bus *bus;
	const u32 *busrange, *ranges;
	int len, i, simba;
	struct pci_bus_region region;
	struct resource *res;
	unsigned int flags;
	u64 size;

	if (ofpci_verbose)
		printk("of_scan_pci_bridge(%s)\n", node->full_name);

	/* parse bus-range property */
	busrange = of_get_property(node, "bus-range", &len);
	if (busrange == NULL || len != 8) {
		printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
		       node->full_name);
		return;
	}

	if (ofpci_verbose)
		printk(" Bridge bus range [%u --> %u]\n",
		       busrange[0], busrange[1]);

	ranges = of_get_property(node, "ranges", &len);
	simba = 0;
	if (ranges == NULL) {
		const char *model = of_get_property(node, "model", NULL);
		if (model && !strcmp(model, "SUNW,simba"))
			simba = 1;
	}

	bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
	if (!bus) {
		printk(KERN_ERR "Failed to create pci bus for %s\n",
		       node->full_name);
		return;
	}

	bus->primary = dev->bus->number;
	pci_bus_insert_busn_res(bus, busrange[0], busrange[1]);
	bus->bridge_ctl = 0;

	if (ofpci_verbose)
		printk(" Bridge ranges[%p] simba[%d]\n",
		       ranges, simba);

	/* parse ranges property, or cook one up by hand for Simba */
	/* PCI #address-cells == 3 and #size-cells == 2 always */
	res = &dev->resource[PCI_BRIDGE_RESOURCES];
	for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
		res->flags = 0;
		bus->resource[i] = res;
		++res;
	}
	if (simba) {
		apb_fake_ranges(dev, bus, pbm);
		goto after_ranges;
	} else if (ranges == NULL) {
		pci_read_bridge_bases(bus);
		goto after_ranges;
	}
	i = 1;
	for (; len >= 32; len -= 32, ranges += 8) {
		u64 start;

		if (ofpci_verbose)
			printk(" RAW Range[%08x:%08x:%08x:%08x:%08x:%08x:"
			       "%08x:%08x]\n",
			       ranges[0], ranges[1], ranges[2], ranges[3],
			       ranges[4], ranges[5], ranges[6], ranges[7]);

		flags = pci_parse_of_flags(ranges[0]);
		size = GET_64BIT(ranges, 6);
		if (flags == 0 || size == 0)
			continue;

		/* On PCI-Express systems, PCI bridges that have no devices downstream
		 * have a bogus size value where the first 32-bit cell is 0xffffffff.
		 * This results in a bogus range where start + size overflows.
		 *
		 * Just skip these otherwise the kernel will complain when the resource
		 * tries to be claimed.
		 */
		if (size >> 32 == 0xffffffff)
			continue;

		if (flags & IORESOURCE_IO) {
			res = bus->resource[0];
			if (res->flags) {
				printk(KERN_ERR "PCI: ignoring extra I/O range"
				       " for bridge %s\n", node->full_name);
				continue;
			}
		} else {
			if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
				printk(KERN_ERR "PCI: too many memory ranges"
				       " for bridge %s\n", node->full_name);
				continue;
			}
			res = bus->resource[i];
			++i;
		}

		res->flags = flags;
		region.start = start = GET_64BIT(ranges, 1);
		region.end = region.start + size - 1;

		if (ofpci_verbose)
			printk(" Using flags[%08x] start[%016llx] size[%016llx]\n",
			       flags, start, size);

		pcibios_bus_to_resource(dev->bus, res, &region);
	}
after_ranges:
	sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
		bus->number);
	if (ofpci_verbose)
		printk(" bus name: %s\n", bus->name);

	pci_of_scan_bus(pbm, node, bus);
}

static void pci_of_scan_bus(struct pci_pbm_info *pbm,
			    struct device_node *node,
			    struct pci_bus *bus)
{
	struct device_node *child;
	const u32 *reg;
	int reglen, devfn, prev_devfn;
	struct pci_dev *dev;

	if (ofpci_verbose)
		printk("PCI: scan_bus[%s] bus no %d\n",
		       node->full_name, bus->number);

	child = NULL;
	prev_devfn = -1;
	while ((child = of_get_next_child(node, child)) != NULL) {
		if (ofpci_verbose)
			printk(" * %s\n", child->full_name);
		reg = of_get_property(child, "reg", &reglen);
		if (reg == NULL || reglen < 20)
			continue;

		devfn = (reg[0] >> 8) & 0xff;

		/* This is a workaround for some device trees
		 * which list PCI devices twice. On the V100
		 * for example, device number 3 is listed twice.
		 * Once as "pm" and once again as "lomp".
		 */
		if (devfn == prev_devfn)
			continue;
		prev_devfn = devfn;

		/* create a new pci_dev for this device */
		dev = of_create_pci_dev(pbm, child, bus, devfn);
		if (!dev)
			continue;
		if (ofpci_verbose)
			printk("PCI: dev header type: %x\n",
			       dev->hdr_type);

		if (pci_is_bridge(dev))
			of_scan_pci_bridge(pbm, child, dev);
	}
}

static ssize_t
show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * buf)
{
	struct pci_dev *pdev;
	struct device_node *dp;

	pdev = to_pci_dev(dev);
	dp = pdev->dev.of_node;

	return snprintf (buf, PAGE_SIZE, "%s\n", dp->full_name);
}

static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL);

static void pci_bus_register_of_sysfs(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;
	int err;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* we don't really care if we can create this file or
		 * not, but we need to assign the result of the call
		 * or the world will fall under alien invasion and
		 * everybody will be frozen on a spaceship ready to be
		 * eaten on alpha centauri by some green and jelly
		 * humanoid.
		 */
		err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr);
		(void) err;
	}
	list_for_each_entry(child_bus, &bus->children, node)
		pci_bus_register_of_sysfs(child_bus);
}

static void pci_claim_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *child_bus;
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			if (r->parent || !r->start || !r->flags)
				continue;

			if (ofpci_verbose)
				printk("PCI: Claiming %s: "
				       "Resource %d: %016llx..%016llx [%x]\n",
				       pci_name(dev), i,
				       (unsigned long long)r->start,
				       (unsigned long long)r->end,
				       (unsigned int)r->flags);

			pci_claim_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &bus->children, node)
		pci_claim_bus_resources(child_bus);
}

struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
				 struct device *parent)
{
	LIST_HEAD(resources);
	struct device_node *node = pbm->op->dev.of_node;
	struct pci_bus *bus;

	printk("PCI: Scanning PBM %s\n", node->full_name);

	pci_add_resource_offset(&resources, &pbm->io_space,
				pbm->io_space.start);
	pci_add_resource_offset(&resources, &pbm->mem_space,
				pbm->mem_space.start);
	pbm->busn.start = pbm->pci_first_busno;
	pbm->busn.end = pbm->pci_last_busno;
	pbm->busn.flags = IORESOURCE_BUS;
	pci_add_resource(&resources, &pbm->busn);
	bus = pci_create_root_bus(parent, pbm->pci_first_busno, pbm->pci_ops,
				  pbm, &resources);
	if (!bus) {
		printk(KERN_ERR "Failed to create bus for %s\n",
		       node->full_name);
		pci_free_resource_list(&resources);
		return NULL;
	}

	pci_of_scan_bus(pbm, node, bus);
	pci_bus_register_of_sysfs(bus);

	pci_claim_bus_resources(bus);
	pci_bus_add_devices(bus);
	return bus;
}

void pcibios_fixup_bus(struct pci_bus *pbus)
{
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size, resource_size_t align)
{
	return res->start;
}

int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	u16 cmd, oldcmd;
	int i;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	oldcmd = cmd;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *res = &dev->resource[i];

		/* Only set up the requested stuff */
		if (!(mask & (1<<i)))
			continue;

		if (res->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (res->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}

	if (cmd != oldcmd) {
		printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
		       pci_name(dev), cmd);
		/* Enable the appropriate bits in the PCI command register. */
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}

/* Platform support for /proc/bus/pci/X/Y mmap()s. */

/* If the user uses a host-bridge as the PCI device, he may use
 * this to perform a raw mmap() of the I/O or MEM space behind
 * that controller.
 *
 * This can be useful for execution of x86 PCI bios initialization code
 * on a PCI card, like the xfree86 int10 stuff does.
 */
static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
				      enum pci_mmap_state mmap_state)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	unsigned long space_size, user_offset, user_size;

	if (mmap_state == pci_mmap_io) {
		space_size = resource_size(&pbm->io_space);
	} else {
		space_size = resource_size(&pbm->mem_space);
	}

	/* Make sure the request is in range. */
	user_offset = vma->vm_pgoff << PAGE_SHIFT;
	user_size = vma->vm_end - vma->vm_start;

	if (user_offset >= space_size ||
	    (user_offset + user_size) > space_size)
		return -EINVAL;

	if (mmap_state == pci_mmap_io) {
		vma->vm_pgoff = (pbm->io_space.start +
				 user_offset) >> PAGE_SHIFT;
	} else {
		vma->vm_pgoff = (pbm->mem_space.start +
				 user_offset) >> PAGE_SHIFT;
	}

	return 0;
}

/* Adjust vm_pgoff of VMA such that it is the physical page offset
 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address for his device which he wishes
 * to mmap. They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns negative error code on failure, zero on success.
 */
static int __pci_mmap_make_offset(struct pci_dev *pdev,
				  struct vm_area_struct *vma,
				  enum pci_mmap_state mmap_state)
{
	unsigned long user_paddr, user_size;
	int i, err;

	/* First compute the physical address in vma->vm_pgoff,
	 * making sure the user offset is within range in the
	 * appropriate PCI space.
	 */
	err = __pci_mmap_make_offset_bus(pdev, vma, mmap_state);
	if (err)
		return err;

	/* If this is a mapping on a host bridge, any address
	 * is OK.
	 */
	if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
		return err;

	/* Otherwise make sure it's in the range for one of the
	 * device's resources.
	 */
	user_paddr = vma->vm_pgoff << PAGE_SHIFT;
	user_size = vma->vm_end - vma->vm_start;

	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &pdev->resource[i];
		resource_size_t aligned_end;

		/* Active? */
		if (!rp->flags)
			continue;

		/* Same type? */
		if (i == PCI_ROM_RESOURCE) {
			if (mmap_state != pci_mmap_mem)
				continue;
		} else {
			if ((mmap_state == pci_mmap_io &&
			     (rp->flags & IORESOURCE_IO) == 0) ||
			    (mmap_state == pci_mmap_mem &&
			     (rp->flags & IORESOURCE_MEM) == 0))
				continue;
		}

		/* Align the resource end to the next page address.
		 * PAGE_SIZE intentionally added instead of (PAGE_SIZE - 1),
		 * because actually we need the address of the next byte
		 * after rp->end.
		 */
		aligned_end = (rp->end + PAGE_SIZE) & PAGE_MASK;

		if ((rp->start <= user_paddr) &&
		    (user_paddr + user_size) <= aligned_end)
			break;
	}

	if (i > PCI_ROM_RESOURCE)
		return -EINVAL;

	return 0;
}

/* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
				  enum pci_mmap_state mmap_state)
{
	/* Our io_remap_pfn_range takes care of this, do nothing. */
}

/* Perform the actual remap of the pages for a PCI device mapping, as appropriate
 * for this architecture. The region in the process to map is described by vm_start
 * and vm_end members of VMA, the base physical address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state,
			int write_combine)
{
	int ret;

	ret = __pci_mmap_make_offset(dev, vma, mmap_state);
	if (ret < 0)
		return ret;

	__pci_mmap_set_pgprot(dev, vma, mmap_state);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	ret = io_remap_pfn_range(vma, vma->vm_start,
				 vma->vm_pgoff,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);
	if (ret)
		return ret;

	return 0;
}

#ifdef CONFIG_NUMA
int pcibus_to_node(struct pci_bus *pbus)
{
	struct pci_pbm_info *pbm = pbus->sysdata;

	return pbm->numa_node;
}
EXPORT_SYMBOL(pcibus_to_node);
#endif

/* Return the domain number for this pci bus */

int pci_domain_nr(struct pci_bus *pbus)
{
	struct pci_pbm_info *pbm = pbus->sysdata;
	int ret;

	if (!pbm) {
		ret = -ENXIO;
	} else {
		ret = pbm->index;
	}

	return ret;
}
EXPORT_SYMBOL(pci_domain_nr);

#ifdef CONFIG_PCI_MSI
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	unsigned int irq;

	if (!pbm->setup_msi_irq)
		return -EINVAL;

	return pbm->setup_msi_irq(&irq, pdev, desc);
}

void arch_teardown_msi_irq(unsigned int irq)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);
	struct pci_dev *pdev = msi_desc_to_pci_dev(entry);
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;

	if (pbm->teardown_msi_irq)
		pbm->teardown_msi_irq(irq, pdev);
}
#endif /* !(CONFIG_PCI_MSI) */

static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
{
	struct pci_dev *ali_isa_bridge;
	u8 val;

	/* ALI sound chips generate 31-bits of DMA, a special register
	 * determines what bit 31 is emitted as.
	 */
	ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
					PCI_DEVICE_ID_AL_M1533,
					NULL);

	pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
	if (set_bit)
		val |= 0x01;
	else
		val &= ~0x01;
	pci_write_config_byte(ali_isa_bridge, 0x7e, val);
	pci_dev_put(ali_isa_bridge);
}

int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask)
{
	u64 dma_addr_mask;

	if (pdev == NULL) {
		dma_addr_mask = 0xffffffff;
	} else {
		struct iommu *iommu = pdev->dev.archdata.iommu;

		dma_addr_mask = iommu->dma_addr_mask;

		if (pdev->vendor == PCI_VENDOR_ID_AL &&
		    pdev->device == PCI_DEVICE_ID_AL_M5451 &&
		    device_mask == 0x7fffffff) {
			ali_sound_dma_hack(pdev,
					   (dma_addr_mask & 0x80000000) != 0);
			return 1;
		}
	}

	if (device_mask >= (1UL << 32UL))
		return 0;

	return (device_mask & dma_addr_mask) == dma_addr_mask;
}

void pci_resource_to_user(const struct pci_dev *pdev, int bar,
			  const struct resource *rp, resource_size_t *start,
			  resource_size_t *end)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	unsigned long offset;

	if (rp->flags & IORESOURCE_IO)
		offset = pbm->io_space.start;
	else
		offset = pbm->mem_space.start;

	*start = rp->start - offset;
	*end = rp->end - offset;
}

void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

static int __init pcibios_init(void)
{
	pci_dfl_cache_line_size = 64 >> 2;
	return 0;
}
subsys_initcall(pcibios_init);

#ifdef CONFIG_SYSFS

#define SLOT_NAME_SIZE 11	/* Max decimal digits + null in u32 */

static void pcie_bus_slot_names(struct pci_bus *pbus)
{
	struct pci_dev *pdev;
	struct pci_bus *bus;

	list_for_each_entry(pdev, &pbus->devices, bus_list) {
		char name[SLOT_NAME_SIZE];
		struct pci_slot *pci_slot;
		const u32 *slot_num;
		int len;

		slot_num = of_get_property(pdev->dev.of_node,
					   "physical-slot#", &len);

		if (slot_num == NULL || len != 4)
			continue;

		snprintf(name, sizeof(name), "%u", slot_num[0]);
		pci_slot = pci_create_slot(pbus, slot_num[0], name, NULL);

		if (IS_ERR(pci_slot))
			pr_err("PCI: pci_create_slot returned %ld.\n",
			       PTR_ERR(pci_slot));
	}

	list_for_each_entry(bus, &pbus->children, node)
		pcie_bus_slot_names(bus);
}

static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
{
	const struct pci_slot_names {
		u32 slot_mask;
		char names[0];
	} *prop;
	const char *sp;
	int len, i;
	u32 mask;

	prop = of_get_property(node, "slot-names", &len);
	if (!prop)
		return;

	mask = prop->slot_mask;
	sp = prop->names;

	if (ofpci_verbose)
		printk("PCI: Making slots for [%s] mask[0x%02x]\n",
		       node->full_name, mask);

	i = 0;
	while (mask) {
		struct pci_slot *pci_slot;
		u32 this_bit = 1 << i;

		if (!(mask & this_bit)) {
			i++;
			continue;
		}

		if (ofpci_verbose)
			printk("PCI: Making slot [%s]\n", sp);

		pci_slot = pci_create_slot(bus, i, sp, NULL);
		if (IS_ERR(pci_slot))
			printk(KERN_ERR "PCI: pci_create_slot returned %ld\n",
			       PTR_ERR(pci_slot));

		sp += strlen(sp) + 1;
		mask &= ~this_bit;
		i++;
	}
}

static int __init of_pci_slot_init(void)
{
	struct pci_bus *pbus = NULL;

	while ((pbus = pci_find_next_bus(pbus)) != NULL) {
		struct device_node *node;
		struct pci_dev *pdev;

		pdev = list_first_entry(&pbus->devices, struct pci_dev,
					bus_list);

		if (pdev && pci_is_pcie(pdev)) {
			pcie_bus_slot_names(pbus);
		} else {

			if (pbus->self) {

				/* PCI->PCI bridge */
				node = pbus->self->dev.of_node;

			} else {
				struct pci_pbm_info *pbm = pbus->sysdata;

				/* Host PCI controller */
				node = pbm->op->dev.of_node;
			}

			pci_bus_slot_names(node, pbus);
		}
	}

	return 0;
}
device_initcall(of_pci_slot_init);
#endif