1 /* 2 * probe.c - PCI detection and setup code 3 */ 4 5 #include <linux/kernel.h> 6 #include <linux/delay.h> 7 #include <linux/init.h> 8 #include <linux/pci.h> 9 #include <linux/slab.h> 10 #include <linux/module.h> 11 #include <linux/cpumask.h> 12 #include <linux/pci-aspm.h> 13 #include <asm-generic/pci-bridge.h> 14 #include "pci.h" 15 16 #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ 17 #define CARDBUS_RESERVE_BUSNR 3 18 19 static struct resource busn_resource = { 20 .name = "PCI busn", 21 .start = 0, 22 .end = 255, 23 .flags = IORESOURCE_BUS, 24 }; 25 26 /* Ugh. Need to stop exporting this to modules. */ 27 LIST_HEAD(pci_root_buses); 28 EXPORT_SYMBOL(pci_root_buses); 29 30 static LIST_HEAD(pci_domain_busn_res_list); 31 32 struct pci_domain_busn_res { 33 struct list_head list; 34 struct resource res; 35 int domain_nr; 36 }; 37 38 static struct resource *get_pci_domain_busn_res(int domain_nr) 39 { 40 struct pci_domain_busn_res *r; 41 42 list_for_each_entry(r, &pci_domain_busn_res_list, list) 43 if (r->domain_nr == domain_nr) 44 return &r->res; 45 46 r = kzalloc(sizeof(*r), GFP_KERNEL); 47 if (!r) 48 return NULL; 49 50 r->domain_nr = domain_nr; 51 r->res.start = 0; 52 r->res.end = 0xff; 53 r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED; 54 55 list_add_tail(&r->list, &pci_domain_busn_res_list); 56 57 return &r->res; 58 } 59 60 static int find_anything(struct device *dev, void *data) 61 { 62 return 1; 63 } 64 65 /* 66 * Some device drivers need know if pci is initiated. 67 * Basically, we think pci is not initiated when there 68 * is no device to be found on the pci_bus_type. 
69 */ 70 int no_pci_devices(void) 71 { 72 struct device *dev; 73 int no_devices; 74 75 dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything); 76 no_devices = (dev == NULL); 77 put_device(dev); 78 return no_devices; 79 } 80 EXPORT_SYMBOL(no_pci_devices); 81 82 /* 83 * PCI Bus Class 84 */ 85 static void release_pcibus_dev(struct device *dev) 86 { 87 struct pci_bus *pci_bus = to_pci_bus(dev); 88 89 if (pci_bus->bridge) 90 put_device(pci_bus->bridge); 91 pci_bus_remove_resources(pci_bus); 92 pci_release_bus_of_node(pci_bus); 93 kfree(pci_bus); 94 } 95 96 static struct class pcibus_class = { 97 .name = "pci_bus", 98 .dev_release = &release_pcibus_dev, 99 .dev_groups = pcibus_groups, 100 }; 101 102 static int __init pcibus_class_init(void) 103 { 104 return class_register(&pcibus_class); 105 } 106 postcore_initcall(pcibus_class_init); 107 108 static u64 pci_size(u64 base, u64 maxbase, u64 mask) 109 { 110 u64 size = mask & maxbase; /* Find the significant bits */ 111 if (!size) 112 return 0; 113 114 /* Get the lowest of them to find the decode size, and 115 from that the extent. */ 116 size = (size & ~(size-1)) - 1; 117 118 /* base == maxbase can be valid only if the BAR has 119 already been programmed with all 1s. 
	   */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}

/* Translate a raw BAR register value into IORESOURCE_* flags. */
static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	u32 mem_type;
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		flags |= IORESOURCE_IO;
		return flags;
	}

	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
	flags |= IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	default:
		/* mem unknown type treated as 32-bit BAR */
		break;
	}
	return flags;
}

#define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

/**
 * __pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l, sz, mask;
	u64 l64, sz64, mask64;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;
	bool bar_too_big = false, bar_too_high = false, bar_invalid = false;

	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	/* Size the BAR: save it, write all 1s, read back, restore. */
	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (!sz || sz == 0xffffffff)
		goto fail;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l &= PCI_BASE_ADDRESS_IO_MASK;
			mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
		} else {
			l &= PCI_BASE_ADDRESS_MEM_MASK;
			mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		/* ROM BAR: preserve the enable bit in the flags */
		res->flags |= (l & IORESOURCE_ROM_ENABLE);
		l &= PCI_ROM_ADDRESS_MASK;
		mask = (u32)PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		/* 64-bit BAR: size the upper dword the same way. */
		l64 = l;
		sz64 = sz;
		mask64 = mask | (u64)~0 << 32;

		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);

		sz64 = pci_size(l64, sz64, mask64);

		if (!sz64)
			goto fail;

		if ((sizeof(dma_addr_t) < 8 || sizeof(resource_size_t) < 8) &&
		    sz64 > 0x100000000ULL) {
			/* BAR too large for this kernel's types; disable it. */
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			res->start = 0;
			res->end = 0;
			bar_too_big = true;
			goto out;
		}

		if ((sizeof(dma_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = sz64;
			bar_too_high = true;
			goto out;
		} else {
			region.start = l64;
			region.end = l64 + sz64;
		}
	} else {
		sz = pci_size(l, sz, mask);

		if (!sz)
			goto fail;

		region.start = l;
		region.end = l + sz;
	}

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU).  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		res->flags |= IORESOURCE_UNSET;
		res->start = 0;
		res->end = region.end - region.start;
		bar_invalid = true;
	}

	goto out;


fail:
	res->flags = 0;
out:
	/* Re-enable decoding before any printk so messages can get out. */
	if (!dev->mmio_always_on &&
	    (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (bar_too_big)
		dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
			pos, (unsigned long long) sz64);
	if (bar_too_high)
		dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4G (bus address %#010llx)\n",
			 pos, (unsigned long long) l64);
	if (bar_invalid)
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
			 pos, (unsigned long long) region.start);
	if (res->flags)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ?
		1 : 0;
}

/*
 * Probe @howmany BARs starting at BAR0, plus the expansion ROM BAR at
 * config offset @rom when non-zero.  A 64-bit BAR consumes two slots,
 * which is why the loop index is advanced by __pci_read_base()'s return.
 */
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
				IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}

/* Read the bridge's I/O window into child resource 0. */
static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		/* 32-bit I/O addressing: upper 16 bits live in a second pair */
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	/* base > limit means the window is disabled */
	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

/* Read the bridge's non-prefetchable memory window into child resource 1. */
static void pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;	/* 1MB window granularity */
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

/*
 * Read the bridge's prefetchable (possibly 64-bit) memory window into
 * child resource 2.
 */
static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
#if BITS_PER_LONG == 64
			base |= ((unsigned long) mem_base_hi) << 32;
			limit |= ((unsigned long) mem_limit_hi) << 32;
#else
			/* 32-bit kernel cannot address a window above 4G */
			if (mem_base_hi || mem_limit_hi) {
				dev_err(&dev->dev, "can't handle 64-bit "
					"address space for bridge\n");
				return;
			}
#endif
		}
	}
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

/*
 * Read all three bridge windows (I/O, memory, prefetchable memory)
 * from @child's upstream bridge into the child bus resources.
 */
void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ?
		 " (subtractive decode)" : "");

	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	/* A transparent bridge also forwards everything its parent decodes. */
	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res && res->flags) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}

/* Allocate a struct pci_bus with its lists and speeds initialized. */
static struct pci_bus *pci_alloc_bus(void)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return NULL;

	INIT_LIST_HEAD(&b->node);
	INIT_LIST_HEAD(&b->children);
	INIT_LIST_HEAD(&b->devices);
	INIT_LIST_HEAD(&b->slots);
	INIT_LIST_HEAD(&b->resources);
	b->max_bus_speed = PCI_SPEED_UNKNOWN;
	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
	return b;
}

/* device release callback for a pci_host_bridge */
static void pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}

static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
{
	struct pci_host_bridge *bridge;

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);
	bridge->bus = b;
	return bridge;
}

/* Secondary bus speed, indexed by the PCI-X SSTATUS frequency field. */
static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

/* Link speed, indexed by the PCIe LNKCAP/LNKSTA current-speed field. */
const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};

void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);

static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};

/*
 * Map the AGP status rate bits (and the AGP3 mode flag) to an
 * enum pci_bus_speed via the agp_speeds[] table.
 */
static enum pci_bus_speed agp_speed(int agp3, int agpstat)
{
	int index = 0;

	if (agpstat & 4)
		index = 3;
	else if (agpstat & 2)
		index = 2;
	else if (agpstat & 1)
		index = 1;
	else
		goto out;

	if (agp3) {
		/* AGP3 rates are 4X/8X; the 1X encoding is invalid there */
		index += 2;
		if (index == 5)
			index = 0;
	}

 out:
	return agp_speeds[index];
}


/*
 * Determine max and current bus speed from whichever capability the
 * upstream bridge exposes: AGP, PCI-X, or PCI Express.
 */
static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2) {
				max = PCI_SPEED_133MHz_PCIX_ECC;
			} else {
				max = PCI_SPEED_133MHz_PCIX;
			}
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}


static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus();
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;	/* provisional; fixed up by caller */

	if (!bridge) {
		/* No bridge device: hang the bus off the host bridge. */
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);	/* dropped in release_pcibus_dev() */
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}

/* Allocate a child bus and link it into the parent's children list. */
struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}

/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	/* PCI_PRIMARY_BUS packs primary/secondary/subordinate in one dword */
	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate || subordinate > bus->busn_res.end)) {
		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * The bus might already exist for two reasons: Either we are
		 * rescanning the bus or the bus is reachable through more than
		 * one bridge. The second case can happen with the i450NX
		 * chipset.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > subordinate)
			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
				 subordinate, cmax);
		/* subordinate should equal child->busn_res.end */
		if (subordinate > max)
			max = subordinate;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken || is_cardbus)
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		if (max >= bus->busn_res.end) {
			dev_warn(&dev->dev, "can't allocate child bus %02x from %pR\n",
				 max, &bus->busn_res);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* The bus will already exist if we are rescanning */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, max+1);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, max+1,
						bus->busn_res.end);
		}
		max++;
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)        <<  0)
		      | ((unsigned int)(child->busn_res.start) <<  8)
		      | ((unsigned int)(child->busn_res.end)   << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			max = pci_scan_child_bus(child);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
							max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		if (max > bus->busn_res.end) {
			dev_warn(&dev->dev, "max busn %02x is outside %pR\n",
				 max, &bus->busn_res);
			max = bus->busn_res.end;
		}
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s "
				"hidden behind%s bridge %s %pR\n",
				&child->busn_res,
				(bus->number > child->busn_res.end &&
				 bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				bus->self->transparent ?
					" transparent" : "",
				dev_name(&bus->dev),
				&bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	/* Restore the saved bridge control (re-enables master abort mode). */
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	return max;
}

/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	/* Only read the line if the device actually uses an interrupt pin. */
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

/*
 * Cache the PCIe capability offset, flags register, and max payload
 * size supported on @pdev; no-op for non-PCIe devices.
 */
void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;
	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_flags_reg = reg16;
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
}

/* Flag @pdev as a hotplug bridge if its slot is hot-plug capable. */
void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
}


/**
 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
 * @dev: PCI device
 *
 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
 * when forwarding a type1 configuration request the bridge must check that
 * the extended register address field is zero.  The bridge is not permitted
 * to forward the transactions and must handle it as an Unsupported Request.
 * Some bridges do not follow this rule and simply drop the extended register
 * bits, resulting in the standard config space being aliased, every 256
 * bytes across the entire configuration space.  Test for this condition by
 * comparing the first dword of each potential alias to the vendor/device ID.
 * Known offenders:
 *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
 *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
 */
static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_QUIRKS
	int pos;
	u32 header, tmp;

	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);

	for (pos = PCI_CFG_SPACE_SIZE;
	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
		    || header != tmp)
			return false;
	}

	return true;
#else
	return false;
#endif
}

/**
 * pci_cfg_space_size - get the configuration space size of the PCI device.
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
 * access it.  Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge.  So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
static int pci_cfg_space_size_ext(struct pci_dev *dev)
{
	u32 status;
	int pos = PCI_CFG_SPACE_SIZE;

	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
		goto fail;
	if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
		goto fail;

	return PCI_CFG_SPACE_EXP_SIZE;

 fail:
	return PCI_CFG_SPACE_SIZE;
}

int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;
	u16 class;

	class = dev->class >> 8;
	/* Host bridges are probed directly for extended config space. */
	if (class == PCI_CLASS_BRIDGE_HOST)
		return pci_cfg_space_size_ext(dev);

	if (!pci_is_pcie(dev)) {
		/* Conventional PCI: only PCI-X mode 2 has extended space. */
		pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
		if (!pos)
			goto fail;

		pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
		if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
			goto fail;
	}

	return pci_cfg_space_size_ext(dev);

 fail:
	return PCI_CFG_SPACE_SIZE;
}

#define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)

/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor, class, memory and IO-space addresses, IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and negative if unknown type of device (not normal,
 * bridge or CardBus).
 */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u8 hdr_type;
	struct pci_slot *slot;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->hdr_type = hdr_type & 0x7f;	/* bit 7 is the multifunction flag */
	dev->multifunction = !!(hdr_type & 0x80);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	list_for_each_entry(slot, &dev->bus->slots, list)
		if (PCI_SLOT(dev->devfn) == slot->number)
			dev->slot = slot;

	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.  */
	dev->dma_mask = 0xffffffff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		/* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code. Legacy mode ATA controllers have fixed
		 * addresses. These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				/* Primary channel in legacy mode */
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
					 res);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
					 res);
			}
			if ((progif & 4) == 0) {
				/* Secondary channel in legacy mode */
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
					 res);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
					 res);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01.  */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);	/* CardBus: one BAR, no ROM */
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, "
			"ignoring device\n", dev->hdr_type);
		return -EIO;

	bad:
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header "
			"type %02x)\n", dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}

/* Release capability-related state (VPD, SR-IOV, saved caps) of @dev. */
static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}

/**
 * pci_release_dev - free a pci device structure when all users of it are finished.
 * @dev: device that's been disconnected
 *
 * Will be called only by the device core when all users of this pci device are
 * done.
 */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	pci_release_capabilities(pci_dev);
	pci_release_of_node(pci_dev);
	pcibios_release_device(pci_dev);
	pci_bus_put(pci_dev->bus);	/* drop the ref taken in pci_alloc_dev() */
	kfree(pci_dev->driver_override);
	kfree(pci_dev);
}

/*
 * Allocate a zeroed pci_dev attached to @bus.  Takes a reference on the
 * bus (released by pci_release_dev()).  Returns NULL on allocation failure.
 */
struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
{
	struct pci_dev *dev;

	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	INIT_LIST_HEAD(&dev->bus_list);
	dev->dev.type = &pci_dev_type;
	dev->bus = pci_bus_get(bus);

	return dev;
}
EXPORT_SYMBOL(pci_alloc_dev);

/*
 * Read the Vendor/Device ID dword at @devfn into *@l and decide whether a
 * device is present.  Retries with exponential backoff for up to
 * @crs_timeout ms while the device returns Configuration Request Retry
 * Status.  Returns true if a device responded with a plausible ID.
 */
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
				int crs_timeout)
{
	int delay = 1;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
		return false;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (*l == 0xffffffff || *l == 0x00000000 ||
	    *l == 0x0000ffff || *l == 0xffff0000)
		return false;

	/* Configuration request Retry Status */
	while (*l == 0xffff0001) {
		if (!crs_timeout)
			return false;

		msleep(delay);
		delay *= 2;	/* exponential backoff between polls */
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;
		/* Card hasn't responded in 60 seconds?  Must be stuck. */
		if (delay > crs_timeout) {
			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
					"responding\n", pci_domain_nr(bus),
					bus->number, PCI_SLOT(devfn),
					PCI_FUNC(devfn));
			return false;
		}
	}

	return true;
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);

/*
 * Read the config data for a PCI device, sanity-check it
 * and fill in the dev structure...
 */
static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	u32 l;

	/* Wait up to 60 seconds for the device to finish CRS */
	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
		return NULL;

	dev = pci_alloc_dev(bus);
	if (!dev)
		return NULL;

	dev->devfn = devfn;
	dev->vendor = l & 0xffff;
	dev->device = (l >> 16) & 0xffff;

	pci_set_of_node(dev);

	if (pci_setup_device(dev)) {
		/* undo pci_alloc_dev(): drop the bus ref and free the struct */
		pci_bus_put(dev->bus);
		kfree(dev);
		return NULL;
	}

	return dev;
}

/* Discover and initialize the optional capabilities of a new device. */
static void pci_init_capabilities(struct pci_dev *dev)
{
	/* MSI/MSI-X list */
	pci_msi_init_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_pci22_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);
}

/* Register a freshly scanned device with the driver core and bus lists. */
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	int ret;

	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);

	ret = pcibios_add_device(dev);
	WARN_ON(ret < 0);

	/* Notifier could use PCI capabilities */
	dev->match_driver = false;
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);
}

/*
 * Scan one devfn: return the already-known device if it exists, otherwise
 * probe for it and, if found, add it to the bus.
 */
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;

	dev = pci_get_slot(bus, devfn);
	if (dev) {
		pci_dev_put(dev);
		return dev;
	}

	dev = pci_scan_device(bus, devfn);
	if (!dev)
		return NULL;

	pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(pci_scan_single_device);

/*
 * Return the next function number to probe after @fn, or 0 when the slot
 * is exhausted.  With ARI enabled the next function comes from the
 * device's ARI capability; otherwise functions 1-7 are probed only for
 * multifunction devices.
 */
static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
{
	int pos;
	u16 cap = 0;
	unsigned next_fn;

	if (pci_ari_enabled(bus)) {
		if (!dev)
			return 0;
		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
		if (!pos)
			return 0;

		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
		next_fn = PCI_ARI_CAP_NFN(cap);
		if (next_fn <= fn)
			return 0;	/* protect against malformed list */

		return next_fn;
	}

	/* dev may be NULL for non-contiguous multifunction devices */
	if (!dev || dev->multifunction)
		return (fn + 1) % 8;

	return 0;
}

/*
 * A PCIe Root Port or Downstream Port leads to a point-to-point link, so
 * normally only device 0 can exist below it (unless the platform sets
 * PCI_SCAN_ALL_PCIE_DEVS to scan every devfn anyway).
 */
static int only_one_child(struct pci_bus *bus)
{
	struct pci_dev *parent = bus->self;

	if (!parent || !pci_is_pcie(parent))
		return 0;
	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
		return 1;
	if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
		return 1;
	return 0;
}

/**
 * pci_scan_slot - scan a PCI slot on a bus for devices.
 * @bus: PCI bus to scan
 * @devfn: slot number to scan (must have zero function.)
 *
 * Scan a PCI slot on the specified PCI bus for devices, adding
 * discovered devices to the @bus->devices list.  New devices
 * will not have is_added set.
 *
 * Returns the number of new devices found.
 */
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
	unsigned fn, nr = 0;
	struct pci_dev *dev;

	if (only_one_child(bus) && (devfn > 0))
		return 0; /* Already scanned the entire slot */

	dev = pci_scan_single_device(bus, devfn);
	if (!dev)
		return 0;
	if (!dev->is_added)
		nr++;

	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
		dev = pci_scan_single_device(bus, devfn + fn);
		if (dev) {
			if (!dev->is_added)
				nr++;
			dev->multifunction = 1;
		}
	}

	/* only one slot has pcie device */
	if (bus->self && nr)
		pcie_aspm_init_link_state(bus->self);

	return nr;
}

static int pcie_find_smpss(struct pci_dev *dev, void *data)
{
	u8 *smpss = data;

	if (!pci_is_pcie(dev))
		return 0;

	/*
	 * We don't have a way to change MPS settings on devices that have
	 * drivers attached.  A hot-added device might support only the minimum
	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
	 * where devices may be hot-added, we limit the fabric MPS to 128 so
	 * hot-added devices will work correctly.
	 *
	 * However, if we hot-add a device to a slot directly below a Root
	 * Port, it's impossible for there to be other existing devices below
	 * the port.  We don't limit the MPS in this case because we can
	 * reconfigure MPS on both the Root Port and the hot-added device,
	 * and there are no other devices involved.
 *
 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
 */
	if (dev->is_hotplug_bridge &&
	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		*smpss = 0;

	/* track the smallest MPSS seen so far across the fabric */
	if (*smpss > dev->pcie_mpss)
		*smpss = dev->pcie_mpss;

	return 0;
}

/* Program the device's Max Payload Size, per the active pcie_bus_config. */
static void pcie_write_mps(struct pci_dev *dev, int mps)
{
	int rc;

	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		mps = 128 << dev->pcie_mpss;

		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
		    dev->bus->self)
			/* For "Performance", the assumption is made that
			 * downstream communication will never be larger than
			 * the MRRS.  So, the MPS only needs to be configured
			 * for the upstream communication.  This being the case,
			 * walk from the top down and set the MPS of the child
			 * to that of the parent bus.
			 *
			 * Configure the device MPS with the smaller of the
			 * device MPSS or the bridge MPS (which is assumed to be
			 * properly configured at this point to the largest
			 * allowable MPS based on its parent bus).
			 */
			mps = min(mps, pcie_get_mps(dev->bus->self));
	}

	rc = pcie_set_mps(dev, mps);
	if (rc)
		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
}

/* Program the Max Read Request Size (PCIE_BUS_PERFORMANCE mode only). */
static void pcie_write_mrrs(struct pci_dev *dev)
{
	int rc, mrrs;

	/* In the "safe" case, do not configure the MRRS.  There appear to be
	 * issues with setting MRRS to 0 on a number of devices.
	 */
	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
		return;

	/* For Max performance, the MRRS must be set to the largest supported
	 * value.  However, it cannot be configured larger than the MPS the
	 * device or the bus can support.  This should already be properly
	 * configured by a prior call to pcie_write_mps.
	 */
	mrrs = pcie_get_mps(dev);

	/* MRRS is a R/W register.  Invalid values can be written, but a
	 * subsequent read will verify if the value is acceptable or not.
	 * If the MRRS value provided is not acceptable (e.g., too large),
	 * shrink the value until it is acceptable to the HW.
	 */
	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
		rc = pcie_set_readrq(dev, mrrs);
		if (!rc)
			break;

		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
		mrrs /= 2;
	}

	if (mrrs < 128)
		dev_err(&dev->dev, "MRRS was unable to be configured with a "
			"safe value.  If problems are experienced, try running "
			"with pci=pcie_bus_safe.\n");
}

/* Warn if a device's MPS disagrees with its upstream bridge's (tune-off mode). */
static void pcie_bus_detect_mps(struct pci_dev *dev)
{
	struct pci_dev *bridge = dev->bus->self;
	int mps, p_mps;

	if (!bridge)
		return;

	mps = pcie_get_mps(dev);
	p_mps = pcie_get_mps(bridge);

	if (mps != p_mps)
		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 mps, pci_name(bridge), p_mps);
}

/* pci_walk_bus() callback: apply the chosen MPS/MRRS settings to one device. */
static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
{
	int mps, orig_mps;

	if (!pci_is_pcie(dev))
		return 0;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
		pcie_bus_detect_mps(dev);
		return 0;
	}

	mps = 128 << *(u8 *)data;	/* data is the encoded fabric smpss */
	orig_mps = pcie_get_mps(dev);

	pcie_write_mps(dev, mps);
	pcie_write_mrrs(dev);

	dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), "
		 "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss,
		 orig_mps, pcie_get_readrq(dev));

	return 0;
}

/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
 * parents then children fashion.  If this changes, then this code will not
 * work as designed.
 */
/* Configure MPS/MRRS for every PCIe device on @bus and below. */
void pcie_bus_configure_settings(struct pci_bus *bus)
{
	u8 smpss = 0;

	if (!bus->self)
		return;

	if (!pci_is_pcie(bus->self))
		return;

	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
	 * to be aware of the MPS of the destination.  To work around this,
	 * simply force the MPS of the entire system to the smallest possible.
	 */
	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
		smpss = 0;

	if (pcie_bus_config == PCIE_BUS_SAFE) {
		smpss = bus->self->pcie_mpss;

		/* find the smallest supported MPSS in the whole fabric */
		pcie_find_smpss(bus->self, &smpss);
		pci_walk_bus(bus, pcie_find_smpss, &smpss);
	}

	pcie_bus_configure_set(bus->self, &smpss);
	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
}
EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);

/*
 * Scan all 32 slots on @bus, run bus fixups, then recurse into any
 * bridges found (two passes).  Returns the highest subordinate bus
 * number discovered.
 */
unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->busn_res.start;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		bus->is_added = 1;
	}

	for (pass = 0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (pci_is_bridge(dev))
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}

/**
 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
 * @bridge: Host bridge to set up.
 *
 * Default empty implementation.  Replace with an architecture-specific setup
 * routine, if necessary.
 */
int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	return 0;
}

void __weak pcibios_add_bus(struct pci_bus *bus)
{
}

void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}

/*
 * Create and register a root bus @bus in the domain implied by @sysdata,
 * along with its host bridge device, and attach the initial bridge
 * windows from @resources.  Returns the new bus, or NULL if the bus was
 * already known or on any registration/allocation failure.
 */
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	int error;
	struct pci_host_bridge *bridge;
	struct pci_bus *b, *b2;
	struct pci_host_bridge_window *window, *n;
	struct resource *res;
	resource_size_t offset;
	char bus_addr[64];
	char *fmt;

	b = pci_alloc_bus();
	if (!b)
		return NULL;

	b->sysdata = sysdata;
	b->ops = ops;
	b->number = b->busn_res.start = bus;
	b2 = pci_find_bus(pci_domain_nr(b), bus);
	if (b2) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b2->dev, "bus already known\n");
		goto err_out;
	}

	bridge = pci_alloc_host_bridge(b);
	if (!bridge)
		goto err_out;

	bridge->dev.parent = parent;
	bridge->dev.release = pci_release_host_bridge_dev;
	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
	error = pcibios_root_bridge_prepare(bridge);
	if (error) {
		kfree(bridge);
		goto err_out;
	}

	error = device_register(&bridge->dev);
	if (error) {
		put_device(&bridge->dev);
		goto err_out;
	}
	b->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(b->bridge);
	pci_set_bus_of_node(b);

	if (!parent)
		set_dev_node(b->bridge, pcibus_to_node(b));

	b->dev.class = &pcibus_class;
	b->dev.parent = b->bridge;
	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&b->dev);
	if (error)
		goto class_dev_reg_err;

	pcibios_add_bus(b);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
	else
		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));

	/* Add initial resources to the bus */
	list_for_each_entry_safe(window, n, resources, list) {
		list_move_tail(&window->list, &bridge->windows);
		res = window->res;
		offset = window->offset;
		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(b, bus, res->end);
		else
			pci_bus_add_resource(b, res, 0);
		if (offset) {
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";
			snprintf(bus_addr, sizeof(bus_addr), fmt,
				 (unsigned long long) (res->start - offset),
				 (unsigned long long) (res->end - offset));
		} else
			bus_addr[0] = '\0';
		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return b;

class_dev_reg_err:
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);
err_out:
	kfree(b);
	return NULL;
}

/*
 * Claim [@bus, @bus_max] as @b's bus-number resource, under the parent
 * bridge's range or (for a root bus) the domain-wide range.  Returns
 * nonzero on success, 0 on conflict.
 */
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource *parent_res, *conflict;

	res->start = bus;
	res->end = bus_max;
	res->flags = IORESOURCE_BUS;

	if (!pci_is_root_bus(b))
		parent_res = &b->parent->busn_res;
	else {
		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
		res->flags |= IORESOURCE_PCI_FIXED;
	}

	conflict = request_resource_conflict(parent_res, res);

	if (conflict)
		dev_printk(KERN_DEBUG, &b->dev,
			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
			    res, pci_is_root_bus(b) ? "domain " : "",
			    parent_res, conflict->name, conflict);

	return conflict == NULL;
}

/*
 * Shrink or grow @b's bus-number resource so it ends at @bus_max,
 * inserting the resource first if it was never claimed.
 */
int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource old_res = *res;
	resource_size_t size;
	int ret;

	if (res->start > bus_max)
		return -EINVAL;

	size = bus_max - res->start + 1;
	ret = adjust_resource(res, res->start, size);
	dev_printk(KERN_DEBUG, &b->dev,
			"busn_res: %pR end %s updated to %02x\n",
			&old_res, ret ? "can not be" : "is", bus_max);

	if (!ret && !res->parent)
		pci_bus_insert_busn_res(b, res->start, res->end);

	return ret;
}

/* Release @b's bus-number resource if it was ever claimed. */
void pci_bus_release_busn_res(struct pci_bus *b)
{
	struct resource *res = &b->busn_res;
	int ret;

	if (!res->flags || !res->parent)
		return;

	ret = release_resource(res);
	dev_printk(KERN_DEBUG, &b->dev,
			"busn_res: %pR %s released\n",
			res, ret ? "can not be" : "is");
}

/*
 * Create a root bus, scan it, and add the discovered devices.  If
 * @resources supplies no IORESOURCE_BUS window, a default [bus %02x-ff]
 * range is assumed and trimmed to what the scan actually found.
 */
struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	struct pci_host_bridge_window *window;
	bool found = false;
	struct pci_bus *b;
	int max;

	list_for_each_entry(window, resources, list)
		if (window->res->flags & IORESOURCE_BUS) {
			found = true;
			break;
		}

	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
	if (!b)
		return NULL;

	if (!found) {
		dev_info(&b->dev,
			 "No busn resource found for root bus, will use [bus %02x-ff]\n",
			 bus);
		pci_bus_insert_busn_res(b, bus, 255);
	}

	max = pci_scan_child_bus(b);

	if (!found)
		pci_bus_update_busn_res_end(b, max);

	pci_bus_add_devices(b);
	return b;
}
EXPORT_SYMBOL(pci_scan_root_bus);

/* Deprecated; use pci_scan_root_bus() instead */
struct pci_bus *pci_scan_bus_parented(struct device *parent,
		int bus, struct pci_ops *ops, void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, &ioport_resource);
	pci_add_resource(&resources, &iomem_resource);
	pci_add_resource(&resources, &busn_resource);
	b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
	if (b)
		pci_scan_child_bus(b);
	else
		pci_free_resource_list(&resources);
	return b;
}
EXPORT_SYMBOL(pci_scan_bus_parented);

/* Create, scan, and add a root bus with default (full) root resources. */
struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
					void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, &ioport_resource);
	pci_add_resource(&resources, &iomem_resource);
	pci_add_resource(&resources, &busn_resource);
	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
	if (b) {
		pci_scan_child_bus(b);
		pci_bus_add_devices(b);
	} else {
		pci_free_resource_list(&resources);
	}
	return b;
}
EXPORT_SYMBOL(pci_scan_bus);

/**
 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
 * @bridge: PCI bridge for the bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * and enable them, resizing bridge mmio/io resource if necessary
 * and possible.  The caller must ensure the child devices are already
 * removed for resizing to occur.
 *
 * Returns the max number of subordinate bus discovered.
 */
unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
{
	unsigned int max;
	struct pci_bus *bus = bridge->subordinate;

	max = pci_scan_child_bus(bus);

	pci_assign_unassigned_bridge_resources(bridge);

	pci_bus_add_devices(bus);

	return max;
}

/**
 * pci_rescan_bus - scan a PCI bus for devices.
 * @bus: PCI bus to scan
 *
 * Scan a PCI bus and child buses for new devices, adds them,
 * and enables them.
 *
 * Returns the max number of subordinate bus discovered.
 */
unsigned int pci_rescan_bus(struct pci_bus *bus)
{
	unsigned int max;

	max = pci_scan_child_bus(bus);
	pci_assign_unassigned_bus_resources(bus);
	pci_bus_add_devices(bus);

	return max;
}
EXPORT_SYMBOL_GPL(pci_rescan_bus);

EXPORT_SYMBOL(pci_add_new_bus);
EXPORT_SYMBOL(pci_scan_slot);
EXPORT_SYMBOL(pci_scan_bridge);
EXPORT_SYMBOL_GPL(pci_scan_child_bus);

/*
 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
 * routines should always be executed under this mutex.
 */
static DEFINE_MUTEX(pci_rescan_remove_lock);

/* Take the global rescan/remove mutex. */
void pci_lock_rescan_remove(void)
{
	mutex_lock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);

/* Release the global rescan/remove mutex. */
void pci_unlock_rescan_remove(void)
{
	mutex_unlock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);

/*
 * Ordering callback for bus_sort_breadthfirst(): compare two PCI devices
 * by domain, then bus number, then devfn.
 */
static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
{
	const struct pci_dev *a = to_pci_dev(d_a);
	const struct pci_dev *b = to_pci_dev(d_b);

	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;

	if      (a->bus->number < b->bus->number) return -1;
	else if (a->bus->number > b->bus->number) return  1;

	if      (a->devfn < b->devfn) return -1;
	else if (a->devfn > b->devfn) return  1;

	return 0;
}

/* Re-order all devices on pci_bus_type into breadth-first order. */
void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}