/*
 * probe.c - PCI detection and setup code
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
#include <asm-generic/pci-bridge.h>
#include "pci.h"

#define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR	3

struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};

/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

static LIST_HEAD(pci_domain_busn_res_list);

struct pci_domain_busn_res {
	struct list_head list;
	struct resource res;
	int domain_nr;
};

static struct resource *get_pci_domain_busn_res(int domain_nr)
{
	struct pci_domain_busn_res *r;

	list_for_each_entry(r, &pci_domain_busn_res_list, list)
		if (r->domain_nr == domain_nr)
			return &r->res;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->domain_nr = domain_nr;
	r->res.start = 0;
	r->res.end = 0xff;
	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;

	list_add_tail(&r->list, &pci_domain_busn_res_list);

	return &r->res;
}

static int find_anything(struct device *dev, void *data)
{
	return 1;
}

/*
 * Some device drivers need to know if PCI is initialized.
 * Basically, we think PCI is not initialized when there
 * is no device to be found on the pci_bus_type.
 */
int no_pci_devices(void)
{
	struct device *dev;
	int no_devices;

	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
	no_devices = (dev == NULL);
	put_device(dev);
	return no_devices;
}
EXPORT_SYMBOL(no_pci_devices);

/*
 * PCI Bus Class
 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	if (pci_bus->bridge)
		put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}

static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);

static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/* Get the lowest of them to find the decode size, and
	   from that the extent.  */
	size = (size & ~(size-1)) - 1;

	/* base == maxbase can be valid only if the BAR has
	   already been programmed with all 1s.
	 */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}

static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	u32 mem_type;
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		flags |= IORESOURCE_IO;
		return flags;
	}

	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
	flags |= IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	default:
		/* mem unknown type treated as 32-bit BAR */
		break;
	}
	return flags;
}

#define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

/**
 * __pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l, sz, mask;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;
	bool bar_too_big = false, bar_disabled = false;

	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (!sz || sz == 0xffffffff)
		goto fail;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l &= PCI_BASE_ADDRESS_IO_MASK;
			mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
		} else {
			l &= PCI_BASE_ADDRESS_MEM_MASK;
			mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		res->flags |= (l & IORESOURCE_ROM_ENABLE);
		l &= PCI_ROM_ADDRESS_MASK;
		mask = (u32)PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		u64 l64 = l;
		u64 sz64 = sz;
		u64 mask64 = mask | (u64)~0 << 32;

		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);

		sz64 = pci_size(l64, sz64, mask64);

		if (!sz64)
			goto fail;

		if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
			bar_too_big = true;
			goto fail;
		}

		if ((sizeof(resource_size_t) < 8) && l) {
			/* Address above 32-bit boundary; disable the BAR */
			pci_write_config_dword(dev, pos, 0);
			pci_write_config_dword(dev, pos + 4, 0);
			region.start = 0;
			region.end = sz64;
			bar_disabled = true;
		} else {
			region.start = l64;
			region.end = l64 + sz64;
		}
	} else {
		sz = pci_size(l, sz, mask);

		if (!sz)
			goto fail;

		region.start = l;
		region.end = l + sz;
	}

	pcibios_bus_to_resource(dev, res, &region);
	pcibios_resource_to_bus(dev, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU).  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %pa invalid; forcing reassignment\n",
			 pos, &region.start);
		res->flags |= IORESOURCE_UNSET;
		res->end -= res->start;
		res->start = 0;
	}

	goto out;


fail:
	res->flags = 0;
out:
	if (!dev->mmio_always_on &&
	    (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (bar_too_big)
		dev_err(&dev->dev, "reg 0x%x: can't handle 64-bit BAR\n", pos);
	if (res->flags && !bar_disabled)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ?
		1 : 0;
}

static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
				IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}

static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
	}
}

static void pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
	}
}

static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
#if BITS_PER_LONG == 64
			base |= ((unsigned long) mem_base_hi) << 32;
			limit |= ((unsigned long) mem_limit_hi) << 32;
#else
			if (mem_base_hi || mem_limit_hi) {
				dev_err(&dev->dev, "can't handle 64-bit address space for bridge\n");
				return;
			}
#endif
		}
	}
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
	}
}

void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   " bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}

static struct pci_bus *pci_alloc_bus(void)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return NULL;

	INIT_LIST_HEAD(&b->node);
	INIT_LIST_HEAD(&b->children);
	INIT_LIST_HEAD(&b->devices);
	INIT_LIST_HEAD(&b->slots);
	INIT_LIST_HEAD(&b->resources);
	b->max_bus_speed = PCI_SPEED_UNKNOWN;
	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
	return b;
}

static void pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}

static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
{
	struct pci_host_bridge *bridge;

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);
	bridge->bus = b;
	return bridge;
}

const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

const unsigned char
pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};

void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);

static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};

static enum pci_bus_speed agp_speed(int agp3, int agpstat)
{
	int index = 0;

	if (agpstat & 4)
		index = 3;
	else if (agpstat & 2)
		index = 2;
	else if (agpstat & 1)
		index = 1;
	else
		goto out;

	if (agp3) {
		index += 2;
		if (index == 5)
			index = 0;
	}

out:
	return agp_speeds[index];
}


static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2) {
				max = PCI_SPEED_133MHz_PCIX_ECC;
			} else {
				max = PCI_SPEED_133MHz_PCIX;
			}
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}


static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus();
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}

struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}

static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
{
	struct pci_bus *parent = child->parent;

	/* Attempts to fix that up are really dangerous unless
	   we're going to re-assign all bus numbers. */
	if (!pcibios_assign_all_busses())
		return;

	while (parent->parent && parent->busn_res.end < max) {
		parent->busn_res.end = max;
		pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
		parent = parent->parent;
	}
}

/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
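 *
 * Returns the highest bus number known to be in use behind this bridge
 * (the updated "max" for the caller).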
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate)) {
		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * If we already got to this bus through a different bridge,
		 * don't re-add it. This can happen with the i450NX chipset.
		 *
		 * However, we continue to descend down the hierarchy and
		 * scan remaining child buses.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > max)
			max = cmax;
		if (child->busn_res.end > max)
			max = child->busn_res.end;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken)
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in
		 * this case we only re-scan this bus. */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, ++max);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, max, 0xff);
		}
		buses = (buses & 0xff000000)
			| ((unsigned int)(child->primary)        <<  0)
			| ((unsigned int)(child->busn_res.start) <<  8)
			| ((unsigned int)(child->busn_res.end)   << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			/*
			 * Adjust subordinate busnr in parent buses.
			 * We do this before scanning for children because
			 * some devices may not be detected if the bios
			 * was lazy.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
			/* Now we can scan all subordinate buses... */
			max = pci_scan_child_bus(child);
			/*
			 * now fix it up again since we have found
			 * the real value of max.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
						 max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
			pci_fixup_parent_subordinate_busnr(child, max);
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
				 &child->busn_res,
				 (bus->number > child->busn_res.end &&
				  bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				 bus->self->transparent ? " transparent" : "",
				 dev_name(&bus->dev),
				 &bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	return max;
}

/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;
	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_flags_reg = reg16;
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
}

void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
}

#define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)

/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor, class, memory and IO-space addresses, IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and negative if unknown type of device (not normal,
 * bridge or CardBus).
 */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u8 hdr_type;
	struct pci_slot *slot;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	list_for_each_entry(slot, &dev->bus->slots, list)
		if (PCI_SLOT(dev->devfn) == slot->number)
			dev->slot = slot;

	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.
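	   Drivers for devices capable of 64-bit DMA raise the mask later
	   via pci_set_dma_mask().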
	 */
	dev->dma_mask = 0xffffffff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		/* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code.  Legacy mode ATA controllers have fixed
		 * addresses.  These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
			}
			if ((progif & 4) == 0) {
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that a subtractive
		   decoding (i.e. transparent) bridge have a programming
		   interface code of 0x01.
		 */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
			dev->hdr_type);
		return -EIO;

	bad:
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
			dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}

static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}

/**
 * pci_release_dev - free a pci device structure when all users of it are finished.
 * @dev: device that's been disconnected
 *
 * Will be called only by the device core when all users of this pci device are
 * done.
 */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	pci_release_capabilities(pci_dev);
	pci_release_of_node(pci_dev);
	pcibios_release_device(pci_dev);
	pci_bus_put(pci_dev->bus);
	kfree(pci_dev);
}

/**
 * pci_cfg_space_size - get the configuration space size of the PCI device.
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
 * access it.  Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge.  So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
int pci_cfg_space_size_ext(struct pci_dev *dev)
{
	u32 status;
	int pos = PCI_CFG_SPACE_SIZE;

	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
		goto fail;
	if (status == 0xffffffff)
		goto fail;

	return PCI_CFG_SPACE_EXP_SIZE;

fail:
	return PCI_CFG_SPACE_SIZE;
}

int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;
	u16 class;

	class = dev->class >> 8;
	if (class == PCI_CLASS_BRIDGE_HOST)
		return pci_cfg_space_size_ext(dev);

	if (!pci_is_pcie(dev)) {
		pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
		if (!pos)
			goto fail;

		pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
		if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
			goto fail;
	}

	return pci_cfg_space_size_ext(dev);

fail:
	return PCI_CFG_SPACE_SIZE;
}

struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
{
	struct pci_dev *dev;

	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	INIT_LIST_HEAD(&dev->bus_list);
	dev->dev.type = &pci_dev_type;
	dev->bus = pci_bus_get(bus);

	return dev;
}
EXPORT_SYMBOL(pci_alloc_dev);

struct pci_dev *alloc_pci_dev(void)
{
	return pci_alloc_dev(NULL);
}
EXPORT_SYMBOL(alloc_pci_dev);

bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
				int crs_timeout)
{
	int delay = 1;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
		return false;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (*l == 0xffffffff || *l == 0x00000000 ||
	    *l == 0x0000ffff || *l == 0xffff0000)
		return false;

	/* Configuration request Retry Status */
	while (*l == 0xffff0001) {
		if (!crs_timeout)
			return false;

		msleep(delay);
		delay *= 2;
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;
		/* Card hasn't responded in 60 seconds?  Must be stuck. */
		if (delay > crs_timeout) {
			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
			       pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
			       PCI_FUNC(devfn));
			return false;
		}
	}

	return true;
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);

/*
 * Read the config data for a PCI device, sanity-check it
 * and fill in the dev structure...
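 *
 * Returns NULL if nothing responds at this devfn or if the device
 * cannot be set up.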
 */
static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	u32 l;

	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
		return NULL;

	dev = pci_alloc_dev(bus);
	if (!dev)
		return NULL;

	dev->devfn = devfn;
	dev->vendor = l & 0xffff;
	dev->device = (l >> 16) & 0xffff;

	pci_set_of_node(dev);

	if (pci_setup_device(dev)) {
		pci_bus_put(dev->bus);
		kfree(dev);
		return NULL;
	}

	return dev;
}

static void pci_init_capabilities(struct pci_dev *dev)
{
	/* MSI/MSI-X list */
	pci_msi_init_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_pci22_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);
}

void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	int ret;

	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);

	ret = pcibios_add_device(dev);
	WARN_ON(ret < 0);

	/* Notifier could use PCI capabilities */
	dev->match_driver = false;
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);

	pci_proc_attach_device(dev);
}

struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;

	dev = pci_get_slot(bus, devfn);
	if (dev) {
		pci_dev_put(dev);
		return dev;
	}

	dev = pci_scan_device(bus, devfn);
	if (!dev)
		return NULL;

	pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(pci_scan_single_device);

static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
{
	int pos;
	u16 cap = 0;
	unsigned next_fn;

	if (pci_ari_enabled(bus)) {
		if (!dev)
			return 0;
		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
		if (!pos)
			return 0;

		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
		next_fn = PCI_ARI_CAP_NFN(cap);
		if (next_fn <= fn)
			return 0;	/* protect against malformed list */

		return next_fn;
	}

	/* dev may be NULL for non-contiguous multifunction devices */
	if (!dev || dev->multifunction)
		return (fn + 1) % 8;

	return 0;
}

static int only_one_child(struct pci_bus *bus)
{
	struct pci_dev *parent = bus->self;

	if (!parent || !pci_is_pcie(parent))
		return 0;
	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
		return 1;
	if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
		return 1;
	return 0;
}

/**
 * pci_scan_slot - scan a PCI slot on a bus for devices.
 * @bus: PCI bus to scan
 * @devfn: slot number to scan (must have zero function)
 *
 * Scan a PCI slot on the specified PCI bus for devices, adding
 * discovered devices to the @bus->devices list.  New devices
 * will not have is_added set.
 *
 * Returns the number of new devices found.
 */
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
	unsigned fn, nr = 0;
	struct pci_dev *dev;

	if (only_one_child(bus) && (devfn > 0))
		return 0;	/* Already scanned the entire slot */

	dev = pci_scan_single_device(bus, devfn);
	if (!dev)
		return 0;
	if (!dev->is_added)
		nr++;

	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
		dev = pci_scan_single_device(bus, devfn + fn);
		if (dev) {
			if (!dev->is_added)
				nr++;
			dev->multifunction = 1;
		}
	}

	/* only one slot has pcie device */
	if (bus->self && nr)
		pcie_aspm_init_link_state(bus->self);

	return nr;
}

static int pcie_find_smpss(struct pci_dev *dev, void *data)
{
	u8 *smpss = data;

	if (!pci_is_pcie(dev))
		return 0;

	/*
	 * We don't have a way to change MPS settings on devices that have
	 * drivers attached.  A hot-added device might support only the minimum
	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
	 * where devices may be hot-added, we limit the fabric MPS to 128 so
	 * hot-added devices will work correctly.
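	 * (If the fabric were left at a larger MPS, such a hot-added device
	 * could be sent TLPs bigger than it can handle.)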
	 *
	 * However, if we hot-add a device to a slot directly below a Root
	 * Port, it's impossible for there to be other existing devices below
	 * the port.  We don't limit the MPS in this case because we can
	 * reconfigure MPS on both the Root Port and the hot-added device,
	 * and there are no other devices involved.
	 *
	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
	 */
	if (dev->is_hotplug_bridge &&
	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		*smpss = 0;

	if (*smpss > dev->pcie_mpss)
		*smpss = dev->pcie_mpss;

	return 0;
}

static void pcie_write_mps(struct pci_dev *dev, int mps)
{
	int rc;

	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		mps = 128 << dev->pcie_mpss;

		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
		    dev->bus->self)
			/* For "Performance", the assumption is made that
			 * downstream communication will never be larger than
			 * the MRRS.  So, the MPS only needs to be configured
			 * for the upstream communication.  This being the case,
			 * walk from the top down and set the MPS of the child
			 * to that of the parent bus.
			 *
			 * Configure the device MPS with the smaller of the
			 * device MPSS or the bridge MPS (which is assumed to be
			 * properly configured at this point to the largest
			 * allowable MPS based on its parent bus).
			 */
			mps = min(mps, pcie_get_mps(dev->bus->self));
	}

	rc = pcie_set_mps(dev, mps);
	if (rc)
		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
}

static void pcie_write_mrrs(struct pci_dev *dev)
{
	int rc, mrrs;

	/* In the "safe" case, do not configure the MRRS.  There appear to be
	 * issues with setting MRRS to 0 on a number of devices.
	 */
	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
		return;

	/* For Max performance, the MRRS must be set to the largest supported
	 * value.  However, it cannot be configured larger than the MPS the
	 * device or the bus can support.  This should already be properly
	 * configured by a prior call to pcie_write_mps.
	 */
	mrrs = pcie_get_mps(dev);

	/* MRRS is a R/W register.  Invalid values can be written, but a
	 * subsequent read will verify if the value is acceptable or not.
	 * If the MRRS value provided is not acceptable (e.g., too large),
	 * shrink the value until it is acceptable to the HW.
	 */
	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
		rc = pcie_set_readrq(dev, mrrs);
		if (!rc)
			break;

		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
		mrrs /= 2;
	}

	if (mrrs < 128)
		dev_err(&dev->dev, "MRRS was unable to be configured with a "
			"safe value. "
			"If problems are experienced, try running "
			"with pci=pcie_bus_safe.\n");
}

static void pcie_bus_detect_mps(struct pci_dev *dev)
{
	struct pci_dev *bridge = dev->bus->self;
	int mps, p_mps;

	if (!bridge)
		return;

	mps = pcie_get_mps(dev);
	p_mps = pcie_get_mps(bridge);

	if (mps != p_mps)
		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 mps, pci_name(bridge), p_mps);
}

static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
{
	int mps, orig_mps;

	if (!pci_is_pcie(dev))
		return 0;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
		pcie_bus_detect_mps(dev);
		return 0;
	}

	mps = 128 << *(u8 *)data;
	orig_mps = pcie_get_mps(dev);

	pcie_write_mps(dev, mps);
	pcie_write_mrrs(dev);

	dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
		 pcie_get_mps(dev), 128 << dev->pcie_mpss,
		 orig_mps, pcie_get_readrq(dev));

	return 0;
}

/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
 * parents then children fashion.  If this changes, then this code will not
 * work as designed.
 */
void pcie_bus_configure_settings(struct pci_bus *bus)
{
	u8 smpss;

	if (!bus->self)
		return;

	if (!pci_is_pcie(bus->self))
		return;

	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
	 * to be aware of the MPS of the destination.  To work around this,
	 * simply force the MPS of the entire system to the smallest possible.
	 */
	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
		smpss = 0;

	if (pcie_bus_config == PCIE_BUS_SAFE) {
		smpss = bus->self->pcie_mpss;

		pcie_find_smpss(bus->self, &smpss);
		pci_walk_bus(bus, pcie_find_smpss, &smpss);
	}

	pcie_bus_configure_set(bus->self, &smpss);
	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
}
EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);

unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->busn_res.start;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		bus->is_added = 1;
	}

	for (pass = 0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
			    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}

/**
 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
 * @bridge: Host bridge to set up.
 *
 * Default empty implementation.  Replace with an architecture-specific setup
 * routine, if necessary.
 */
int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	return 0;
}

void __weak pcibios_add_bus(struct pci_bus *bus)
{
}

void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}

struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	int error;
	struct pci_host_bridge *bridge;
	struct pci_bus *b, *b2;
	struct pci_host_bridge_window *window, *n;
	struct resource *res;
	resource_size_t offset;
	char bus_addr[64];
	char *fmt;

	b = pci_alloc_bus();
	if (!b)
		return NULL;

	b->sysdata = sysdata;
	b->ops = ops;
	b->number = b->busn_res.start = bus;
	b2 = pci_find_bus(pci_domain_nr(b), bus);
	if (b2) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b2->dev, "bus already known\n");
		goto err_out;
	}

	bridge = pci_alloc_host_bridge(b);
	if (!bridge)
		goto err_out;

	bridge->dev.parent = parent;
	bridge->dev.release = pci_release_host_bridge_dev;
	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
	error = pcibios_root_bridge_prepare(bridge);
	if (error) {
		kfree(bridge);
		goto err_out;
	}

	error = device_register(&bridge->dev);
	if (error) {
		put_device(&bridge->dev);
		goto err_out;
	}
	b->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(b->bridge);
	pci_set_bus_of_node(b);

	if (!parent)
		set_dev_node(b->bridge, pcibus_to_node(b));

	b->dev.class = &pcibus_class;
	b->dev.parent = b->bridge;
	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&b->dev);
	if (error)
		goto class_dev_reg_err;

	pcibios_add_bus(b);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
	else
		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));

	/* Add initial resources to the bus */
	list_for_each_entry_safe(window, n, resources, list) {
		list_move_tail(&window->list, &bridge->windows);
		res = window->res;
		offset = window->offset;
		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(b, bus, res->end);
		else
			pci_bus_add_resource(b, res, 0);
		if (offset) {
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";
			snprintf(bus_addr, sizeof(bus_addr), fmt,
				 (unsigned long long) (res->start - offset),
				 (unsigned long long) (res->end - offset));
		} else
			bus_addr[0] = '\0';
		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return b;

class_dev_reg_err:
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);
err_out:
	kfree(b);
	return NULL;
}

int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource
		*parent_res, *conflict;

	res->start = bus;
	res->end = bus_max;
	res->flags = IORESOURCE_BUS;

	if (!pci_is_root_bus(b))
		parent_res = &b->parent->busn_res;
	else {
		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
		res->flags |= IORESOURCE_PCI_FIXED;
	}

	conflict = insert_resource_conflict(parent_res, res);

	if (conflict)
		dev_printk(KERN_DEBUG, &b->dev,
			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
			   res, pci_is_root_bus(b) ? "domain " : "",
			   parent_res, conflict->name, conflict);

	return conflict == NULL;
}

int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource old_res = *res;
	resource_size_t size;
	int ret;

	if (res->start > bus_max)
		return -EINVAL;

	size = bus_max - res->start + 1;
	ret = adjust_resource(res, res->start, size);
	dev_printk(KERN_DEBUG, &b->dev,
		   "busn_res: %pR end %s updated to %02x\n",
		   &old_res, ret ? "can not be" : "is", bus_max);

	if (!ret && !res->parent)
		pci_bus_insert_busn_res(b, res->start, res->end);

	return ret;
}

void pci_bus_release_busn_res(struct pci_bus *b)
{
	struct resource *res = &b->busn_res;
	int ret;

	if (!res->flags || !res->parent)
		return;

	ret = release_resource(res);
	dev_printk(KERN_DEBUG, &b->dev,
		   "busn_res: %pR %s released\n",
		   res, ret ? "can not be" : "is");
}

struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	struct pci_host_bridge_window *window;
	bool found = false;
	struct pci_bus *b;
	int max;

	list_for_each_entry(window, resources, list)
		if (window->res->flags & IORESOURCE_BUS) {
			found = true;
			break;
		}

	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
	if (!b)
		return NULL;

	if (!found) {
		dev_info(&b->dev,
			 "No busn resource found for root bus, will use [bus %02x-ff]\n",
			 bus);
		pci_bus_insert_busn_res(b, bus, 255);
	}

	max = pci_scan_child_bus(b);

	if (!found)
		pci_bus_update_busn_res_end(b, max);

	pci_bus_add_devices(b);
	return b;
}
EXPORT_SYMBOL(pci_scan_root_bus);

/* Deprecated; use pci_scan_root_bus() instead */
struct pci_bus *pci_scan_bus_parented(struct device *parent,
		int bus, struct pci_ops *ops, void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, &ioport_resource);
	pci_add_resource(&resources, &iomem_resource);
	pci_add_resource(&resources, &busn_resource);
	b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
	if (b)
		pci_scan_child_bus(b);
	else
		pci_free_resource_list(&resources);
	return b;
}
EXPORT_SYMBOL(pci_scan_bus_parented);

struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
					void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, &ioport_resource);
	pci_add_resource(&resources, &iomem_resource);
	pci_add_resource(&resources, &busn_resource);
	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
	if (b) {
		pci_scan_child_bus(b);
		pci_bus_add_devices(b);
	} else {
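		/* Root bus creation failed; free the resource list built above */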
		pci_free_resource_list(&resources);
	}
	return b;
}
EXPORT_SYMBOL(pci_scan_bus);

/**
 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
 * @bridge: PCI bridge for the bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * and enable them, resizing bridge mmio/io resource if necessary
 * and possible.  The caller must ensure the child devices are already
 * removed for resizing to occur.
 *
 * Returns the max number of subordinate buses discovered.
 */
unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
{
	unsigned int max;
	struct pci_bus *bus = bridge->subordinate;

	max = pci_scan_child_bus(bus);

	pci_assign_unassigned_bridge_resources(bridge);

	pci_bus_add_devices(bus);

	return max;
}

/**
 * pci_rescan_bus - scan a PCI bus for devices.
 * @bus: PCI bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * and enable them.
 *
 * Returns the max number of subordinate buses discovered.
 */
unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
{
	unsigned int max;

	max = pci_scan_child_bus(bus);
	pci_assign_unassigned_bus_resources(bus);
	pci_bus_add_devices(bus);

	return max;
}
EXPORT_SYMBOL_GPL(pci_rescan_bus);

EXPORT_SYMBOL(pci_add_new_bus);
EXPORT_SYMBOL(pci_scan_slot);
EXPORT_SYMBOL(pci_scan_bridge);
EXPORT_SYMBOL_GPL(pci_scan_child_bus);

static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
{
	const struct pci_dev *a = to_pci_dev(d_a);
	const struct pci_dev *b = to_pci_dev(d_b);

	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;

	if      (a->bus->number < b->bus->number) return -1;
	else if (a->bus->number > b->bus->number) return  1;

	if      (a->devfn < b->devfn) return -1;
	else if (a->devfn > b->devfn) return  1;

	return 0;
}

void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}