1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * probe.c - PCI detection and setup code 4 */ 5 6 #include <linux/kernel.h> 7 #include <linux/delay.h> 8 #include <linux/init.h> 9 #include <linux/pci.h> 10 #include <linux/of_device.h> 11 #include <linux/of_pci.h> 12 #include <linux/pci_hotplug.h> 13 #include <linux/slab.h> 14 #include <linux/module.h> 15 #include <linux/cpumask.h> 16 #include <linux/pci-aspm.h> 17 #include <linux/aer.h> 18 #include <linux/acpi.h> 19 #include <linux/irqdomain.h> 20 #include <linux/pm_runtime.h> 21 #include "pci.h" 22 23 #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ 24 #define CARDBUS_RESERVE_BUSNR 3 25 26 static struct resource busn_resource = { 27 .name = "PCI busn", 28 .start = 0, 29 .end = 255, 30 .flags = IORESOURCE_BUS, 31 }; 32 33 /* Ugh. Need to stop exporting this to modules. */ 34 LIST_HEAD(pci_root_buses); 35 EXPORT_SYMBOL(pci_root_buses); 36 37 static LIST_HEAD(pci_domain_busn_res_list); 38 39 struct pci_domain_busn_res { 40 struct list_head list; 41 struct resource res; 42 int domain_nr; 43 }; 44 45 static struct resource *get_pci_domain_busn_res(int domain_nr) 46 { 47 struct pci_domain_busn_res *r; 48 49 list_for_each_entry(r, &pci_domain_busn_res_list, list) 50 if (r->domain_nr == domain_nr) 51 return &r->res; 52 53 r = kzalloc(sizeof(*r), GFP_KERNEL); 54 if (!r) 55 return NULL; 56 57 r->domain_nr = domain_nr; 58 r->res.start = 0; 59 r->res.end = 0xff; 60 r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED; 61 62 list_add_tail(&r->list, &pci_domain_busn_res_list); 63 64 return &r->res; 65 } 66 67 static int find_anything(struct device *dev, void *data) 68 { 69 return 1; 70 } 71 72 /* 73 * Some device drivers need know if PCI is initiated. 74 * Basically, we think PCI is not initiated when there 75 * is no device to be found on the pci_bus_type. 76 */ 77 int no_pci_devices(void) 78 { 79 struct device *dev; 80 int no_devices; 81 82 dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything); 83 no_devices = (dev == NULL); 84 put_device(dev); 85 return no_devices; 86 } 87 EXPORT_SYMBOL(no_pci_devices); 88 89 /* 90 * PCI Bus Class 91 */ 92 static void release_pcibus_dev(struct device *dev) 93 { 94 struct pci_bus *pci_bus = to_pci_bus(dev); 95 96 put_device(pci_bus->bridge); 97 pci_bus_remove_resources(pci_bus); 98 pci_release_bus_of_node(pci_bus); 99 kfree(pci_bus); 100 } 101 102 static struct class pcibus_class = { 103 .name = "pci_bus", 104 .dev_release = &release_pcibus_dev, 105 .dev_groups = pcibus_groups, 106 }; 107 108 static int __init pcibus_class_init(void) 109 { 110 return class_register(&pcibus_class); 111 } 112 postcore_initcall(pcibus_class_init); 113 114 static u64 pci_size(u64 base, u64 maxbase, u64 mask) 115 { 116 u64 size = mask & maxbase; /* Find the significant bits */ 117 if (!size) 118 return 0; 119 120 /* 121 * Get the lowest of them to find the decode size, and from that 122 * the extent. 123 */ 124 size = (size & ~(size-1)) - 1; 125 126 /* 127 * base == maxbase can be valid only if the BAR has already been 128 * programmed with all 1s. 129 */ 130 if (base == maxbase && ((base | size) & mask) != mask) 131 return 0; 132 133 return size; 134 } 135 136 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar) 137 { 138 u32 mem_type; 139 unsigned long flags; 140 141 if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { 142 flags = bar & ~PCI_BASE_ADDRESS_IO_MASK; 143 flags |= IORESOURCE_IO; 144 return flags; 145 } 146 147 flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK; 148 flags |= IORESOURCE_MEM; 149 if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) 150 flags |= IORESOURCE_PREFETCH; 151 152 mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK; 153 switch (mem_type) { 154 case PCI_BASE_ADDRESS_MEM_TYPE_32: 155 break; 156 case PCI_BASE_ADDRESS_MEM_TYPE_1M: 157 /* 1M mem BAR treated as 32-bit BAR */ 158 break; 159 case PCI_BASE_ADDRESS_MEM_TYPE_64: 160 flags |= IORESOURCE_MEM_64; 161 break; 162 default: 163 /* mem unknown type treated as 32-bit BAR */ 164 break; 165 } 166 return flags; 167 } 168 169 #define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO) 170 171 /** 172 * pci_read_base - Read a PCI BAR 173 * @dev: the PCI device 174 * @type: type of the BAR 175 * @res: resource buffer to be filled in 176 * @pos: BAR position in the config space 177 * 178 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit. 179 */ 180 int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, 181 struct resource *res, unsigned int pos) 182 { 183 u32 l = 0, sz = 0, mask; 184 u64 l64, sz64, mask64; 185 u16 orig_cmd; 186 struct pci_bus_region region, inverted_region; 187 188 mask = type ? PCI_ROM_ADDRESS_MASK : ~0; 189 190 /* No printks while decoding is disabled! */ 191 if (!dev->mmio_always_on) { 192 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd); 193 if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) { 194 pci_write_config_word(dev, PCI_COMMAND, 195 orig_cmd & ~PCI_COMMAND_DECODE_ENABLE); 196 } 197 } 198 199 res->name = pci_name(dev); 200 201 pci_read_config_dword(dev, pos, &l); 202 pci_write_config_dword(dev, pos, l | mask); 203 pci_read_config_dword(dev, pos, &sz); 204 pci_write_config_dword(dev, pos, l); 205 206 /* 207 * All bits set in sz means the device isn't working properly. 208 * If the BAR isn't implemented, all bits must be 0. If it's a 209 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit 210 * 1 must be clear. 211 */ 212 if (sz == 0xffffffff) 213 sz = 0; 214 215 /* 216 * I don't know how l can have all bits set. Copied from old code. 217 * Maybe it fixes a bug on some ancient platform. 218 */ 219 if (l == 0xffffffff) 220 l = 0; 221 222 if (type == pci_bar_unknown) { 223 res->flags = decode_bar(dev, l); 224 res->flags |= IORESOURCE_SIZEALIGN; 225 if (res->flags & IORESOURCE_IO) { 226 l64 = l & PCI_BASE_ADDRESS_IO_MASK; 227 sz64 = sz & PCI_BASE_ADDRESS_IO_MASK; 228 mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT; 229 } else { 230 l64 = l & PCI_BASE_ADDRESS_MEM_MASK; 231 sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK; 232 mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK; 233 } 234 } else { 235 if (l & PCI_ROM_ADDRESS_ENABLE) 236 res->flags |= IORESOURCE_ROM_ENABLE; 237 l64 = l & PCI_ROM_ADDRESS_MASK; 238 sz64 = sz & PCI_ROM_ADDRESS_MASK; 239 mask64 = PCI_ROM_ADDRESS_MASK; 240 } 241 242 if (res->flags & IORESOURCE_MEM_64) { 243 pci_read_config_dword(dev, pos + 4, &l); 244 pci_write_config_dword(dev, pos + 4, ~0); 245 pci_read_config_dword(dev, pos + 4, &sz); 246 pci_write_config_dword(dev, pos + 4, l); 247 248 l64 |= ((u64)l << 32); 249 sz64 |= ((u64)sz << 32); 250 mask64 |= ((u64)~0 << 32); 251 } 252 253 if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE)) 254 pci_write_config_word(dev, PCI_COMMAND, orig_cmd); 255 256 if (!sz64) 257 goto fail; 258 259 sz64 = pci_size(l64, sz64, mask64); 260 if (!sz64) { 261 pci_info(dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n", 262 pos); 263 goto fail; 264 } 265 266 if (res->flags & IORESOURCE_MEM_64) { 267 if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8) 268 && sz64 > 0x100000000ULL) { 269 res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED; 270 res->start = 0; 271 res->end = 0; 272 pci_err(dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n", 273 pos, (unsigned long long)sz64); 274 goto out; 275 } 276 277 if ((sizeof(pci_bus_addr_t) < 8) && l) { 278 /* Above 32-bit boundary; try to reallocate */ 279 res->flags |= IORESOURCE_UNSET; 280 res->start = 0; 281 res->end = sz64; 282 pci_info(dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n", 283 pos, (unsigned long long)l64); 284 goto out; 285 } 286 } 287 288 region.start = l64; 289 region.end = l64 + sz64; 290 291 pcibios_bus_to_resource(dev->bus, res, ®ion); 292 pcibios_resource_to_bus(dev->bus, &inverted_region, res); 293 294 /* 295 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is 296 * the corresponding resource address (the physical address used by 297 * the CPU. Converting that resource address back to a bus address 298 * should yield the original BAR value: 299 * 300 * resource_to_bus(bus_to_resource(A)) == A 301 * 302 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not 303 * be claimed by the device. 304 */ 305 if (inverted_region.start != region.start) { 306 res->flags |= IORESOURCE_UNSET; 307 res->start = 0; 308 res->end = region.end - region.start; 309 pci_info(dev, "reg 0x%x: initial BAR value %#010llx invalid\n", 310 pos, (unsigned long long)region.start); 311 } 312 313 goto out; 314 315 316 fail: 317 res->flags = 0; 318 out: 319 if (res->flags) 320 pci_printk(KERN_DEBUG, dev, "reg 0x%x: %pR\n", pos, res); 321 322 return (res->flags & IORESOURCE_MEM_64) ? 1 : 0; 323 } 324 325 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) 326 { 327 unsigned int pos, reg; 328 329 if (dev->non_compliant_bars) 330 return; 331 332 for (pos = 0; pos < howmany; pos++) { 333 struct resource *res = &dev->resource[pos]; 334 reg = PCI_BASE_ADDRESS_0 + (pos << 2); 335 pos += __pci_read_base(dev, pci_bar_unknown, res, reg); 336 } 337 338 if (rom) { 339 struct resource *res = &dev->resource[PCI_ROM_RESOURCE]; 340 dev->rom_base_reg = rom; 341 res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH | 342 IORESOURCE_READONLY | IORESOURCE_SIZEALIGN; 343 __pci_read_base(dev, pci_bar_mem32, res, rom); 344 } 345 } 346 347 static void pci_read_bridge_io(struct pci_bus *child) 348 { 349 struct pci_dev *dev = child->self; 350 u8 io_base_lo, io_limit_lo; 351 unsigned long io_mask, io_granularity, base, limit; 352 struct pci_bus_region region; 353 struct resource *res; 354 355 io_mask = PCI_IO_RANGE_MASK; 356 io_granularity = 0x1000; 357 if (dev->io_window_1k) { 358 /* Support 1K I/O space granularity */ 359 io_mask = PCI_IO_1K_RANGE_MASK; 360 io_granularity = 0x400; 361 } 362 363 res = child->resource[0]; 364 pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo); 365 pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo); 366 base = (io_base_lo & io_mask) << 8; 367 limit = (io_limit_lo & io_mask) << 8; 368 369 if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) { 370 u16 io_base_hi, io_limit_hi; 371 372 pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi); 373 pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi); 374 base |= ((unsigned long) io_base_hi << 16); 375 limit |= ((unsigned long) io_limit_hi << 16); 376 } 377 378 if (base <= limit) { 379 res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO; 380 region.start = base; 381 region.end = limit + io_granularity - 1; 382 pcibios_bus_to_resource(dev->bus, res, ®ion); 383 pci_printk(KERN_DEBUG, dev, " bridge window %pR\n", res); 384 } 385 } 386 387 static void pci_read_bridge_mmio(struct pci_bus *child) 388 { 389 struct pci_dev *dev = child->self; 390 u16 mem_base_lo, mem_limit_lo; 391 unsigned long base, limit; 392 struct pci_bus_region region; 393 struct resource *res; 394 395 res = child->resource[1]; 396 pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo); 397 pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo); 398 base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16; 399 limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16; 400 if (base <= limit) { 401 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; 402 region.start = base; 403 region.end = limit + 0xfffff; 404 pcibios_bus_to_resource(dev->bus, res, ®ion); 405 pci_printk(KERN_DEBUG, dev, " bridge window %pR\n", res); 406 } 407 } 408 409 static void pci_read_bridge_mmio_pref(struct pci_bus *child) 410 { 411 struct pci_dev *dev = child->self; 412 u16 mem_base_lo, mem_limit_lo; 413 u64 base64, limit64; 414 pci_bus_addr_t base, limit; 415 struct pci_bus_region region; 416 struct resource *res; 417 418 res = child->resource[2]; 419 pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo); 420 pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo); 421 base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16; 422 limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16; 423 424 if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { 425 u32 mem_base_hi, mem_limit_hi; 426 427 pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi); 428 pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi); 429 430 /* 431 * Some bridges set the base > limit by default, and some 432 * (broken) BIOSes do not initialize them. If we find 433 * this, just assume they are not being used. 434 */ 435 if (mem_base_hi <= mem_limit_hi) { 436 base64 |= (u64) mem_base_hi << 32; 437 limit64 |= (u64) mem_limit_hi << 32; 438 } 439 } 440 441 base = (pci_bus_addr_t) base64; 442 limit = (pci_bus_addr_t) limit64; 443 444 if (base != base64) { 445 pci_err(dev, "can't handle bridge window above 4GB (bus address %#010llx)\n", 446 (unsigned long long) base64); 447 return; 448 } 449 450 if (base <= limit) { 451 res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) | 452 IORESOURCE_MEM | IORESOURCE_PREFETCH; 453 if (res->flags & PCI_PREF_RANGE_TYPE_64) 454 res->flags |= IORESOURCE_MEM_64; 455 region.start = base; 456 region.end = limit + 0xfffff; 457 pcibios_bus_to_resource(dev->bus, res, ®ion); 458 pci_printk(KERN_DEBUG, dev, " bridge window %pR\n", res); 459 } 460 } 461 462 void pci_read_bridge_bases(struct pci_bus *child) 463 { 464 struct pci_dev *dev = child->self; 465 struct resource *res; 466 int i; 467 468 if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */ 469 return; 470 471 pci_info(dev, "PCI bridge to %pR%s\n", 472 &child->busn_res, 473 dev->transparent ? " (subtractive decode)" : ""); 474 475 pci_bus_remove_resources(child); 476 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) 477 child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i]; 478 479 pci_read_bridge_io(child); 480 pci_read_bridge_mmio(child); 481 pci_read_bridge_mmio_pref(child); 482 483 if (dev->transparent) { 484 pci_bus_for_each_resource(child->parent, res, i) { 485 if (res && res->flags) { 486 pci_bus_add_resource(child, res, 487 PCI_SUBTRACTIVE_DECODE); 488 pci_printk(KERN_DEBUG, dev, 489 " bridge window %pR (subtractive decode)\n", 490 res); 491 } 492 } 493 } 494 } 495 496 static struct pci_bus *pci_alloc_bus(struct pci_bus *parent) 497 { 498 struct pci_bus *b; 499 500 b = kzalloc(sizeof(*b), GFP_KERNEL); 501 if (!b) 502 return NULL; 503 504 INIT_LIST_HEAD(&b->node); 505 INIT_LIST_HEAD(&b->children); 506 INIT_LIST_HEAD(&b->devices); 507 INIT_LIST_HEAD(&b->slots); 508 INIT_LIST_HEAD(&b->resources); 509 b->max_bus_speed = PCI_SPEED_UNKNOWN; 510 b->cur_bus_speed = PCI_SPEED_UNKNOWN; 511 #ifdef CONFIG_PCI_DOMAINS_GENERIC 512 if (parent) 513 b->domain_nr = parent->domain_nr; 514 #endif 515 return b; 516 } 517 518 static void devm_pci_release_host_bridge_dev(struct device *dev) 519 { 520 struct pci_host_bridge *bridge = to_pci_host_bridge(dev); 521 522 if (bridge->release_fn) 523 bridge->release_fn(bridge); 524 } 525 526 static void pci_release_host_bridge_dev(struct device *dev) 527 { 528 devm_pci_release_host_bridge_dev(dev); 529 pci_free_host_bridge(to_pci_host_bridge(dev)); 530 } 531 532 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) 533 { 534 struct pci_host_bridge *bridge; 535 536 bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL); 537 if (!bridge) 538 return NULL; 539 540 INIT_LIST_HEAD(&bridge->windows); 541 bridge->dev.release = pci_release_host_bridge_dev; 542 543 return bridge; 544 } 545 EXPORT_SYMBOL(pci_alloc_host_bridge); 546 547 struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev, 548 size_t priv) 549 { 550 struct pci_host_bridge *bridge; 551 552 bridge = devm_kzalloc(dev, sizeof(*bridge) + priv, GFP_KERNEL); 553 if (!bridge) 554 return NULL; 555 556 INIT_LIST_HEAD(&bridge->windows); 557 bridge->dev.release = devm_pci_release_host_bridge_dev; 558 559 return bridge; 560 } 561 EXPORT_SYMBOL(devm_pci_alloc_host_bridge); 562 563 void pci_free_host_bridge(struct pci_host_bridge *bridge) 564 { 565 pci_free_resource_list(&bridge->windows); 566 567 kfree(bridge); 568 } 569 EXPORT_SYMBOL(pci_free_host_bridge); 570 571 static const unsigned char pcix_bus_speed[] = { 572 PCI_SPEED_UNKNOWN, /* 0 */ 573 PCI_SPEED_66MHz_PCIX, /* 1 */ 574 PCI_SPEED_100MHz_PCIX, /* 2 */ 575 PCI_SPEED_133MHz_PCIX, /* 3 */ 576 PCI_SPEED_UNKNOWN, /* 4 */ 577 PCI_SPEED_66MHz_PCIX_ECC, /* 5 */ 578 PCI_SPEED_100MHz_PCIX_ECC, /* 6 */ 579 PCI_SPEED_133MHz_PCIX_ECC, /* 7 */ 580 PCI_SPEED_UNKNOWN, /* 8 */ 581 PCI_SPEED_66MHz_PCIX_266, /* 9 */ 582 PCI_SPEED_100MHz_PCIX_266, /* A */ 583 PCI_SPEED_133MHz_PCIX_266, /* B */ 584 PCI_SPEED_UNKNOWN, /* C */ 585 PCI_SPEED_66MHz_PCIX_533, /* D */ 586 PCI_SPEED_100MHz_PCIX_533, /* E */ 587 PCI_SPEED_133MHz_PCIX_533 /* F */ 588 }; 589 590 const unsigned char pcie_link_speed[] = { 591 PCI_SPEED_UNKNOWN, /* 0 */ 592 PCIE_SPEED_2_5GT, /* 1 */ 593 PCIE_SPEED_5_0GT, /* 2 */ 594 PCIE_SPEED_8_0GT, /* 3 */ 595 PCI_SPEED_UNKNOWN, /* 4 */ 596 PCI_SPEED_UNKNOWN, /* 5 */ 597 PCI_SPEED_UNKNOWN, /* 6 */ 598 PCI_SPEED_UNKNOWN, /* 7 */ 599 PCI_SPEED_UNKNOWN, /* 8 */ 600 PCI_SPEED_UNKNOWN, /* 9 */ 601 PCI_SPEED_UNKNOWN, /* A */ 602 PCI_SPEED_UNKNOWN, /* B */ 603 PCI_SPEED_UNKNOWN, /* C */ 604 PCI_SPEED_UNKNOWN, /* D */ 605 PCI_SPEED_UNKNOWN, /* E */ 606 PCI_SPEED_UNKNOWN /* F */ 607 }; 608 609 void pcie_update_link_speed(struct pci_bus *bus, u16 linksta) 610 { 611 bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS]; 612 } 613 EXPORT_SYMBOL_GPL(pcie_update_link_speed); 614 615 static unsigned char agp_speeds[] = { 616 AGP_UNKNOWN, 617 AGP_1X, 618 AGP_2X, 619 AGP_4X, 620 AGP_8X 621 }; 622 623 static enum pci_bus_speed agp_speed(int agp3, int agpstat) 624 { 625 int index = 0; 626 627 if (agpstat & 4) 628 index = 3; 629 else if (agpstat & 2) 630 index = 2; 631 else if (agpstat & 1) 632 index = 1; 633 else 634 goto out; 635 636 if (agp3) { 637 index += 2; 638 if (index == 5) 639 index = 0; 640 } 641 642 out: 643 return agp_speeds[index]; 644 } 645 646 static void pci_set_bus_speed(struct pci_bus *bus) 647 { 648 struct pci_dev *bridge = bus->self; 649 int pos; 650 651 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP); 652 if (!pos) 653 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3); 654 if (pos) { 655 u32 agpstat, agpcmd; 656 657 pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat); 658 bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7); 659 660 pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd); 661 bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7); 662 } 663 664 pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX); 665 if (pos) { 666 u16 status; 667 enum pci_bus_speed max; 668 669 pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS, 670 &status); 671 672 if (status & PCI_X_SSTATUS_533MHZ) { 673 max = PCI_SPEED_133MHz_PCIX_533; 674 } else if (status & PCI_X_SSTATUS_266MHZ) { 675 max = PCI_SPEED_133MHz_PCIX_266; 676 } else if (status & PCI_X_SSTATUS_133MHZ) { 677 if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2) 678 max = PCI_SPEED_133MHz_PCIX_ECC; 679 else 680 max = PCI_SPEED_133MHz_PCIX; 681 } else { 682 max = PCI_SPEED_66MHz_PCIX; 683 } 684 685 bus->max_bus_speed = max; 686 bus->cur_bus_speed = pcix_bus_speed[ 687 (status & PCI_X_SSTATUS_FREQ) >> 6]; 688 689 return; 690 } 691 692 if (pci_is_pcie(bridge)) { 693 u32 linkcap; 694 u16 linksta; 695 696 pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap); 697 bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS]; 698 699 pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta); 700 pcie_update_link_speed(bus, linksta); 701 } 702 } 703 704 static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus) 705 { 706 struct irq_domain *d; 707 708 /* 709 * Any firmware interface that can resolve the msi_domain 710 * should be called from here. 711 */ 712 d = pci_host_bridge_of_msi_domain(bus); 713 if (!d) 714 d = pci_host_bridge_acpi_msi_domain(bus); 715 716 #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN 717 /* 718 * If no IRQ domain was found via the OF tree, try looking it up 719 * directly through the fwnode_handle. 720 */ 721 if (!d) { 722 struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus); 723 724 if (fwnode) 725 d = irq_find_matching_fwnode(fwnode, 726 DOMAIN_BUS_PCI_MSI); 727 } 728 #endif 729 730 return d; 731 } 732 733 static void pci_set_bus_msi_domain(struct pci_bus *bus) 734 { 735 struct irq_domain *d; 736 struct pci_bus *b; 737 738 /* 739 * The bus can be a root bus, a subordinate bus, or a virtual bus 740 * created by an SR-IOV device. Walk up to the first bridge device 741 * found or derive the domain from the host bridge. 742 */ 743 for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) { 744 if (b->self) 745 d = dev_get_msi_domain(&b->self->dev); 746 } 747 748 if (!d) 749 d = pci_host_bridge_msi_domain(b); 750 751 dev_set_msi_domain(&bus->dev, d); 752 } 753 754 static int pci_register_host_bridge(struct pci_host_bridge *bridge) 755 { 756 struct device *parent = bridge->dev.parent; 757 struct resource_entry *window, *n; 758 struct pci_bus *bus, *b; 759 resource_size_t offset; 760 LIST_HEAD(resources); 761 struct resource *res; 762 char addr[64], *fmt; 763 const char *name; 764 int err; 765 766 bus = pci_alloc_bus(NULL); 767 if (!bus) 768 return -ENOMEM; 769 770 bridge->bus = bus; 771 772 /* Temporarily move resources off the list */ 773 list_splice_init(&bridge->windows, &resources); 774 bus->sysdata = bridge->sysdata; 775 bus->msi = bridge->msi; 776 bus->ops = bridge->ops; 777 bus->number = bus->busn_res.start = bridge->busnr; 778 #ifdef CONFIG_PCI_DOMAINS_GENERIC 779 bus->domain_nr = pci_bus_find_domain_nr(bus, parent); 780 #endif 781 782 b = pci_find_bus(pci_domain_nr(bus), bridge->busnr); 783 if (b) { 784 /* Ignore it if we already got here via a different bridge */ 785 dev_dbg(&b->dev, "bus already known\n"); 786 err = -EEXIST; 787 goto free; 788 } 789 790 dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus), 791 bridge->busnr); 792 793 err = pcibios_root_bridge_prepare(bridge); 794 if (err) 795 goto free; 796 797 err = device_register(&bridge->dev); 798 if (err) 799 put_device(&bridge->dev); 800 801 bus->bridge = get_device(&bridge->dev); 802 device_enable_async_suspend(bus->bridge); 803 pci_set_bus_of_node(bus); 804 pci_set_bus_msi_domain(bus); 805 806 if (!parent) 807 set_dev_node(bus->bridge, pcibus_to_node(bus)); 808 809 bus->dev.class = &pcibus_class; 810 bus->dev.parent = bus->bridge; 811 812 dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number); 813 name = dev_name(&bus->dev); 814 815 err = device_register(&bus->dev); 816 if (err) 817 goto unregister; 818 819 pcibios_add_bus(bus); 820 821 /* Create legacy_io and legacy_mem files for this bus */ 822 pci_create_legacy_files(bus); 823 824 if (parent) 825 dev_info(parent, "PCI host bridge to bus %s\n", name); 826 else 827 pr_info("PCI host bridge to bus %s\n", name); 828 829 /* Add initial resources to the bus */ 830 resource_list_for_each_entry_safe(window, n, &resources) { 831 list_move_tail(&window->node, &bridge->windows); 832 offset = window->offset; 833 res = window->res; 834 835 if (res->flags & IORESOURCE_BUS) 836 pci_bus_insert_busn_res(bus, bus->number, res->end); 837 else 838 pci_bus_add_resource(bus, res, 0); 839 840 if (offset) { 841 if (resource_type(res) == IORESOURCE_IO) 842 fmt = " (bus address [%#06llx-%#06llx])"; 843 else 844 fmt = " (bus address [%#010llx-%#010llx])"; 845 846 snprintf(addr, sizeof(addr), fmt, 847 (unsigned long long)(res->start - offset), 848 (unsigned long long)(res->end - offset)); 849 } else 850 addr[0] = '\0'; 851 852 dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr); 853 } 854 855 down_write(&pci_bus_sem); 856 list_add_tail(&bus->node, &pci_root_buses); 857 up_write(&pci_bus_sem); 858 859 return 0; 860 861 unregister: 862 put_device(&bridge->dev); 863 device_unregister(&bridge->dev); 864 865 free: 866 kfree(bus); 867 return err; 868 } 869 870 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, 871 struct pci_dev *bridge, int busnr) 872 { 873 struct pci_bus *child; 874 int i; 875 int ret; 876 877 /* Allocate a new bus and inherit stuff from the parent */ 878 child = pci_alloc_bus(parent); 879 if (!child) 880 return NULL; 881 882 child->parent = parent; 883 child->ops = parent->ops; 884 child->msi = parent->msi; 885 child->sysdata = parent->sysdata; 886 child->bus_flags = parent->bus_flags; 887 888 /* 889 * Initialize some portions of the bus device, but don't register 890 * it now as the parent is not properly set up yet. 891 */ 892 child->dev.class = &pcibus_class; 893 dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr); 894 895 /* Set up the primary, secondary and subordinate bus numbers */ 896 child->number = child->busn_res.start = busnr; 897 child->primary = parent->busn_res.start; 898 child->busn_res.end = 0xff; 899 900 if (!bridge) { 901 child->dev.parent = parent->bridge; 902 goto add_dev; 903 } 904 905 child->self = bridge; 906 child->bridge = get_device(&bridge->dev); 907 child->dev.parent = child->bridge; 908 pci_set_bus_of_node(child); 909 pci_set_bus_speed(child); 910 911 /* Set up default resource pointers and names */ 912 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { 913 child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i]; 914 child->resource[i]->name = child->name; 915 } 916 bridge->subordinate = child; 917 918 add_dev: 919 pci_set_bus_msi_domain(child); 920 ret = device_register(&child->dev); 921 WARN_ON(ret < 0); 922 923 pcibios_add_bus(child); 924 925 if (child->ops->add_bus) { 926 ret = child->ops->add_bus(child); 927 if (WARN_ON(ret < 0)) 928 dev_err(&child->dev, "failed to add bus: %d\n", ret); 929 } 930 931 /* Create legacy_io and legacy_mem files for this bus */ 932 pci_create_legacy_files(child); 933 934 return child; 935 } 936 937 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, 938 int busnr) 939 { 940 struct pci_bus *child; 941 942 child = pci_alloc_child_bus(parent, dev, busnr); 943 if (child) { 944 down_write(&pci_bus_sem); 945 list_add_tail(&child->node, &parent->children); 946 up_write(&pci_bus_sem); 947 } 948 return child; 949 } 950 EXPORT_SYMBOL(pci_add_new_bus); 951 952 static void pci_enable_crs(struct pci_dev *pdev) 953 { 954 u16 root_cap = 0; 955 956 /* Enable CRS Software Visibility if supported */ 957 pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap); 958 if (root_cap & PCI_EXP_RTCAP_CRSVIS) 959 pcie_capability_set_word(pdev, PCI_EXP_RTCTL, 960 PCI_EXP_RTCTL_CRSSVE); 961 } 962 963 static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus, 964 unsigned int available_buses); 965 966 /* 967 * pci_scan_bridge_extend() - Scan buses behind a bridge 968 * @bus: Parent bus the bridge is on 969 * @dev: Bridge itself 970 * @max: Starting subordinate number of buses behind this bridge 971 * @available_buses: Total number of buses available for this bridge and 972 * the devices below. After the minimal bus space has 973 * been allocated the remaining buses will be 974 * distributed equally between hotplug-capable bridges. 975 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges 976 * that need to be reconfigured. 977 * 978 * If it's a bridge, configure it and scan the bus behind it. 979 * For CardBus bridges, we don't scan behind as the devices will 980 * be handled by the bridge driver itself. 981 * 982 * We need to process bridges in two passes -- first we scan those 983 * already configured by the BIOS and after we are done with all of 984 * them, we proceed to assigning numbers to the remaining buses in 985 * order to avoid overlaps between old and new bus numbers. 986 */ 987 static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev, 988 int max, unsigned int available_buses, 989 int pass) 990 { 991 struct pci_bus *child; 992 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); 993 u32 buses, i, j = 0; 994 u16 bctl; 995 u8 primary, secondary, subordinate; 996 int broken = 0; 997 998 /* 999 * Make sure the bridge is powered on to be able to access config 1000 * space of devices below it. 1001 */ 1002 pm_runtime_get_sync(&dev->dev); 1003 1004 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses); 1005 primary = buses & 0xFF; 1006 secondary = (buses >> 8) & 0xFF; 1007 subordinate = (buses >> 16) & 0xFF; 1008 1009 pci_dbg(dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n", 1010 secondary, subordinate, pass); 1011 1012 if (!primary && (primary != bus->number) && secondary && subordinate) { 1013 pci_warn(dev, "Primary bus is hard wired to 0\n"); 1014 primary = bus->number; 1015 } 1016 1017 /* Check if setup is sensible at all */ 1018 if (!pass && 1019 (primary != bus->number || secondary <= bus->number || 1020 secondary > subordinate)) { 1021 pci_info(dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n", 1022 secondary, subordinate); 1023 broken = 1; 1024 } 1025 1026 /* 1027 * Disable Master-Abort Mode during probing to avoid reporting of 1028 * bus errors in some architectures. 1029 */ 1030 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl); 1031 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, 1032 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT); 1033 1034 pci_enable_crs(dev); 1035 1036 if ((secondary || subordinate) && !pcibios_assign_all_busses() && 1037 !is_cardbus && !broken) { 1038 unsigned int cmax; 1039 1040 /* 1041 * Bus already configured by firmware, process it in the 1042 * first pass and just note the configuration. 1043 */ 1044 if (pass) 1045 goto out; 1046 1047 /* 1048 * The bus might already exist for two reasons: Either we 1049 * are rescanning the bus or the bus is reachable through 1050 * more than one bridge. The second case can happen with 1051 * the i450NX chipset. 1052 */ 1053 child = pci_find_bus(pci_domain_nr(bus), secondary); 1054 if (!child) { 1055 child = pci_add_new_bus(bus, dev, secondary); 1056 if (!child) 1057 goto out; 1058 child->primary = primary; 1059 pci_bus_insert_busn_res(child, secondary, subordinate); 1060 child->bridge_ctl = bctl; 1061 } 1062 1063 cmax = pci_scan_child_bus(child); 1064 if (cmax > subordinate) 1065 pci_warn(dev, "bridge has subordinate %02x but max busn %02x\n", 1066 subordinate, cmax); 1067 1068 /* Subordinate should equal child->busn_res.end */ 1069 if (subordinate > max) 1070 max = subordinate; 1071 } else { 1072 1073 /* 1074 * We need to assign a number to this bus which we always 1075 * do in the second pass. 1076 */ 1077 if (!pass) { 1078 if (pcibios_assign_all_busses() || broken || is_cardbus) 1079 1080 /* 1081 * Temporarily disable forwarding of the 1082 * configuration cycles on all bridges in 1083 * this bus segment to avoid possible 1084 * conflicts in the second pass between two 1085 * bridges programmed with overlapping bus 1086 * ranges. 1087 */ 1088 pci_write_config_dword(dev, PCI_PRIMARY_BUS, 1089 buses & ~0xffffff); 1090 goto out; 1091 } 1092 1093 /* Clear errors */ 1094 pci_write_config_word(dev, PCI_STATUS, 0xffff); 1095 1096 /* 1097 * Prevent assigning a bus number that already exists. 1098 * This can happen when a bridge is hot-plugged, so in this 1099 * case we only re-scan this bus. 1100 */ 1101 child = pci_find_bus(pci_domain_nr(bus), max+1); 1102 if (!child) { 1103 child = pci_add_new_bus(bus, dev, max+1); 1104 if (!child) 1105 goto out; 1106 pci_bus_insert_busn_res(child, max+1, 1107 bus->busn_res.end); 1108 } 1109 max++; 1110 if (available_buses) 1111 available_buses--; 1112 1113 buses = (buses & 0xff000000) 1114 | ((unsigned int)(child->primary) << 0) 1115 | ((unsigned int)(child->busn_res.start) << 8) 1116 | ((unsigned int)(child->busn_res.end) << 16); 1117 1118 /* 1119 * yenta.c forces a secondary latency timer of 176. 1120 * Copy that behaviour here. 1121 */ 1122 if (is_cardbus) { 1123 buses &= ~0xff000000; 1124 buses |= CARDBUS_LATENCY_TIMER << 24; 1125 } 1126 1127 /* We need to blast all three values with a single write */ 1128 pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses); 1129 1130 if (!is_cardbus) { 1131 child->bridge_ctl = bctl; 1132 max = pci_scan_child_bus_extend(child, available_buses); 1133 } else { 1134 1135 /* 1136 * For CardBus bridges, we leave 4 bus numbers as 1137 * cards with a PCI-to-PCI bridge can be inserted 1138 * later. 1139 */ 1140 for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) { 1141 struct pci_bus *parent = bus; 1142 if (pci_find_bus(pci_domain_nr(bus), 1143 max+i+1)) 1144 break; 1145 while (parent->parent) { 1146 if ((!pcibios_assign_all_busses()) && 1147 (parent->busn_res.end > max) && 1148 (parent->busn_res.end <= max+i)) { 1149 j = 1; 1150 } 1151 parent = parent->parent; 1152 } 1153 if (j) { 1154 1155 /* 1156 * Often, there are two CardBus 1157 * bridges -- try to leave one 1158 * valid bus number for each one. 1159 */ 1160 i /= 2; 1161 break; 1162 } 1163 } 1164 max += i; 1165 } 1166 1167 /* Set subordinate bus number to its real value */ 1168 pci_bus_update_busn_res_end(child, max); 1169 pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max); 1170 } 1171 1172 sprintf(child->name, 1173 (is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"), 1174 pci_domain_nr(bus), child->number); 1175 1176 /* Has only triggered on CardBus, fixup is in yenta_socket */ 1177 while (bus->parent) { 1178 if ((child->busn_res.end > bus->busn_res.end) || 1179 (child->number > bus->busn_res.end) || 1180 (child->number < bus->number) || 1181 (child->busn_res.end < bus->number)) { 1182 dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n", 1183 &child->busn_res, 1184 (bus->number > child->busn_res.end && 1185 bus->busn_res.end < child->number) ? 1186 "wholly" : "partially", 1187 bus->self->transparent ? " transparent" : "", 1188 dev_name(&bus->dev), 1189 &bus->busn_res); 1190 } 1191 bus = bus->parent; 1192 } 1193 1194 out: 1195 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl); 1196 1197 pm_runtime_put(&dev->dev); 1198 1199 return max; 1200 } 1201 1202 /* 1203 * pci_scan_bridge() - Scan buses behind a bridge 1204 * @bus: Parent bus the bridge is on 1205 * @dev: Bridge itself 1206 * @max: Starting subordinate number of buses behind this bridge 1207 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges 1208 * that need to be reconfigured. 1209 * 1210 * If it's a bridge, configure it and scan the bus behind it. 1211 * For CardBus bridges, we don't scan behind as the devices will 1212 * be handled by the bridge driver itself. 1213 * 1214 * We need to process bridges in two passes -- first we scan those 1215 * already configured by the BIOS and after we are done with all of 1216 * them, we proceed to assigning numbers to the remaining buses in 1217 * order to avoid overlaps between old and new bus numbers. 1218 */ 1219 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) 1220 { 1221 return pci_scan_bridge_extend(bus, dev, max, 0, pass); 1222 } 1223 EXPORT_SYMBOL(pci_scan_bridge); 1224 1225 /* 1226 * Read interrupt line and base address registers. 1227 * The architecture-dependent code can tweak these, of course. 1228 */ 1229 static void pci_read_irq(struct pci_dev *dev) 1230 { 1231 unsigned char irq; 1232 1233 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq); 1234 dev->pin = irq; 1235 if (irq) 1236 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); 1237 dev->irq = irq; 1238 } 1239 1240 void set_pcie_port_type(struct pci_dev *pdev) 1241 { 1242 int pos; 1243 u16 reg16; 1244 int type; 1245 struct pci_dev *parent; 1246 1247 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 1248 if (!pos) 1249 return; 1250 1251 pdev->pcie_cap = pos; 1252 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); 1253 pdev->pcie_flags_reg = reg16; 1254 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, ®16); 1255 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; 1256 1257 /* 1258 * A Root Port or a PCI-to-PCIe bridge is always the upstream end 1259 * of a Link. No PCIe component has two Links. Two Links are 1260 * connected by a Switch that has a Port on each Link and internal 1261 * logic to connect the two Ports. 1262 */ 1263 type = pci_pcie_type(pdev); 1264 if (type == PCI_EXP_TYPE_ROOT_PORT || 1265 type == PCI_EXP_TYPE_PCIE_BRIDGE) 1266 pdev->has_secondary_link = 1; 1267 else if (type == PCI_EXP_TYPE_UPSTREAM || 1268 type == PCI_EXP_TYPE_DOWNSTREAM) { 1269 parent = pci_upstream_bridge(pdev); 1270 1271 /* 1272 * Usually there's an upstream device (Root Port or Switch 1273 * Downstream Port), but we can't assume one exists. 1274 */ 1275 if (parent && !parent->has_secondary_link) 1276 pdev->has_secondary_link = 1; 1277 } 1278 } 1279 1280 void set_pcie_hotplug_bridge(struct pci_dev *pdev) 1281 { 1282 u32 reg32; 1283 1284 pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, ®32); 1285 if (reg32 & PCI_EXP_SLTCAP_HPC) 1286 pdev->is_hotplug_bridge = 1; 1287 } 1288 1289 static void set_pcie_thunderbolt(struct pci_dev *dev) 1290 { 1291 int vsec = 0; 1292 u32 header; 1293 1294 while ((vsec = pci_find_next_ext_capability(dev, vsec, 1295 PCI_EXT_CAP_ID_VNDR))) { 1296 pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header); 1297 1298 /* Is the device part of a Thunderbolt controller? */ 1299 if (dev->vendor == PCI_VENDOR_ID_INTEL && 1300 PCI_VNDR_HEADER_ID(header) == PCI_VSEC_ID_INTEL_TBT) { 1301 dev->is_thunderbolt = 1; 1302 return; 1303 } 1304 } 1305 } 1306 1307 /** 1308 * pci_ext_cfg_is_aliased - Is ext config space just an alias of std config? 1309 * @dev: PCI device 1310 * 1311 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that 1312 * when forwarding a type1 configuration request the bridge must check that 1313 * the extended register address field is zero. The bridge is not permitted 1314 * to forward the transactions and must handle it as an Unsupported Request. 1315 * Some bridges do not follow this rule and simply drop the extended register 1316 * bits, resulting in the standard config space being aliased, every 256 1317 * bytes across the entire configuration space. Test for this condition by 1318 * comparing the first dword of each potential alias to the vendor/device ID. 1319 * Known offenders: 1320 * ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03) 1321 * AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40) 1322 */ 1323 static bool pci_ext_cfg_is_aliased(struct pci_dev *dev) 1324 { 1325 #ifdef CONFIG_PCI_QUIRKS 1326 int pos; 1327 u32 header, tmp; 1328 1329 pci_read_config_dword(dev, PCI_VENDOR_ID, &header); 1330 1331 for (pos = PCI_CFG_SPACE_SIZE; 1332 pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) { 1333 if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL 1334 || header != tmp) 1335 return false; 1336 } 1337 1338 return true; 1339 #else 1340 return false; 1341 #endif 1342 } 1343 1344 /** 1345 * pci_cfg_space_size - Get the configuration space size of the PCI device 1346 * @dev: PCI device 1347 * 1348 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices 1349 * have 4096 bytes. Even if the device is capable, that doesn't mean we can 1350 * access it. Maybe we don't have a way to generate extended config space 1351 * accesses, or the device is behind a reverse Express bridge. So we try 1352 * reading the dword at 0x100 which must either be 0 or a valid extended 1353 * capability header. 1354 */ 1355 static int pci_cfg_space_size_ext(struct pci_dev *dev) 1356 { 1357 u32 status; 1358 int pos = PCI_CFG_SPACE_SIZE; 1359 1360 if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL) 1361 return PCI_CFG_SPACE_SIZE; 1362 if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev)) 1363 return PCI_CFG_SPACE_SIZE; 1364 1365 return PCI_CFG_SPACE_EXP_SIZE; 1366 } 1367 1368 int pci_cfg_space_size(struct pci_dev *dev) 1369 { 1370 int pos; 1371 u32 status; 1372 u16 class; 1373 1374 class = dev->class >> 8; 1375 if (class == PCI_CLASS_BRIDGE_HOST) 1376 return pci_cfg_space_size_ext(dev); 1377 1378 if (pci_is_pcie(dev)) 1379 return pci_cfg_space_size_ext(dev); 1380 1381 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); 1382 if (!pos) 1383 return PCI_CFG_SPACE_SIZE; 1384 1385 pci_read_config_dword(dev, pos + PCI_X_STATUS, &status); 1386 if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)) 1387 return pci_cfg_space_size_ext(dev); 1388 1389 return PCI_CFG_SPACE_SIZE; 1390 } 1391 1392 #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) 1393 1394 static void pci_msi_setup_pci_dev(struct pci_dev *dev) 1395 { 1396 /* 1397 * Disable the MSI hardware to avoid screaming interrupts 1398 * during boot. This is the power on reset default so 1399 * usually this should be a noop. 1400 */ 1401 dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI); 1402 if (dev->msi_cap) 1403 pci_msi_set_enable(dev, 0); 1404 1405 dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX); 1406 if (dev->msix_cap) 1407 pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); 1408 } 1409 1410 /** 1411 * pci_intx_mask_broken - Test PCI_COMMAND_INTX_DISABLE writability 1412 * @dev: PCI device 1413 * 1414 * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev. Check this 1415 * at enumeration-time to avoid modifying PCI_COMMAND at run-time. 1416 */ 1417 static int pci_intx_mask_broken(struct pci_dev *dev) 1418 { 1419 u16 orig, toggle, new; 1420 1421 pci_read_config_word(dev, PCI_COMMAND, &orig); 1422 toggle = orig ^ PCI_COMMAND_INTX_DISABLE; 1423 pci_write_config_word(dev, PCI_COMMAND, toggle); 1424 pci_read_config_word(dev, PCI_COMMAND, &new); 1425 1426 pci_write_config_word(dev, PCI_COMMAND, orig); 1427 1428 /* 1429 * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI 1430 * r2.3, so strictly speaking, a device is not *broken* if it's not 1431 * writable. But we'll live with the misnomer for now. 1432 */ 1433 if (new != toggle) 1434 return 1; 1435 return 0; 1436 } 1437 1438 /** 1439 * pci_setup_device - Fill in class and map information of a device 1440 * @dev: the device structure to fill 1441 * 1442 * Initialize the device structure with information about the device's 1443 * vendor,class,memory and IO-space addresses, IRQ lines etc. 1444 * Called at initialisation of the PCI subsystem and by CardBus services. 1445 * Returns 0 on success and negative if unknown type of device (not normal, 1446 * bridge or CardBus). 1447 */ 1448 int pci_setup_device(struct pci_dev *dev) 1449 { 1450 u32 class; 1451 u16 cmd; 1452 u8 hdr_type; 1453 int pos = 0; 1454 struct pci_bus_region region; 1455 struct resource *res; 1456 1457 if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type)) 1458 return -EIO; 1459 1460 dev->sysdata = dev->bus->sysdata; 1461 dev->dev.parent = dev->bus->bridge; 1462 dev->dev.bus = &pci_bus_type; 1463 dev->hdr_type = hdr_type & 0x7f; 1464 dev->multifunction = !!(hdr_type & 0x80); 1465 dev->error_state = pci_channel_io_normal; 1466 set_pcie_port_type(dev); 1467 1468 pci_dev_assign_slot(dev); 1469 1470 /* 1471 * Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer) 1472 * set this higher, assuming the system even supports it. 1473 */ 1474 dev->dma_mask = 0xffffffff; 1475 1476 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus), 1477 dev->bus->number, PCI_SLOT(dev->devfn), 1478 PCI_FUNC(dev->devfn)); 1479 1480 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class); 1481 dev->revision = class & 0xff; 1482 dev->class = class >> 8; /* upper 3 bytes */ 1483 1484 pci_printk(KERN_DEBUG, dev, "[%04x:%04x] type %02x class %#08x\n", 1485 dev->vendor, dev->device, dev->hdr_type, dev->class); 1486 1487 /* Need to have dev->class ready */ 1488 dev->cfg_size = pci_cfg_space_size(dev); 1489 1490 /* Need to have dev->cfg_size ready */ 1491 set_pcie_thunderbolt(dev); 1492 1493 /* "Unknown power state" */ 1494 dev->current_state = PCI_UNKNOWN; 1495 1496 /* Early fixups, before probing the BARs */ 1497 pci_fixup_device(pci_fixup_early, dev); 1498 1499 /* Device class may be changed after fixup */ 1500 class = dev->class >> 8; 1501 1502 if (dev->non_compliant_bars) { 1503 pci_read_config_word(dev, PCI_COMMAND, &cmd); 1504 if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { 1505 pci_info(dev, "device has non-compliant BARs; disabling IO/MEM decoding\n"); 1506 cmd &= ~PCI_COMMAND_IO; 1507 cmd &= ~PCI_COMMAND_MEMORY; 1508 pci_write_config_word(dev, PCI_COMMAND, cmd); 1509 } 1510 } 1511 1512 dev->broken_intx_masking = pci_intx_mask_broken(dev); 1513 1514 switch (dev->hdr_type) { /* header type */ 1515 case PCI_HEADER_TYPE_NORMAL: /* standard header */ 1516 if (class == PCI_CLASS_BRIDGE_PCI) 1517 goto bad; 1518 pci_read_irq(dev); 1519 pci_read_bases(dev, 6, PCI_ROM_ADDRESS); 1520 pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor); 1521 pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device); 1522 1523 /* 1524 * Do the ugly legacy mode stuff here rather than broken chip 1525 * quirk code. Legacy mode ATA controllers have fixed 1526 * addresses. These are not always echoed in BAR0-3, and 1527 * BAR0-3 in a few cases contain junk! 1528 */ 1529 if (class == PCI_CLASS_STORAGE_IDE) { 1530 u8 progif; 1531 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif); 1532 if ((progif & 1) == 0) { 1533 region.start = 0x1F0; 1534 region.end = 0x1F7; 1535 res = &dev->resource[0]; 1536 res->flags = LEGACY_IO_RESOURCE; 1537 pcibios_bus_to_resource(dev->bus, res, ®ion); 1538 pci_info(dev, "legacy IDE quirk: reg 0x10: %pR\n", 1539 res); 1540 region.start = 0x3F6; 1541 region.end = 0x3F6; 1542 res = &dev->resource[1]; 1543 res->flags = LEGACY_IO_RESOURCE; 1544 pcibios_bus_to_resource(dev->bus, res, ®ion); 1545 pci_info(dev, "legacy IDE quirk: reg 0x14: %pR\n", 1546 res); 1547 } 1548 if ((progif & 4) == 0) { 1549 region.start = 0x170; 1550 region.end = 0x177; 1551 res = &dev->resource[2]; 1552 res->flags = LEGACY_IO_RESOURCE; 1553 pcibios_bus_to_resource(dev->bus, res, ®ion); 1554 pci_info(dev, "legacy IDE quirk: reg 0x18: %pR\n", 1555 res); 1556 region.start = 0x376; 1557 region.end = 0x376; 1558 res = &dev->resource[3]; 1559 res->flags = LEGACY_IO_RESOURCE; 1560 pcibios_bus_to_resource(dev->bus, res, ®ion); 1561 pci_info(dev, "legacy IDE quirk: reg 0x1c: %pR\n", 1562 res); 1563 } 1564 } 1565 break; 1566 1567 case PCI_HEADER_TYPE_BRIDGE: /* bridge header */ 1568 if (class != PCI_CLASS_BRIDGE_PCI) 1569 goto bad; 1570 1571 /* 1572 * The PCI-to-PCI bridge spec requires that subtractive 1573 * decoding (i.e. transparent) bridge must have programming 1574 * interface code of 0x01. 1575 */ 1576 pci_read_irq(dev); 1577 dev->transparent = ((dev->class & 0xff) == 1); 1578 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); 1579 set_pcie_hotplug_bridge(dev); 1580 pos = pci_find_capability(dev, PCI_CAP_ID_SSVID); 1581 if (pos) { 1582 pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor); 1583 pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device); 1584 } 1585 break; 1586 1587 case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ 1588 if (class != PCI_CLASS_BRIDGE_CARDBUS) 1589 goto bad; 1590 pci_read_irq(dev); 1591 pci_read_bases(dev, 1, 0); 1592 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor); 1593 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device); 1594 break; 1595 1596 default: /* unknown header */ 1597 pci_err(dev, "unknown header type %02x, ignoring device\n", 1598 dev->hdr_type); 1599 return -EIO; 1600 1601 bad: 1602 pci_err(dev, "ignoring class %#08x (doesn't match header type %02x)\n", 1603 dev->class, dev->hdr_type); 1604 dev->class = PCI_CLASS_NOT_DEFINED << 8; 1605 } 1606 1607 /* We found a fine healthy device, go go go... */ 1608 return 0; 1609 } 1610 1611 static void pci_configure_mps(struct pci_dev *dev) 1612 { 1613 struct pci_dev *bridge = pci_upstream_bridge(dev); 1614 int mps, p_mps, rc; 1615 1616 if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge)) 1617 return; 1618 1619 mps = pcie_get_mps(dev); 1620 p_mps = pcie_get_mps(bridge); 1621 1622 if (mps == p_mps) 1623 return; 1624 1625 if (pcie_bus_config == PCIE_BUS_TUNE_OFF) { 1626 pci_warn(dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n", 1627 mps, pci_name(bridge), p_mps); 1628 return; 1629 } 1630 1631 /* 1632 * Fancier MPS configuration is done later by 1633 * pcie_bus_configure_settings() 1634 */ 1635 if (pcie_bus_config != PCIE_BUS_DEFAULT) 1636 return; 1637 1638 rc = pcie_set_mps(dev, p_mps); 1639 if (rc) { 1640 pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n", 1641 p_mps); 1642 return; 1643 } 1644 1645 pci_info(dev, "Max Payload Size set to %d (was %d, max %d)\n", 1646 p_mps, mps, 128 << dev->pcie_mpss); 1647 } 1648 1649 static struct hpp_type0 pci_default_type0 = { 1650 .revision = 1, 1651 .cache_line_size = 8, 1652 .latency_timer = 0x40, 1653 .enable_serr = 0, 1654 .enable_perr = 0, 1655 }; 1656 1657 static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp) 1658 { 1659 u16 pci_cmd, pci_bctl; 1660 1661 if (!hpp) 1662 hpp = &pci_default_type0; 1663 1664 if (hpp->revision > 1) { 1665 pci_warn(dev, "PCI settings rev %d not supported; using defaults\n", 1666 hpp->revision); 1667 hpp = &pci_default_type0; 1668 } 1669 1670 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size); 1671 pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer); 1672 pci_read_config_word(dev, PCI_COMMAND, &pci_cmd); 1673 if (hpp->enable_serr) 1674 pci_cmd |= PCI_COMMAND_SERR; 1675 if (hpp->enable_perr) 1676 pci_cmd |= PCI_COMMAND_PARITY; 1677 pci_write_config_word(dev, PCI_COMMAND, pci_cmd); 1678 1679 /* Program bridge control value */ 1680 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { 1681 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 1682 hpp->latency_timer); 1683 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl); 1684 if (hpp->enable_serr) 1685 pci_bctl |= PCI_BRIDGE_CTL_SERR; 1686 if (hpp->enable_perr) 1687 pci_bctl |= PCI_BRIDGE_CTL_PARITY; 1688 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl); 1689 } 1690 } 1691 1692 static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp) 1693 { 1694 int pos; 1695 1696 if (!hpp) 1697 return; 1698 1699 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); 1700 if (!pos) 1701 return; 1702 1703 pci_warn(dev, "PCI-X settings not supported\n"); 1704 } 1705 1706 static bool pcie_root_rcb_set(struct pci_dev *dev) 1707 { 1708 struct pci_dev *rp = pcie_find_root_port(dev); 1709 u16 lnkctl; 1710 1711 if (!rp) 1712 return false; 1713 1714 pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl); 1715 if (lnkctl & PCI_EXP_LNKCTL_RCB) 1716 return true; 1717 1718 return false; 1719 } 1720 1721 static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) 1722 { 1723 int pos; 1724 u32 reg32; 1725 1726 if (!hpp) 1727 return; 1728 1729 if (!pci_is_pcie(dev)) 1730 return; 1731 1732 if (hpp->revision > 1) { 1733 pci_warn(dev, "PCIe settings rev %d not supported\n", 1734 hpp->revision); 1735 return; 1736 } 1737 1738 /* 1739 * Don't allow _HPX to change MPS or MRRS settings. We manage 1740 * those to make sure they're consistent with the rest of the 1741 * platform. 1742 */ 1743 hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD | 1744 PCI_EXP_DEVCTL_READRQ; 1745 hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD | 1746 PCI_EXP_DEVCTL_READRQ); 1747 1748 /* Initialize Device Control Register */ 1749 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, 1750 ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or); 1751 1752 /* Initialize Link Control Register */ 1753 if (pcie_cap_has_lnkctl(dev)) { 1754 1755 /* 1756 * If the Root Port supports Read Completion Boundary of 1757 * 128, set RCB to 128. Otherwise, clear it. 1758 */ 1759 hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB; 1760 hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB; 1761 if (pcie_root_rcb_set(dev)) 1762 hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB; 1763 1764 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL, 1765 ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or); 1766 } 1767 1768 /* Find Advanced Error Reporting Enhanced Capability */ 1769 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 1770 if (!pos) 1771 return; 1772 1773 /* Initialize Uncorrectable Error Mask Register */ 1774 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, ®32); 1775 reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or; 1776 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32); 1777 1778 /* Initialize Uncorrectable Error Severity Register */ 1779 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, ®32); 1780 reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or; 1781 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32); 1782 1783 /* Initialize Correctable Error Mask Register */ 1784 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, ®32); 1785 reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or; 1786 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32); 1787 1788 /* Initialize Advanced Error Capabilities and Control Register */ 1789 pci_read_config_dword(dev, pos + PCI_ERR_CAP, ®32); 1790 reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or; 1791 1792 /* Don't enable ECRC generation or checking if unsupported */ 1793 if (!(reg32 & PCI_ERR_CAP_ECRC_GENC)) 1794 reg32 &= ~PCI_ERR_CAP_ECRC_GENE; 1795 if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC)) 1796 reg32 &= ~PCI_ERR_CAP_ECRC_CHKE; 1797 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32); 1798 1799 /* 1800 * FIXME: The following two registers are not supported yet. 1801 * 1802 * o Secondary Uncorrectable Error Severity Register 1803 * o Secondary Uncorrectable Error Mask Register 1804 */ 1805 } 1806 1807 int pci_configure_extended_tags(struct pci_dev *dev, void *ign) 1808 { 1809 struct pci_host_bridge *host; 1810 u32 cap; 1811 u16 ctl; 1812 int ret; 1813 1814 if (!pci_is_pcie(dev)) 1815 return 0; 1816 1817 ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap); 1818 if (ret) 1819 return 0; 1820 1821 if (!(cap & PCI_EXP_DEVCAP_EXT_TAG)) 1822 return 0; 1823 1824 ret = pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); 1825 if (ret) 1826 return 0; 1827 1828 host = pci_find_host_bridge(dev->bus); 1829 if (!host) 1830 return 0; 1831 1832 /* 1833 * If some device in the hierarchy doesn't handle Extended Tags 1834 * correctly, make sure they're disabled. 1835 */ 1836 if (host->no_ext_tags) { 1837 if (ctl & PCI_EXP_DEVCTL_EXT_TAG) { 1838 pci_info(dev, "disabling Extended Tags\n"); 1839 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, 1840 PCI_EXP_DEVCTL_EXT_TAG); 1841 } 1842 return 0; 1843 } 1844 1845 if (!(ctl & PCI_EXP_DEVCTL_EXT_TAG)) { 1846 pci_info(dev, "enabling Extended Tags\n"); 1847 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, 1848 PCI_EXP_DEVCTL_EXT_TAG); 1849 } 1850 return 0; 1851 } 1852 1853 /** 1854 * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable 1855 * @dev: PCI device to query 1856 * 1857 * Returns true if the device has enabled relaxed ordering attribute. 1858 */ 1859 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev) 1860 { 1861 u16 v; 1862 1863 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v); 1864 1865 return !!(v & PCI_EXP_DEVCTL_RELAX_EN); 1866 } 1867 EXPORT_SYMBOL(pcie_relaxed_ordering_enabled); 1868 1869 static void pci_configure_relaxed_ordering(struct pci_dev *dev) 1870 { 1871 struct pci_dev *root; 1872 1873 /* PCI_EXP_DEVICE_RELAX_EN is RsvdP in VFs */ 1874 if (dev->is_virtfn) 1875 return; 1876 1877 if (!pcie_relaxed_ordering_enabled(dev)) 1878 return; 1879 1880 /* 1881 * For now, we only deal with Relaxed Ordering issues with Root 1882 * Ports. Peer-to-Peer DMA is another can of worms. 1883 */ 1884 root = pci_find_pcie_root_port(dev); 1885 if (!root) 1886 return; 1887 1888 if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) { 1889 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, 1890 PCI_EXP_DEVCTL_RELAX_EN); 1891 pci_info(dev, "Relaxed Ordering disabled because the Root Port didn't support it\n"); 1892 } 1893 } 1894 1895 static void pci_configure_ltr(struct pci_dev *dev) 1896 { 1897 #ifdef CONFIG_PCIEASPM 1898 u32 cap; 1899 struct pci_dev *bridge; 1900 1901 if (!pci_is_pcie(dev)) 1902 return; 1903 1904 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap); 1905 if (!(cap & PCI_EXP_DEVCAP2_LTR)) 1906 return; 1907 1908 /* 1909 * Software must not enable LTR in an Endpoint unless the Root 1910 * Complex and all intermediate Switches indicate support for LTR. 1911 * PCIe r3.1, sec 6.18. 1912 */ 1913 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) 1914 dev->ltr_path = 1; 1915 else { 1916 bridge = pci_upstream_bridge(dev); 1917 if (bridge && bridge->ltr_path) 1918 dev->ltr_path = 1; 1919 } 1920 1921 if (dev->ltr_path) 1922 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, 1923 PCI_EXP_DEVCTL2_LTR_EN); 1924 #endif 1925 } 1926 1927 static void pci_configure_device(struct pci_dev *dev) 1928 { 1929 struct hotplug_params hpp; 1930 int ret; 1931 1932 pci_configure_mps(dev); 1933 pci_configure_extended_tags(dev, NULL); 1934 pci_configure_relaxed_ordering(dev); 1935 pci_configure_ltr(dev); 1936 1937 memset(&hpp, 0, sizeof(hpp)); 1938 ret = pci_get_hp_params(dev, &hpp); 1939 if (ret) 1940 return; 1941 1942 program_hpp_type2(dev, hpp.t2); 1943 program_hpp_type1(dev, hpp.t1); 1944 program_hpp_type0(dev, hpp.t0); 1945 } 1946 1947 static void pci_release_capabilities(struct pci_dev *dev) 1948 { 1949 pci_vpd_release(dev); 1950 pci_iov_release(dev); 1951 pci_free_cap_save_buffers(dev); 1952 } 1953 1954 /** 1955 * pci_release_dev - Free a PCI device structure when all users of it are 1956 * finished 1957 * @dev: device that's been disconnected 1958 * 1959 * Will be called only by the device core when all users of this PCI device are 1960 * done. 1961 */ 1962 static void pci_release_dev(struct device *dev) 1963 { 1964 struct pci_dev *pci_dev; 1965 1966 pci_dev = to_pci_dev(dev); 1967 pci_release_capabilities(pci_dev); 1968 pci_release_of_node(pci_dev); 1969 pcibios_release_device(pci_dev); 1970 pci_bus_put(pci_dev->bus); 1971 kfree(pci_dev->driver_override); 1972 kfree(pci_dev->dma_alias_mask); 1973 kfree(pci_dev); 1974 } 1975 1976 struct pci_dev *pci_alloc_dev(struct pci_bus *bus) 1977 { 1978 struct pci_dev *dev; 1979 1980 dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL); 1981 if (!dev) 1982 return NULL; 1983 1984 INIT_LIST_HEAD(&dev->bus_list); 1985 dev->dev.type = &pci_dev_type; 1986 dev->bus = pci_bus_get(bus); 1987 1988 return dev; 1989 } 1990 EXPORT_SYMBOL(pci_alloc_dev); 1991 1992 static bool pci_bus_crs_vendor_id(u32 l) 1993 { 1994 return (l & 0xffff) == 0x0001; 1995 } 1996 1997 static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l, 1998 int timeout) 1999 { 2000 int delay = 1; 2001 2002 if (!pci_bus_crs_vendor_id(*l)) 2003 return true; /* not a CRS completion */ 2004 2005 if (!timeout) 2006 return false; /* CRS, but caller doesn't want to wait */ 2007 2008 /* 2009 * We got the reserved Vendor ID that indicates a completion with 2010 * Configuration Request Retry Status (CRS). Retry until we get a 2011 * valid Vendor ID or we time out. 2012 */ 2013 while (pci_bus_crs_vendor_id(*l)) { 2014 if (delay > timeout) { 2015 pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n", 2016 pci_domain_nr(bus), bus->number, 2017 PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1); 2018 2019 return false; 2020 } 2021 if (delay >= 1000) 2022 pr_info("pci %04x:%02x:%02x.%d: not ready after %dms; waiting\n", 2023 pci_domain_nr(bus), bus->number, 2024 PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1); 2025 2026 msleep(delay); 2027 delay *= 2; 2028 2029 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) 2030 return false; 2031 } 2032 2033 if (delay >= 1000) 2034 pr_info("pci %04x:%02x:%02x.%d: ready after %dms\n", 2035 pci_domain_nr(bus), bus->number, 2036 PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1); 2037 2038 return true; 2039 } 2040 2041 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l, 2042 int timeout) 2043 { 2044 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) 2045 return false; 2046 2047 /* Some broken boards return 0 or ~0 if a slot is empty: */ 2048 if (*l == 0xffffffff || *l == 0x00000000 || 2049 *l == 0x0000ffff || *l == 0xffff0000) 2050 return false; 2051 2052 if (pci_bus_crs_vendor_id(*l)) 2053 return pci_bus_wait_crs(bus, devfn, l, timeout); 2054 2055 return true; 2056 } 2057 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id); 2058 2059 /* 2060 * Read the config data for a PCI device, sanity-check it, 2061 * and fill in the dev structure. 2062 */ 2063 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) 2064 { 2065 struct pci_dev *dev; 2066 u32 l; 2067 2068 if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000)) 2069 return NULL; 2070 2071 dev = pci_alloc_dev(bus); 2072 if (!dev) 2073 return NULL; 2074 2075 dev->devfn = devfn; 2076 dev->vendor = l & 0xffff; 2077 dev->device = (l >> 16) & 0xffff; 2078 2079 pci_set_of_node(dev); 2080 2081 if (pci_setup_device(dev)) { 2082 pci_bus_put(dev->bus); 2083 kfree(dev); 2084 return NULL; 2085 } 2086 2087 return dev; 2088 } 2089 2090 static void pci_init_capabilities(struct pci_dev *dev) 2091 { 2092 /* Enhanced Allocation */ 2093 pci_ea_init(dev); 2094 2095 /* Setup MSI caps & disable MSI/MSI-X interrupts */ 2096 pci_msi_setup_pci_dev(dev); 2097 2098 /* Buffers for saving PCIe and PCI-X capabilities */ 2099 pci_allocate_cap_save_buffers(dev); 2100 2101 /* Power Management */ 2102 pci_pm_init(dev); 2103 2104 /* Vital Product Data */ 2105 pci_vpd_init(dev); 2106 2107 /* Alternative Routing-ID Forwarding */ 2108 pci_configure_ari(dev); 2109 2110 /* Single Root I/O Virtualization */ 2111 pci_iov_init(dev); 2112 2113 /* Address Translation Services */ 2114 pci_ats_init(dev); 2115 2116 /* Enable ACS P2P upstream forwarding */ 2117 pci_enable_acs(dev); 2118 2119 /* Precision Time Measurement */ 2120 pci_ptm_init(dev); 2121 2122 /* Advanced Error Reporting */ 2123 pci_aer_init(dev); 2124 } 2125 2126 /* 2127 * This is the equivalent of pci_host_bridge_msi_domain() that acts on 2128 * devices. Firmware interfaces that can select the MSI domain on a 2129 * per-device basis should be called from here. 2130 */ 2131 static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev) 2132 { 2133 struct irq_domain *d; 2134 2135 /* 2136 * If a domain has been set through the pcibios_add_device() 2137 * callback, then this is the one (platform code knows best). 2138 */ 2139 d = dev_get_msi_domain(&dev->dev); 2140 if (d) 2141 return d; 2142 2143 /* 2144 * Let's see if we have a firmware interface able to provide 2145 * the domain. 2146 */ 2147 d = pci_msi_get_device_domain(dev); 2148 if (d) 2149 return d; 2150 2151 return NULL; 2152 } 2153 2154 static void pci_set_msi_domain(struct pci_dev *dev) 2155 { 2156 struct irq_domain *d; 2157 2158 /* 2159 * If the platform or firmware interfaces cannot supply a 2160 * device-specific MSI domain, then inherit the default domain 2161 * from the host bridge itself. 2162 */ 2163 d = pci_dev_msi_domain(dev); 2164 if (!d) 2165 d = dev_get_msi_domain(&dev->bus->dev); 2166 2167 dev_set_msi_domain(&dev->dev, d); 2168 } 2169 2170 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) 2171 { 2172 int ret; 2173 2174 pci_configure_device(dev); 2175 2176 device_initialize(&dev->dev); 2177 dev->dev.release = pci_release_dev; 2178 2179 set_dev_node(&dev->dev, pcibus_to_node(bus)); 2180 dev->dev.dma_mask = &dev->dma_mask; 2181 dev->dev.dma_parms = &dev->dma_parms; 2182 dev->dev.coherent_dma_mask = 0xffffffffull; 2183 2184 pci_set_dma_max_seg_size(dev, 65536); 2185 pci_set_dma_seg_boundary(dev, 0xffffffff); 2186 2187 /* Fix up broken headers */ 2188 pci_fixup_device(pci_fixup_header, dev); 2189 2190 /* Moved out from quirk header fixup code */ 2191 pci_reassigndev_resource_alignment(dev); 2192 2193 /* Clear the state_saved flag */ 2194 dev->state_saved = false; 2195 2196 /* Initialize various capabilities */ 2197 pci_init_capabilities(dev); 2198 2199 /* 2200 * Add the device to our list of discovered devices 2201 * and the bus list for fixup functions, etc. 2202 */ 2203 down_write(&pci_bus_sem); 2204 list_add_tail(&dev->bus_list, &bus->devices); 2205 up_write(&pci_bus_sem); 2206 2207 ret = pcibios_add_device(dev); 2208 WARN_ON(ret < 0); 2209 2210 /* Set up MSI IRQ domain */ 2211 pci_set_msi_domain(dev); 2212 2213 /* Notifier could use PCI capabilities */ 2214 dev->match_driver = false; 2215 ret = device_add(&dev->dev); 2216 WARN_ON(ret < 0); 2217 } 2218 2219 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn) 2220 { 2221 struct pci_dev *dev; 2222 2223 dev = pci_get_slot(bus, devfn); 2224 if (dev) { 2225 pci_dev_put(dev); 2226 return dev; 2227 } 2228 2229 dev = pci_scan_device(bus, devfn); 2230 if (!dev) 2231 return NULL; 2232 2233 pci_device_add(dev, bus); 2234 2235 return dev; 2236 } 2237 EXPORT_SYMBOL(pci_scan_single_device); 2238 2239 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn) 2240 { 2241 int pos; 2242 u16 cap = 0; 2243 unsigned next_fn; 2244 2245 if (pci_ari_enabled(bus)) { 2246 if (!dev) 2247 return 0; 2248 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); 2249 if (!pos) 2250 return 0; 2251 2252 pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap); 2253 next_fn = PCI_ARI_CAP_NFN(cap); 2254 if (next_fn <= fn) 2255 return 0; /* protect against malformed list */ 2256 2257 return next_fn; 2258 } 2259 2260 /* dev may be NULL for non-contiguous multifunction devices */ 2261 if (!dev || dev->multifunction) 2262 return (fn + 1) % 8; 2263 2264 return 0; 2265 } 2266 2267 static int only_one_child(struct pci_bus *bus) 2268 { 2269 struct pci_dev *bridge = bus->self; 2270 2271 /* 2272 * Systems with unusual topologies set PCI_SCAN_ALL_PCIE_DEVS so 2273 * we scan for all possible devices, not just Device 0. 2274 */ 2275 if (pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS)) 2276 return 0; 2277 2278 /* 2279 * A PCIe Downstream Port normally leads to a Link with only Device 2280 * 0 on it (PCIe spec r3.1, sec 7.3.1). As an optimization, scan 2281 * only for Device 0 in that situation. 2282 * 2283 * Checking has_secondary_link is a hack to identify Downstream 2284 * Ports because sometimes Switches are configured such that the 2285 * PCIe Port Type labels are backwards. 2286 */ 2287 if (bridge && pci_is_pcie(bridge) && bridge->has_secondary_link) 2288 return 1; 2289 2290 return 0; 2291 } 2292 2293 /** 2294 * pci_scan_slot - Scan a PCI slot on a bus for devices 2295 * @bus: PCI bus to scan 2296 * @devfn: slot number to scan (must have zero function) 2297 * 2298 * Scan a PCI slot on the specified PCI bus for devices, adding 2299 * discovered devices to the @bus->devices list. New devices 2300 * will not have is_added set. 2301 * 2302 * Returns the number of new devices found. 2303 */ 2304 int pci_scan_slot(struct pci_bus *bus, int devfn) 2305 { 2306 unsigned fn, nr = 0; 2307 struct pci_dev *dev; 2308 2309 if (only_one_child(bus) && (devfn > 0)) 2310 return 0; /* Already scanned the entire slot */ 2311 2312 dev = pci_scan_single_device(bus, devfn); 2313 if (!dev) 2314 return 0; 2315 if (!dev->is_added) 2316 nr++; 2317 2318 for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) { 2319 dev = pci_scan_single_device(bus, devfn + fn); 2320 if (dev) { 2321 if (!dev->is_added) 2322 nr++; 2323 dev->multifunction = 1; 2324 } 2325 } 2326 2327 /* Only one slot has PCIe device */ 2328 if (bus->self && nr) 2329 pcie_aspm_init_link_state(bus->self); 2330 2331 return nr; 2332 } 2333 EXPORT_SYMBOL(pci_scan_slot); 2334 2335 static int pcie_find_smpss(struct pci_dev *dev, void *data) 2336 { 2337 u8 *smpss = data; 2338 2339 if (!pci_is_pcie(dev)) 2340 return 0; 2341 2342 /* 2343 * We don't have a way to change MPS settings on devices that have 2344 * drivers attached. A hot-added device might support only the minimum 2345 * MPS setting (MPS=128). Therefore, if the fabric contains a bridge 2346 * where devices may be hot-added, we limit the fabric MPS to 128 so 2347 * hot-added devices will work correctly. 2348 * 2349 * However, if we hot-add a device to a slot directly below a Root 2350 * Port, it's impossible for there to be other existing devices below 2351 * the port. We don't limit the MPS in this case because we can 2352 * reconfigure MPS on both the Root Port and the hot-added device, 2353 * and there are no other devices involved. 2354 * 2355 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA. 2356 */ 2357 if (dev->is_hotplug_bridge && 2358 pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) 2359 *smpss = 0; 2360 2361 if (*smpss > dev->pcie_mpss) 2362 *smpss = dev->pcie_mpss; 2363 2364 return 0; 2365 } 2366 2367 static void pcie_write_mps(struct pci_dev *dev, int mps) 2368 { 2369 int rc; 2370 2371 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { 2372 mps = 128 << dev->pcie_mpss; 2373 2374 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT && 2375 dev->bus->self) 2376 2377 /* 2378 * For "Performance", the assumption is made that 2379 * downstream communication will never be larger than 2380 * the MRRS. So, the MPS only needs to be configured 2381 * for the upstream communication. This being the case, 2382 * walk from the top down and set the MPS of the child 2383 * to that of the parent bus. 2384 * 2385 * Configure the device MPS with the smaller of the 2386 * device MPSS or the bridge MPS (which is assumed to be 2387 * properly configured at this point to the largest 2388 * allowable MPS based on its parent bus). 2389 */ 2390 mps = min(mps, pcie_get_mps(dev->bus->self)); 2391 } 2392 2393 rc = pcie_set_mps(dev, mps); 2394 if (rc) 2395 pci_err(dev, "Failed attempting to set the MPS\n"); 2396 } 2397 2398 static void pcie_write_mrrs(struct pci_dev *dev) 2399 { 2400 int rc, mrrs; 2401 2402 /* 2403 * In the "safe" case, do not configure the MRRS. There appear to be 2404 * issues with setting MRRS to 0 on a number of devices. 2405 */ 2406 if (pcie_bus_config != PCIE_BUS_PERFORMANCE) 2407 return; 2408 2409 /* 2410 * For max performance, the MRRS must be set to the largest supported 2411 * value. However, it cannot be configured larger than the MPS the 2412 * device or the bus can support. This should already be properly 2413 * configured by a prior call to pcie_write_mps(). 2414 */ 2415 mrrs = pcie_get_mps(dev); 2416 2417 /* 2418 * MRRS is a R/W register. Invalid values can be written, but a 2419 * subsequent read will verify if the value is acceptable or not. 2420 * If the MRRS value provided is not acceptable (e.g., too large), 2421 * shrink the value until it is acceptable to the HW. 2422 */ 2423 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) { 2424 rc = pcie_set_readrq(dev, mrrs); 2425 if (!rc) 2426 break; 2427 2428 pci_warn(dev, "Failed attempting to set the MRRS\n"); 2429 mrrs /= 2; 2430 } 2431 2432 if (mrrs < 128) 2433 pci_err(dev, "MRRS was unable to be configured with a safe value. If problems are experienced, try running with pci=pcie_bus_safe\n"); 2434 } 2435 2436 static int pcie_bus_configure_set(struct pci_dev *dev, void *data) 2437 { 2438 int mps, orig_mps; 2439 2440 if (!pci_is_pcie(dev)) 2441 return 0; 2442 2443 if (pcie_bus_config == PCIE_BUS_TUNE_OFF || 2444 pcie_bus_config == PCIE_BUS_DEFAULT) 2445 return 0; 2446 2447 mps = 128 << *(u8 *)data; 2448 orig_mps = pcie_get_mps(dev); 2449 2450 pcie_write_mps(dev, mps); 2451 pcie_write_mrrs(dev); 2452 2453 pci_info(dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n", 2454 pcie_get_mps(dev), 128 << dev->pcie_mpss, 2455 orig_mps, pcie_get_readrq(dev)); 2456 2457 return 0; 2458 } 2459 2460 /* 2461 * pcie_bus_configure_settings() requires that pci_walk_bus work in a top-down, 2462 * parents then children fashion. If this changes, then this code will not 2463 * work as designed. 2464 */ 2465 void pcie_bus_configure_settings(struct pci_bus *bus) 2466 { 2467 u8 smpss = 0; 2468 2469 if (!bus->self) 2470 return; 2471 2472 if (!pci_is_pcie(bus->self)) 2473 return; 2474 2475 /* 2476 * FIXME - Peer to peer DMA is possible, though the endpoint would need 2477 * to be aware of the MPS of the destination. To work around this, 2478 * simply force the MPS of the entire system to the smallest possible. 2479 */ 2480 if (pcie_bus_config == PCIE_BUS_PEER2PEER) 2481 smpss = 0; 2482 2483 if (pcie_bus_config == PCIE_BUS_SAFE) { 2484 smpss = bus->self->pcie_mpss; 2485 2486 pcie_find_smpss(bus->self, &smpss); 2487 pci_walk_bus(bus, pcie_find_smpss, &smpss); 2488 } 2489 2490 pcie_bus_configure_set(bus->self, &smpss); 2491 pci_walk_bus(bus, pcie_bus_configure_set, &smpss); 2492 } 2493 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings); 2494 2495 /* 2496 * Called after each bus is probed, but before its children are examined. This 2497 * is marked as __weak because multiple architectures define it. 2498 */ 2499 void __weak pcibios_fixup_bus(struct pci_bus *bus) 2500 { 2501 /* nothing to do, expected to be removed in the future */ 2502 } 2503 2504 /** 2505 * pci_scan_child_bus_extend() - Scan devices below a bus 2506 * @bus: Bus to scan for devices 2507 * @available_buses: Total number of buses available (%0 does not try to 2508 * extend beyond the minimal) 2509 * 2510 * Scans devices below @bus including subordinate buses. Returns new 2511 * subordinate number including all the found devices. Passing 2512 * @available_buses causes the remaining bus space to be distributed 2513 * equally between hotplug-capable bridges to allow future extension of the 2514 * hierarchy. 2515 */ 2516 static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus, 2517 unsigned int available_buses) 2518 { 2519 unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0; 2520 unsigned int start = bus->busn_res.start; 2521 unsigned int devfn, cmax, max = start; 2522 struct pci_dev *dev; 2523 2524 dev_dbg(&bus->dev, "scanning bus\n"); 2525 2526 /* Go find them, Rover! */ 2527 for (devfn = 0; devfn < 0x100; devfn += 8) 2528 pci_scan_slot(bus, devfn); 2529 2530 /* Reserve buses for SR-IOV capability */ 2531 used_buses = pci_iov_bus_range(bus); 2532 max += used_buses; 2533 2534 /* 2535 * After performing arch-dependent fixup of the bus, look behind 2536 * all PCI-to-PCI bridges on this bus. 2537 */ 2538 if (!bus->is_added) { 2539 dev_dbg(&bus->dev, "fixups for bus\n"); 2540 pcibios_fixup_bus(bus); 2541 bus->is_added = 1; 2542 } 2543 2544 /* 2545 * Calculate how many hotplug bridges and normal bridges there 2546 * are on this bus. We will distribute the additional available 2547 * buses between hotplug bridges. 2548 */ 2549 for_each_pci_bridge(dev, bus) { 2550 if (dev->is_hotplug_bridge) 2551 hotplug_bridges++; 2552 else 2553 normal_bridges++; 2554 } 2555 2556 /* 2557 * Scan bridges that are already configured. We don't touch them 2558 * unless they are misconfigured (which will be done in the second 2559 * scan below). 2560 */ 2561 for_each_pci_bridge(dev, bus) { 2562 cmax = max; 2563 max = pci_scan_bridge_extend(bus, dev, max, 0, 0); 2564 used_buses += cmax - max; 2565 } 2566 2567 /* Scan bridges that need to be reconfigured */ 2568 for_each_pci_bridge(dev, bus) { 2569 unsigned int buses = 0; 2570 2571 if (!hotplug_bridges && normal_bridges == 1) { 2572 2573 /* 2574 * There is only one bridge on the bus (upstream 2575 * port) so it gets all available buses which it 2576 * can then distribute to the possible hotplug 2577 * bridges below. 2578 */ 2579 buses = available_buses; 2580 } else if (dev->is_hotplug_bridge) { 2581 2582 /* 2583 * Distribute the extra buses between hotplug 2584 * bridges if any. 2585 */ 2586 buses = available_buses / hotplug_bridges; 2587 buses = min(buses, available_buses - used_buses); 2588 } 2589 2590 cmax = max; 2591 max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1); 2592 used_buses += max - cmax; 2593 } 2594 2595 /* 2596 * Make sure a hotplug bridge has at least the minimum requested 2597 * number of buses but allow it to grow up to the maximum available 2598 * bus number of there is room. 2599 */ 2600 if (bus->self && bus->self->is_hotplug_bridge) { 2601 used_buses = max_t(unsigned int, available_buses, 2602 pci_hotplug_bus_size - 1); 2603 if (max - start < used_buses) { 2604 max = start + used_buses; 2605 2606 /* Do not allocate more buses than we have room left */ 2607 if (max > bus->busn_res.end) 2608 max = bus->busn_res.end; 2609 2610 dev_dbg(&bus->dev, "%pR extended by %#02x\n", 2611 &bus->busn_res, max - start); 2612 } 2613 } 2614 2615 /* 2616 * We've scanned the bus and so we know all about what's on 2617 * the other side of any bridges that may be on this bus plus 2618 * any devices. 2619 * 2620 * Return how far we've got finding sub-buses. 2621 */ 2622 dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max); 2623 return max; 2624 } 2625 2626 /** 2627 * pci_scan_child_bus() - Scan devices below a bus 2628 * @bus: Bus to scan for devices 2629 * 2630 * Scans devices below @bus including subordinate buses. Returns new 2631 * subordinate number including all the found devices. 2632 */ 2633 unsigned int pci_scan_child_bus(struct pci_bus *bus) 2634 { 2635 return pci_scan_child_bus_extend(bus, 0); 2636 } 2637 EXPORT_SYMBOL_GPL(pci_scan_child_bus); 2638 2639 /** 2640 * pcibios_root_bridge_prepare - Platform-specific host bridge setup 2641 * @bridge: Host bridge to set up 2642 * 2643 * Default empty implementation. Replace with an architecture-specific setup 2644 * routine, if necessary. 2645 */ 2646 int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) 2647 { 2648 return 0; 2649 } 2650 2651 void __weak pcibios_add_bus(struct pci_bus *bus) 2652 { 2653 } 2654 2655 void __weak pcibios_remove_bus(struct pci_bus *bus) 2656 { 2657 } 2658 2659 struct pci_bus *pci_create_root_bus(struct device *parent, int bus, 2660 struct pci_ops *ops, void *sysdata, struct list_head *resources) 2661 { 2662 int error; 2663 struct pci_host_bridge *bridge; 2664 2665 bridge = pci_alloc_host_bridge(0); 2666 if (!bridge) 2667 return NULL; 2668 2669 bridge->dev.parent = parent; 2670 2671 list_splice_init(resources, &bridge->windows); 2672 bridge->sysdata = sysdata; 2673 bridge->busnr = bus; 2674 bridge->ops = ops; 2675 2676 error = pci_register_host_bridge(bridge); 2677 if (error < 0) 2678 goto err_out; 2679 2680 return bridge->bus; 2681 2682 err_out: 2683 kfree(bridge); 2684 return NULL; 2685 } 2686 EXPORT_SYMBOL_GPL(pci_create_root_bus); 2687 2688 int pci_host_probe(struct pci_host_bridge *bridge) 2689 { 2690 struct pci_bus *bus, *child; 2691 int ret; 2692 2693 ret = pci_scan_root_bus_bridge(bridge); 2694 if (ret < 0) { 2695 dev_err(bridge->dev.parent, "Scanning root bridge failed"); 2696 return ret; 2697 } 2698 2699 bus = bridge->bus; 2700 2701 /* 2702 * We insert PCI resources into the iomem_resource and 2703 * ioport_resource trees in either pci_bus_claim_resources() 2704 * or pci_bus_assign_resources(). 2705 */ 2706 if (pci_has_flag(PCI_PROBE_ONLY)) { 2707 pci_bus_claim_resources(bus); 2708 } else { 2709 pci_bus_size_bridges(bus); 2710 pci_bus_assign_resources(bus); 2711 2712 list_for_each_entry(child, &bus->children, node) 2713 pcie_bus_configure_settings(child); 2714 } 2715 2716 pci_bus_add_devices(bus); 2717 return 0; 2718 } 2719 EXPORT_SYMBOL_GPL(pci_host_probe); 2720 2721 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max) 2722 { 2723 struct resource *res = &b->busn_res; 2724 struct resource *parent_res, *conflict; 2725 2726 res->start = bus; 2727 res->end = bus_max; 2728 res->flags = IORESOURCE_BUS; 2729 2730 if (!pci_is_root_bus(b)) 2731 parent_res = &b->parent->busn_res; 2732 else { 2733 parent_res = get_pci_domain_busn_res(pci_domain_nr(b)); 2734 res->flags |= IORESOURCE_PCI_FIXED; 2735 } 2736 2737 conflict = request_resource_conflict(parent_res, res); 2738 2739 if (conflict) 2740 dev_printk(KERN_DEBUG, &b->dev, 2741 "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n", 2742 res, pci_is_root_bus(b) ? "domain " : "", 2743 parent_res, conflict->name, conflict); 2744 2745 return conflict == NULL; 2746 } 2747 2748 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max) 2749 { 2750 struct resource *res = &b->busn_res; 2751 struct resource old_res = *res; 2752 resource_size_t size; 2753 int ret; 2754 2755 if (res->start > bus_max) 2756 return -EINVAL; 2757 2758 size = bus_max - res->start + 1; 2759 ret = adjust_resource(res, res->start, size); 2760 dev_printk(KERN_DEBUG, &b->dev, 2761 "busn_res: %pR end %s updated to %02x\n", 2762 &old_res, ret ? "can not be" : "is", bus_max); 2763 2764 if (!ret && !res->parent) 2765 pci_bus_insert_busn_res(b, res->start, res->end); 2766 2767 return ret; 2768 } 2769 2770 void pci_bus_release_busn_res(struct pci_bus *b) 2771 { 2772 struct resource *res = &b->busn_res; 2773 int ret; 2774 2775 if (!res->flags || !res->parent) 2776 return; 2777 2778 ret = release_resource(res); 2779 dev_printk(KERN_DEBUG, &b->dev, 2780 "busn_res: %pR %s released\n", 2781 res, ret ? "can not be" : "is"); 2782 } 2783 2784 int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge) 2785 { 2786 struct resource_entry *window; 2787 bool found = false; 2788 struct pci_bus *b; 2789 int max, bus, ret; 2790 2791 if (!bridge) 2792 return -EINVAL; 2793 2794 resource_list_for_each_entry(window, &bridge->windows) 2795 if (window->res->flags & IORESOURCE_BUS) { 2796 found = true; 2797 break; 2798 } 2799 2800 ret = pci_register_host_bridge(bridge); 2801 if (ret < 0) 2802 return ret; 2803 2804 b = bridge->bus; 2805 bus = bridge->busnr; 2806 2807 if (!found) { 2808 dev_info(&b->dev, 2809 "No busn resource found for root bus, will use [bus %02x-ff]\n", 2810 bus); 2811 pci_bus_insert_busn_res(b, bus, 255); 2812 } 2813 2814 max = pci_scan_child_bus(b); 2815 2816 if (!found) 2817 pci_bus_update_busn_res_end(b, max); 2818 2819 return 0; 2820 } 2821 EXPORT_SYMBOL(pci_scan_root_bus_bridge); 2822 2823 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, 2824 struct pci_ops *ops, void *sysdata, struct list_head *resources) 2825 { 2826 struct resource_entry *window; 2827 bool found = false; 2828 struct pci_bus *b; 2829 int max; 2830 2831 resource_list_for_each_entry(window, resources) 2832 if (window->res->flags & IORESOURCE_BUS) { 2833 found = true; 2834 break; 2835 } 2836 2837 b = pci_create_root_bus(parent, bus, ops, sysdata, resources); 2838 if (!b) 2839 return NULL; 2840 2841 if (!found) { 2842 dev_info(&b->dev, 2843 "No busn resource found for root bus, will use [bus %02x-ff]\n", 2844 bus); 2845 pci_bus_insert_busn_res(b, bus, 255); 2846 } 2847 2848 max = pci_scan_child_bus(b); 2849 2850 if (!found) 2851 pci_bus_update_busn_res_end(b, max); 2852 2853 return b; 2854 } 2855 EXPORT_SYMBOL(pci_scan_root_bus); 2856 2857 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, 2858 void *sysdata) 2859 { 2860 LIST_HEAD(resources); 2861 struct pci_bus *b; 2862 2863 pci_add_resource(&resources, &ioport_resource); 2864 pci_add_resource(&resources, &iomem_resource); 2865 pci_add_resource(&resources, &busn_resource); 2866 b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources); 2867 if (b) { 2868 pci_scan_child_bus(b); 2869 } else { 2870 pci_free_resource_list(&resources); 2871 } 2872 return b; 2873 } 2874 EXPORT_SYMBOL(pci_scan_bus); 2875 2876 /** 2877 * pci_rescan_bus_bridge_resize - Scan a PCI bus for devices 2878 * @bridge: PCI bridge for the bus to scan 2879 * 2880 * Scan a PCI bus and child buses for new devices, add them, 2881 * and enable them, resizing bridge mmio/io resource if necessary 2882 * and possible. The caller must ensure the child devices are already 2883 * removed for resizing to occur. 2884 * 2885 * Returns the max number of subordinate bus discovered. 2886 */ 2887 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge) 2888 { 2889 unsigned int max; 2890 struct pci_bus *bus = bridge->subordinate; 2891 2892 max = pci_scan_child_bus(bus); 2893 2894 pci_assign_unassigned_bridge_resources(bridge); 2895 2896 pci_bus_add_devices(bus); 2897 2898 return max; 2899 } 2900 2901 /** 2902 * pci_rescan_bus - Scan a PCI bus for devices 2903 * @bus: PCI bus to scan 2904 * 2905 * Scan a PCI bus and child buses for new devices, add them, 2906 * and enable them. 2907 * 2908 * Returns the max number of subordinate bus discovered. 2909 */ 2910 unsigned int pci_rescan_bus(struct pci_bus *bus) 2911 { 2912 unsigned int max; 2913 2914 max = pci_scan_child_bus(bus); 2915 pci_assign_unassigned_bus_resources(bus); 2916 pci_bus_add_devices(bus); 2917 2918 return max; 2919 } 2920 EXPORT_SYMBOL_GPL(pci_rescan_bus); 2921 2922 /* 2923 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal 2924 * routines should always be executed under this mutex. 2925 */ 2926 static DEFINE_MUTEX(pci_rescan_remove_lock); 2927 2928 void pci_lock_rescan_remove(void) 2929 { 2930 mutex_lock(&pci_rescan_remove_lock); 2931 } 2932 EXPORT_SYMBOL_GPL(pci_lock_rescan_remove); 2933 2934 void pci_unlock_rescan_remove(void) 2935 { 2936 mutex_unlock(&pci_rescan_remove_lock); 2937 } 2938 EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove); 2939 2940 static int __init pci_sort_bf_cmp(const struct device *d_a, 2941 const struct device *d_b) 2942 { 2943 const struct pci_dev *a = to_pci_dev(d_a); 2944 const struct pci_dev *b = to_pci_dev(d_b); 2945 2946 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1; 2947 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1; 2948 2949 if (a->bus->number < b->bus->number) return -1; 2950 else if (a->bus->number > b->bus->number) return 1; 2951 2952 if (a->devfn < b->devfn) return -1; 2953 else if (a->devfn > b->devfn) return 1; 2954 2955 return 0; 2956 } 2957 2958 void __init pci_sort_breadthfirst(void) 2959 { 2960 bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp); 2961 } 2962 2963 int pci_hp_add_bridge(struct pci_dev *dev) 2964 { 2965 struct pci_bus *parent = dev->bus; 2966 int busnr, start = parent->busn_res.start; 2967 unsigned int available_buses = 0; 2968 int end = parent->busn_res.end; 2969 2970 for (busnr = start; busnr <= end; busnr++) { 2971 if (!pci_find_bus(pci_domain_nr(parent), busnr)) 2972 break; 2973 } 2974 if (busnr-- > end) { 2975 pci_err(dev, "No bus number available for hot-added bridge\n"); 2976 return -1; 2977 } 2978 2979 /* Scan bridges that are already configured */ 2980 busnr = pci_scan_bridge(parent, dev, busnr, 0); 2981 2982 /* 2983 * Distribute the available bus numbers between hotplug-capable 2984 * bridges to make extending the chain later possible. 2985 */ 2986 available_buses = end - busnr; 2987 2988 /* Scan bridges that need to be reconfigured */ 2989 pci_scan_bridge_extend(parent, dev, busnr, available_buses, 1); 2990 2991 if (!dev->subordinate) 2992 return -1; 2993 2994 return 0; 2995 } 2996 EXPORT_SYMBOL_GPL(pci_hp_add_bridge); 2997