// SPDX-License-Identifier: GPL-2.0
/*
 * PCI detection and setup code
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
#include <linux/aer.h>
#include <linux/acpi.h>
#include <linux/hypervisor.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include "pci.h"

#define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR	3

static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};

/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

static LIST_HEAD(pci_domain_busn_res_list);

struct pci_domain_busn_res {
	struct list_head list;
	struct resource res;
	int domain_nr;
};

static struct resource *get_pci_domain_busn_res(int domain_nr)
{
	struct pci_domain_busn_res *r;

	list_for_each_entry(r, &pci_domain_busn_res_list, list)
		if (r->domain_nr == domain_nr)
			return &r->res;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->domain_nr = domain_nr;
	r->res.start = 0;
	r->res.end = 0xff;
	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;

	list_add_tail(&r->list, &pci_domain_busn_res_list);

	return &r->res;
}

static int find_anything(struct device *dev, void *data)
{
	return 1;
}

/*
 * Some device drivers need to know if PCI is initiated.
 * Basically, we think PCI is not initiated when there
 * is no device to be found on the pci_bus_type.
 */
int no_pci_devices(void)
{
	struct device *dev;
	int no_devices;

	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
	no_devices = (dev == NULL);
	put_device(dev);
	return no_devices;
}
EXPORT_SYMBOL(no_pci_devices);

/*
 * PCI Bus Class
 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}

static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);

static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */

	if (!size)
		return 0;

	/*
	 * Get the lowest of them to find the decode size, and from that
	 * the extent.
	 */
	size = (size & ~(size - 1)) - 1;

	/*
	 * base == maxbase can be valid only if the BAR has already been
	 * programmed with all 1s.
	 */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}
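/*
 * Worked example for pci_size() (illustrative values, not from the
 * original source): sizing a 1 MiB 32-bit memory BAR yields
 * maxbase = 0xfff00000 after the all-1s write.  With mask = 0xfffffff0,
 * the lowest set bit of (mask & maxbase) is 0x00100000, so pci_size()
 * returns 0x000fffff -- the resource *extent* (size - 1), which later
 * becomes res->end - res->start.
 */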
static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	u32 mem_type;
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		flags |= IORESOURCE_IO;
		return flags;
	}

	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
	flags |= IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	default:
		/* mem unknown type treated as 32-bit BAR */
		break;
	}
	return flags;
}

#define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

/**
 * __pci_read_base - Read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l = 0, sz = 0, mask;
	u64 l64, sz64, mask64;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;

	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (sz == 0xffffffff)
		sz = 0;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l64 = l & PCI_BASE_ADDRESS_IO_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
			mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
		} else {
			l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		if (l & PCI_ROM_ADDRESS_ENABLE)
			res->flags |= IORESOURCE_ROM_ENABLE;
		l64 = l & PCI_ROM_ADDRESS_MASK;
		sz64 = sz & PCI_ROM_ADDRESS_MASK;
		mask64 = PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);
		mask64 |= ((u64)~0 << 32);
	}

	if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (!sz64)
		goto fail;

	sz64 = pci_size(l64, sz64, mask64);
	if (!sz64) {
		pci_info(dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
			 pos);
		goto fail;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
		    && sz64 > 0x100000000ULL) {
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			res->start = 0;
			res->end = 0;
			pci_err(dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
				pos, (unsigned long long)sz64);
			goto out;
		}

		if ((sizeof(pci_bus_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = sz64;
			pci_info(dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
				 pos, (unsigned long long)l64);
			goto out;
		}
	}

	region.start = l64;
	region.end = l64 + sz64;

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU).  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		res->flags |= IORESOURCE_UNSET;
		res->start = 0;
		res->end = region.end - region.start;
		pci_info(dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
			 pos, (unsigned long long)region.start);
	}

	goto out;

fail:
	res->flags = 0;
out:
	if (res->flags)
		pci_printk(KERN_DEBUG, dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
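/*
 * Illustrative walk-through of the sizing protocol above (values are
 * hypothetical): for a 32-bit memory BAR programmed to 0xe0000000 that
 * decodes 1 MiB, writing all 1s and reading back yields 0xfff00000 plus
 * the read-only flag bits.  pci_size() turns that into an extent of
 * 0x000fffff, so the resource becomes [mem 0xe0000000-0xe00fffff].
 * A 64-bit BAR repeats the same dance on the upper dword at pos + 4.
 */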
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	if (dev->non_compliant_bars)
		return;

	/* Per PCIe r4.0, sec 9.3.4.1.11, the VF BARs are all RO Zero */
	if (dev->is_virtfn)
		return;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}

static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev->bus, res, &region);
		pci_printk(KERN_DEBUG, dev, "  bridge window %pR\n", res);
	}
}
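/*
 * Decoding example for the I/O window above (hypothetical register
 * values): with PCI_IO_BASE = 0x41 and PCI_IO_LIMIT = 0x51, the low
 * type bits (0x01) select 32-bit I/O addressing; assuming the UPPER16
 * registers read as zero, base becomes (0x41 & 0xf0) << 8 = 0x4000,
 * limit becomes 0x5000, and with 4K granularity the resulting window
 * is [io 0x4000-0x5fff].
 */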
static void pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		pci_printk(KERN_DEBUG, dev, "  bridge window %pR\n", res);
	}
}

static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	u64 base64, limit64;
	pci_bus_addr_t base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
			base64 |= (u64) mem_base_hi << 32;
			limit64 |= (u64) mem_limit_hi << 32;
		}
	}

	base = (pci_bus_addr_t) base64;
	limit = (pci_bus_addr_t) limit64;

	if (base != base64) {
		pci_err(dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
			(unsigned long long) base64);
		return;
	}

	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
			IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		pci_printk(KERN_DEBUG, dev, "  bridge window %pR\n", res);
	}
}
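/*
 * Illustrative decode for the prefetchable window (hypothetical values):
 * PCI_PREF_MEMORY_BASE = 0x8001 has type bits 0x1, i.e. a 64-bit window,
 * so the UPPER32 registers contribute the high half.  With
 * PCI_PREF_BASE_UPPER32 = 0x1 the base is
 * (0x8000 << 16) | (1ULL << 32) = 0x180000000; on a kernel where
 * pci_bus_addr_t is only 32 bits wide, such a window is rejected by the
 * "above 4GB" check above.
 */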
void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	pci_info(dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res && res->flags) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				pci_printk(KERN_DEBUG, dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}

static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return NULL;

	INIT_LIST_HEAD(&b->node);
	INIT_LIST_HEAD(&b->children);
	INIT_LIST_HEAD(&b->devices);
	INIT_LIST_HEAD(&b->slots);
	INIT_LIST_HEAD(&b->resources);
	b->max_bus_speed = PCI_SPEED_UNKNOWN;
	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	if (parent)
		b->domain_nr = parent->domain_nr;
#endif
	return b;
}

static void devm_pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);
}

static void pci_release_host_bridge_dev(struct device *dev)
{
	devm_pci_release_host_bridge_dev(dev);
	pci_free_host_bridge(to_pci_host_bridge(dev));
}

struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
{
	struct pci_host_bridge *bridge;

	bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);
	bridge->dev.release = pci_release_host_bridge_dev;

	/*
	 * We assume we can manage these PCIe features.  Some systems may
	 * reserve these for use by the platform itself, e.g., an ACPI BIOS
	 * may implement its own AER handling and use _OSC to prevent the
	 * OS from interfering.
	 */
	bridge->native_aer = 1;
	bridge->native_hotplug = 1;
	bridge->native_pme = 1;

	return bridge;
}
EXPORT_SYMBOL(pci_alloc_host_bridge);

struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
						   size_t priv)
{
	struct pci_host_bridge *bridge;

	bridge = devm_kzalloc(dev, sizeof(*bridge) + priv, GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);
	bridge->dev.release = devm_pci_release_host_bridge_dev;

	return bridge;
}
EXPORT_SYMBOL(devm_pci_alloc_host_bridge);

void pci_free_host_bridge(struct pci_host_bridge *bridge)
{
	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}
EXPORT_SYMBOL(pci_free_host_bridge);

static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCIE_SPEED_16_0GT,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};

void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);

static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};

static enum pci_bus_speed agp_speed(int agp3, int agpstat)
{
	int index = 0;

	if (agpstat & 4)
		index = 3;
	else if (agpstat & 2)
		index = 2;
	else if (agpstat & 1)
		index = 1;
	else
		goto out;

	if (agp3) {
		index += 2;
		if (index == 5)
			index = 0;
	}

out:
	return agp_speeds[index];
}
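/*
 * Mapping example for agp_speed() (illustrative): in AGP 2.x mode,
 * status rate bits 1/2/4 select AGP_1X/AGP_2X/AGP_4X directly.  With
 * agp3 set, the same bits mean 4X and 8X, hence the "index += 2"; a
 * rate bit of 4 under AGP3 would land on index 5, which is out of
 * range and therefore reported as AGP_UNKNOWN.
 */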
static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
				max = PCI_SPEED_133MHz_PCIX_ECC;
			else
				max = PCI_SPEED_133MHz_PCIX;
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}

static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;

	/*
	 * Any firmware interface that can resolve the msi_domain
	 * should be called from here.
	 */
	d = pci_host_bridge_of_msi_domain(bus);
	if (!d)
		d = pci_host_bridge_acpi_msi_domain(bus);

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
	/*
	 * If no IRQ domain was found via the OF tree, try looking it up
	 * directly through the fwnode_handle.
	 */
	if (!d) {
		struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);

		if (fwnode)
			d = irq_find_matching_fwnode(fwnode,
						     DOMAIN_BUS_PCI_MSI);
	}
#endif

	return d;
}

static void pci_set_bus_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;
	struct pci_bus *b;

	/*
	 * The bus can be a root bus, a subordinate bus, or a virtual bus
	 * created by an SR-IOV device.  Walk up to the first bridge device
	 * found or derive the domain from the host bridge.
	 */
	for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
		if (b->self)
			d = dev_get_msi_domain(&b->self->dev);
	}

	if (!d)
		d = pci_host_bridge_msi_domain(b);

	dev_set_msi_domain(&bus->dev, d);
}

static int pci_register_host_bridge(struct pci_host_bridge *bridge)
{
	struct device *parent = bridge->dev.parent;
	struct resource_entry *window, *n;
	struct pci_bus *bus, *b;
	resource_size_t offset;
	LIST_HEAD(resources);
	struct resource *res;
	char addr[64], *fmt;
	const char *name;
	int err;

	bus = pci_alloc_bus(NULL);
	if (!bus)
		return -ENOMEM;

	bridge->bus = bus;

	/* Temporarily move resources off the list */
	list_splice_init(&bridge->windows, &resources);
	bus->sysdata = bridge->sysdata;
	bus->msi = bridge->msi;
	bus->ops = bridge->ops;
	bus->number = bus->busn_res.start = bridge->busnr;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
#endif

	b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
	if (b) {
		/* Ignore it if we already got here via a different bridge */
		dev_dbg(&b->dev, "bus already known\n");
		err = -EEXIST;
		goto free;
	}

	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus),
		     bridge->busnr);

	err = pcibios_root_bridge_prepare(bridge);
	if (err)
		goto free;

	err = device_register(&bridge->dev);
	if (err) {
		put_device(&bridge->dev);
		goto free;
	}

	bus->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(bus->bridge);
	pci_set_bus_of_node(bus);
	pci_set_bus_msi_domain(bus);

	if (!parent)
		set_dev_node(bus->bridge, pcibus_to_node(bus));

	bus->dev.class = &pcibus_class;
	bus->dev.parent = bus->bridge;

	dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number);
	name = dev_name(&bus->dev);

	err = device_register(&bus->dev);
	if (err)
		goto unregister;

	pcibios_add_bus(bus);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(bus);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", name);
	else
		pr_info("PCI host bridge to bus %s\n", name);

	/* Add initial resources to the bus */
	resource_list_for_each_entry_safe(window, n, &resources) {
		list_move_tail(&window->node, &bridge->windows);
		offset = window->offset;
		res = window->res;

		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(bus, bus->number, res->end);
		else
			pci_bus_add_resource(bus, res, 0);

		if (offset) {
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";

			snprintf(addr, sizeof(addr), fmt,
				 (unsigned long long)(res->start - offset),
				 (unsigned long long)(res->end - offset));
		} else
			addr[0] = '\0';

		dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&bus->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return 0;

unregister:
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);

free:
	kfree(bus);
	return err;
}
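/*
 * Typical host controller usage of the bridge API above (a minimal
 * sketch, not taken from this file; "port" and "drv_pci_ops" are
 * hypothetical driver names).  pci_register_host_bridge() itself is
 * static and is reached via pci_scan_root_bus_bridge():
 *
 *	bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*port));
 *	if (!bridge)
 *		return -ENOMEM;
 *	list_splice_init(&res, &bridge->windows);
 *	bridge->dev.parent = &pdev->dev;
 *	bridge->sysdata = port;
 *	bridge->busnr = 0;
 *	bridge->ops = &drv_pci_ops;
 *	err = pci_scan_root_bus_bridge(bridge);
 */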
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/* Allocate a new bus and inherit stuff from the parent */
	child = pci_alloc_bus(parent);
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/*
	 * Initialize some portions of the bus device, but don't register
	 * it now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/* Set up the primary, secondary and subordinate bus numbers */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	pci_set_bus_msi_domain(child);
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	if (child->ops->add_bus) {
		ret = child->ops->add_bus(child);
		if (WARN_ON(ret < 0))
			dev_err(&child->dev, "failed to add bus: %d\n", ret);
	}

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}

struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
				int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}
EXPORT_SYMBOL(pci_add_new_bus);

static void pci_enable_crs(struct pci_dev *pdev)
{
	u16 root_cap = 0;

	/* Enable CRS Software Visibility if supported */
	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
	if (root_cap & PCI_EXP_RTCAP_CRSVIS)
		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
					 PCI_EXP_RTCTL_CRSSVE);
}

static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
					      unsigned int available_buses);

/*
 * pci_scan_bridge_extend() - Scan buses behind a bridge
 * @bus: Parent bus the bridge is on
 * @dev: Bridge itself
 * @max: Starting subordinate number of buses behind this bridge
 * @available_buses: Total number of buses available for this bridge and
 *		     the devices below.  After the minimal bus space has
 *		     been allocated the remaining buses will be
 *		     distributed equally between hotplug-capable bridges.
 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
 *	  that need to be reconfigured).
 *
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
				  int max, unsigned int available_buses,
				  int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	/*
	 * Make sure the bridge is powered on to be able to access config
	 * space of devices below it.
	 */
	pm_runtime_get_sync(&dev->dev);

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	pci_dbg(dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		pci_warn(dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate)) {
		pci_info(dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/*
	 * Disable Master-Abort Mode during probing to avoid reporting of
	 * bus errors in some architectures.
	 */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	pci_enable_crs(dev);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;

		/*
		 * Bus already configured by firmware, process it in the
		 * first pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * The bus might already exist for two reasons: Either we
		 * are rescanning the bus or the bus is reachable through
		 * more than one bridge.  The second case can happen with
		 * the i450NX chipset.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > subordinate)
			pci_warn(dev, "bridge has subordinate %02x but max busn %02x\n",
				 subordinate, cmax);

		/* Subordinate should equal child->busn_res.end */
		if (subordinate > max)
			max = subordinate;
	} else {

		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken || is_cardbus)

				/*
				 * Temporarily disable forwarding of the
				 * configuration cycles on all bridges in
				 * this bus segment to avoid possible
				 * conflicts in the second pass between two
				 * bridges programmed with overlapping bus
				 * ranges.
				 */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/*
		 * Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in this
		 * case we only re-scan this bus.
		 */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, max+1);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, max+1,
						bus->busn_res.end);
		}
		max++;
		if (available_buses)
			available_buses--;

		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)        <<  0)
		      | ((unsigned int)(child->busn_res.start) <<  8)
		      | ((unsigned int)(child->busn_res.end)   << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/* We need to blast all three values with a single write */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			max = pci_scan_child_bus_extend(child, available_buses);
		} else {

			/*
			 * For CardBus bridges, we leave 4 bus numbers as
			 * cards with a PCI-to-PCI bridge can be inserted
			 * later.
			 */
			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
						 max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {

					/*
					 * Often, there are two CardBus
					 * bridges -- try to leave one
					 * valid bus number for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
		}

		/* Set subordinate bus number to its real value */
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
				 &child->busn_res,
				 (bus->number > child->busn_res.end &&
				  bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				 bus->self->transparent ? " transparent" : "",
				 dev_name(&bus->dev),
				 &bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	pm_runtime_put(&dev->dev);

	return max;
}
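/*
 * Layout of the PCI_PRIMARY_BUS dword written above (illustrative):
 * byte 0 is the primary bus, byte 1 the secondary bus, byte 2 the
 * subordinate bus, and byte 3 the secondary latency timer.  E.g.
 * buses = 0x00030201 programs primary 01, secondary 02, subordinate 03;
 * for CardBus the top byte is forced to CARDBUS_LATENCY_TIMER (176).
 */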
/*
 * pci_scan_bridge() - Scan buses behind a bridge
 * @bus: Parent bus the bridge is on
 * @dev: Bridge itself
 * @max: Starting subordinate number of buses behind this bridge
 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
 *	  that need to be reconfigured).
 *
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	return pci_scan_bridge_extend(bus, dev, max, 0, pass);
}
EXPORT_SYMBOL(pci_scan_bridge);

/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	/* VFs are not allowed to use INTx, so skip the config reads */
	if (dev->is_virtfn) {
		dev->pin = 0;
		dev->irq = 0;
		return;
	}

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;
	int type;
	struct pci_dev *parent;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;

	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_flags_reg = reg16;
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;

	/*
	 * A Root Port or a PCI-to-PCIe bridge is always the upstream end
	 * of a Link.  No PCIe component has two Links.  Two Links are
	 * connected by a Switch that has a Port on each Link and internal
	 * logic to connect the two Ports.
	 */
	type = pci_pcie_type(pdev);
	if (type == PCI_EXP_TYPE_ROOT_PORT ||
	    type == PCI_EXP_TYPE_PCIE_BRIDGE)
		pdev->has_secondary_link = 1;
	else if (type == PCI_EXP_TYPE_UPSTREAM ||
		 type == PCI_EXP_TYPE_DOWNSTREAM) {
		parent = pci_upstream_bridge(pdev);

		/*
		 * Usually there's an upstream device (Root Port or Switch
		 * Downstream Port), but we can't assume one exists.
		 */
		if (parent && !parent->has_secondary_link)
			pdev->has_secondary_link = 1;
	}
}

void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
}

static void set_pcie_thunderbolt(struct pci_dev *dev)
{
	int vsec = 0;
	u32 header;

	while ((vsec = pci_find_next_ext_capability(dev, vsec,
						    PCI_EXT_CAP_ID_VNDR))) {
		pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);

		/* Is the device part of a Thunderbolt controller? */
		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
		    PCI_VNDR_HEADER_ID(header) == PCI_VSEC_ID_INTEL_TBT) {
			dev->is_thunderbolt = 1;
			return;
		}
	}
}

/**
 * pci_ext_cfg_is_aliased - Is ext config space just an alias of std config?
 * @dev: PCI device
 *
 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
 * when forwarding a type1 configuration request the bridge must check that
 * the extended register address field is zero.  The bridge is not permitted
 * to forward the transactions and must handle it as an Unsupported Request.
 * Some bridges do not follow this rule and simply drop the extended register
 * bits, resulting in the standard config space being aliased, every 256
 * bytes across the entire configuration space.  Test for this condition by
 * comparing the first dword of each potential alias to the vendor/device ID.
 * Known offenders:
 *	ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
 *	AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
 */
static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_QUIRKS
	int pos;
	u32 header, tmp;

	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);

	for (pos = PCI_CFG_SPACE_SIZE;
	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
		    || header != tmp)
			return false;
	}

	return true;
#else
	return false;
#endif
}

/**
 * pci_cfg_space_size_ext - Get the configuration space size of the PCI device
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
 * access it.  Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge.  So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
static int pci_cfg_space_size_ext(struct pci_dev *dev)
{
	u32 status;
	int pos = PCI_CFG_SPACE_SIZE;

	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
		return PCI_CFG_SPACE_SIZE;
	if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
		return PCI_CFG_SPACE_SIZE;

	return PCI_CFG_SPACE_EXP_SIZE;
}

int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;
	u16 class;

	class = dev->class >> 8;
	if (class == PCI_CLASS_BRIDGE_HOST)
		return pci_cfg_space_size_ext(dev);

	if (pci_is_pcie(dev))
		return pci_cfg_space_size_ext(dev);

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return PCI_CFG_SPACE_SIZE;

	pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
	if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
		return pci_cfg_space_size_ext(dev);

	return PCI_CFG_SPACE_SIZE;
}
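/*
 * Example of the 0x100 probe above (illustrative values): on a PCIe
 * device, the dword at offset 0x100 might read 0x14010001 -- a valid
 * extended capability header (capability ID 0x0001, version 1, next
 * pointer 0x140) -- so 4096 bytes are reported.  A read of 0xffffffff,
 * or an aliased copy of the vendor/device ID, means the extended space
 * is unreachable and 256 bytes are assumed.
 */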
static u32 pci_class(struct pci_dev *dev)
{
	u32 class;

#ifdef CONFIG_PCI_IOV
	if (dev->is_virtfn)
		return dev->physfn->sriov->class;
#endif
	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	return class;
}

static void pci_subsystem_ids(struct pci_dev *dev, u16 *vendor, u16 *device)
{
#ifdef CONFIG_PCI_IOV
	if (dev->is_virtfn) {
		*vendor = dev->physfn->sriov->subsystem_vendor;
		*device = dev->physfn->sriov->subsystem_device;
		return;
	}
#endif
	pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, vendor);
	pci_read_config_word(dev, PCI_SUBSYSTEM_ID, device);
}

static u8 pci_hdr_type(struct pci_dev *dev)
{
	u8 hdr_type;

#ifdef CONFIG_PCI_IOV
	if (dev->is_virtfn)
		return dev->physfn->sriov->hdr_type;
#endif
	pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type);
	return hdr_type;
}

#define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)

static void pci_msi_setup_pci_dev(struct pci_dev *dev)
{
	/*
	 * Disable the MSI hardware to avoid screaming interrupts
	 * during boot.  This is the power on reset default so
	 * usually this should be a noop.
	 */
	dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (dev->msi_cap)
		pci_msi_set_enable(dev, 0);

	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (dev->msix_cap)
		pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
}

/**
 * pci_intx_mask_broken - Test PCI_COMMAND_INTX_DISABLE writability
 * @dev: PCI device
 *
 * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev.  Check this
 * at enumeration-time to avoid modifying PCI_COMMAND at run-time.
 */
static int pci_intx_mask_broken(struct pci_dev *dev)
{
	u16 orig, toggle, new;

	pci_read_config_word(dev, PCI_COMMAND, &orig);
	toggle = orig ^ PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(dev, PCI_COMMAND, toggle);
	pci_read_config_word(dev, PCI_COMMAND, &new);

	pci_write_config_word(dev, PCI_COMMAND, orig);

	/*
	 * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI
	 * r2.3, so strictly speaking, a device is not *broken* if it's not
	 * writable.  But we'll live with the misnomer for now.
	 */
	if (new != toggle)
		return 1;
	return 0;
}
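/*
 * Worked example of the toggle test above (hypothetical values): if
 * PCI_COMMAND reads 0x0007, the test writes 0x0407 (bit 10,
 * PCI_COMMAND_INTX_DISABLE, flipped).  A device that hardwires the bit
 * to zero reads back 0x0007 != 0x0407, so broken_intx_masking is set
 * and the INTx mask bit is never relied upon at run-time.
 */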
/**
 * pci_setup_device - Fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor, class, memory and IO-space addresses, IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and negative if unknown type of device (not normal,
 * bridge or CardBus).
 */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u16 cmd;
	u8 hdr_type;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	hdr_type = pci_hdr_type(dev);

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	pci_dev_assign_slot(dev);

	/*
	 * Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	 * set this higher, assuming the system even supports it.
	 */
	dev->dma_mask = 0xffffffff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	class = pci_class(dev);

	dev->revision = class & 0xff;
	dev->class = class >> 8;		/* upper 3 bytes */

	pci_printk(KERN_DEBUG, dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* Need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* Need to have dev->cfg_size ready */
	set_pcie_thunderbolt(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);

	/* Device class may be changed after fixup */
	class = dev->class >> 8;

	if (dev->non_compliant_bars) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
			pci_info(dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
			cmd &= ~PCI_COMMAND_IO;
			cmd &= ~PCI_COMMAND_MEMORY;
			pci_write_config_word(dev, PCI_COMMAND, cmd);
		}
	}

	dev->broken_intx_masking = pci_intx_mask_broken(dev);

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);

		pci_subsystem_ids(dev, &dev->subsystem_vendor,
				  &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code.  Legacy mode ATA controllers have fixed
		 * addresses.  These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				pci_info(dev, "legacy IDE quirk: reg 0x10: %pR\n",
					 res);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				pci_info(dev, "legacy IDE quirk: reg 0x14: %pR\n",
					 res);
			}
			if ((progif & 4) == 0) {
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				pci_info(dev, "legacy IDE quirk: reg 0x18: %pR\n",
					 res);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				pci_info(dev, "legacy IDE quirk: reg 0x1c: %pR\n",
					 res);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;

		/*
		 * The PCI-to-PCI bridge spec requires that subtractive
		 * decoding (i.e., transparent) bridges have a programming
		 * interface code of 0x01.
		 */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID,
					     &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID,
					     &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID,
				     &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID,
				     &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		pci_err(dev, "unknown header type %02x, ignoring device\n",
			dev->hdr_type);
		return -EIO;

	bad:
		pci_err(dev, "ignoring class %#08x (doesn't match header type %02x)\n",
			dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED << 8;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}

static void pci_configure_mps(struct pci_dev *dev)
{
	struct pci_dev *bridge = pci_upstream_bridge(dev);
	int mps, p_mps, rc;

	if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
		return;

	mps = pcie_get_mps(dev);
	p_mps = pcie_get_mps(bridge);

	if (mps == p_mps)
		return;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
		pci_warn(dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 mps, pci_name(bridge), p_mps);
		return;
	}

	/*
	 * Fancier MPS configuration is done later by
	 * pcie_bus_configure_settings()
	 */
	if (pcie_bus_config != PCIE_BUS_DEFAULT)
		return;

	rc = pcie_set_mps(dev, p_mps);
	if (rc) {
		pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 p_mps);
		return;
	}

	pci_info(dev, "Max Payload Size set to %d (was %d, max %d)\n",
		 p_mps, mps, 128 << dev->pcie_mpss);
}

static struct hpp_type0 pci_default_type0 = {
	.revision	= 1,
	.cache_line_size = 8,
	.latency_timer	= 0x40,
	.enable_serr	= 0,
	.enable_perr	= 0,
};

static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
{
	u16 pci_cmd, pci_bctl;

	if (!hpp)
		hpp = &pci_default_type0;

	if (hpp->revision > 1) {
		pci_warn(dev, "PCI settings rev %d not supported; using defaults\n",
			 hpp->revision);
		hpp = &pci_default_type0;
	}

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
	if (hpp->enable_serr)
		pci_cmd |= PCI_COMMAND_SERR;
	if (hpp->enable_perr)
		pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

	/* Program bridge control value */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
				      hpp->latency_timer);
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
		if (hpp->enable_serr)
			pci_bctl |= PCI_BRIDGE_CTL_SERR;
		if (hpp->enable_perr)
			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
	}
}
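/*
 * Units note for the defaults above (PCI spec facts): PCI_CACHE_LINE_SIZE
 * is expressed in 32-bit words, so cache_line_size = 8 means a 32-byte
 * cache line, and latency_timer = 0x40 allows a bus master to hold the
 * bus for 64 PCI clocks before it must relinquish it.
 */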
static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
{
	int pos;

	if (!hpp)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return;

	pci_warn(dev, "PCI-X settings not supported\n");
}

static bool pcie_root_rcb_set(struct pci_dev *dev)
{
	struct pci_dev *rp = pcie_find_root_port(dev);
	u16 lnkctl;

	if (!rp)
		return false;

	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
	if (lnkctl & PCI_EXP_LNKCTL_RCB)
		return true;

	return false;
}

static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
{
	int pos;
	u32 reg32;

	if (!hpp)
		return;

	if (!pci_is_pcie(dev))
		return;

	if (hpp->revision > 1) {
		pci_warn(dev, "PCIe settings rev %d not supported\n",
			 hpp->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				   PCI_EXP_DEVCTL_READRQ;
	hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev)) {

		/*
		 * If the Root Port supports Read Completion Boundary of
		 * 128, set RCB to 128.  Otherwise, clear it.
		 */
		hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
		hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
		if (pcie_root_rcb_set(dev))
			hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
	}

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;

	/* Don't enable ECRC generation or checking if unsupported */
	if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
		reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
	if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
		reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}
int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
{
	struct pci_host_bridge *host;
	u32 cap;
	u16 ctl;
	int ret;

	if (!pci_is_pcie(dev))
		return 0;

	ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
	if (ret)
		return 0;

	if (!(cap & PCI_EXP_DEVCAP_EXT_TAG))
		return 0;

	ret = pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
	if (ret)
		return 0;

	host = pci_find_host_bridge(dev->bus);
	if (!host)
		return 0;

	/*
	 * If some device in the hierarchy doesn't handle Extended Tags
	 * correctly, make sure they're disabled.
	 */
	if (host->no_ext_tags) {
		if (ctl & PCI_EXP_DEVCTL_EXT_TAG) {
			pci_info(dev, "disabling Extended Tags\n");
			pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
						   PCI_EXP_DEVCTL_EXT_TAG);
		}
		return 0;
	}

	if (!(ctl & PCI_EXP_DEVCTL_EXT_TAG)) {
		pci_info(dev, "enabling Extended Tags\n");
		pcie_capability_set_word(dev, PCI_EXP_DEVCTL,
					 PCI_EXP_DEVCTL_EXT_TAG);
	}
	return 0;
}
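/*
 * Background for the Extended Tags toggle above (PCIe spec fact): the
 * default Tag field is 5 bits, allowing 32 outstanding non-posted
 * requests per requester; with Extended Tag Field Enable set, the Tag
 * grows to 8 bits, allowing 256 requests in flight, which mainly helps
 * high-bandwidth endpoints on long-latency paths.
 */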
static void pci_configure_device(struct pci_dev *dev)
{
	struct hotplug_params hpp;
	int ret;

	pci_configure_mps(dev);
	pci_configure_extended_tags(dev, NULL);
	pci_configure_relaxed_ordering(dev);
	pci_configure_ltr(dev);

	memset(&hpp, 0, sizeof(hpp));
	ret = pci_get_hp_params(dev, &hpp);
	if (ret)
		return;

	program_hpp_type2(dev, hpp.t2);
	program_hpp_type1(dev, hpp.t1);
	program_hpp_type0(dev, hpp.t0);
}

static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}

/**
 * pci_release_dev - Free a PCI device structure when all users of it are
 *		     finished
 * @dev: device that's been disconnected
 *
 * Will be called only by the device core when all users of this PCI device
 * are done.
 */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	pci_release_capabilities(pci_dev);
	pci_release_of_node(pci_dev);
	pcibios_release_device(pci_dev);
	pci_bus_put(pci_dev->bus);
	kfree(pci_dev->driver_override);
	kfree(pci_dev->dma_alias_mask);
	kfree(pci_dev);
}

struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
{
	struct pci_dev *dev;

	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	INIT_LIST_HEAD(&dev->bus_list);
	dev->dev.type = &pci_dev_type;
	dev->bus = pci_bus_get(bus);

	return dev;
}
EXPORT_SYMBOL(pci_alloc_dev);

static bool pci_bus_crs_vendor_id(u32 l)
{
	return (l & 0xffff) == 0x0001;
}

static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
			     int timeout)
{
	int delay = 1;

	if (!pci_bus_crs_vendor_id(*l))
		return true;	/* not a CRS completion */

	if (!timeout)
		return false;	/* CRS, but caller doesn't want to wait */

	/*
	 * We got the reserved Vendor ID that indicates a completion with
	 * Configuration Request Retry Status (CRS).  Retry until we get a
	 * valid Vendor ID or we time out.
	 */
	while (pci_bus_crs_vendor_id(*l)) {
		if (delay > timeout) {
			pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n",
				pci_domain_nr(bus), bus->number,
				PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);

			return false;
		}
		if (delay >= 1000)
			pr_info("pci %04x:%02x:%02x.%d: not ready after %dms; waiting\n",
				pci_domain_nr(bus), bus->number,
				PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);

		msleep(delay);
		delay *= 2;

		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;
	}

	if (delay >= 1000)
		pr_info("pci %04x:%02x:%02x.%d: ready after %dms\n",
			pci_domain_nr(bus), bus->number,
			PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);

	return true;
}
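/*
 * Worked example of the backoff above: the sleep doubles on each retry
 * (1, 2, 4, ... ms), so a device that needs ~100 ms to leave CRS is
 * detected after sleeps of 1+2+...+64 = 127 ms.  With a 60000 ms timeout
 * the loop gives up after the 32768 ms sleep (about 65.5 s of accumulated
 * delay), since the next delay, 65536 ms, exceeds the timeout.
 */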
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
				int timeout)
{
	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
		return false;

	/* Some broken boards return 0 or ~0 if a slot is empty: */
	if (*l == 0xffffffff || *l == 0x00000000 ||
	    *l == 0x0000ffff || *l == 0xffff0000)
		return false;

	if (pci_bus_crs_vendor_id(*l))
		return pci_bus_wait_crs(bus, devfn, l, timeout);

	return true;
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);

/*
 * Read the config data for a PCI device, sanity-check it,
 * and fill in the dev structure.
 */
static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	u32 l;

	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
		return NULL;

	dev = pci_alloc_dev(bus);
	if (!dev)
		return NULL;

	dev->devfn = devfn;
	dev->vendor = l & 0xffff;
	dev->device = (l >> 16) & 0xffff;

	pci_set_of_node(dev);

	if (pci_setup_device(dev)) {
		pci_bus_put(dev->bus);
		kfree(dev);
		return NULL;
	}

	return dev;
}
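/*
 * Example (a hedged sketch, not a call made here): a hotplug path that
 * has just powered a slot could wait up to one second for the device to
 * answer configuration reads before giving up:
 *
 *	u32 id;
 *
 *	if (!pci_bus_read_dev_vendor_id(bus, PCI_DEVFN(0, 0), &id, 1000))
 *		return -ENODEV;
 *
 * As in pci_scan_device() above, the vendor ID is then id & 0xffff and
 * the device ID is id >> 16.
 */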
static void pci_init_capabilities(struct pci_dev *dev)
{
	/* Enhanced Allocation */
	pci_ea_init(dev);

	/* Setup MSI caps & disable MSI/MSI-X interrupts */
	pci_msi_setup_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Address Translation Services */
	pci_ats_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);

	/* Precision Time Measurement */
	pci_ptm_init(dev);

	/* Advanced Error Reporting */
	pci_aer_init(dev);

	if (pci_probe_reset_function(dev) == 0)
		dev->reset_fn = 1;
}

/*
 * This is the equivalent of pci_host_bridge_msi_domain() that acts on
 * devices.  Firmware interfaces that can select the MSI domain on a
 * per-device basis should be called from here.
 */
static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
{
	struct irq_domain *d;

	/*
	 * If a domain has been set through the pcibios_add_device()
	 * callback, then this is the one (platform code knows best).
	 */
	d = dev_get_msi_domain(&dev->dev);
	if (d)
		return d;

	/*
	 * Let's see if we have a firmware interface able to provide
	 * the domain.
	 */
	d = pci_msi_get_device_domain(dev);
	if (d)
		return d;

	return NULL;
}

static void pci_set_msi_domain(struct pci_dev *dev)
{
	struct irq_domain *d;

	/*
	 * If the platform or firmware interfaces cannot supply a
	 * device-specific MSI domain, then inherit the default domain
	 * from the host bridge itself.
	 */
	d = pci_dev_msi_domain(dev);
	if (!d)
		d = dev_get_msi_domain(&dev->bus->dev);

	dev_set_msi_domain(&dev->dev, d);
}

void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	int ret;

	pci_configure_device(dev);

	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* Moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);

	ret = pcibios_add_device(dev);
	WARN_ON(ret < 0);

	/* Set up MSI IRQ domain */
	pci_set_msi_domain(dev);

	/* Notifier could use PCI capabilities */
	dev->match_driver = false;
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);
}

struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;

	dev = pci_get_slot(bus, devfn);
	if (dev) {
		pci_dev_put(dev);
		return dev;
	}

	dev = pci_scan_device(bus, devfn);
	if (!dev)
		return NULL;

	pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(pci_scan_single_device);

static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
{
	int pos;
	u16 cap = 0;
	unsigned next_fn;

	if (pci_ari_enabled(bus)) {
		if (!dev)
			return 0;
		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
		if (!pos)
			return 0;

		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
		next_fn = PCI_ARI_CAP_NFN(cap);
		if (next_fn <= fn)
			return 0;	/* protect against malformed list */

		return next_fn;
	}

	/* dev may be NULL for non-contiguous multifunction devices */
	if (!dev || dev->multifunction)
		return (fn + 1) % 8;

	return 0;
}
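/*
 * With ARI enabled, next_fn() walks the capability's Next Function
 * Number chain instead of iterating functions 0-7.  For illustration
 * (made-up values): a device exposing functions 0, 64 and 192 reports
 * NFN(0) = 64 and NFN(64) = 192, so pci_scan_slot() below visits exactly
 * those three of the up to 256 functions ARI allows.
 */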
static int only_one_child(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;

	/*
	 * Systems with unusual topologies set PCI_SCAN_ALL_PCIE_DEVS so
	 * we scan for all possible devices, not just Device 0.
	 */
	if (pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
		return 0;

	/*
	 * A PCIe Downstream Port normally leads to a Link with only Device
	 * 0 on it (PCIe spec r3.1, sec 7.3.1).  As an optimization, scan
	 * only for Device 0 in that situation.
	 *
	 * Checking has_secondary_link is a hack to identify Downstream
	 * Ports because sometimes Switches are configured such that the
	 * PCIe Port Type labels are backwards.
	 */
	if (bridge && pci_is_pcie(bridge) && bridge->has_secondary_link)
		return 1;

	return 0;
}

/**
 * pci_scan_slot - Scan a PCI slot on a bus for devices
 * @bus: PCI bus to scan
 * @devfn: slot number to scan (function number must be 0)
 *
 * Scan a PCI slot on the specified PCI bus for devices, adding
 * discovered devices to the @bus->devices list.  New devices
 * will not have is_added set.
 *
 * Returns the number of new devices found.
 */
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
	unsigned fn, nr = 0;
	struct pci_dev *dev;

	if (only_one_child(bus) && (devfn > 0))
		return 0;	/* Already scanned the entire slot */

	dev = pci_scan_single_device(bus, devfn);
	if (!dev)
		return 0;
	if (!dev->is_added)
		nr++;

	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
		dev = pci_scan_single_device(bus, devfn + fn);
		if (dev) {
			if (!dev->is_added)
				nr++;
			dev->multifunction = 1;
		}
	}

	/* Only one slot has a PCIe device */
	if (bus->self && nr)
		pcie_aspm_init_link_state(bus->self);

	return nr;
}
EXPORT_SYMBOL(pci_scan_slot);

static int pcie_find_smpss(struct pci_dev *dev, void *data)
{
	u8 *smpss = data;

	if (!pci_is_pcie(dev))
		return 0;

	/*
	 * We don't have a way to change MPS settings on devices that have
	 * drivers attached.  A hot-added device might support only the
	 * minimum MPS setting (MPS=128).  Therefore, if the fabric contains
	 * a bridge where devices may be hot-added, we limit the fabric MPS
	 * to 128 so hot-added devices will work correctly.
	 *
	 * However, if we hot-add a device to a slot directly below a Root
	 * Port, it's impossible for there to be other existing devices
	 * below the port.  We don't limit the MPS in this case because we
	 * can reconfigure MPS on both the Root Port and the hot-added
	 * device, and there are no other devices involved.
	 *
	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
	 */
	if (dev->is_hotplug_bridge &&
	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		*smpss = 0;

	if (*smpss > dev->pcie_mpss)
		*smpss = dev->pcie_mpss;

	return 0;
}
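/*
 * Reminder on the encoding used below: the Device Capabilities MPSS
 * field and the Device Control MPS field encode powers of two starting
 * at 128 bytes, i.e. bytes = 128 << encoding.  For example, a device
 * with dev->pcie_mpss == 2 supports payloads up to 512 bytes, and
 * pcie_set_mps(dev, 512) programs that value (the helper takes bytes,
 * not the raw encoding).
 */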
static void pcie_write_mps(struct pci_dev *dev, int mps)
{
	int rc;

	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		mps = 128 << dev->pcie_mpss;

		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
		    dev->bus->self)

			/*
			 * For "Performance", the assumption is made that
			 * downstream communication will never be larger
			 * than the MRRS.  So, the MPS only needs to be
			 * configured for the upstream communication.  This
			 * being the case, walk from the top down and set
			 * the MPS of the child to that of the parent bus.
			 *
			 * Configure the device MPS with the smaller of the
			 * device MPSS or the bridge MPS (which is assumed
			 * to be properly configured at this point to the
			 * largest allowable MPS based on its parent bus).
			 */
			mps = min(mps, pcie_get_mps(dev->bus->self));
	}

	rc = pcie_set_mps(dev, mps);
	if (rc)
		pci_err(dev, "Failed attempting to set the MPS\n");
}

static void pcie_write_mrrs(struct pci_dev *dev)
{
	int rc, mrrs;

	/*
	 * In the "safe" case, do not configure the MRRS.  There appear to
	 * be issues with setting MRRS to 0 on a number of devices.
	 */
	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
		return;

	/*
	 * For max performance, the MRRS must be set to the largest
	 * supported value.  However, it cannot be configured larger than
	 * the MPS the device or the bus can support.  This should already
	 * be properly configured by a prior call to pcie_write_mps().
	 */
	mrrs = pcie_get_mps(dev);

	/*
	 * MRRS is an R/W register.  Invalid values can be written, but a
	 * subsequent read will verify if the value is acceptable or not.
	 * If the MRRS value provided is not acceptable (e.g., too large),
	 * shrink the value until it is acceptable to the HW.
	 */
	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
		rc = pcie_set_readrq(dev, mrrs);
		if (!rc)
			break;

		pci_warn(dev, "Failed attempting to set the MRRS\n");
		mrrs /= 2;
	}

	if (mrrs < 128)
		pci_err(dev, "MRRS was unable to be configured with a safe value.  If problems are experienced, try running with pci=pcie_bus_safe\n");
}

static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
{
	int mps, orig_mps;

	if (!pci_is_pcie(dev))
		return 0;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
	    pcie_bus_config == PCIE_BUS_DEFAULT)
		return 0;

	mps = 128 << *(u8 *)data;
	orig_mps = pcie_get_mps(dev);

	pcie_write_mps(dev, mps);
	pcie_write_mrrs(dev);

	pci_info(dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
		 pcie_get_mps(dev), 128 << dev->pcie_mpss,
		 orig_mps, pcie_get_readrq(dev));

	return 0;
}

/*
 * pcie_bus_configure_settings() requires that pci_walk_bus() work in a
 * top-down, parents-then-children fashion.  If this changes, then this
 * code will not work as designed.
 */
void pcie_bus_configure_settings(struct pci_bus *bus)
{
	u8 smpss = 0;

	if (!bus->self)
		return;

	if (!pci_is_pcie(bus->self))
		return;

	/*
	 * FIXME - Peer-to-peer DMA is possible, though the endpoint would
	 * need to be aware of the MPS of the destination.  To work around
	 * this, simply force the MPS of the entire system to the smallest
	 * possible.
	 */
	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
		smpss = 0;

	if (pcie_bus_config == PCIE_BUS_SAFE) {
		smpss = bus->self->pcie_mpss;

		pcie_find_smpss(bus->self, &smpss);
		pci_walk_bus(bus, pcie_find_smpss, &smpss);
	}

	pcie_bus_configure_set(bus->self, &smpss);
	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
}
EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
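/*
 * Host bridge drivers normally run pcie_bus_configure_settings() on each
 * child bus once resources have been assigned; pci_host_probe() below
 * does exactly this:
 *
 *	list_for_each_entry(child, &bus->children, node)
 *		pcie_bus_configure_settings(child);
 *
 * With the default policy (PCIE_BUS_DEFAULT) pcie_bus_configure_set()
 * returns early, so this is a no-op unless a pci=pcie_bus_* command line
 * option selected another policy.
 */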
/*
 * Called after each bus is probed, but before its children are examined.
 * This is marked as __weak because multiple architectures define it.
 */
void __weak pcibios_fixup_bus(struct pci_bus *bus)
{
	/* nothing to do, expected to be removed in the future */
}

/**
 * pci_scan_child_bus_extend() - Scan devices below a bus
 * @bus: Bus to scan for devices
 * @available_buses: Total number of buses available (%0 does not try to
 *		     extend beyond the minimal)
 *
 * Scans devices below @bus including subordinate buses.  Returns new
 * subordinate number including all the found devices.  Passing
 * @available_buses causes the remaining bus space to be distributed
 * equally between hotplug-capable bridges to allow future extension of
 * the hierarchy.
 */
static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
					      unsigned int available_buses)
{
	unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0;
	unsigned int start = bus->busn_res.start;
	unsigned int devfn, fn, cmax, max = start;
	struct pci_dev *dev;
	int nr_devs;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 256; devfn += 8) {
		nr_devs = pci_scan_slot(bus, devfn);

		/*
		 * The Jailhouse hypervisor may pass individual functions of
		 * a multi-function device to a guest without passing
		 * function 0.  Look for them as well.
		 */
		if (jailhouse_paravirt() && nr_devs == 0) {
			for (fn = 1; fn < 8; fn++) {
				dev = pci_scan_single_device(bus, devfn + fn);
				if (dev)
					dev->multifunction = 1;
			}
		}
	}

	/* Reserve buses for SR-IOV capability */
	used_buses = pci_iov_bus_range(bus);
	max += used_buses;

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		bus->is_added = 1;
	}

	/*
	 * Calculate how many hotplug bridges and normal bridges there
	 * are on this bus.  We will distribute the additional available
	 * buses between hotplug bridges.
	 */
	for_each_pci_bridge(dev, bus) {
		if (dev->is_hotplug_bridge)
			hotplug_bridges++;
		else
			normal_bridges++;
	}

	/*
	 * Scan bridges that are already configured.  We don't touch them
	 * unless they are misconfigured (which will be done in the second
	 * scan below).
	 */
	for_each_pci_bridge(dev, bus) {
		cmax = max;
		max = pci_scan_bridge_extend(bus, dev, max, 0, 0);
		/* The bridge consumed max - cmax bus numbers (max >= cmax) */
		used_buses += max - cmax;
	}

	/* Scan bridges that need to be reconfigured */
	for_each_pci_bridge(dev, bus) {
		unsigned int buses = 0;

		if (!hotplug_bridges && normal_bridges == 1) {

			/*
			 * There is only one bridge on the bus (upstream
			 * port) so it gets all available buses which it
			 * can then distribute to the possible hotplug
			 * bridges below.
			 */
			buses = available_buses;
		} else if (dev->is_hotplug_bridge) {

			/*
			 * Distribute the extra buses between hotplug
			 * bridges if any.
			 */
			buses = available_buses / hotplug_bridges;
			buses = min(buses, available_buses - used_buses);
		}

		cmax = max;
		max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1);
		used_buses += max - cmax;
	}

	/*
	 * Make sure a hotplug bridge has at least the minimum requested
	 * number of buses but allow it to grow up to the maximum available
	 * bus number if there is room.
	 */
	if (bus->self && bus->self->is_hotplug_bridge) {
		used_buses = max_t(unsigned int, available_buses,
				   pci_hotplug_bus_size - 1);
		if (max - start < used_buses) {
			max = start + used_buses;

			/* Do not allocate more buses than we have room left */
			if (max > bus->busn_res.end)
				max = bus->busn_res.end;

			dev_dbg(&bus->dev, "%pR extended by %#02x\n",
				&bus->busn_res, max - start);
		}
	}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}

/**
 * pci_scan_child_bus() - Scan devices below a bus
 * @bus: Bus to scan for devices
 *
 * Scans devices below @bus including subordinate buses.  Returns new
 * subordinate number including all the found devices.
 */
unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	return pci_scan_child_bus_extend(bus, 0);
}
EXPORT_SYMBOL_GPL(pci_scan_child_bus);

/**
 * pcibios_root_bridge_prepare - Platform-specific host bridge setup
 * @bridge: Host bridge to set up
 *
 * Default empty implementation.  Replace with an architecture-specific
 * setup routine, if necessary.
 */
int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	return 0;
}

void __weak pcibios_add_bus(struct pci_bus *bus)
{
}

void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}

struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	int error;
	struct pci_host_bridge *bridge;

	bridge = pci_alloc_host_bridge(0);
	if (!bridge)
		return NULL;

	bridge->dev.parent = parent;

	list_splice_init(resources, &bridge->windows);
	bridge->sysdata = sysdata;
	bridge->busnr = bus;
	bridge->ops = ops;

	error = pci_register_host_bridge(bridge);
	if (error < 0)
		goto err_out;

	return bridge->bus;

err_out:
	kfree(bridge);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_create_root_bus);
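/*
 * A hedged sketch of the legacy pci_create_root_bus() pattern ("my_ops"
 * is a placeholder for the caller's config accessors).  The caller builds
 * a resource list, which is spliced into the bridge on success; on
 * failure the list is released, as pci_scan_bus() below does:
 *
 *	LIST_HEAD(resources);
 *	struct pci_bus *root;
 *
 *	pci_add_resource(&resources, &ioport_resource);
 *	pci_add_resource(&resources, &iomem_resource);
 *	pci_add_resource(&resources, &busn_resource);
 *	root = pci_create_root_bus(NULL, 0, &my_ops, NULL, &resources);
 *	if (!root)
 *		pci_free_resource_list(&resources);
 *
 * pci_scan_bus() below is this pattern plus a child scan.
 */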
int pci_host_probe(struct pci_host_bridge *bridge)
{
	struct pci_bus *bus, *child;
	int ret;

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret < 0) {
		dev_err(bridge->dev.parent, "Scanning root bridge failed");
		return ret;
	}

	bus = bridge->bus;

	/*
	 * We insert PCI resources into the iomem_resource and
	 * ioport_resource trees in either pci_bus_claim_resources()
	 * or pci_bus_assign_resources().
	 */
	if (pci_has_flag(PCI_PROBE_ONLY)) {
		pci_bus_claim_resources(bus);
	} else {
		pci_bus_size_bridges(bus);
		pci_bus_assign_resources(bus);

		list_for_each_entry(child, &bus->children, node)
			pcie_bus_configure_settings(child);
	}

	pci_bus_add_devices(bus);
	return 0;
}
EXPORT_SYMBOL_GPL(pci_host_probe);

int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource *parent_res, *conflict;

	res->start = bus;
	res->end = bus_max;
	res->flags = IORESOURCE_BUS;

	if (!pci_is_root_bus(b))
		parent_res = &b->parent->busn_res;
	else {
		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
		res->flags |= IORESOURCE_PCI_FIXED;
	}

	conflict = request_resource_conflict(parent_res, res);

	if (conflict)
		dev_printk(KERN_DEBUG, &b->dev,
			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
			   res, pci_is_root_bus(b) ? "domain " : "",
			   parent_res, conflict->name, conflict);

	return conflict == NULL;
}

int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource old_res = *res;
	resource_size_t size;
	int ret;

	if (res->start > bus_max)
		return -EINVAL;

	size = bus_max - res->start + 1;
	ret = adjust_resource(res, res->start, size);
	dev_printk(KERN_DEBUG, &b->dev,
		   "busn_res: %pR end %s updated to %02x\n",
		   &old_res, ret ? "can not be" : "is", bus_max);

	if (!ret && !res->parent)
		pci_bus_insert_busn_res(b, res->start, res->end);

	return ret;
}

void pci_bus_release_busn_res(struct pci_bus *b)
{
	struct resource *res = &b->busn_res;
	int ret;

	if (!res->flags || !res->parent)
		return;

	ret = release_resource(res);
	dev_printk(KERN_DEBUG, &b->dev,
		   "busn_res: %pR %s released\n",
		   res, ret ? "can not be" : "is");
}
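/*
 * Worked example of the busn_res bookkeeping above (numbers are
 * illustrative): in domain 0000 the parent resource is [bus 00-ff].
 * pci_bus_insert_busn_res(b, 0, 255) first claims the whole range for
 * the root bus; if scanning then finds the highest subordinate bus to
 * be 0x05, pci_bus_update_busn_res_end(b, 5) shrinks the root's span to
 * [bus 00-05], leaving 06-ff free in the domain.
 */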
"can not be" : "is"); 2859 } 2860 2861 int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge) 2862 { 2863 struct resource_entry *window; 2864 bool found = false; 2865 struct pci_bus *b; 2866 int max, bus, ret; 2867 2868 if (!bridge) 2869 return -EINVAL; 2870 2871 resource_list_for_each_entry(window, &bridge->windows) 2872 if (window->res->flags & IORESOURCE_BUS) { 2873 found = true; 2874 break; 2875 } 2876 2877 ret = pci_register_host_bridge(bridge); 2878 if (ret < 0) 2879 return ret; 2880 2881 b = bridge->bus; 2882 bus = bridge->busnr; 2883 2884 if (!found) { 2885 dev_info(&b->dev, 2886 "No busn resource found for root bus, will use [bus %02x-ff]\n", 2887 bus); 2888 pci_bus_insert_busn_res(b, bus, 255); 2889 } 2890 2891 max = pci_scan_child_bus(b); 2892 2893 if (!found) 2894 pci_bus_update_busn_res_end(b, max); 2895 2896 return 0; 2897 } 2898 EXPORT_SYMBOL(pci_scan_root_bus_bridge); 2899 2900 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, 2901 struct pci_ops *ops, void *sysdata, struct list_head *resources) 2902 { 2903 struct resource_entry *window; 2904 bool found = false; 2905 struct pci_bus *b; 2906 int max; 2907 2908 resource_list_for_each_entry(window, resources) 2909 if (window->res->flags & IORESOURCE_BUS) { 2910 found = true; 2911 break; 2912 } 2913 2914 b = pci_create_root_bus(parent, bus, ops, sysdata, resources); 2915 if (!b) 2916 return NULL; 2917 2918 if (!found) { 2919 dev_info(&b->dev, 2920 "No busn resource found for root bus, will use [bus %02x-ff]\n", 2921 bus); 2922 pci_bus_insert_busn_res(b, bus, 255); 2923 } 2924 2925 max = pci_scan_child_bus(b); 2926 2927 if (!found) 2928 pci_bus_update_busn_res_end(b, max); 2929 2930 return b; 2931 } 2932 EXPORT_SYMBOL(pci_scan_root_bus); 2933 2934 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, 2935 void *sysdata) 2936 { 2937 LIST_HEAD(resources); 2938 struct pci_bus *b; 2939 2940 pci_add_resource(&resources, &ioport_resource); 2941 pci_add_resource(&resources, &iomem_resource); 2942 pci_add_resource(&resources, &busn_resource); 2943 b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources); 2944 if (b) { 2945 pci_scan_child_bus(b); 2946 } else { 2947 pci_free_resource_list(&resources); 2948 } 2949 return b; 2950 } 2951 EXPORT_SYMBOL(pci_scan_bus); 2952 2953 /** 2954 * pci_rescan_bus_bridge_resize - Scan a PCI bus for devices 2955 * @bridge: PCI bridge for the bus to scan 2956 * 2957 * Scan a PCI bus and child buses for new devices, add them, 2958 * and enable them, resizing bridge mmio/io resource if necessary 2959 * and possible. The caller must ensure the child devices are already 2960 * removed for resizing to occur. 2961 * 2962 * Returns the max number of subordinate bus discovered. 2963 */ 2964 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge) 2965 { 2966 unsigned int max; 2967 struct pci_bus *bus = bridge->subordinate; 2968 2969 max = pci_scan_child_bus(bus); 2970 2971 pci_assign_unassigned_bridge_resources(bridge); 2972 2973 pci_bus_add_devices(bus); 2974 2975 return max; 2976 } 2977 2978 /** 2979 * pci_rescan_bus - Scan a PCI bus for devices 2980 * @bus: PCI bus to scan 2981 * 2982 * Scan a PCI bus and child buses for new devices, add them, 2983 * and enable them. 2984 * 2985 * Returns the max number of subordinate bus discovered. 
/**
 * pci_rescan_bus - Scan a PCI bus for devices
 * @bus: PCI bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * and enable them.
 *
 * Returns the highest subordinate bus number discovered.
 */
unsigned int pci_rescan_bus(struct pci_bus *bus)
{
	unsigned int max;

	max = pci_scan_child_bus(bus);
	pci_assign_unassigned_bus_resources(bus);
	pci_bus_add_devices(bus);

	return max;
}
EXPORT_SYMBOL_GPL(pci_rescan_bus);

/*
 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
 * routines should always be executed under this mutex.
 */
static DEFINE_MUTEX(pci_rescan_remove_lock);

void pci_lock_rescan_remove(void)
{
	mutex_lock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);

void pci_unlock_rescan_remove(void)
{
	mutex_unlock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);

static int __init pci_sort_bf_cmp(const struct device *d_a,
				  const struct device *d_b)
{
	const struct pci_dev *a = to_pci_dev(d_a);
	const struct pci_dev *b = to_pci_dev(d_b);

	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;

	if      (a->bus->number < b->bus->number) return -1;
	else if (a->bus->number > b->bus->number) return  1;

	if      (a->devfn < b->devfn) return -1;
	else if (a->devfn > b->devfn) return  1;

	return 0;
}

void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}

int pci_hp_add_bridge(struct pci_dev *dev)
{
	struct pci_bus *parent = dev->bus;
	int busnr, start = parent->busn_res.start;
	unsigned int available_buses = 0;
	int end = parent->busn_res.end;

	for (busnr = start; busnr <= end; busnr++) {
		if (!pci_find_bus(pci_domain_nr(parent), busnr))
			break;
	}
	if (busnr-- > end) {
		pci_err(dev, "No bus number available for hot-added bridge\n");
		return -1;
	}

	/* Scan bridges that are already configured */
	busnr = pci_scan_bridge(parent, dev, busnr, 0);

	/*
	 * Distribute the available bus numbers between hotplug-capable
	 * bridges to make extending the chain later possible.
	 */
	available_buses = end - busnr;

	/* Scan bridges that need to be reconfigured */
	pci_scan_bridge_extend(parent, dev, busnr, available_buses, 1);

	if (!dev->subordinate)
		return -1;

	return 0;
}
EXPORT_SYMBOL_GPL(pci_hp_add_bridge);
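/*
 * A hedged sketch of pci_hp_add_bridge() in its intended context (this
 * approximates what hotplug controller drivers do after discovering a
 * new device, under the rescan/remove lock defined above):
 *
 *	pci_lock_rescan_remove();
 *	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
 *		pci_hp_add_bridge(dev);
 *	...
 *	pci_unlock_rescan_remove();
 *
 * followed by resource assignment and pci_bus_add_devices() on the
 * slot's bus.
 */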