/*
 * Copyright (c) 2014 Google, Inc
 * Written by Simon Glass <sjg@chromium.org>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <fdtdec.h>
#include <inttypes.h>
#include <pci.h>
#include <dm/lists.h>
#include <dm/root.h>
#include <dm/device-internal.h>
#if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP)
#include <asm/fsp/fsp_support.h>
#endif

DECLARE_GLOBAL_DATA_PTR;

struct pci_controller *pci_bus_to_hose(int busnum)
{
	struct udevice *bus;
	int ret;

	ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, &bus);
	if (ret) {
		debug("%s: Cannot get bus %d: ret=%d\n", __func__, busnum, ret);
		return NULL;
	}
	return dev_get_uclass_priv(bus);
}

pci_dev_t pci_get_bdf(struct udevice *dev)
{
	struct pci_child_platdata *pplat = dev_get_parent_platdata(dev);
	struct udevice *bus = dev->parent;

	return PCI_ADD_BUS(bus->seq, pplat->devfn);
}

/**
 * pci_get_bus_max() - returns the bus number of the last active bus
 *
 * @return last bus number, or -1 if no active buses
 */
static int pci_get_bus_max(void)
{
	struct udevice *bus;
	struct uclass *uc;
	int ret = -1;

	ret = uclass_get(UCLASS_PCI, &uc);
	uclass_foreach_dev(bus, uc) {
		if (bus->seq > ret)
			ret = bus->seq;
	}

	debug("%s: ret=%d\n", __func__, ret);

	return ret;
}

int pci_last_busno(void)
{
	struct pci_controller *hose;
	struct udevice *bus;
	struct uclass *uc;
	int ret;

	debug("pci_last_busno\n");
	ret = uclass_get(UCLASS_PCI, &uc);
	if (ret || list_empty(&uc->dev_head))
		return -1;

	/* Probe the last bus */
	bus = list_entry(uc->dev_head.prev, struct udevice, uclass_node);
	debug("bus = %p, %s\n", bus, bus->name);
	assert(bus);
	ret = device_probe(bus);
	if (ret)
		return ret;

	/* If that bus has bridges, we may have new buses now. Get the last */
	bus = list_entry(uc->dev_head.prev, struct udevice, uclass_node);
	hose = dev_get_uclass_priv(bus);
	debug("bus = %s, hose = %p\n", bus->name, hose);

	return hose->last_busno;
}

int pci_get_ff(enum pci_size_t size)
{
	switch (size) {
	case PCI_SIZE_8:
		return 0xff;
	case PCI_SIZE_16:
		return 0xffff;
	default:
		return 0xffffffff;
	}
}

int pci_bus_find_devfn(struct udevice *bus, pci_dev_t find_devfn,
		       struct udevice **devp)
{
	struct udevice *dev;

	for (device_find_first_child(bus, &dev);
	     dev;
	     device_find_next_child(&dev)) {
		struct pci_child_platdata *pplat;

		pplat = dev_get_parent_platdata(dev);
		if (pplat && pplat->devfn == find_devfn) {
			*devp = dev;
			return 0;
		}
	}

	return -ENODEV;
}

int pci_bus_find_bdf(pci_dev_t bdf, struct udevice **devp)
{
	struct udevice *bus;
	int ret;

	ret = uclass_get_device_by_seq(UCLASS_PCI, PCI_BUS(bdf), &bus);
	if (ret)
		return ret;
	return pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), devp);
}

static int pci_device_matches_ids(struct udevice *dev,
				  struct pci_device_id *ids)
{
	struct pci_child_platdata *pplat;
	int i;

	pplat = dev_get_parent_platdata(dev);
	if (!pplat)
		return -EINVAL;
	for (i = 0; ids[i].vendor != 0; i++) {
		if (pplat->vendor == ids[i].vendor &&
		    pplat->device == ids[i].device)
			return i;
	}

	return -EINVAL;
}

int pci_bus_find_devices(struct udevice *bus, struct pci_device_id *ids,
			 int *indexp, struct udevice **devp)
{
	struct udevice *dev;

	/* Scan all devices on this bus */
	for (device_find_first_child(bus, &dev);
	     dev;
	     device_find_next_child(&dev)) {
		if (pci_device_matches_ids(dev, ids) >= 0) {
			if ((*indexp)-- <= 0) {
				*devp = dev;
				return 0;
			}
		}
	}

	return -ENODEV;
}

int pci_find_device_id(struct pci_device_id *ids, int index,
		       struct udevice **devp)
{
	struct udevice *bus;

	/* Scan all known buses */
	for (uclass_first_device(UCLASS_PCI, &bus);
	     bus;
	     uclass_next_device(&bus)) {
		if (!pci_bus_find_devices(bus, ids, &index, devp))
			return 0;
	}
	*devp = NULL;

	return -ENODEV;
}
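
/*
 * Example (illustrative only; the vendor/device values are placeholders):
 * finding the first device that matches an ID table with
 * pci_find_device_id(). The table is terminated by an all-zero entry, as
 * expected by pci_device_matches_ids():
 *
 *	static struct pci_device_id ids[] = {
 *		{ .vendor = 0x1234, .device = 0x5678 },
 *		{ }
 *	};
 *	struct udevice *dev;
 *
 *	if (!pci_find_device_id(ids, 0, &dev))
 *		printf("found %s\n", dev->name);
 */
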
int pci_bus_write_config(struct udevice *bus, pci_dev_t bdf, int offset,
			 unsigned long value, enum pci_size_t size)
{
	struct dm_pci_ops *ops;

	ops = pci_get_ops(bus);
	if (!ops->write_config)
		return -ENOSYS;
	return ops->write_config(bus, bdf, offset, value, size);
}

int pci_write_config(pci_dev_t bdf, int offset, unsigned long value,
		     enum pci_size_t size)
{
	struct udevice *bus;
	int ret;

	ret = uclass_get_device_by_seq(UCLASS_PCI, PCI_BUS(bdf), &bus);
	if (ret)
		return ret;

	return pci_bus_write_config(bus, bdf, offset, value, size);
}

int dm_pci_write_config(struct udevice *dev, int offset, unsigned long value,
			enum pci_size_t size)
{
	struct udevice *bus;

	for (bus = dev; device_get_uclass_id(bus->parent) == UCLASS_PCI;)
		bus = bus->parent;
	return pci_bus_write_config(bus, pci_get_bdf(dev), offset, value, size);
}

int pci_write_config32(pci_dev_t bdf, int offset, u32 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_32);
}

int pci_write_config16(pci_dev_t bdf, int offset, u16 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_16);
}

int pci_write_config8(pci_dev_t bdf, int offset, u8 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_8);
}

int dm_pci_write_config8(struct udevice *dev, int offset, u8 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_8);
}

int dm_pci_write_config16(struct udevice *dev, int offset, u16 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_16);
}

int dm_pci_write_config32(struct udevice *dev, int offset, u32 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_32);
}

int pci_bus_read_config(struct udevice *bus, pci_dev_t bdf, int offset,
			unsigned long *valuep, enum pci_size_t size)
{
	struct dm_pci_ops *ops;

	ops = pci_get_ops(bus);
	if (!ops->read_config)
		return -ENOSYS;
	return ops->read_config(bus, bdf, offset, valuep, size);
}

int pci_read_config(pci_dev_t bdf, int offset, unsigned long *valuep,
		    enum pci_size_t size)
{
	struct udevice *bus;
	int ret;

	ret = uclass_get_device_by_seq(UCLASS_PCI, PCI_BUS(bdf), &bus);
	if (ret)
		return ret;

	return pci_bus_read_config(bus, bdf, offset, valuep, size);
}

int dm_pci_read_config(struct udevice *dev, int offset, unsigned long *valuep,
		       enum pci_size_t size)
{
	struct udevice *bus;

	for (bus = dev; device_get_uclass_id(bus->parent) == UCLASS_PCI;)
		bus = bus->parent;
	return pci_bus_read_config(bus, pci_get_bdf(dev), offset, valuep,
				   size);
}

int pci_read_config32(pci_dev_t bdf, int offset, u32 *valuep)
{
	unsigned long value;
	int ret;

	ret = pci_read_config(bdf, offset, &value, PCI_SIZE_32);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int pci_read_config16(pci_dev_t bdf, int offset, u16 *valuep)
{
	unsigned long value;
	int ret;

	ret = pci_read_config(bdf, offset, &value, PCI_SIZE_16);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int pci_read_config8(pci_dev_t bdf, int offset, u8 *valuep)
{
	unsigned long value;
	int ret;

	ret = pci_read_config(bdf, offset, &value, PCI_SIZE_8);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int dm_pci_read_config8(struct udevice *dev, int offset, u8 *valuep)
{
	unsigned long value;
	int ret;

	ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_8);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int dm_pci_read_config16(struct udevice *dev, int offset, u16 *valuep)
{
	unsigned long value;
	int ret;

	ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_16);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int dm_pci_read_config32(struct udevice *dev, int offset, u32 *valuep)
{
	unsigned long value;
	int ret;

	ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_32);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}
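
/*
 * Example (illustrative only): reading and modifying a device's config
 * space with the driver-model accessors above, e.g. enabling memory-space
 * decoding in the command register:
 *
 *	u16 cmd;
 *
 *	dm_pci_read_config16(dev, PCI_COMMAND, &cmd);
 *	dm_pci_write_config16(dev, PCI_COMMAND, cmd | PCI_COMMAND_MEMORY);
 */
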
int pci_auto_config_devices(struct udevice *bus)
{
	struct pci_controller *hose = bus->uclass_priv;
	unsigned int sub_bus;
	struct udevice *dev;
	int ret;

	sub_bus = bus->seq;
	debug("%s: start\n", __func__);
	pciauto_config_init(hose);
	for (ret = device_find_first_child(bus, &dev);
	     !ret && dev;
	     ret = device_find_next_child(&dev)) {
		unsigned int max_bus;

		debug("%s: device %s\n", __func__, dev->name);
		max_bus = pciauto_config_device(hose, pci_get_bdf(dev));
		sub_bus = max(sub_bus, max_bus);
	}
	debug("%s: done\n", __func__);

	return sub_bus;
}

int dm_pci_hose_probe_bus(struct pci_controller *hose, pci_dev_t bdf)
{
	struct udevice *parent, *bus;
	int sub_bus;
	int ret;

	debug("%s\n", __func__);
	parent = hose->bus;

	/* Find the bus within the parent */
	ret = pci_bus_find_devfn(parent, PCI_MASK_BUS(bdf), &bus);
	if (ret) {
		debug("%s: Cannot find device %x on bus %s: %d\n", __func__,
		      bdf, parent->name, ret);
		return ret;
	}

	sub_bus = pci_get_bus_max() + 1;
	debug("%s: bus = %d/%s\n", __func__, sub_bus, bus->name);
	pciauto_prescan_setup_bridge(hose, bdf, sub_bus);

	ret = device_probe(bus);
	if (ret) {
		debug("%s: Cannot probe bus %s: %d\n", __func__, bus->name,
		      ret);
		return ret;
	}
	if (sub_bus != bus->seq) {
		printf("%s: Internal error, bus '%s' got seq %d, expected %d\n",
		       __func__, bus->name, bus->seq, sub_bus);
		return -EPIPE;
	}
	sub_bus = pci_get_bus_max();
	pciauto_postscan_setup_bridge(hose, bdf, sub_bus);

	return sub_bus;
}

/**
 * pci_match_one_id() - check whether a found device matches one ID entry
 *
 * @id:		ID entry to check against; vendor, device, subvendor and
 *		subdevice may be PCI_ANY_ID and class is compared under
 *		class_mask, so all of these can act as wildcards
 * @find:	IDs of the device that was found on the bus
 *
 * @return true if the device matches the entry, false if not
 */
static bool pci_match_one_id(const struct pci_device_id *id,
			     const struct pci_device_id *find)
{
	if ((id->vendor == PCI_ANY_ID || id->vendor == find->vendor) &&
	    (id->device == PCI_ANY_ID || id->device == find->device) &&
	    (id->subvendor == PCI_ANY_ID || id->subvendor == find->subvendor) &&
	    (id->subdevice == PCI_ANY_ID || id->subdevice == find->subdevice) &&
	    !((id->class ^ find->class) & id->class_mask))
		return true;

	return false;
}
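
/*
 * A driver that wants to be found by the search below typically declares a
 * match table with U_BOOT_PCI_DEVICE(). A minimal sketch (the driver name
 * and ID values are hypothetical); subvendor/subdevice are set to
 * PCI_ANY_ID so that pci_match_one_id() treats them as wildcards:
 *
 *	static const struct pci_device_id mydev_ids[] = {
 *		{ .vendor = 0x1234, .device = 0x5678,
 *		  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },
 *		{ }
 *	};
 *
 *	U_BOOT_PCI_DEVICE(mydev_drv, mydev_ids);
 */
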
/**
 * pci_find_and_bind_driver() - Find and bind the right PCI driver
 *
 * This only looks at certain fields in the descriptor.
 *
 * @parent:	parent bus to bind the new device under
 * @find_id:	IDs of the device that was found on the bus
 * @bdf:	bus/device/function address - see PCI_BDF()
 * @devp:	returns the bound device, or NULL if no device was bound
 *		(e.g. before relocation, when only certain drivers are used)
 * @return 0 if OK (including the no-device case above), other -ve on error
 */
static int pci_find_and_bind_driver(struct udevice *parent,
				    struct pci_device_id *find_id, pci_dev_t bdf,
				    struct udevice **devp)
{
	struct pci_driver_entry *start, *entry;
	const char *drv;
	int n_ents;
	int ret;
	char name[30], *str;
	bool bridge;

	*devp = NULL;

	debug("%s: Searching for driver: vendor=%x, device=%x\n", __func__,
	      find_id->vendor, find_id->device);
	start = ll_entry_start(struct pci_driver_entry, pci_driver_entry);
	n_ents = ll_entry_count(struct pci_driver_entry, pci_driver_entry);
	for (entry = start; entry != start + n_ents; entry++) {
		const struct pci_device_id *id;
		struct udevice *dev;
		const struct driver *drv;

		for (id = entry->match;
		     id->vendor || id->subvendor || id->class_mask;
		     id++) {
			if (!pci_match_one_id(id, find_id))
				continue;

			drv = entry->driver;

			/*
			 * In the pre-relocation phase, we only bind devices
			 * whose driver has the DM_FLAG_PRE_RELOC flag set, to
			 * save precious memory space, since on some platforms
			 * it is very limited (e.g. when using Cache As RAM).
			 */
			if (!(gd->flags & GD_FLG_RELOC) &&
			    !(drv->flags & DM_FLAG_PRE_RELOC))
				return 0;

			/*
			 * We could pass the descriptor to the driver as
			 * platdata (instead of NULL) and allow its bind()
			 * method to return -ENOENT if it doesn't support this
			 * device. That way we could continue the search to
			 * find another driver. For now this doesn't seem
			 * necessary, so just bind the first match.
			 */
			ret = device_bind(parent, drv, drv->name, NULL, -1,
					  &dev);
			if (ret)
				goto error;
			debug("%s: Match found: %s\n", __func__, drv->name);
			dev->driver_data = find_id->driver_data;
			*devp = dev;
			return 0;
		}
	}

	bridge = (find_id->class >> 8) == PCI_CLASS_BRIDGE_PCI;
	/*
	 * In the pre-relocation phase, we only bind bridge devices, to save
	 * precious memory space, since on some platforms it is very limited
	 * (e.g. when using Cache As RAM).
	 */
	if (!(gd->flags & GD_FLG_RELOC) && !bridge)
		return 0;

	/* Bind a generic driver so that the device can be used */
	sprintf(name, "pci_%x:%x.%x", parent->seq, PCI_DEV(bdf),
		PCI_FUNC(bdf));
	str = strdup(name);
	if (!str)
		return -ENOMEM;
	drv = bridge ? "pci_bridge_drv" : "pci_generic_drv";

	ret = device_bind_driver(parent, drv, str, devp);
	if (ret) {
		debug("%s: Failed to bind generic driver: %d\n", __func__, ret);
		return ret;
	}
	debug("%s: No match found: bound generic driver instead\n", __func__);

	return 0;

error:
	debug("%s: No match found: error %d\n", __func__, ret);
	return ret;
}

int pci_bind_bus_devices(struct udevice *bus)
{
	ulong vendor, device;
	ulong header_type;
	pci_dev_t bdf, end;
	bool found_multi;
	int ret;

	found_multi = false;
	end = PCI_BDF(bus->seq, PCI_MAX_PCI_DEVICES - 1,
		      PCI_MAX_PCI_FUNCTIONS - 1);
	for (bdf = PCI_BDF(bus->seq, 0, 0); bdf < end;
	     bdf += PCI_BDF(0, 0, 1)) {
		struct pci_child_platdata *pplat;
		struct udevice *dev;
		ulong class;

		if (PCI_FUNC(bdf) && !found_multi)
			continue;
		/* Check only the first access, we don't expect problems */
		ret = pci_bus_read_config(bus, bdf, PCI_HEADER_TYPE,
					  &header_type, PCI_SIZE_8);
		if (ret)
			goto error;
		pci_bus_read_config(bus, bdf, PCI_VENDOR_ID, &vendor,
				    PCI_SIZE_16);
		if (vendor == 0xffff || vendor == 0x0000)
			continue;

		if (!PCI_FUNC(bdf))
			found_multi = header_type & 0x80;

		debug("%s: bus %d/%s: found device %x, function %d\n", __func__,
		      bus->seq, bus->name, PCI_DEV(bdf), PCI_FUNC(bdf));
		pci_bus_read_config(bus, bdf, PCI_DEVICE_ID, &device,
				    PCI_SIZE_16);
		pci_bus_read_config(bus, bdf, PCI_CLASS_REVISION, &class,
				    PCI_SIZE_32);
		class >>= 8;

		/* Find this device in the device tree */
		ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev);

		/* Search for a driver */

		/* If nothing in the device tree, bind a generic device */
		if (ret == -ENODEV) {
			struct pci_device_id find_id;
			ulong val;

			memset(&find_id, '\0', sizeof(find_id));
			find_id.vendor = vendor;
			find_id.device = device;
			find_id.class = class;
			if ((header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL) {
				pci_bus_read_config(bus, bdf,
						    PCI_SUBSYSTEM_VENDOR_ID,
						    &val, PCI_SIZE_32);
				find_id.subvendor = val & 0xffff;
				find_id.subdevice = val >> 16;
			}
			ret = pci_find_and_bind_driver(bus, &find_id, bdf,
						       &dev);
		}
		if (ret)
			return ret;

		/* Update the platform data */
		if (dev) {
			pplat = dev_get_parent_platdata(dev);
			pplat->devfn = PCI_MASK_BUS(bdf);
			pplat->vendor = vendor;
			pplat->device = device;
			pplat->class = class;
		}
	}

	return 0;
error:
	printf("Cannot read bus configuration: %d\n", ret);

	return ret;
}

static int pci_uclass_post_bind(struct udevice *bus)
{
	/*
	 * If the bus has no node in the device tree, there are no child
	 * devices to scan for.
	 */
	if (bus->of_offset == -1)
		return 0;

	/*
	 * Scan the device tree for devices. This does not probe the PCI bus,
	 * as this is not permitted while binding. It just finds devices
	 * mentioned in the device tree.
	 *
	 * Before relocation, only bind devices marked for pre-relocation
	 * use.
	 */
	return dm_scan_fdt_node(bus, gd->fdt_blob, bus->of_offset,
				gd->flags & GD_FLG_RELOC ? false : true);
}
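
/*
 * A hypothetical device tree fragment for the scan above (node names and
 * compatible strings are purely illustrative). Each child of the bus node
 * with a matching driver is bound here; the device behind it is only
 * checked against the bus later, when the bus is probed:
 *
 *	pci {
 *		compatible = "vendor,some-pci-host";
 *		...
 *		somedev@1f,0 {
 *			compatible = "pci1234,5678";
 *			reg = <0x0000f800 0 0 0 0>;
 *		};
 *	};
 *
 * Here the unit address and "reg" encode device 0x1f, function 0 in
 * config space (see pci_uclass_child_post_bind() below).
 */
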
static int decode_regions(struct pci_controller *hose, const void *blob,
			  int parent_node, int node)
{
	int pci_addr_cells, addr_cells, size_cells;
	int cells_per_record;
	phys_addr_t addr;
	const u32 *prop;
	int len;
	int i;

	prop = fdt_getprop(blob, node, "ranges", &len);
	if (!prop)
		return -EINVAL;
	pci_addr_cells = fdt_address_cells(blob, node);
	addr_cells = fdt_address_cells(blob, parent_node);
	size_cells = fdt_size_cells(blob, node);

	/* PCI addresses are always 3-cells */
	len /= sizeof(u32);
	cells_per_record = pci_addr_cells + addr_cells + size_cells;
	hose->region_count = 0;
	debug("%s: len=%d, cells_per_record=%d\n", __func__, len,
	      cells_per_record);
	for (i = 0; i < MAX_PCI_REGIONS; i++, len -= cells_per_record) {
		u64 pci_addr, addr, size;
		int space_code;
		u32 flags;
		int type;

		if (len < cells_per_record)
			break;
		flags = fdt32_to_cpu(prop[0]);
		space_code = (flags >> 24) & 3;
		pci_addr = fdtdec_get_number(prop + 1, 2);
		prop += pci_addr_cells;
		addr = fdtdec_get_number(prop, addr_cells);
		prop += addr_cells;
		size = fdtdec_get_number(prop, size_cells);
		prop += size_cells;
		debug("%s: region %d, pci_addr=%" PRIx64 ", addr=%" PRIx64
		      ", size=%" PRIx64 ", space_code=%d\n", __func__,
		      hose->region_count, pci_addr, addr, size, space_code);
		if (space_code & 2) {
			type = flags & (1U << 30) ? PCI_REGION_PREFETCH :
					PCI_REGION_MEM;
		} else if (space_code & 1) {
			type = PCI_REGION_IO;
		} else {
			continue;
		}
		debug(" - type=%d\n", type);
		pci_set_region(hose->regions + hose->region_count++, pci_addr,
			       addr, size, type);
	}

	/* Add a region for our local memory */
	addr = gd->ram_size;
	if (gd->pci_ram_top && gd->pci_ram_top < addr)
		addr = gd->pci_ram_top;
	pci_set_region(hose->regions + hose->region_count++, 0, 0, addr,
		       PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);

	return 0;
}
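
/*
 * Example "ranges" property for the decoding above (addresses are
 * illustrative; this assumes #address-cells = <2> in the parent and
 * #size-cells = <2> on the bus node, so each entry is 3 + 2 + 2 cells):
 *
 *	ranges = <0x02000000 0x0 0x40000000  0x0 0x40000000  0x0 0x10000000>,
 *		 <0x01000000 0x0 0x00000000  0x0 0xe8000000  0x0 0x00010000>;
 *
 * The top byte of the first PCI address cell selects the space code
 * (0x02 = 32-bit memory, 0x01 = I/O), and bit 30 marks a prefetchable
 * memory window, matching the space_code/flags checks above.
 */
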
static int pci_uclass_pre_probe(struct udevice *bus)
{
	struct pci_controller *hose;
	int ret;

	debug("%s, bus=%d/%s, parent=%s\n", __func__, bus->seq, bus->name,
	      bus->parent->name);
	hose = bus->uclass_priv;

	/* For bridges, use the top-level PCI controller */
	if (device_get_uclass_id(bus->parent) == UCLASS_ROOT) {
		hose->ctlr = bus;
		ret = decode_regions(hose, gd->fdt_blob, bus->parent->of_offset,
				     bus->of_offset);
		if (ret) {
			debug("%s: Cannot decode regions\n", __func__);
			return ret;
		}
	} else {
		struct pci_controller *parent_hose;

		parent_hose = dev_get_uclass_priv(bus->parent);
		hose->ctlr = parent_hose->bus;
	}
	hose->bus = bus;
	hose->first_busno = bus->seq;
	hose->last_busno = bus->seq;

	return 0;
}

static int pci_uclass_post_probe(struct udevice *bus)
{
	int ret;

	debug("%s: probing bus %d\n", __func__, bus->seq);
	ret = pci_bind_bus_devices(bus);
	if (ret)
		return ret;

#ifdef CONFIG_PCI_PNP
	ret = pci_auto_config_devices(bus);
#endif

#if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP)
	/*
	 * Per the Intel FSP specification, we should call the FSP notify API
	 * to inform the FSP that PCI enumeration has been done, so that it
	 * can perform any initialization required by the chipset's BIOS
	 * Writer's Guide (BWG).
	 *
	 * Unfortunately we have to put this call here, since with driver
	 * model enumeration is done lazily, as needed, so nothing happens
	 * until something on PCI is touched.
	 *
	 * Note we only call this 1) after U-Boot is relocated, and 2) once
	 * the root bus has finished probing.
	 */
	if ((gd->flags & GD_FLG_RELOC) && (bus->seq == 0))
		ret = fsp_init_phase_pci();
#endif

	return ret < 0 ? ret : 0;
}

static int pci_uclass_child_post_bind(struct udevice *dev)
{
	struct pci_child_platdata *pplat;
	struct fdt_pci_addr addr;
	int ret;

	if (dev->of_offset == -1)
		return 0;

	/*
	 * We could read the vendor, device and class here if available.
	 * For now we just check the address.
	 */
	pplat = dev_get_parent_platdata(dev);
	ret = fdtdec_get_pci_addr(gd->fdt_blob, dev->of_offset,
				  FDT_PCI_SPACE_CONFIG, "reg", &addr);

	if (ret) {
		if (ret != -ENOENT)
			return -EINVAL;
	} else {
		/* extract the devfn from fdt_pci_addr */
		pplat->devfn = addr.phys_hi & 0xff00;
	}

	return 0;
}
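
/*
 * Example (illustrative): the config-space "reg" entry read above encodes
 * the device location in its first cell (phys.hi) as
 * 00000000 bbbbbbbb dddddfff 00000000. A child node for bus 0, device
 * 0x1f, function 0 would therefore use:
 *
 *	reg = <0x0000f800 0 0 0 0>;
 *
 * and the devfn stored in the parent platdata above is
 * phys.hi & 0xff00, i.e. 0xf800.
 */
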
static int pci_bridge_read_config(struct udevice *bus, pci_dev_t bdf,
				  uint offset, ulong *valuep,
				  enum pci_size_t size)
{
	struct pci_controller *hose = bus->uclass_priv;

	return pci_bus_read_config(hose->ctlr, bdf, offset, valuep, size);
}

static int pci_bridge_write_config(struct udevice *bus, pci_dev_t bdf,
				   uint offset, ulong value,
				   enum pci_size_t size)
{
	struct pci_controller *hose = bus->uclass_priv;

	return pci_bus_write_config(hose->ctlr, bdf, offset, value, size);
}

static int skip_to_next_device(struct udevice *bus, struct udevice **devp)
{
	struct udevice *dev;
	int ret = 0;

	/*
	 * Scan through all the PCI controllers. On x86 there will only be one
	 * but that is not necessarily true on other hardware.
	 */
	do {
		device_find_first_child(bus, &dev);
		if (dev) {
			*devp = dev;
			return 0;
		}
		ret = uclass_next_device(&bus);
		if (ret)
			return ret;
	} while (bus);

	return 0;
}

int pci_find_next_device(struct udevice **devp)
{
	struct udevice *child = *devp;
	struct udevice *bus = child->parent;
	int ret;

	/* First try all the siblings */
	*devp = NULL;
	while (child) {
		device_find_next_child(&child);
		if (child) {
			*devp = child;
			return 0;
		}
	}

	/* We ran out of siblings. Try the next bus */
	ret = uclass_next_device(&bus);
	if (ret)
		return ret;

	return bus ? skip_to_next_device(bus, devp) : 0;
}

int pci_find_first_device(struct udevice **devp)
{
	struct udevice *bus;
	int ret;

	*devp = NULL;
	ret = uclass_first_device(UCLASS_PCI, &bus);
	if (ret)
		return ret;

	return skip_to_next_device(bus, devp);
}
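
/*
 * Example (illustrative): walking every PCI device known to driver model
 * with the two helpers above:
 *
 *	struct udevice *dev;
 *
 *	for (pci_find_first_device(&dev); dev; pci_find_next_device(&dev))
 *		printf("%s\n", dev->name);
 */
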
UCLASS_DRIVER(pci) = {
	.id		= UCLASS_PCI,
	.name		= "pci",
	.flags		= DM_UC_FLAG_SEQ_ALIAS,
	.post_bind	= pci_uclass_post_bind,
	.pre_probe	= pci_uclass_pre_probe,
	.post_probe	= pci_uclass_post_probe,
	.child_post_bind = pci_uclass_child_post_bind,
	.per_device_auto_alloc_size = sizeof(struct pci_controller),
	.per_child_platdata_auto_alloc_size =
			sizeof(struct pci_child_platdata),
};

static const struct dm_pci_ops pci_bridge_ops = {
	.read_config	= pci_bridge_read_config,
	.write_config	= pci_bridge_write_config,
};

static const struct udevice_id pci_bridge_ids[] = {
	{ .compatible = "pci-bridge" },
	{ }
};

U_BOOT_DRIVER(pci_bridge_drv) = {
	.name		= "pci_bridge_drv",
	.id		= UCLASS_PCI,
	.of_match	= pci_bridge_ids,
	.ops		= &pci_bridge_ops,
};

UCLASS_DRIVER(pci_generic) = {
	.id		= UCLASS_PCI_GENERIC,
	.name		= "pci_generic",
};

static const struct udevice_id pci_generic_ids[] = {
	{ .compatible = "pci-generic" },
	{ }
};

U_BOOT_DRIVER(pci_generic_drv) = {
	.name		= "pci_generic_drv",
	.id		= UCLASS_PCI_GENERIC,
	.of_match	= pci_generic_ids,
};