/*
 * Copyright (c) 2014 Google, Inc
 * Written by Simon Glass <sjg@chromium.org>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <fdtdec.h>
#include <inttypes.h>
#include <pci.h>
#include <asm/io.h>
#include <dm/lists.h>
#include <dm/root.h>
#include <dm/device-internal.h>
#if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP)
#include <asm/fsp/fsp_support.h>
#endif
#include "pci_internal.h"

DECLARE_GLOBAL_DATA_PTR;

int pci_get_bus(int busnum, struct udevice **busp)
{
	int ret;

	ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp);

	/* Since buses may not be numbered yet try a little harder with bus 0 */
	if (ret == -ENODEV) {
		ret = uclass_first_device_err(UCLASS_PCI, busp);
		if (ret)
			return ret;
		ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp);
	}

	return ret;
}

struct udevice *pci_get_controller(struct udevice *dev)
{
	while (device_is_on_pci_bus(dev))
		dev = dev->parent;

	return dev;
}

pci_dev_t dm_pci_get_bdf(struct udevice *dev)
{
	struct pci_child_platdata *pplat = dev_get_parent_platdata(dev);
	struct udevice *bus = dev->parent;

	return PCI_ADD_BUS(bus->seq, pplat->devfn);
}

/**
 * pci_get_bus_max() - returns the bus number of the last active bus
 *
 * @return last bus number, or -1 if no active buses
 */
static int pci_get_bus_max(void)
{
	struct udevice *bus;
	struct uclass *uc;
	int ret = -1;

	ret = uclass_get(UCLASS_PCI, &uc);
	uclass_foreach_dev(bus, uc) {
		if (bus->seq > ret)
			ret = bus->seq;
	}

	debug("%s: ret=%d\n", __func__, ret);

	return ret;
}

int pci_last_busno(void)
{
	return pci_get_bus_max();
}

int pci_get_ff(enum pci_size_t size)
{
	switch (size) {
	case PCI_SIZE_8:
		return 0xff;
	case PCI_SIZE_16:
		return 0xffff;
	default:
		return 0xffffffff;
	}
}

int pci_bus_find_devfn(struct udevice *bus, pci_dev_t find_devfn,
		       struct udevice **devp)
{
	struct udevice *dev;

	for (device_find_first_child(bus, &dev);
	     dev;
	     device_find_next_child(&dev)) {
		struct pci_child_platdata *pplat;

		pplat = dev_get_parent_platdata(dev);
		if (pplat && pplat->devfn == find_devfn) {
			*devp = dev;
			return 0;
		}
	}

	return -ENODEV;
}

int dm_pci_bus_find_bdf(pci_dev_t bdf, struct udevice **devp)
{
	struct udevice *bus;
	int ret;

	ret = pci_get_bus(PCI_BUS(bdf), &bus);
	if (ret)
		return ret;
	return pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), devp);
}

static int pci_device_matches_ids(struct udevice *dev,
				  struct pci_device_id *ids)
{
	struct pci_child_platdata *pplat;
	int i;

	pplat = dev_get_parent_platdata(dev);
	if (!pplat)
		return -EINVAL;
	for (i = 0; ids[i].vendor != 0; i++) {
		if (pplat->vendor == ids[i].vendor &&
		    pplat->device == ids[i].device)
			return i;
	}

	return -EINVAL;
}

int pci_bus_find_devices(struct udevice *bus, struct pci_device_id *ids,
			 int *indexp, struct udevice **devp)
{
	struct udevice *dev;

	/* Scan all devices on this bus */
	for (device_find_first_child(bus, &dev);
	     dev;
	     device_find_next_child(&dev)) {
		if (pci_device_matches_ids(dev, ids) >= 0) {
			if ((*indexp)-- <= 0) {
				*devp = dev;
				return 0;
			}
		}
	}

	return -ENODEV;
}
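/*
 * Illustrative sketch (not part of this driver): how a caller might use the
 * ID-based lookup helper below. The vendor/device values and the "my_dev"
 * name are made up for the example; the table is terminated by an all-zero
 * entry, as pci_device_matches_ids() above expects.
 *
 *	static struct pci_device_id ids[] = {
 *		{ .vendor = 0x1234, .device = 0x5678 },	// hypothetical IDs
 *		{ }
 *	};
 *
 *	struct udevice *my_dev;
 *
 *	if (!pci_find_device_id(ids, 0, &my_dev))
 *		printf("found %s\n", my_dev->name);
 */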
int pci_find_device_id(struct pci_device_id *ids, int index,
		       struct udevice **devp)
{
	struct udevice *bus;

	/* Scan all known buses */
	for (uclass_first_device(UCLASS_PCI, &bus);
	     bus;
	     uclass_next_device(&bus)) {
		if (!pci_bus_find_devices(bus, ids, &index, devp))
			return 0;
	}
	*devp = NULL;

	return -ENODEV;
}

static int dm_pci_bus_find_device(struct udevice *bus, unsigned int vendor,
				  unsigned int device, int *indexp,
				  struct udevice **devp)
{
	struct pci_child_platdata *pplat;
	struct udevice *dev;

	for (device_find_first_child(bus, &dev);
	     dev;
	     device_find_next_child(&dev)) {
		pplat = dev_get_parent_platdata(dev);
		if (pplat->vendor == vendor && pplat->device == device) {
			if (!(*indexp)--) {
				*devp = dev;
				return 0;
			}
		}
	}

	return -ENODEV;
}

int dm_pci_find_device(unsigned int vendor, unsigned int device, int index,
		       struct udevice **devp)
{
	struct udevice *bus;

	/* Scan all known buses */
	for (uclass_first_device(UCLASS_PCI, &bus);
	     bus;
	     uclass_next_device(&bus)) {
		if (!dm_pci_bus_find_device(bus, vendor, device, &index, devp))
			return device_probe(*devp);
	}
	*devp = NULL;

	return -ENODEV;
}

int dm_pci_find_class(uint find_class, int index, struct udevice **devp)
{
	struct udevice *dev;

	/* Scan all known buses */
	for (pci_find_first_device(&dev);
	     dev;
	     pci_find_next_device(&dev)) {
		struct pci_child_platdata *pplat = dev_get_parent_platdata(dev);

		if (pplat->class == find_class && !index--) {
			*devp = dev;
			return device_probe(*devp);
		}
	}
	*devp = NULL;

	return -ENODEV;
}

int pci_bus_write_config(struct udevice *bus, pci_dev_t bdf, int offset,
			 unsigned long value, enum pci_size_t size)
{
	struct dm_pci_ops *ops;

	ops = pci_get_ops(bus);
	if (!ops->write_config)
		return -ENOSYS;
	return ops->write_config(bus, bdf, offset, value, size);
}

int pci_write_config(pci_dev_t bdf, int offset, unsigned long value,
		     enum pci_size_t size)
{
	struct udevice *bus;
	int ret;

	ret = pci_get_bus(PCI_BUS(bdf), &bus);
	if (ret)
		return ret;

	return pci_bus_write_config(bus, bdf, offset, value, size);
}

int dm_pci_write_config(struct udevice *dev, int offset, unsigned long value,
			enum pci_size_t size)
{
	struct udevice *bus;

	for (bus = dev; device_is_on_pci_bus(bus);)
		bus = bus->parent;
	return pci_bus_write_config(bus, dm_pci_get_bdf(dev), offset, value,
				    size);
}

int pci_write_config32(pci_dev_t bdf, int offset, u32 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_32);
}

int pci_write_config16(pci_dev_t bdf, int offset, u16 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_16);
}

int pci_write_config8(pci_dev_t bdf, int offset, u8 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_8);
}

int dm_pci_write_config8(struct udevice *dev, int offset, u8 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_8);
}

int dm_pci_write_config16(struct udevice *dev, int offset, u16 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_16);
}

int dm_pci_write_config32(struct udevice *dev, int offset, u32 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_32);
}
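/*
 * Illustrative sketch (not part of this driver): a typical read-modify-write
 * of a config register using the dm_pci_*_config accessors, for example
 * enabling memory space and bus mastering on a device "dev":
 *
 *	u16 cmd;
 *
 *	dm_pci_read_config16(dev, PCI_COMMAND, &cmd);
 *	cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
 *	dm_pci_write_config16(dev, PCI_COMMAND, cmd);
 */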
int pci_bus_read_config(struct udevice *bus, pci_dev_t bdf, int offset,
			unsigned long *valuep, enum pci_size_t size)
{
	struct dm_pci_ops *ops;

	ops = pci_get_ops(bus);
	if (!ops->read_config)
		return -ENOSYS;
	return ops->read_config(bus, bdf, offset, valuep, size);
}

int pci_read_config(pci_dev_t bdf, int offset, unsigned long *valuep,
		    enum pci_size_t size)
{
	struct udevice *bus;
	int ret;

	ret = pci_get_bus(PCI_BUS(bdf), &bus);
	if (ret)
		return ret;

	return pci_bus_read_config(bus, bdf, offset, valuep, size);
}

int dm_pci_read_config(struct udevice *dev, int offset, unsigned long *valuep,
		       enum pci_size_t size)
{
	struct udevice *bus;

	for (bus = dev; device_is_on_pci_bus(bus);)
		bus = bus->parent;
	return pci_bus_read_config(bus, dm_pci_get_bdf(dev), offset, valuep,
				   size);
}

int pci_read_config32(pci_dev_t bdf, int offset, u32 *valuep)
{
	unsigned long value;
	int ret;

	ret = pci_read_config(bdf, offset, &value, PCI_SIZE_32);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int pci_read_config16(pci_dev_t bdf, int offset, u16 *valuep)
{
	unsigned long value;
	int ret;

	ret = pci_read_config(bdf, offset, &value, PCI_SIZE_16);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int pci_read_config8(pci_dev_t bdf, int offset, u8 *valuep)
{
	unsigned long value;
	int ret;

	ret = pci_read_config(bdf, offset, &value, PCI_SIZE_8);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int dm_pci_read_config8(struct udevice *dev, int offset, u8 *valuep)
{
	unsigned long value;
	int ret;

	ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_8);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int dm_pci_read_config16(struct udevice *dev, int offset, u16 *valuep)
{
	unsigned long value;
	int ret;

	ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_16);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int dm_pci_read_config32(struct udevice *dev, int offset, u32 *valuep)
{
	unsigned long value;
	int ret;

	ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_32);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}
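/*
 * Illustrative sketch (not part of this driver): the class code lives in the
 * top 24 bits of the PCI_CLASS_REVISION dword, which is why
 * pci_bind_bus_devices() below shifts the value right by 8 before storing it
 * and why the VGA check in pci_auto_config_devices() compares against
 * PCI_CLASS_DISPLAY_VGA << 8. A caller could do the same with the 32-bit
 * accessor above:
 *
 *	u32 class_rev;
 *
 *	dm_pci_read_config32(dev, PCI_CLASS_REVISION, &class_rev);
 *	if ((class_rev >> 8) == (PCI_CLASS_DISPLAY_VGA << 8))
 *		debug("%s is a VGA device\n", dev->name);
 */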
static void set_vga_bridge_bits(struct udevice *dev)
{
	struct udevice *parent = dev->parent;
	u16 bc;

	while (parent->seq != 0) {
		dm_pci_read_config16(parent, PCI_BRIDGE_CONTROL, &bc);
		bc |= PCI_BRIDGE_CTL_VGA;
		dm_pci_write_config16(parent, PCI_BRIDGE_CONTROL, bc);
		parent = parent->parent;
	}
}

int pci_auto_config_devices(struct udevice *bus)
{
	struct pci_controller *hose = bus->uclass_priv;
	struct pci_child_platdata *pplat;
	unsigned int sub_bus;
	struct udevice *dev;
	int ret;

	sub_bus = bus->seq;
	debug("%s: start\n", __func__);
	pciauto_config_init(hose);
	for (ret = device_find_first_child(bus, &dev);
	     !ret && dev;
	     ret = device_find_next_child(&dev)) {
		unsigned int max_bus;
		int ret;

		debug("%s: device %s\n", __func__, dev->name);
		ret = dm_pciauto_config_device(dev);
		if (ret < 0)
			return ret;
		max_bus = ret;
		sub_bus = max(sub_bus, max_bus);

		pplat = dev_get_parent_platdata(dev);
		if (pplat->class == (PCI_CLASS_DISPLAY_VGA << 8))
			set_vga_bridge_bits(dev);
	}
	debug("%s: done\n", __func__);

	return sub_bus;
}

int dm_pci_hose_probe_bus(struct udevice *bus)
{
	int sub_bus;
	int ret;

	debug("%s\n", __func__);

	sub_bus = pci_get_bus_max() + 1;
	debug("%s: bus = %d/%s\n", __func__, sub_bus, bus->name);
	dm_pciauto_prescan_setup_bridge(bus, sub_bus);

	ret = device_probe(bus);
	if (ret) {
		debug("%s: Cannot probe bus %s: %d\n", __func__, bus->name,
		      ret);
		return ret;
	}
	if (sub_bus != bus->seq) {
		printf("%s: Internal error, bus '%s' got seq %d, expected %d\n",
		       __func__, bus->name, bus->seq, sub_bus);
		return -EPIPE;
	}
	sub_bus = pci_get_bus_max();
	dm_pciauto_postscan_setup_bridge(bus, sub_bus);

	return sub_bus;
}

/**
 * pci_match_one_id - Tell if a PCI device id structure matches another
 * @id: single PCI device id structure to match
 * @find: the PCI device id structure to match against
 *
 * Returns true if the two id structures match, false otherwise. Fields in
 * @id set to PCI_ANY_ID act as wildcards, and only the class bits selected
 * by @id->class_mask are compared.
 */
static bool pci_match_one_id(const struct pci_device_id *id,
			     const struct pci_device_id *find)
{
	if ((id->vendor == PCI_ANY_ID || id->vendor == find->vendor) &&
	    (id->device == PCI_ANY_ID || id->device == find->device) &&
	    (id->subvendor == PCI_ANY_ID || id->subvendor == find->subvendor) &&
	    (id->subdevice == PCI_ANY_ID || id->subdevice == find->subdevice) &&
	    !((id->class ^ find->class) & id->class_mask))
		return true;

	return false;
}
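/*
 * Illustrative sketch (not part of this driver): a match entry as it might
 * appear in a driver's table registered with U_BOOT_PCI_DEVICE(). With the
 * wildcards below, pci_match_one_id() above accepts any device from the
 * (hypothetical) vendor 0x1234; a class_mask of 0 means the class is not
 * compared at all. The driver name is made up.
 *
 *	static const struct pci_device_id example_ids[] = {
 *		{
 *			.vendor = 0x1234,		// hypothetical vendor
 *			.device = PCI_ANY_ID,		// any device ID
 *			.subvendor = PCI_ANY_ID,
 *			.subdevice = PCI_ANY_ID,
 *		},
 *		{ }
 *	};
 *
 *	U_BOOT_PCI_DEVICE(example_drv, example_ids);
 */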
/**
 * pci_find_and_bind_driver() - Find and bind the right PCI driver
 *
 * This only looks at certain fields in the descriptor.
 *
 * @parent:	Parent bus
 * @find_id:	Specification of the driver to find
 * @bdf:	Bus/device/function address - see PCI_BDF()
 * @devp:	Returns a pointer to the device created
 * @return 0 if OK, -EPERM if the device is not needed before relocation and
 *	   therefore was not created, other -ve value on error
 */
static int pci_find_and_bind_driver(struct udevice *parent,
				    struct pci_device_id *find_id,
				    pci_dev_t bdf, struct udevice **devp)
{
	struct pci_driver_entry *start, *entry;
	const char *drv;
	int n_ents;
	int ret;
	char name[30], *str;
	bool bridge;

	*devp = NULL;

	debug("%s: Searching for driver: vendor=%x, device=%x\n", __func__,
	      find_id->vendor, find_id->device);
	start = ll_entry_start(struct pci_driver_entry, pci_driver_entry);
	n_ents = ll_entry_count(struct pci_driver_entry, pci_driver_entry);
	for (entry = start; entry != start + n_ents; entry++) {
		const struct pci_device_id *id;
		struct udevice *dev;
		const struct driver *drv;

		for (id = entry->match;
		     id->vendor || id->subvendor || id->class_mask;
		     id++) {
			if (!pci_match_one_id(id, find_id))
				continue;

			drv = entry->driver;

			/*
			 * In the pre-relocation phase, we only bind devices
			 * whose driver has the DM_FLAG_PRE_RELOC flag set, to
			 * save precious memory space, as on some platforms
			 * that space is pretty limited (e.g. when using Cache
			 * As RAM).
			 */
			if (!(gd->flags & GD_FLG_RELOC) &&
			    !(drv->flags & DM_FLAG_PRE_RELOC))
				return -EPERM;

			/*
			 * We could pass the descriptor to the driver as
			 * platdata (instead of NULL) and allow its bind()
			 * method to return -ENOENT if it doesn't support this
			 * device. That way we could continue the search to
			 * find another driver. For now this doesn't seem
			 * necessary, so just bind the first match.
			 */
			ret = device_bind(parent, drv, drv->name, NULL, -1,
					  &dev);
			if (ret)
				goto error;
			debug("%s: Match found: %s\n", __func__, drv->name);
			dev->driver_data = find_id->driver_data;
			*devp = dev;
			return 0;
		}
	}

	bridge = (find_id->class >> 8) == PCI_CLASS_BRIDGE_PCI;
	/*
	 * In the pre-relocation phase, we only bind bridge devices to save
	 * precious memory space, as on some platforms that space is pretty
	 * limited (e.g. when using Cache As RAM).
	 */
	if (!(gd->flags & GD_FLG_RELOC) && !bridge)
		return -EPERM;

	/* Bind a generic driver so that the device can be used */
	sprintf(name, "pci_%x:%x.%x", parent->seq, PCI_DEV(bdf),
		PCI_FUNC(bdf));
	str = strdup(name);
	if (!str)
		return -ENOMEM;
	drv = bridge ? "pci_bridge_drv" : "pci_generic_drv";

	ret = device_bind_driver(parent, drv, str, devp);
	if (ret) {
		debug("%s: Failed to bind generic driver: %d\n", __func__, ret);
		return ret;
	}
	debug("%s: No match found: bound generic driver instead\n", __func__);

	return 0;

error:
	debug("%s: No match found: error %d\n", __func__, ret);
	return ret;
}
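/*
 * Illustrative sketch (not part of this driver): how the bdf loop in
 * pci_bind_bus_devices() below walks a bus. With the PCI_BDF() layout used
 * by <pci.h> (bus in bits 23:16, device in 15:11, function in 10:8), adding
 * PCI_BDF(0, 0, 1) steps to the next function, and PCI_MASK_BUS() drops the
 * bus number to leave the devfn that is stored in the child platdata:
 *
 *	pci_dev_t bdf = PCI_BDF(1, 2, 3);	// bus 1, device 2, function 3
 *
 *	PCI_BUS(bdf);		// 1
 *	PCI_DEV(bdf);		// 2
 *	PCI_FUNC(bdf);		// 3
 *	PCI_MASK_BUS(bdf);	// devfn only, bus number cleared
 */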
int pci_bind_bus_devices(struct udevice *bus)
{
	ulong vendor, device;
	ulong header_type;
	pci_dev_t bdf, end;
	bool found_multi;
	int ret;

	found_multi = false;
	end = PCI_BDF(bus->seq, PCI_MAX_PCI_DEVICES - 1,
		      PCI_MAX_PCI_FUNCTIONS - 1);
	for (bdf = PCI_BDF(bus->seq, 0, 0); bdf < end;
	     bdf += PCI_BDF(0, 0, 1)) {
		struct pci_child_platdata *pplat;
		struct udevice *dev;
		ulong class;

		if (PCI_FUNC(bdf) && !found_multi)
			continue;
		/* Check only the first access, we don't expect problems */
		ret = pci_bus_read_config(bus, bdf, PCI_HEADER_TYPE,
					  &header_type, PCI_SIZE_8);
		if (ret)
			goto error;
		pci_bus_read_config(bus, bdf, PCI_VENDOR_ID, &vendor,
				    PCI_SIZE_16);
		if (vendor == 0xffff || vendor == 0x0000)
			continue;

		if (!PCI_FUNC(bdf))
			found_multi = header_type & 0x80;

		debug("%s: bus %d/%s: found device %x, function %d\n", __func__,
		      bus->seq, bus->name, PCI_DEV(bdf), PCI_FUNC(bdf));
		pci_bus_read_config(bus, bdf, PCI_DEVICE_ID, &device,
				    PCI_SIZE_16);
		pci_bus_read_config(bus, bdf, PCI_CLASS_REVISION, &class,
				    PCI_SIZE_32);
		class >>= 8;

		/* Find this device in the device tree */
		ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev);

		/* If nothing in the device tree, bind a device */
		if (ret == -ENODEV) {
			struct pci_device_id find_id;
			ulong val;

			memset(&find_id, '\0', sizeof(find_id));
			find_id.vendor = vendor;
			find_id.device = device;
			find_id.class = class;
			if ((header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL) {
				pci_bus_read_config(bus, bdf,
						    PCI_SUBSYSTEM_VENDOR_ID,
						    &val, PCI_SIZE_32);
				find_id.subvendor = val & 0xffff;
				find_id.subdevice = val >> 16;
			}
			ret = pci_find_and_bind_driver(bus, &find_id, bdf,
						       &dev);
		}
		if (ret == -EPERM)
			continue;
		else if (ret)
			return ret;

		/* Update the platform data */
		pplat = dev_get_parent_platdata(dev);
		pplat->devfn = PCI_MASK_BUS(bdf);
		pplat->vendor = vendor;
		pplat->device = device;
		pplat->class = class;
	}

	return 0;
error:
	printf("Cannot read bus configuration: %d\n", ret);

	return ret;
}

static int pci_uclass_post_bind(struct udevice *bus)
{
	/*
	 * If there is no pci device listed in the device tree,
	 * don't bother scanning the device tree.
	 */
	if (bus->of_offset == -1)
		return 0;

	/*
	 * Scan the device tree for devices. This does not probe the PCI bus,
	 * as this is not permitted while binding. It just finds devices
	 * mentioned in the device tree.
	 *
	 * Before relocation, only bind devices marked for pre-relocation
	 * use.
	 */
	return dm_scan_fdt_node(bus, gd->fdt_blob, bus->of_offset,
				gd->flags & GD_FLG_RELOC ? false : true);
}
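/*
 * Illustrative sketch (not part of this driver): the kind of device-tree
 * "ranges" entry that decode_regions() below parses. Each record is a 3-cell
 * PCI address (flags word plus a 64-bit PCI address), a parent CPU address
 * (assumed here to be one cell) and a 2-cell size; the node name and values
 * are made up.
 *
 *	pcie@10000000 {
 *		#address-cells = <3>;
 *		#size-cells = <2>;
 *		ranges = <0x02000000 0x0 0x40000000  0x40000000  0x0 0x10000000>,
 *			 <0x01000000 0x0 0x00000000  0x50000000  0x0 0x00010000>;
 *	};
 *
 * The first record has space code 2 in its flags word (32-bit memory, no
 * prefetch bit), so it becomes a PCI_REGION_MEM region mapping PCI address
 * 0x40000000 to CPU address 0x40000000 with size 0x10000000; the second
 * record (space code 1) becomes a PCI_REGION_IO region.
 */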
/*
 * decode_regions() - parse the controller's "ranges" property into
 * hose->regions, then append a final region covering board RAM.
 */
static int decode_regions(struct pci_controller *hose, const void *blob,
			  int parent_node, int node)
{
	int pci_addr_cells, addr_cells, size_cells;
	phys_addr_t base = 0, size;
	int cells_per_record;
	const u32 *prop;
	int len;
	int i;

	prop = fdt_getprop(blob, node, "ranges", &len);
	if (!prop)
		return -EINVAL;
	pci_addr_cells = fdt_address_cells(blob, node);
	addr_cells = fdt_address_cells(blob, parent_node);
	size_cells = fdt_size_cells(blob, node);

	/* PCI addresses are always 3-cells */
	len /= sizeof(u32);
	cells_per_record = pci_addr_cells + addr_cells + size_cells;
	hose->region_count = 0;
	debug("%s: len=%d, cells_per_record=%d\n", __func__, len,
	      cells_per_record);
	for (i = 0; i < MAX_PCI_REGIONS; i++, len -= cells_per_record) {
		u64 pci_addr, addr, size;
		int space_code;
		u32 flags;
		int type;
		int pos;
		int j;

		if (len < cells_per_record)
			break;
		flags = fdt32_to_cpu(prop[0]);
		space_code = (flags >> 24) & 3;
		pci_addr = fdtdec_get_number(prop + 1, 2);
		prop += pci_addr_cells;
		addr = fdtdec_get_number(prop, addr_cells);
		prop += addr_cells;
		size = fdtdec_get_number(prop, size_cells);
		prop += size_cells;
		debug("%s: region %d, pci_addr=%" PRIx64 ", addr=%" PRIx64
		      ", size=%" PRIx64 ", space_code=%d\n", __func__,
		      hose->region_count, pci_addr, addr, size, space_code);
		if (space_code & 2) {
			type = flags & (1U << 30) ? PCI_REGION_PREFETCH :
				PCI_REGION_MEM;
		} else if (space_code & 1) {
			type = PCI_REGION_IO;
		} else {
			continue;
		}

		/* Reuse an existing region of the same type if there is one */
		pos = -1;
		for (j = 0; j < hose->region_count; j++) {
			if (hose->regions[j].flags == type)
				pos = j;
		}
		if (pos == -1)
			pos = hose->region_count++;
		debug(" - type=%d, pos=%d\n", type, pos);
		pci_set_region(hose->regions + pos, pci_addr, addr, size, type);
	}

	/* Add a region for our local memory */
	size = gd->ram_size;
#ifdef CONFIG_SYS_SDRAM_BASE
	base = CONFIG_SYS_SDRAM_BASE;
#endif
	if (gd->pci_ram_top && gd->pci_ram_top < base + size)
		size = gd->pci_ram_top - base;
	pci_set_region(hose->regions + hose->region_count++, base, base,
		       size, PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);

	return 0;
}

static int pci_uclass_pre_probe(struct udevice *bus)
{
	struct pci_controller *hose;
	int ret;

	debug("%s, bus=%d/%s, parent=%s\n", __func__, bus->seq, bus->name,
	      bus->parent->name);
	hose = bus->uclass_priv;

	/* For bridges, use the top-level PCI controller */
	if (device_get_uclass_id(bus->parent) == UCLASS_ROOT) {
		hose->ctlr = bus;
		ret = decode_regions(hose, gd->fdt_blob, bus->parent->of_offset,
				     bus->of_offset);
		if (ret) {
			debug("%s: Cannot decode regions\n", __func__);
			return ret;
		}
	} else {
		struct pci_controller *parent_hose;

		parent_hose = dev_get_uclass_priv(bus->parent);
		hose->ctlr = parent_hose->bus;
	}
	hose->bus = bus;
	hose->first_busno = bus->seq;
	hose->last_busno = bus->seq;

	return 0;
}

static int pci_uclass_post_probe(struct udevice *bus)
{
	int ret;

	debug("%s: probing bus %d\n", __func__, bus->seq);
	ret = pci_bind_bus_devices(bus);
	if (ret)
		return ret;

#ifdef CONFIG_PCI_PNP
	ret = pci_auto_config_devices(bus);
	if (ret < 0)
		return ret;
#endif

#if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP)
	/*
	 * Per Intel FSP specification, we should call FSP notify API to
	 * inform FSP that PCI enumeration has been done so that FSP will
	 * do any necessary initialization as required by the chipset's
	 * BIOS Writer's Guide (BWG).
	 *
	 * Unfortunately we have to put this call here as with driver model,
	 * the enumeration is all done on a lazy basis as needed, so until
	 * something is touched on PCI it won't happen.
	 *
	 * Note we only call this 1) after U-Boot is relocated, and 2) the
	 * root bus has finished probing.
	 */
	if ((gd->flags & GD_FLG_RELOC) && (bus->seq == 0)) {
		ret = fsp_init_phase_pci();
		if (ret)
			return ret;
	}
#endif

	return 0;
}
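/*
 * Illustrative sketch (not part of this driver): a device-tree node as
 * matched by pci_uclass_child_post_bind() below. The first cell of "reg"
 * (phys.hi of the standard PCI bus binding) carries the device number in
 * bits 15:11 and the function in bits 10:8, which is exactly the 0xff00
 * mask applied to fill pplat->devfn. The node names and compatible string
 * are made up.
 *
 *	pci@10000000 {
 *		ethernet@3,0 {
 *			compatible = "example,pci-ethernet";
 *			reg = <0x00001800 0 0 0 0>;	// device 3, function 0
 *		};
 *	};
 */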
static int pci_uclass_child_post_bind(struct udevice *dev)
{
	struct pci_child_platdata *pplat;
	struct fdt_pci_addr addr;
	int ret;

	if (dev->of_offset == -1)
		return 0;

	/*
	 * We could read vendor, device, class if available. But for now we
	 * just check the address.
	 */
	pplat = dev_get_parent_platdata(dev);
	ret = fdtdec_get_pci_addr(gd->fdt_blob, dev->of_offset,
				  FDT_PCI_SPACE_CONFIG, "reg", &addr);

	if (ret) {
		if (ret != -ENOENT)
			return -EINVAL;
	} else {
		/* extract the devfn from fdt_pci_addr */
		pplat->devfn = addr.phys_hi & 0xff00;
	}

	return 0;
}

static int pci_bridge_read_config(struct udevice *bus, pci_dev_t bdf,
				  uint offset, ulong *valuep,
				  enum pci_size_t size)
{
	struct pci_controller *hose = bus->uclass_priv;

	return pci_bus_read_config(hose->ctlr, bdf, offset, valuep, size);
}

static int pci_bridge_write_config(struct udevice *bus, pci_dev_t bdf,
				   uint offset, ulong value,
				   enum pci_size_t size)
{
	struct pci_controller *hose = bus->uclass_priv;

	return pci_bus_write_config(hose->ctlr, bdf, offset, value, size);
}

static int skip_to_next_device(struct udevice *bus, struct udevice **devp)
{
	struct udevice *dev;
	int ret = 0;

	/*
	 * Scan through all the PCI controllers. On x86 there will only be one
	 * but that is not necessarily true on other hardware.
	 */
	do {
		device_find_first_child(bus, &dev);
		if (dev) {
			*devp = dev;
			return 0;
		}
		ret = uclass_next_device(&bus);
		if (ret)
			return ret;
	} while (bus);

	return 0;
}
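/*
 * Illustrative sketch (not part of this driver): walking every PCI device on
 * every controller with the iterators below, the same pattern that
 * dm_pci_find_class() uses:
 *
 *	struct udevice *dev;
 *
 *	for (pci_find_first_device(&dev);
 *	     dev;
 *	     pci_find_next_device(&dev)) {
 *		struct pci_child_platdata *pplat =
 *			dev_get_parent_platdata(dev);
 *
 *		debug("%04x:%04x %s\n", pplat->vendor, pplat->device,
 *		      dev->name);
 *	}
 */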
int pci_find_next_device(struct udevice **devp)
{
	struct udevice *child = *devp;
	struct udevice *bus = child->parent;
	int ret;

	/* First try all the siblings */
	*devp = NULL;
	while (child) {
		device_find_next_child(&child);
		if (child) {
			*devp = child;
			return 0;
		}
	}

	/* We ran out of siblings. Try the next bus */
	ret = uclass_next_device(&bus);
	if (ret)
		return ret;

	return bus ? skip_to_next_device(bus, devp) : 0;
}

int pci_find_first_device(struct udevice **devp)
{
	struct udevice *bus;
	int ret;

	*devp = NULL;
	ret = uclass_first_device(UCLASS_PCI, &bus);
	if (ret)
		return ret;

	return skip_to_next_device(bus, devp);
}

ulong pci_conv_32_to_size(ulong value, uint offset, enum pci_size_t size)
{
	switch (size) {
	case PCI_SIZE_8:
		return (value >> ((offset & 3) * 8)) & 0xff;
	case PCI_SIZE_16:
		return (value >> ((offset & 2) * 8)) & 0xffff;
	default:
		return value;
	}
}

ulong pci_conv_size_to_32(ulong old, ulong value, uint offset,
			  enum pci_size_t size)
{
	uint off_mask;
	uint val_mask, shift;
	ulong ldata, mask;

	switch (size) {
	case PCI_SIZE_8:
		off_mask = 3;
		val_mask = 0xff;
		break;
	case PCI_SIZE_16:
		off_mask = 2;
		val_mask = 0xffff;
		break;
	default:
		return value;
	}
	shift = (offset & off_mask) * 8;
	ldata = (value & val_mask) << shift;
	mask = val_mask << shift;
	value = (old & ~mask) | ldata;

	return value;
}
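/*
 * Illustrative sketch (not part of this driver): how a bus driver that can
 * only perform 32-bit config accesses might use pci_conv_32_to_size() and
 * pci_conv_size_to_32() above to emulate narrower ones. The values are just
 * a worked example:
 *
 *	ulong reg = 0x12345678;
 *
 *	pci_conv_32_to_size(reg, 2, PCI_SIZE_16);		// 0x1234
 *	pci_conv_size_to_32(reg, 0xabcd, 2, PCI_SIZE_16);	// 0xabcd5678
 */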
int pci_get_regions(struct udevice *dev, struct pci_region **iop,
		    struct pci_region **memp, struct pci_region **prefp)
{
	struct udevice *bus = pci_get_controller(dev);
	struct pci_controller *hose = dev_get_uclass_priv(bus);
	int i;

	*iop = NULL;
	*memp = NULL;
	*prefp = NULL;
	for (i = 0; i < hose->region_count; i++) {
		switch (hose->regions[i].flags) {
		case PCI_REGION_IO:
			if (!*iop || (*iop)->size < hose->regions[i].size)
				*iop = hose->regions + i;
			break;
		case PCI_REGION_MEM:
			if (!*memp || (*memp)->size < hose->regions[i].size)
				*memp = hose->regions + i;
			break;
		case (PCI_REGION_MEM | PCI_REGION_PREFETCH):
			if (!*prefp || (*prefp)->size < hose->regions[i].size)
				*prefp = hose->regions + i;
			break;
		}
	}

	return (*iop != NULL) + (*memp != NULL) + (*prefp != NULL);
}

u32 dm_pci_read_bar32(struct udevice *dev, int barnum)
{
	u32 addr;
	int bar;

	bar = PCI_BASE_ADDRESS_0 + barnum * 4;
	dm_pci_read_config32(dev, bar, &addr);
	if (addr & PCI_BASE_ADDRESS_SPACE_IO)
		return addr & PCI_BASE_ADDRESS_IO_MASK;
	else
		return addr & PCI_BASE_ADDRESS_MEM_MASK;
}

void dm_pci_write_bar32(struct udevice *dev, int barnum, u32 addr)
{
	int bar;

	bar = PCI_BASE_ADDRESS_0 + barnum * 4;
	dm_pci_write_config32(dev, bar, addr);
}

static int _dm_pci_bus_to_phys(struct udevice *ctlr,
			       pci_addr_t bus_addr, unsigned long flags,
			       unsigned long skip_mask, phys_addr_t *pa)
{
	struct pci_controller *hose = dev_get_uclass_priv(ctlr);
	struct pci_region *res;
	int i;

	for (i = 0; i < hose->region_count; i++) {
		res = &hose->regions[i];

		if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0)
			continue;

		if (res->flags & skip_mask)
			continue;

		if (bus_addr >= res->bus_start &&
		    (bus_addr - res->bus_start) < res->size) {
			*pa = (bus_addr - res->bus_start + res->phys_start);
			return 0;
		}
	}

	return 1;
}

phys_addr_t dm_pci_bus_to_phys(struct udevice *dev, pci_addr_t bus_addr,
			       unsigned long flags)
{
	phys_addr_t phys_addr = 0;
	struct udevice *ctlr;
	int ret;

	/* The root controller has the region information */
	ctlr = pci_get_controller(dev);

	/*
	 * if PCI_REGION_MEM is set we do a two pass search with preference
	 * on matches that don't have PCI_REGION_SYS_MEMORY set
	 */
	if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) {
		ret = _dm_pci_bus_to_phys(ctlr, bus_addr,
					  flags, PCI_REGION_SYS_MEMORY,
					  &phys_addr);
		if (!ret)
			return phys_addr;
	}

	ret = _dm_pci_bus_to_phys(ctlr, bus_addr, flags, 0, &phys_addr);

	if (ret)
		puts("pci_hose_bus_to_phys: invalid physical address\n");

	return phys_addr;
}

int _dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr,
			unsigned long flags, unsigned long skip_mask,
			pci_addr_t *ba)
{
	struct pci_region *res;
	struct udevice *ctlr;
	pci_addr_t bus_addr;
	int i;
	struct pci_controller *hose;

	/* The root controller has the region information */
	ctlr = pci_get_controller(dev);
	hose = dev_get_uclass_priv(ctlr);

	for (i = 0; i < hose->region_count; i++) {
		res = &hose->regions[i];

		if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0)
			continue;

		if (res->flags & skip_mask)
			continue;

		bus_addr = phys_addr - res->phys_start + res->bus_start;

		if (bus_addr >= res->bus_start &&
		    (bus_addr - res->bus_start) < res->size) {
			*ba = bus_addr;
			return 0;
		}
	}

	return 1;
}

pci_addr_t dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr,
			      unsigned long flags)
{
	pci_addr_t bus_addr = 0;
	int ret;

	/*
	 * if PCI_REGION_MEM is set we do a two pass search with preference
	 * on matches that don't have PCI_REGION_SYS_MEMORY set
	 */
	if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) {
		ret = _dm_pci_phys_to_bus(dev, phys_addr, flags,
					  PCI_REGION_SYS_MEMORY, &bus_addr);
		if (!ret)
			return bus_addr;
	}

	ret = _dm_pci_phys_to_bus(dev, phys_addr, flags, 0, &bus_addr);

	if (ret)
		puts("pci_hose_phys_to_bus: invalid physical address\n");

	return bus_addr;
}
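/*
 * Illustrative sketch (not part of this driver): mapping a device's first
 * memory BAR with dm_pci_map_bar() below and accessing a register through
 * the returned pointer. The register offset and value are made up.
 *
 *	void *regs;
 *
 *	regs = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
 *	if (regs)
 *		writel(0x1, regs + 0x10);	// hypothetical register
 */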
void *dm_pci_map_bar(struct udevice *dev, int bar, int flags)
{
	pci_addr_t pci_bus_addr;
	u32 bar_response;

	/* read BAR address */
	dm_pci_read_config32(dev, bar, &bar_response);
	pci_bus_addr = (pci_addr_t)(bar_response & ~0xf);

	/*
	 * Pass "0" as the length argument to pci_bus_to_virt. The arg
	 * isn't actually used on any platform because U-Boot assumes a
	 * static linear mapping. In the future, this could read the BAR size
	 * and pass that as the size if needed.
	 */
	return dm_pci_bus_to_virt(dev, pci_bus_addr, flags, 0, MAP_NOCACHE);
}

UCLASS_DRIVER(pci) = {
	.id		= UCLASS_PCI,
	.name		= "pci",
	.flags		= DM_UC_FLAG_SEQ_ALIAS,
	.post_bind	= pci_uclass_post_bind,
	.pre_probe	= pci_uclass_pre_probe,
	.post_probe	= pci_uclass_post_probe,
	.child_post_bind = pci_uclass_child_post_bind,
	.per_device_auto_alloc_size = sizeof(struct pci_controller),
	.per_child_platdata_auto_alloc_size =
			sizeof(struct pci_child_platdata),
};

static const struct dm_pci_ops pci_bridge_ops = {
	.read_config	= pci_bridge_read_config,
	.write_config	= pci_bridge_write_config,
};

static const struct udevice_id pci_bridge_ids[] = {
	{ .compatible = "pci-bridge" },
	{ }
};

U_BOOT_DRIVER(pci_bridge_drv) = {
	.name		= "pci_bridge_drv",
	.id		= UCLASS_PCI,
	.of_match	= pci_bridge_ids,
	.ops		= &pci_bridge_ops,
};

UCLASS_DRIVER(pci_generic) = {
	.id		= UCLASS_PCI_GENERIC,
	.name		= "pci_generic",
};

static const struct udevice_id pci_generic_ids[] = {
	{ .compatible = "pci-generic" },
	{ }
};

U_BOOT_DRIVER(pci_generic_drv) = {
	.name		= "pci_generic_drv",
	.id		= UCLASS_PCI_GENERIC,
	.of_match	= pci_generic_ids,
};

void pci_init(void)
{
	struct udevice *bus;

	/*
	 * Enumerate all known controller devices. Enumeration has the side-
	 * effect of probing them, so PCIe devices will be enumerated too.
	 */
	for (uclass_first_device(UCLASS_PCI, &bus);
	     bus;
	     uclass_next_device(&bus)) {
		;
	}
}