1 /* 2 * Copyright (c) 2014 Google, Inc 3 * Written by Simon Glass <sjg@chromium.org> 4 * 5 * SPDX-License-Identifier: GPL-2.0+ 6 */ 7 8 #include <common.h> 9 #include <dm.h> 10 #include <errno.h> 11 #include <fdtdec.h> 12 #include <inttypes.h> 13 #include <pci.h> 14 #include <asm/io.h> 15 #include <dm/lists.h> 16 #include <dm/root.h> 17 #include <dm/device-internal.h> 18 #if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP) 19 #include <asm/fsp/fsp_support.h> 20 #endif 21 #include "pci_internal.h" 22 23 DECLARE_GLOBAL_DATA_PTR; 24 25 static int pci_get_bus(int busnum, struct udevice **busp) 26 { 27 int ret; 28 29 ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp); 30 31 /* Since buses may not be numbered yet try a little harder with bus 0 */ 32 if (ret == -ENODEV) { 33 ret = uclass_first_device(UCLASS_PCI, busp); 34 if (ret) 35 return ret; 36 else if (!*busp) 37 return -ENODEV; 38 ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp); 39 } 40 41 return ret; 42 } 43 44 struct pci_controller *pci_bus_to_hose(int busnum) 45 { 46 struct udevice *bus; 47 int ret; 48 49 ret = pci_get_bus(busnum, &bus); 50 if (ret) { 51 debug("%s: Cannot get bus %d: ret=%d\n", __func__, busnum, ret); 52 return NULL; 53 } 54 55 return dev_get_uclass_priv(bus); 56 } 57 58 struct udevice *pci_get_controller(struct udevice *dev) 59 { 60 while (device_is_on_pci_bus(dev)) 61 dev = dev->parent; 62 63 return dev; 64 } 65 66 pci_dev_t dm_pci_get_bdf(struct udevice *dev) 67 { 68 struct pci_child_platdata *pplat = dev_get_parent_platdata(dev); 69 struct udevice *bus = dev->parent; 70 71 return PCI_ADD_BUS(bus->seq, pplat->devfn); 72 } 73 74 /** 75 * pci_get_bus_max() - returns the bus number of the last active bus 76 * 77 * @return last bus number, or -1 if no active buses 78 */ 79 static int pci_get_bus_max(void) 80 { 81 struct udevice *bus; 82 struct uclass *uc; 83 int ret = -1; 84 85 ret = uclass_get(UCLASS_PCI, &uc); 86 uclass_foreach_dev(bus, uc) { 87 if (bus->seq > ret) 88 ret = bus->seq; 89 } 90 91 debug("%s: ret=%d\n", __func__, ret); 92 93 return ret; 94 } 95 96 int pci_last_busno(void) 97 { 98 return pci_get_bus_max(); 99 } 100 101 int pci_get_ff(enum pci_size_t size) 102 { 103 switch (size) { 104 case PCI_SIZE_8: 105 return 0xff; 106 case PCI_SIZE_16: 107 return 0xffff; 108 default: 109 return 0xffffffff; 110 } 111 } 112 113 int pci_bus_find_devfn(struct udevice *bus, pci_dev_t find_devfn, 114 struct udevice **devp) 115 { 116 struct udevice *dev; 117 118 for (device_find_first_child(bus, &dev); 119 dev; 120 device_find_next_child(&dev)) { 121 struct pci_child_platdata *pplat; 122 123 pplat = dev_get_parent_platdata(dev); 124 if (pplat && pplat->devfn == find_devfn) { 125 *devp = dev; 126 return 0; 127 } 128 } 129 130 return -ENODEV; 131 } 132 133 int dm_pci_bus_find_bdf(pci_dev_t bdf, struct udevice **devp) 134 { 135 struct udevice *bus; 136 int ret; 137 138 ret = pci_get_bus(PCI_BUS(bdf), &bus); 139 if (ret) 140 return ret; 141 return pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), devp); 142 } 143 144 static int pci_device_matches_ids(struct udevice *dev, 145 struct pci_device_id *ids) 146 { 147 struct pci_child_platdata *pplat; 148 int i; 149 150 pplat = dev_get_parent_platdata(dev); 151 if (!pplat) 152 return -EINVAL; 153 for (i = 0; ids[i].vendor != 0; i++) { 154 if (pplat->vendor == ids[i].vendor && 155 pplat->device == ids[i].device) 156 return i; 157 } 158 159 return -EINVAL; 160 } 161 162 int pci_bus_find_devices(struct udevice *bus, struct pci_device_id *ids, 163 int *indexp, struct udevice **devp) 164 { 165 struct udevice *dev; 166 167 /* Scan all devices on this bus */ 168 for (device_find_first_child(bus, &dev); 169 dev; 170 device_find_next_child(&dev)) { 171 if (pci_device_matches_ids(dev, ids) >= 0) { 172 if ((*indexp)-- <= 0) { 173 *devp = dev; 174 return 0; 175 } 176 } 177 } 178 179 return -ENODEV; 180 } 181 182 int pci_find_device_id(struct pci_device_id *ids, int index, 183 struct udevice **devp) 184 { 185 struct udevice *bus; 186 187 /* Scan all known buses */ 188 for (uclass_first_device(UCLASS_PCI, &bus); 189 bus; 190 uclass_next_device(&bus)) { 191 if (!pci_bus_find_devices(bus, ids, &index, devp)) 192 return 0; 193 } 194 *devp = NULL; 195 196 return -ENODEV; 197 } 198 199 static int dm_pci_bus_find_device(struct udevice *bus, unsigned int vendor, 200 unsigned int device, int *indexp, 201 struct udevice **devp) 202 { 203 struct pci_child_platdata *pplat; 204 struct udevice *dev; 205 206 for (device_find_first_child(bus, &dev); 207 dev; 208 device_find_next_child(&dev)) { 209 pplat = dev_get_parent_platdata(dev); 210 if (pplat->vendor == vendor && pplat->device == device) { 211 if (!(*indexp)--) { 212 *devp = dev; 213 return 0; 214 } 215 } 216 } 217 218 return -ENODEV; 219 } 220 221 int dm_pci_find_device(unsigned int vendor, unsigned int device, int index, 222 struct udevice **devp) 223 { 224 struct udevice *bus; 225 226 /* Scan all known buses */ 227 for (uclass_first_device(UCLASS_PCI, &bus); 228 bus; 229 uclass_next_device(&bus)) { 230 if (!dm_pci_bus_find_device(bus, vendor, device, &index, devp)) 231 return device_probe(*devp); 232 } 233 *devp = NULL; 234 235 return -ENODEV; 236 } 237 238 int dm_pci_find_class(uint find_class, int index, struct udevice **devp) 239 { 240 struct udevice *dev; 241 242 /* Scan all known buses */ 243 for (pci_find_first_device(&dev); 244 dev; 245 pci_find_next_device(&dev)) { 246 struct pci_child_platdata *pplat = dev_get_parent_platdata(dev); 247 248 if (pplat->class == find_class && !index--) { 249 *devp = dev; 250 return device_probe(*devp); 251 } 252 } 253 *devp = NULL; 254 255 return -ENODEV; 256 } 257 258 int pci_bus_write_config(struct udevice *bus, pci_dev_t bdf, int offset, 259 unsigned long value, enum pci_size_t size) 260 { 261 struct dm_pci_ops *ops; 262 263 ops = pci_get_ops(bus); 264 if (!ops->write_config) 265 return -ENOSYS; 266 return ops->write_config(bus, bdf, offset, value, size); 267 } 268 269 int pci_write_config(pci_dev_t bdf, int offset, unsigned long value, 270 enum pci_size_t size) 271 { 272 struct udevice *bus; 273 int ret; 274 275 ret = pci_get_bus(PCI_BUS(bdf), &bus); 276 if (ret) 277 return ret; 278 279 return pci_bus_write_config(bus, bdf, offset, value, size); 280 } 281 282 int dm_pci_write_config(struct udevice *dev, int offset, unsigned long value, 283 enum pci_size_t size) 284 { 285 struct udevice *bus; 286 287 for (bus = dev; device_is_on_pci_bus(bus);) 288 bus = bus->parent; 289 return pci_bus_write_config(bus, dm_pci_get_bdf(dev), offset, value, 290 size); 291 } 292 293 294 int pci_write_config32(pci_dev_t bdf, int offset, u32 value) 295 { 296 return pci_write_config(bdf, offset, value, PCI_SIZE_32); 297 } 298 299 int pci_write_config16(pci_dev_t bdf, int offset, u16 value) 300 { 301 return pci_write_config(bdf, offset, value, PCI_SIZE_16); 302 } 303 304 int pci_write_config8(pci_dev_t bdf, int offset, u8 value) 305 { 306 return pci_write_config(bdf, offset, value, PCI_SIZE_8); 307 } 308 309 int dm_pci_write_config8(struct udevice *dev, int offset, u8 value) 310 { 311 return dm_pci_write_config(dev, offset, value, PCI_SIZE_8); 312 } 313 314 int dm_pci_write_config16(struct udevice *dev, int offset, u16 value) 315 { 316 return dm_pci_write_config(dev, offset, value, PCI_SIZE_16); 317 } 318 319 int dm_pci_write_config32(struct udevice *dev, int offset, u32 value) 320 { 321 return dm_pci_write_config(dev, offset, value, PCI_SIZE_32); 322 } 323 324 int pci_bus_read_config(struct udevice *bus, pci_dev_t bdf, int offset, 325 unsigned long *valuep, enum pci_size_t size) 326 { 327 struct dm_pci_ops *ops; 328 329 ops = pci_get_ops(bus); 330 if (!ops->read_config) 331 return -ENOSYS; 332 return ops->read_config(bus, bdf, offset, valuep, size); 333 } 334 335 int pci_read_config(pci_dev_t bdf, int offset, unsigned long *valuep, 336 enum pci_size_t size) 337 { 338 struct udevice *bus; 339 int ret; 340 341 ret = pci_get_bus(PCI_BUS(bdf), &bus); 342 if (ret) 343 return ret; 344 345 return pci_bus_read_config(bus, bdf, offset, valuep, size); 346 } 347 348 int dm_pci_read_config(struct udevice *dev, int offset, unsigned long *valuep, 349 enum pci_size_t size) 350 { 351 struct udevice *bus; 352 353 for (bus = dev; device_is_on_pci_bus(bus);) 354 bus = bus->parent; 355 return pci_bus_read_config(bus, dm_pci_get_bdf(dev), offset, valuep, 356 size); 357 } 358 359 int pci_read_config32(pci_dev_t bdf, int offset, u32 *valuep) 360 { 361 unsigned long value; 362 int ret; 363 364 ret = pci_read_config(bdf, offset, &value, PCI_SIZE_32); 365 if (ret) 366 return ret; 367 *valuep = value; 368 369 return 0; 370 } 371 372 int pci_read_config16(pci_dev_t bdf, int offset, u16 *valuep) 373 { 374 unsigned long value; 375 int ret; 376 377 ret = pci_read_config(bdf, offset, &value, PCI_SIZE_16); 378 if (ret) 379 return ret; 380 *valuep = value; 381 382 return 0; 383 } 384 385 int pci_read_config8(pci_dev_t bdf, int offset, u8 *valuep) 386 { 387 unsigned long value; 388 int ret; 389 390 ret = pci_read_config(bdf, offset, &value, PCI_SIZE_8); 391 if (ret) 392 return ret; 393 *valuep = value; 394 395 return 0; 396 } 397 398 int dm_pci_read_config8(struct udevice *dev, int offset, u8 *valuep) 399 { 400 unsigned long value; 401 int ret; 402 403 ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_8); 404 if (ret) 405 return ret; 406 *valuep = value; 407 408 return 0; 409 } 410 411 int dm_pci_read_config16(struct udevice *dev, int offset, u16 *valuep) 412 { 413 unsigned long value; 414 int ret; 415 416 ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_16); 417 if (ret) 418 return ret; 419 *valuep = value; 420 421 return 0; 422 } 423 424 int dm_pci_read_config32(struct udevice *dev, int offset, u32 *valuep) 425 { 426 unsigned long value; 427 int ret; 428 429 ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_32); 430 if (ret) 431 return ret; 432 *valuep = value; 433 434 return 0; 435 } 436 437 static void set_vga_bridge_bits(struct udevice *dev) 438 { 439 struct udevice *parent = dev->parent; 440 u16 bc; 441 442 while (parent->seq != 0) { 443 dm_pci_read_config16(parent, PCI_BRIDGE_CONTROL, &bc); 444 bc |= PCI_BRIDGE_CTL_VGA; 445 dm_pci_write_config16(parent, PCI_BRIDGE_CONTROL, bc); 446 parent = parent->parent; 447 } 448 } 449 450 int pci_auto_config_devices(struct udevice *bus) 451 { 452 struct pci_controller *hose = bus->uclass_priv; 453 struct pci_child_platdata *pplat; 454 unsigned int sub_bus; 455 struct udevice *dev; 456 int ret; 457 458 sub_bus = bus->seq; 459 debug("%s: start\n", __func__); 460 pciauto_config_init(hose); 461 for (ret = device_find_first_child(bus, &dev); 462 !ret && dev; 463 ret = device_find_next_child(&dev)) { 464 unsigned int max_bus; 465 int ret; 466 467 debug("%s: device %s\n", __func__, dev->name); 468 ret = dm_pciauto_config_device(dev); 469 if (ret < 0) 470 return ret; 471 max_bus = ret; 472 sub_bus = max(sub_bus, max_bus); 473 474 pplat = dev_get_parent_platdata(dev); 475 if (pplat->class == (PCI_CLASS_DISPLAY_VGA << 8)) 476 set_vga_bridge_bits(dev); 477 } 478 debug("%s: done\n", __func__); 479 480 return sub_bus; 481 } 482 483 int dm_pci_hose_probe_bus(struct udevice *bus) 484 { 485 int sub_bus; 486 int ret; 487 488 debug("%s\n", __func__); 489 490 sub_bus = pci_get_bus_max() + 1; 491 debug("%s: bus = %d/%s\n", __func__, sub_bus, bus->name); 492 dm_pciauto_prescan_setup_bridge(bus, sub_bus); 493 494 ret = device_probe(bus); 495 if (ret) { 496 debug("%s: Cannot probe bus %s: %d\n", __func__, bus->name, 497 ret); 498 return ret; 499 } 500 if (sub_bus != bus->seq) { 501 printf("%s: Internal error, bus '%s' got seq %d, expected %d\n", 502 __func__, bus->name, bus->seq, sub_bus); 503 return -EPIPE; 504 } 505 sub_bus = pci_get_bus_max(); 506 dm_pciauto_postscan_setup_bridge(bus, sub_bus); 507 508 return sub_bus; 509 } 510 511 /** 512 * pci_match_one_device - Tell if a PCI device structure has a matching 513 * PCI device id structure 514 * @id: single PCI device id structure to match 515 * @dev: the PCI device structure to match against 516 * 517 * Returns the matching pci_device_id structure or %NULL if there is no match. 518 */ 519 static bool pci_match_one_id(const struct pci_device_id *id, 520 const struct pci_device_id *find) 521 { 522 if ((id->vendor == PCI_ANY_ID || id->vendor == find->vendor) && 523 (id->device == PCI_ANY_ID || id->device == find->device) && 524 (id->subvendor == PCI_ANY_ID || id->subvendor == find->subvendor) && 525 (id->subdevice == PCI_ANY_ID || id->subdevice == find->subdevice) && 526 !((id->class ^ find->class) & id->class_mask)) 527 return true; 528 529 return false; 530 } 531 532 /** 533 * pci_find_and_bind_driver() - Find and bind the right PCI driver 534 * 535 * This only looks at certain fields in the descriptor. 536 * 537 * @parent: Parent bus 538 * @find_id: Specification of the driver to find 539 * @bdf: Bus/device/function addreess - see PCI_BDF() 540 * @devp: Returns a pointer to the device created 541 * @return 0 if OK, -EPERM if the device is not needed before relocation and 542 * therefore was not created, other -ve value on error 543 */ 544 static int pci_find_and_bind_driver(struct udevice *parent, 545 struct pci_device_id *find_id, 546 pci_dev_t bdf, struct udevice **devp) 547 { 548 struct pci_driver_entry *start, *entry; 549 const char *drv; 550 int n_ents; 551 int ret; 552 char name[30], *str; 553 bool bridge; 554 555 *devp = NULL; 556 557 debug("%s: Searching for driver: vendor=%x, device=%x\n", __func__, 558 find_id->vendor, find_id->device); 559 start = ll_entry_start(struct pci_driver_entry, pci_driver_entry); 560 n_ents = ll_entry_count(struct pci_driver_entry, pci_driver_entry); 561 for (entry = start; entry != start + n_ents; entry++) { 562 const struct pci_device_id *id; 563 struct udevice *dev; 564 const struct driver *drv; 565 566 for (id = entry->match; 567 id->vendor || id->subvendor || id->class_mask; 568 id++) { 569 if (!pci_match_one_id(id, find_id)) 570 continue; 571 572 drv = entry->driver; 573 574 /* 575 * In the pre-relocation phase, we only bind devices 576 * whose driver has the DM_FLAG_PRE_RELOC set, to save 577 * precious memory space as on some platforms as that 578 * space is pretty limited (ie: using Cache As RAM). 579 */ 580 if (!(gd->flags & GD_FLG_RELOC) && 581 !(drv->flags & DM_FLAG_PRE_RELOC)) 582 return -EPERM; 583 584 /* 585 * We could pass the descriptor to the driver as 586 * platdata (instead of NULL) and allow its bind() 587 * method to return -ENOENT if it doesn't support this 588 * device. That way we could continue the search to 589 * find another driver. For now this doesn't seem 590 * necesssary, so just bind the first match. 591 */ 592 ret = device_bind(parent, drv, drv->name, NULL, -1, 593 &dev); 594 if (ret) 595 goto error; 596 debug("%s: Match found: %s\n", __func__, drv->name); 597 dev->driver_data = find_id->driver_data; 598 *devp = dev; 599 return 0; 600 } 601 } 602 603 bridge = (find_id->class >> 8) == PCI_CLASS_BRIDGE_PCI; 604 /* 605 * In the pre-relocation phase, we only bind bridge devices to save 606 * precious memory space as on some platforms as that space is pretty 607 * limited (ie: using Cache As RAM). 608 */ 609 if (!(gd->flags & GD_FLG_RELOC) && !bridge) 610 return -EPERM; 611 612 /* Bind a generic driver so that the device can be used */ 613 sprintf(name, "pci_%x:%x.%x", parent->seq, PCI_DEV(bdf), 614 PCI_FUNC(bdf)); 615 str = strdup(name); 616 if (!str) 617 return -ENOMEM; 618 drv = bridge ? "pci_bridge_drv" : "pci_generic_drv"; 619 620 ret = device_bind_driver(parent, drv, str, devp); 621 if (ret) { 622 debug("%s: Failed to bind generic driver: %d\n", __func__, ret); 623 return ret; 624 } 625 debug("%s: No match found: bound generic driver instead\n", __func__); 626 627 return 0; 628 629 error: 630 debug("%s: No match found: error %d\n", __func__, ret); 631 return ret; 632 } 633 634 int pci_bind_bus_devices(struct udevice *bus) 635 { 636 ulong vendor, device; 637 ulong header_type; 638 pci_dev_t bdf, end; 639 bool found_multi; 640 int ret; 641 642 found_multi = false; 643 end = PCI_BDF(bus->seq, PCI_MAX_PCI_DEVICES - 1, 644 PCI_MAX_PCI_FUNCTIONS - 1); 645 for (bdf = PCI_BDF(bus->seq, 0, 0); bdf < end; 646 bdf += PCI_BDF(0, 0, 1)) { 647 struct pci_child_platdata *pplat; 648 struct udevice *dev; 649 ulong class; 650 651 if (PCI_FUNC(bdf) && !found_multi) 652 continue; 653 /* Check only the first access, we don't expect problems */ 654 ret = pci_bus_read_config(bus, bdf, PCI_HEADER_TYPE, 655 &header_type, PCI_SIZE_8); 656 if (ret) 657 goto error; 658 pci_bus_read_config(bus, bdf, PCI_VENDOR_ID, &vendor, 659 PCI_SIZE_16); 660 if (vendor == 0xffff || vendor == 0x0000) 661 continue; 662 663 if (!PCI_FUNC(bdf)) 664 found_multi = header_type & 0x80; 665 666 debug("%s: bus %d/%s: found device %x, function %d\n", __func__, 667 bus->seq, bus->name, PCI_DEV(bdf), PCI_FUNC(bdf)); 668 pci_bus_read_config(bus, bdf, PCI_DEVICE_ID, &device, 669 PCI_SIZE_16); 670 pci_bus_read_config(bus, bdf, PCI_CLASS_REVISION, &class, 671 PCI_SIZE_32); 672 class >>= 8; 673 674 /* Find this device in the device tree */ 675 ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev); 676 677 /* If nothing in the device tree, bind a device */ 678 if (ret == -ENODEV) { 679 struct pci_device_id find_id; 680 ulong val; 681 682 memset(&find_id, '\0', sizeof(find_id)); 683 find_id.vendor = vendor; 684 find_id.device = device; 685 find_id.class = class; 686 if ((header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL) { 687 pci_bus_read_config(bus, bdf, 688 PCI_SUBSYSTEM_VENDOR_ID, 689 &val, PCI_SIZE_32); 690 find_id.subvendor = val & 0xffff; 691 find_id.subdevice = val >> 16; 692 } 693 ret = pci_find_and_bind_driver(bus, &find_id, bdf, 694 &dev); 695 } 696 if (ret == -EPERM) 697 continue; 698 else if (ret) 699 return ret; 700 701 /* Update the platform data */ 702 pplat = dev_get_parent_platdata(dev); 703 pplat->devfn = PCI_MASK_BUS(bdf); 704 pplat->vendor = vendor; 705 pplat->device = device; 706 pplat->class = class; 707 } 708 709 return 0; 710 error: 711 printf("Cannot read bus configuration: %d\n", ret); 712 713 return ret; 714 } 715 716 static int pci_uclass_post_bind(struct udevice *bus) 717 { 718 /* 719 * If there is no pci device listed in the device tree, 720 * don't bother scanning the device tree. 721 */ 722 if (bus->of_offset == -1) 723 return 0; 724 725 /* 726 * Scan the device tree for devices. This does not probe the PCI bus, 727 * as this is not permitted while binding. It just finds devices 728 * mentioned in the device tree. 729 * 730 * Before relocation, only bind devices marked for pre-relocation 731 * use. 732 */ 733 return dm_scan_fdt_node(bus, gd->fdt_blob, bus->of_offset, 734 gd->flags & GD_FLG_RELOC ? false : true); 735 } 736 737 static int decode_regions(struct pci_controller *hose, const void *blob, 738 int parent_node, int node) 739 { 740 int pci_addr_cells, addr_cells, size_cells; 741 phys_addr_t base = 0, size; 742 int cells_per_record; 743 const u32 *prop; 744 int len; 745 int i; 746 747 prop = fdt_getprop(blob, node, "ranges", &len); 748 if (!prop) 749 return -EINVAL; 750 pci_addr_cells = fdt_address_cells(blob, node); 751 addr_cells = fdt_address_cells(blob, parent_node); 752 size_cells = fdt_size_cells(blob, node); 753 754 /* PCI addresses are always 3-cells */ 755 len /= sizeof(u32); 756 cells_per_record = pci_addr_cells + addr_cells + size_cells; 757 hose->region_count = 0; 758 debug("%s: len=%d, cells_per_record=%d\n", __func__, len, 759 cells_per_record); 760 for (i = 0; i < MAX_PCI_REGIONS; i++, len -= cells_per_record) { 761 u64 pci_addr, addr, size; 762 int space_code; 763 u32 flags; 764 int type; 765 int pos; 766 767 if (len < cells_per_record) 768 break; 769 flags = fdt32_to_cpu(prop[0]); 770 space_code = (flags >> 24) & 3; 771 pci_addr = fdtdec_get_number(prop + 1, 2); 772 prop += pci_addr_cells; 773 addr = fdtdec_get_number(prop, addr_cells); 774 prop += addr_cells; 775 size = fdtdec_get_number(prop, size_cells); 776 prop += size_cells; 777 debug("%s: region %d, pci_addr=%" PRIx64 ", addr=%" PRIx64 778 ", size=%" PRIx64 ", space_code=%d\n", __func__, 779 hose->region_count, pci_addr, addr, size, space_code); 780 if (space_code & 2) { 781 type = flags & (1U << 30) ? PCI_REGION_PREFETCH : 782 PCI_REGION_MEM; 783 } else if (space_code & 1) { 784 type = PCI_REGION_IO; 785 } else { 786 continue; 787 } 788 pos = -1; 789 for (i = 0; i < hose->region_count; i++) { 790 if (hose->regions[i].flags == type) 791 pos = i; 792 } 793 if (pos == -1) 794 pos = hose->region_count++; 795 debug(" - type=%d, pos=%d\n", type, pos); 796 pci_set_region(hose->regions + pos, pci_addr, addr, size, type); 797 } 798 799 /* Add a region for our local memory */ 800 size = gd->ram_size; 801 #ifdef CONFIG_SYS_SDRAM_BASE 802 base = CONFIG_SYS_SDRAM_BASE; 803 #endif 804 if (gd->pci_ram_top && gd->pci_ram_top < base + size) 805 size = gd->pci_ram_top - base; 806 pci_set_region(hose->regions + hose->region_count++, base, base, 807 size, PCI_REGION_MEM | PCI_REGION_SYS_MEMORY); 808 809 return 0; 810 } 811 812 static int pci_uclass_pre_probe(struct udevice *bus) 813 { 814 struct pci_controller *hose; 815 int ret; 816 817 debug("%s, bus=%d/%s, parent=%s\n", __func__, bus->seq, bus->name, 818 bus->parent->name); 819 hose = bus->uclass_priv; 820 821 /* For bridges, use the top-level PCI controller */ 822 if (device_get_uclass_id(bus->parent) == UCLASS_ROOT) { 823 hose->ctlr = bus; 824 ret = decode_regions(hose, gd->fdt_blob, bus->parent->of_offset, 825 bus->of_offset); 826 if (ret) { 827 debug("%s: Cannot decode regions\n", __func__); 828 return ret; 829 } 830 } else { 831 struct pci_controller *parent_hose; 832 833 parent_hose = dev_get_uclass_priv(bus->parent); 834 hose->ctlr = parent_hose->bus; 835 } 836 hose->bus = bus; 837 hose->first_busno = bus->seq; 838 hose->last_busno = bus->seq; 839 840 return 0; 841 } 842 843 static int pci_uclass_post_probe(struct udevice *bus) 844 { 845 int ret; 846 847 debug("%s: probing bus %d\n", __func__, bus->seq); 848 ret = pci_bind_bus_devices(bus); 849 if (ret) 850 return ret; 851 852 #ifdef CONFIG_PCI_PNP 853 ret = pci_auto_config_devices(bus); 854 if (ret < 0) 855 return ret; 856 #endif 857 858 #if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP) 859 /* 860 * Per Intel FSP specification, we should call FSP notify API to 861 * inform FSP that PCI enumeration has been done so that FSP will 862 * do any necessary initialization as required by the chipset's 863 * BIOS Writer's Guide (BWG). 864 * 865 * Unfortunately we have to put this call here as with driver model, 866 * the enumeration is all done on a lazy basis as needed, so until 867 * something is touched on PCI it won't happen. 868 * 869 * Note we only call this 1) after U-Boot is relocated, and 2) 870 * root bus has finished probing. 871 */ 872 if ((gd->flags & GD_FLG_RELOC) && (bus->seq == 0)) { 873 ret = fsp_init_phase_pci(); 874 if (ret) 875 return ret; 876 } 877 #endif 878 879 return 0; 880 } 881 882 static int pci_uclass_child_post_bind(struct udevice *dev) 883 { 884 struct pci_child_platdata *pplat; 885 struct fdt_pci_addr addr; 886 int ret; 887 888 if (dev->of_offset == -1) 889 return 0; 890 891 /* 892 * We could read vendor, device, class if available. But for now we 893 * just check the address. 894 */ 895 pplat = dev_get_parent_platdata(dev); 896 ret = fdtdec_get_pci_addr(gd->fdt_blob, dev->of_offset, 897 FDT_PCI_SPACE_CONFIG, "reg", &addr); 898 899 if (ret) { 900 if (ret != -ENOENT) 901 return -EINVAL; 902 } else { 903 /* extract the devfn from fdt_pci_addr */ 904 pplat->devfn = addr.phys_hi & 0xff00; 905 } 906 907 return 0; 908 } 909 910 static int pci_bridge_read_config(struct udevice *bus, pci_dev_t bdf, 911 uint offset, ulong *valuep, 912 enum pci_size_t size) 913 { 914 struct pci_controller *hose = bus->uclass_priv; 915 916 return pci_bus_read_config(hose->ctlr, bdf, offset, valuep, size); 917 } 918 919 static int pci_bridge_write_config(struct udevice *bus, pci_dev_t bdf, 920 uint offset, ulong value, 921 enum pci_size_t size) 922 { 923 struct pci_controller *hose = bus->uclass_priv; 924 925 return pci_bus_write_config(hose->ctlr, bdf, offset, value, size); 926 } 927 928 static int skip_to_next_device(struct udevice *bus, struct udevice **devp) 929 { 930 struct udevice *dev; 931 int ret = 0; 932 933 /* 934 * Scan through all the PCI controllers. On x86 there will only be one 935 * but that is not necessarily true on other hardware. 936 */ 937 do { 938 device_find_first_child(bus, &dev); 939 if (dev) { 940 *devp = dev; 941 return 0; 942 } 943 ret = uclass_next_device(&bus); 944 if (ret) 945 return ret; 946 } while (bus); 947 948 return 0; 949 } 950 951 int pci_find_next_device(struct udevice **devp) 952 { 953 struct udevice *child = *devp; 954 struct udevice *bus = child->parent; 955 int ret; 956 957 /* First try all the siblings */ 958 *devp = NULL; 959 while (child) { 960 device_find_next_child(&child); 961 if (child) { 962 *devp = child; 963 return 0; 964 } 965 } 966 967 /* We ran out of siblings. Try the next bus */ 968 ret = uclass_next_device(&bus); 969 if (ret) 970 return ret; 971 972 return bus ? skip_to_next_device(bus, devp) : 0; 973 } 974 975 int pci_find_first_device(struct udevice **devp) 976 { 977 struct udevice *bus; 978 int ret; 979 980 *devp = NULL; 981 ret = uclass_first_device(UCLASS_PCI, &bus); 982 if (ret) 983 return ret; 984 985 return skip_to_next_device(bus, devp); 986 } 987 988 ulong pci_conv_32_to_size(ulong value, uint offset, enum pci_size_t size) 989 { 990 switch (size) { 991 case PCI_SIZE_8: 992 return (value >> ((offset & 3) * 8)) & 0xff; 993 case PCI_SIZE_16: 994 return (value >> ((offset & 2) * 8)) & 0xffff; 995 default: 996 return value; 997 } 998 } 999 1000 ulong pci_conv_size_to_32(ulong old, ulong value, uint offset, 1001 enum pci_size_t size) 1002 { 1003 uint off_mask; 1004 uint val_mask, shift; 1005 ulong ldata, mask; 1006 1007 switch (size) { 1008 case PCI_SIZE_8: 1009 off_mask = 3; 1010 val_mask = 0xff; 1011 break; 1012 case PCI_SIZE_16: 1013 off_mask = 2; 1014 val_mask = 0xffff; 1015 break; 1016 default: 1017 return value; 1018 } 1019 shift = (offset & off_mask) * 8; 1020 ldata = (value & val_mask) << shift; 1021 mask = val_mask << shift; 1022 value = (old & ~mask) | ldata; 1023 1024 return value; 1025 } 1026 1027 int pci_get_regions(struct udevice *dev, struct pci_region **iop, 1028 struct pci_region **memp, struct pci_region **prefp) 1029 { 1030 struct udevice *bus = pci_get_controller(dev); 1031 struct pci_controller *hose = dev_get_uclass_priv(bus); 1032 int i; 1033 1034 *iop = NULL; 1035 *memp = NULL; 1036 *prefp = NULL; 1037 for (i = 0; i < hose->region_count; i++) { 1038 switch (hose->regions[i].flags) { 1039 case PCI_REGION_IO: 1040 if (!*iop || (*iop)->size < hose->regions[i].size) 1041 *iop = hose->regions + i; 1042 break; 1043 case PCI_REGION_MEM: 1044 if (!*memp || (*memp)->size < hose->regions[i].size) 1045 *memp = hose->regions + i; 1046 break; 1047 case (PCI_REGION_MEM | PCI_REGION_PREFETCH): 1048 if (!*prefp || (*prefp)->size < hose->regions[i].size) 1049 *prefp = hose->regions + i; 1050 break; 1051 } 1052 } 1053 1054 return (*iop != NULL) + (*memp != NULL) + (*prefp != NULL); 1055 } 1056 1057 u32 dm_pci_read_bar32(struct udevice *dev, int barnum) 1058 { 1059 u32 addr; 1060 int bar; 1061 1062 bar = PCI_BASE_ADDRESS_0 + barnum * 4; 1063 dm_pci_read_config32(dev, bar, &addr); 1064 if (addr & PCI_BASE_ADDRESS_SPACE_IO) 1065 return addr & PCI_BASE_ADDRESS_IO_MASK; 1066 else 1067 return addr & PCI_BASE_ADDRESS_MEM_MASK; 1068 } 1069 1070 static int _dm_pci_bus_to_phys(struct udevice *ctlr, 1071 pci_addr_t bus_addr, unsigned long flags, 1072 unsigned long skip_mask, phys_addr_t *pa) 1073 { 1074 struct pci_controller *hose = dev_get_uclass_priv(ctlr); 1075 struct pci_region *res; 1076 int i; 1077 1078 for (i = 0; i < hose->region_count; i++) { 1079 res = &hose->regions[i]; 1080 1081 if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0) 1082 continue; 1083 1084 if (res->flags & skip_mask) 1085 continue; 1086 1087 if (bus_addr >= res->bus_start && 1088 (bus_addr - res->bus_start) < res->size) { 1089 *pa = (bus_addr - res->bus_start + res->phys_start); 1090 return 0; 1091 } 1092 } 1093 1094 return 1; 1095 } 1096 1097 phys_addr_t dm_pci_bus_to_phys(struct udevice *dev, pci_addr_t bus_addr, 1098 unsigned long flags) 1099 { 1100 phys_addr_t phys_addr = 0; 1101 struct udevice *ctlr; 1102 int ret; 1103 1104 /* The root controller has the region information */ 1105 ctlr = pci_get_controller(dev); 1106 1107 /* 1108 * if PCI_REGION_MEM is set we do a two pass search with preference 1109 * on matches that don't have PCI_REGION_SYS_MEMORY set 1110 */ 1111 if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) { 1112 ret = _dm_pci_bus_to_phys(ctlr, bus_addr, 1113 flags, PCI_REGION_SYS_MEMORY, 1114 &phys_addr); 1115 if (!ret) 1116 return phys_addr; 1117 } 1118 1119 ret = _dm_pci_bus_to_phys(ctlr, bus_addr, flags, 0, &phys_addr); 1120 1121 if (ret) 1122 puts("pci_hose_bus_to_phys: invalid physical address\n"); 1123 1124 return phys_addr; 1125 } 1126 1127 int _dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr, 1128 unsigned long flags, unsigned long skip_mask, 1129 pci_addr_t *ba) 1130 { 1131 struct pci_region *res; 1132 struct udevice *ctlr; 1133 pci_addr_t bus_addr; 1134 int i; 1135 struct pci_controller *hose; 1136 1137 /* The root controller has the region information */ 1138 ctlr = pci_get_controller(dev); 1139 hose = dev_get_uclass_priv(ctlr); 1140 1141 for (i = 0; i < hose->region_count; i++) { 1142 res = &hose->regions[i]; 1143 1144 if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0) 1145 continue; 1146 1147 if (res->flags & skip_mask) 1148 continue; 1149 1150 bus_addr = phys_addr - res->phys_start + res->bus_start; 1151 1152 if (bus_addr >= res->bus_start && 1153 (bus_addr - res->bus_start) < res->size) { 1154 *ba = bus_addr; 1155 return 0; 1156 } 1157 } 1158 1159 return 1; 1160 } 1161 1162 pci_addr_t dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr, 1163 unsigned long flags) 1164 { 1165 pci_addr_t bus_addr = 0; 1166 int ret; 1167 1168 /* 1169 * if PCI_REGION_MEM is set we do a two pass search with preference 1170 * on matches that don't have PCI_REGION_SYS_MEMORY set 1171 */ 1172 if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) { 1173 ret = _dm_pci_phys_to_bus(dev, phys_addr, flags, 1174 PCI_REGION_SYS_MEMORY, &bus_addr); 1175 if (!ret) 1176 return bus_addr; 1177 } 1178 1179 ret = _dm_pci_phys_to_bus(dev, phys_addr, flags, 0, &bus_addr); 1180 1181 if (ret) 1182 puts("pci_hose_phys_to_bus: invalid physical address\n"); 1183 1184 return bus_addr; 1185 } 1186 1187 void *dm_pci_map_bar(struct udevice *dev, int bar, int flags) 1188 { 1189 pci_addr_t pci_bus_addr; 1190 u32 bar_response; 1191 1192 /* read BAR address */ 1193 dm_pci_read_config32(dev, bar, &bar_response); 1194 pci_bus_addr = (pci_addr_t)(bar_response & ~0xf); 1195 1196 /* 1197 * Pass "0" as the length argument to pci_bus_to_virt. The arg 1198 * isn't actualy used on any platform because u-boot assumes a static 1199 * linear mapping. In the future, this could read the BAR size 1200 * and pass that as the size if needed. 1201 */ 1202 return dm_pci_bus_to_virt(dev, pci_bus_addr, flags, 0, MAP_NOCACHE); 1203 } 1204 1205 UCLASS_DRIVER(pci) = { 1206 .id = UCLASS_PCI, 1207 .name = "pci", 1208 .flags = DM_UC_FLAG_SEQ_ALIAS, 1209 .post_bind = pci_uclass_post_bind, 1210 .pre_probe = pci_uclass_pre_probe, 1211 .post_probe = pci_uclass_post_probe, 1212 .child_post_bind = pci_uclass_child_post_bind, 1213 .per_device_auto_alloc_size = sizeof(struct pci_controller), 1214 .per_child_platdata_auto_alloc_size = 1215 sizeof(struct pci_child_platdata), 1216 }; 1217 1218 static const struct dm_pci_ops pci_bridge_ops = { 1219 .read_config = pci_bridge_read_config, 1220 .write_config = pci_bridge_write_config, 1221 }; 1222 1223 static const struct udevice_id pci_bridge_ids[] = { 1224 { .compatible = "pci-bridge" }, 1225 { } 1226 }; 1227 1228 U_BOOT_DRIVER(pci_bridge_drv) = { 1229 .name = "pci_bridge_drv", 1230 .id = UCLASS_PCI, 1231 .of_match = pci_bridge_ids, 1232 .ops = &pci_bridge_ops, 1233 }; 1234 1235 UCLASS_DRIVER(pci_generic) = { 1236 .id = UCLASS_PCI_GENERIC, 1237 .name = "pci_generic", 1238 }; 1239 1240 static const struct udevice_id pci_generic_ids[] = { 1241 { .compatible = "pci-generic" }, 1242 { } 1243 }; 1244 1245 U_BOOT_DRIVER(pci_generic_drv) = { 1246 .name = "pci_generic_drv", 1247 .id = UCLASS_PCI_GENERIC, 1248 .of_match = pci_generic_ids, 1249 }; 1250