1 /* 2 * Copyright (c) 2014 Google, Inc 3 * Written by Simon Glass <sjg@chromium.org> 4 * 5 * SPDX-License-Identifier: GPL-2.0+ 6 */ 7 8 #include <common.h> 9 #include <dm.h> 10 #include <errno.h> 11 #include <inttypes.h> 12 #include <pci.h> 13 #include <asm/io.h> 14 #include <dm/device-internal.h> 15 #include <dm/lists.h> 16 #if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP) 17 #include <asm/fsp/fsp_support.h> 18 #endif 19 #include "pci_internal.h" 20 21 DECLARE_GLOBAL_DATA_PTR; 22 23 int pci_get_bus(int busnum, struct udevice **busp) 24 { 25 int ret; 26 27 ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp); 28 29 /* Since buses may not be numbered yet try a little harder with bus 0 */ 30 if (ret == -ENODEV) { 31 ret = uclass_first_device_err(UCLASS_PCI, busp); 32 if (ret) 33 return ret; 34 ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp); 35 } 36 37 return ret; 38 } 39 40 struct udevice *pci_get_controller(struct udevice *dev) 41 { 42 while (device_is_on_pci_bus(dev)) 43 dev = dev->parent; 44 45 return dev; 46 } 47 48 pci_dev_t dm_pci_get_bdf(struct udevice *dev) 49 { 50 struct pci_child_platdata *pplat = dev_get_parent_platdata(dev); 51 struct udevice *bus = dev->parent; 52 53 return PCI_ADD_BUS(bus->seq, pplat->devfn); 54 } 55 56 /** 57 * pci_get_bus_max() - returns the bus number of the last active bus 58 * 59 * @return last bus number, or -1 if no active buses 60 */ 61 static int pci_get_bus_max(void) 62 { 63 struct udevice *bus; 64 struct uclass *uc; 65 int ret = -1; 66 67 ret = uclass_get(UCLASS_PCI, &uc); 68 uclass_foreach_dev(bus, uc) { 69 if (bus->seq > ret) 70 ret = bus->seq; 71 } 72 73 debug("%s: ret=%d\n", __func__, ret); 74 75 return ret; 76 } 77 78 int pci_last_busno(void) 79 { 80 return pci_get_bus_max(); 81 } 82 83 int pci_get_ff(enum pci_size_t size) 84 { 85 switch (size) { 86 case PCI_SIZE_8: 87 return 0xff; 88 case PCI_SIZE_16: 89 return 0xffff; 90 default: 91 return 0xffffffff; 92 } 93 } 94 95 int pci_bus_find_devfn(struct udevice *bus, pci_dev_t find_devfn, 96 struct udevice **devp) 97 { 98 struct udevice *dev; 99 100 for (device_find_first_child(bus, &dev); 101 dev; 102 device_find_next_child(&dev)) { 103 struct pci_child_platdata *pplat; 104 105 pplat = dev_get_parent_platdata(dev); 106 if (pplat && pplat->devfn == find_devfn) { 107 *devp = dev; 108 return 0; 109 } 110 } 111 112 return -ENODEV; 113 } 114 115 int dm_pci_bus_find_bdf(pci_dev_t bdf, struct udevice **devp) 116 { 117 struct udevice *bus; 118 int ret; 119 120 ret = pci_get_bus(PCI_BUS(bdf), &bus); 121 if (ret) 122 return ret; 123 return pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), devp); 124 } 125 126 static int pci_device_matches_ids(struct udevice *dev, 127 struct pci_device_id *ids) 128 { 129 struct pci_child_platdata *pplat; 130 int i; 131 132 pplat = dev_get_parent_platdata(dev); 133 if (!pplat) 134 return -EINVAL; 135 for (i = 0; ids[i].vendor != 0; i++) { 136 if (pplat->vendor == ids[i].vendor && 137 pplat->device == ids[i].device) 138 return i; 139 } 140 141 return -EINVAL; 142 } 143 144 int pci_bus_find_devices(struct udevice *bus, struct pci_device_id *ids, 145 int *indexp, struct udevice **devp) 146 { 147 struct udevice *dev; 148 149 /* Scan all devices on this bus */ 150 for (device_find_first_child(bus, &dev); 151 dev; 152 device_find_next_child(&dev)) { 153 if (pci_device_matches_ids(dev, ids) >= 0) { 154 if ((*indexp)-- <= 0) { 155 *devp = dev; 156 return 0; 157 } 158 } 159 } 160 161 return -ENODEV; 162 } 163 164 int pci_find_device_id(struct pci_device_id *ids, int index, 165 struct udevice **devp) 166 { 167 struct udevice *bus; 168 169 /* Scan all known buses */ 170 for (uclass_first_device(UCLASS_PCI, &bus); 171 bus; 172 uclass_next_device(&bus)) { 173 if (!pci_bus_find_devices(bus, ids, &index, devp)) 174 return 0; 175 } 176 *devp = NULL; 177 178 return -ENODEV; 179 } 180 181 static int dm_pci_bus_find_device(struct udevice *bus, unsigned int vendor, 182 unsigned int device, int *indexp, 183 struct udevice **devp) 184 { 185 struct pci_child_platdata *pplat; 186 struct udevice *dev; 187 188 for (device_find_first_child(bus, &dev); 189 dev; 190 device_find_next_child(&dev)) { 191 pplat = dev_get_parent_platdata(dev); 192 if (pplat->vendor == vendor && pplat->device == device) { 193 if (!(*indexp)--) { 194 *devp = dev; 195 return 0; 196 } 197 } 198 } 199 200 return -ENODEV; 201 } 202 203 int dm_pci_find_device(unsigned int vendor, unsigned int device, int index, 204 struct udevice **devp) 205 { 206 struct udevice *bus; 207 208 /* Scan all known buses */ 209 for (uclass_first_device(UCLASS_PCI, &bus); 210 bus; 211 uclass_next_device(&bus)) { 212 if (!dm_pci_bus_find_device(bus, vendor, device, &index, devp)) 213 return device_probe(*devp); 214 } 215 *devp = NULL; 216 217 return -ENODEV; 218 } 219 220 int dm_pci_find_class(uint find_class, int index, struct udevice **devp) 221 { 222 struct udevice *dev; 223 224 /* Scan all known buses */ 225 for (pci_find_first_device(&dev); 226 dev; 227 pci_find_next_device(&dev)) { 228 struct pci_child_platdata *pplat = dev_get_parent_platdata(dev); 229 230 if (pplat->class == find_class && !index--) { 231 *devp = dev; 232 return device_probe(*devp); 233 } 234 } 235 *devp = NULL; 236 237 return -ENODEV; 238 } 239 240 int pci_bus_write_config(struct udevice *bus, pci_dev_t bdf, int offset, 241 unsigned long value, enum pci_size_t size) 242 { 243 struct dm_pci_ops *ops; 244 245 ops = pci_get_ops(bus); 246 if (!ops->write_config) 247 return -ENOSYS; 248 return ops->write_config(bus, bdf, offset, value, size); 249 } 250 251 int pci_bus_clrset_config32(struct udevice *bus, pci_dev_t bdf, int offset, 252 u32 clr, u32 set) 253 { 254 ulong val; 255 int ret; 256 257 ret = pci_bus_read_config(bus, bdf, offset, &val, PCI_SIZE_32); 258 if (ret) 259 return ret; 260 val &= ~clr; 261 val |= set; 262 263 return pci_bus_write_config(bus, bdf, offset, val, PCI_SIZE_32); 264 } 265 266 int pci_write_config(pci_dev_t bdf, int offset, unsigned long value, 267 enum pci_size_t size) 268 { 269 struct udevice *bus; 270 int ret; 271 272 ret = pci_get_bus(PCI_BUS(bdf), &bus); 273 if (ret) 274 return ret; 275 276 return pci_bus_write_config(bus, bdf, offset, value, size); 277 } 278 279 int dm_pci_write_config(struct udevice *dev, int offset, unsigned long value, 280 enum pci_size_t size) 281 { 282 struct udevice *bus; 283 284 for (bus = dev; device_is_on_pci_bus(bus);) 285 bus = bus->parent; 286 return pci_bus_write_config(bus, dm_pci_get_bdf(dev), offset, value, 287 size); 288 } 289 290 int pci_write_config32(pci_dev_t bdf, int offset, u32 value) 291 { 292 return pci_write_config(bdf, offset, value, PCI_SIZE_32); 293 } 294 295 int pci_write_config16(pci_dev_t bdf, int offset, u16 value) 296 { 297 return pci_write_config(bdf, offset, value, PCI_SIZE_16); 298 } 299 300 int pci_write_config8(pci_dev_t bdf, int offset, u8 value) 301 { 302 return pci_write_config(bdf, offset, value, PCI_SIZE_8); 303 } 304 305 int dm_pci_write_config8(struct udevice *dev, int offset, u8 value) 306 { 307 return dm_pci_write_config(dev, offset, value, PCI_SIZE_8); 308 } 309 310 int dm_pci_write_config16(struct udevice *dev, int offset, u16 value) 311 { 312 return dm_pci_write_config(dev, offset, value, PCI_SIZE_16); 313 } 314 315 int dm_pci_write_config32(struct udevice *dev, int offset, u32 value) 316 { 317 return dm_pci_write_config(dev, offset, value, PCI_SIZE_32); 318 } 319 320 int pci_bus_read_config(struct udevice *bus, pci_dev_t bdf, int offset, 321 unsigned long *valuep, enum pci_size_t size) 322 { 323 struct dm_pci_ops *ops; 324 325 ops = pci_get_ops(bus); 326 if (!ops->read_config) 327 return -ENOSYS; 328 return ops->read_config(bus, bdf, offset, valuep, size); 329 } 330 331 int pci_read_config(pci_dev_t bdf, int offset, unsigned long *valuep, 332 enum pci_size_t size) 333 { 334 struct udevice *bus; 335 int ret; 336 337 ret = pci_get_bus(PCI_BUS(bdf), &bus); 338 if (ret) 339 return ret; 340 341 return pci_bus_read_config(bus, bdf, offset, valuep, size); 342 } 343 344 int dm_pci_read_config(struct udevice *dev, int offset, unsigned long *valuep, 345 enum pci_size_t size) 346 { 347 struct udevice *bus; 348 349 for (bus = dev; device_is_on_pci_bus(bus);) 350 bus = bus->parent; 351 return pci_bus_read_config(bus, dm_pci_get_bdf(dev), offset, valuep, 352 size); 353 } 354 355 int pci_read_config32(pci_dev_t bdf, int offset, u32 *valuep) 356 { 357 unsigned long value; 358 int ret; 359 360 ret = pci_read_config(bdf, offset, &value, PCI_SIZE_32); 361 if (ret) 362 return ret; 363 *valuep = value; 364 365 return 0; 366 } 367 368 int pci_read_config16(pci_dev_t bdf, int offset, u16 *valuep) 369 { 370 unsigned long value; 371 int ret; 372 373 ret = pci_read_config(bdf, offset, &value, PCI_SIZE_16); 374 if (ret) 375 return ret; 376 *valuep = value; 377 378 return 0; 379 } 380 381 int pci_read_config8(pci_dev_t bdf, int offset, u8 *valuep) 382 { 383 unsigned long value; 384 int ret; 385 386 ret = pci_read_config(bdf, offset, &value, PCI_SIZE_8); 387 if (ret) 388 return ret; 389 *valuep = value; 390 391 return 0; 392 } 393 394 int dm_pci_read_config8(struct udevice *dev, int offset, u8 *valuep) 395 { 396 unsigned long value; 397 int ret; 398 399 ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_8); 400 if (ret) 401 return ret; 402 *valuep = value; 403 404 return 0; 405 } 406 407 int dm_pci_read_config16(struct udevice *dev, int offset, u16 *valuep) 408 { 409 unsigned long value; 410 int ret; 411 412 ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_16); 413 if (ret) 414 return ret; 415 *valuep = value; 416 417 return 0; 418 } 419 420 int dm_pci_read_config32(struct udevice *dev, int offset, u32 *valuep) 421 { 422 unsigned long value; 423 int ret; 424 425 ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_32); 426 if (ret) 427 return ret; 428 *valuep = value; 429 430 return 0; 431 } 432 433 int dm_pci_clrset_config8(struct udevice *dev, int offset, u32 clr, u32 set) 434 { 435 u8 val; 436 int ret; 437 438 ret = dm_pci_read_config8(dev, offset, &val); 439 if (ret) 440 return ret; 441 val &= ~clr; 442 val |= set; 443 444 return dm_pci_write_config8(dev, offset, val); 445 } 446 447 int dm_pci_clrset_config16(struct udevice *dev, int offset, u32 clr, u32 set) 448 { 449 u16 val; 450 int ret; 451 452 ret = dm_pci_read_config16(dev, offset, &val); 453 if (ret) 454 return ret; 455 val &= ~clr; 456 val |= set; 457 458 return dm_pci_write_config16(dev, offset, val); 459 } 460 461 int dm_pci_clrset_config32(struct udevice *dev, int offset, u32 clr, u32 set) 462 { 463 u32 val; 464 int ret; 465 466 ret = dm_pci_read_config32(dev, offset, &val); 467 if (ret) 468 return ret; 469 val &= ~clr; 470 val |= set; 471 472 return dm_pci_write_config32(dev, offset, val); 473 } 474 475 static void set_vga_bridge_bits(struct udevice *dev) 476 { 477 struct udevice *parent = dev->parent; 478 u16 bc; 479 480 while (parent->seq != 0) { 481 dm_pci_read_config16(parent, PCI_BRIDGE_CONTROL, &bc); 482 bc |= PCI_BRIDGE_CTL_VGA; 483 dm_pci_write_config16(parent, PCI_BRIDGE_CONTROL, bc); 484 parent = parent->parent; 485 } 486 } 487 488 int pci_auto_config_devices(struct udevice *bus) 489 { 490 struct pci_controller *hose = bus->uclass_priv; 491 struct pci_child_platdata *pplat; 492 unsigned int sub_bus; 493 struct udevice *dev; 494 int ret; 495 496 sub_bus = bus->seq; 497 debug("%s: start\n", __func__); 498 pciauto_config_init(hose); 499 for (ret = device_find_first_child(bus, &dev); 500 !ret && dev; 501 ret = device_find_next_child(&dev)) { 502 unsigned int max_bus; 503 int ret; 504 505 debug("%s: device %s\n", __func__, dev->name); 506 ret = dm_pciauto_config_device(dev); 507 if (ret < 0) 508 return ret; 509 max_bus = ret; 510 sub_bus = max(sub_bus, max_bus); 511 512 pplat = dev_get_parent_platdata(dev); 513 if (pplat->class == (PCI_CLASS_DISPLAY_VGA << 8)) 514 set_vga_bridge_bits(dev); 515 } 516 debug("%s: done\n", __func__); 517 518 return sub_bus; 519 } 520 521 int pci_generic_mmap_write_config( 522 struct udevice *bus, 523 int (*addr_f)(struct udevice *bus, pci_dev_t bdf, uint offset, void **addrp), 524 pci_dev_t bdf, 525 uint offset, 526 ulong value, 527 enum pci_size_t size) 528 { 529 void *address; 530 531 if (addr_f(bus, bdf, offset, &address) < 0) 532 return 0; 533 534 switch (size) { 535 case PCI_SIZE_8: 536 writeb(value, address); 537 return 0; 538 case PCI_SIZE_16: 539 writew(value, address); 540 return 0; 541 case PCI_SIZE_32: 542 writel(value, address); 543 return 0; 544 default: 545 return -EINVAL; 546 } 547 } 548 549 int pci_generic_mmap_read_config( 550 struct udevice *bus, 551 int (*addr_f)(struct udevice *bus, pci_dev_t bdf, uint offset, void **addrp), 552 pci_dev_t bdf, 553 uint offset, 554 ulong *valuep, 555 enum pci_size_t size) 556 { 557 void *address; 558 559 if (addr_f(bus, bdf, offset, &address) < 0) { 560 *valuep = pci_get_ff(size); 561 return 0; 562 } 563 564 switch (size) { 565 case PCI_SIZE_8: 566 *valuep = readb(address); 567 return 0; 568 case PCI_SIZE_16: 569 *valuep = readw(address); 570 return 0; 571 case PCI_SIZE_32: 572 *valuep = readl(address); 573 return 0; 574 default: 575 return -EINVAL; 576 } 577 } 578 579 int dm_pci_hose_probe_bus(struct udevice *bus) 580 { 581 int sub_bus; 582 int ret; 583 584 debug("%s\n", __func__); 585 586 sub_bus = pci_get_bus_max() + 1; 587 debug("%s: bus = %d/%s\n", __func__, sub_bus, bus->name); 588 dm_pciauto_prescan_setup_bridge(bus, sub_bus); 589 590 ret = device_probe(bus); 591 if (ret) { 592 debug("%s: Cannot probe bus %s: %d\n", __func__, bus->name, 593 ret); 594 return ret; 595 } 596 if (sub_bus != bus->seq) { 597 printf("%s: Internal error, bus '%s' got seq %d, expected %d\n", 598 __func__, bus->name, bus->seq, sub_bus); 599 return -EPIPE; 600 } 601 sub_bus = pci_get_bus_max(); 602 dm_pciauto_postscan_setup_bridge(bus, sub_bus); 603 604 return sub_bus; 605 } 606 607 /** 608 * pci_match_one_device - Tell if a PCI device structure has a matching 609 * PCI device id structure 610 * @id: single PCI device id structure to match 611 * @find: the PCI device id structure to match against 612 * 613 * Returns true if the finding pci_device_id structure matched or false if 614 * there is no match. 615 */ 616 static bool pci_match_one_id(const struct pci_device_id *id, 617 const struct pci_device_id *find) 618 { 619 if ((id->vendor == PCI_ANY_ID || id->vendor == find->vendor) && 620 (id->device == PCI_ANY_ID || id->device == find->device) && 621 (id->subvendor == PCI_ANY_ID || id->subvendor == find->subvendor) && 622 (id->subdevice == PCI_ANY_ID || id->subdevice == find->subdevice) && 623 !((id->class ^ find->class) & id->class_mask)) 624 return true; 625 626 return false; 627 } 628 629 /** 630 * pci_find_and_bind_driver() - Find and bind the right PCI driver 631 * 632 * This only looks at certain fields in the descriptor. 633 * 634 * @parent: Parent bus 635 * @find_id: Specification of the driver to find 636 * @bdf: Bus/device/function addreess - see PCI_BDF() 637 * @devp: Returns a pointer to the device created 638 * @return 0 if OK, -EPERM if the device is not needed before relocation and 639 * therefore was not created, other -ve value on error 640 */ 641 static int pci_find_and_bind_driver(struct udevice *parent, 642 struct pci_device_id *find_id, 643 pci_dev_t bdf, struct udevice **devp) 644 { 645 struct pci_driver_entry *start, *entry; 646 const char *drv; 647 int n_ents; 648 int ret; 649 char name[30], *str; 650 bool bridge; 651 652 *devp = NULL; 653 654 debug("%s: Searching for driver: vendor=%x, device=%x\n", __func__, 655 find_id->vendor, find_id->device); 656 start = ll_entry_start(struct pci_driver_entry, pci_driver_entry); 657 n_ents = ll_entry_count(struct pci_driver_entry, pci_driver_entry); 658 for (entry = start; entry != start + n_ents; entry++) { 659 const struct pci_device_id *id; 660 struct udevice *dev; 661 const struct driver *drv; 662 663 for (id = entry->match; 664 id->vendor || id->subvendor || id->class_mask; 665 id++) { 666 if (!pci_match_one_id(id, find_id)) 667 continue; 668 669 drv = entry->driver; 670 671 /* 672 * In the pre-relocation phase, we only bind devices 673 * whose driver has the DM_FLAG_PRE_RELOC set, to save 674 * precious memory space as on some platforms as that 675 * space is pretty limited (ie: using Cache As RAM). 676 */ 677 if (!(gd->flags & GD_FLG_RELOC) && 678 !(drv->flags & DM_FLAG_PRE_RELOC)) 679 return -EPERM; 680 681 /* 682 * We could pass the descriptor to the driver as 683 * platdata (instead of NULL) and allow its bind() 684 * method to return -ENOENT if it doesn't support this 685 * device. That way we could continue the search to 686 * find another driver. For now this doesn't seem 687 * necesssary, so just bind the first match. 688 */ 689 ret = device_bind(parent, drv, drv->name, NULL, -1, 690 &dev); 691 if (ret) 692 goto error; 693 debug("%s: Match found: %s\n", __func__, drv->name); 694 dev->driver_data = find_id->driver_data; 695 *devp = dev; 696 return 0; 697 } 698 } 699 700 bridge = (find_id->class >> 8) == PCI_CLASS_BRIDGE_PCI; 701 /* 702 * In the pre-relocation phase, we only bind bridge devices to save 703 * precious memory space as on some platforms as that space is pretty 704 * limited (ie: using Cache As RAM). 705 */ 706 if (!(gd->flags & GD_FLG_RELOC) && !bridge) 707 return -EPERM; 708 709 /* Bind a generic driver so that the device can be used */ 710 sprintf(name, "pci_%x:%x.%x", parent->seq, PCI_DEV(bdf), 711 PCI_FUNC(bdf)); 712 str = strdup(name); 713 if (!str) 714 return -ENOMEM; 715 drv = bridge ? "pci_bridge_drv" : "pci_generic_drv"; 716 717 ret = device_bind_driver(parent, drv, str, devp); 718 if (ret) { 719 debug("%s: Failed to bind generic driver: %d\n", __func__, ret); 720 free(str); 721 return ret; 722 } 723 debug("%s: No match found: bound generic driver instead\n", __func__); 724 725 return 0; 726 727 error: 728 debug("%s: No match found: error %d\n", __func__, ret); 729 return ret; 730 } 731 732 int pci_bind_bus_devices(struct udevice *bus) 733 { 734 ulong vendor, device; 735 ulong header_type; 736 pci_dev_t bdf, end; 737 bool found_multi; 738 int ret; 739 740 found_multi = false; 741 end = PCI_BDF(bus->seq, PCI_MAX_PCI_DEVICES - 1, 742 PCI_MAX_PCI_FUNCTIONS - 1); 743 for (bdf = PCI_BDF(bus->seq, 0, 0); bdf <= end; 744 bdf += PCI_BDF(0, 0, 1)) { 745 struct pci_child_platdata *pplat; 746 struct udevice *dev; 747 ulong class; 748 749 if (PCI_FUNC(bdf) && !found_multi) 750 continue; 751 /* Check only the first access, we don't expect problems */ 752 ret = pci_bus_read_config(bus, bdf, PCI_HEADER_TYPE, 753 &header_type, PCI_SIZE_8); 754 if (ret) 755 goto error; 756 pci_bus_read_config(bus, bdf, PCI_VENDOR_ID, &vendor, 757 PCI_SIZE_16); 758 if (vendor == 0xffff || vendor == 0x0000) 759 continue; 760 761 if (!PCI_FUNC(bdf)) 762 found_multi = header_type & 0x80; 763 764 debug("%s: bus %d/%s: found device %x, function %d\n", __func__, 765 bus->seq, bus->name, PCI_DEV(bdf), PCI_FUNC(bdf)); 766 pci_bus_read_config(bus, bdf, PCI_DEVICE_ID, &device, 767 PCI_SIZE_16); 768 pci_bus_read_config(bus, bdf, PCI_CLASS_REVISION, &class, 769 PCI_SIZE_32); 770 class >>= 8; 771 772 /* Find this device in the device tree */ 773 ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev); 774 775 /* If nothing in the device tree, bind a device */ 776 if (ret == -ENODEV) { 777 struct pci_device_id find_id; 778 ulong val; 779 780 memset(&find_id, '\0', sizeof(find_id)); 781 find_id.vendor = vendor; 782 find_id.device = device; 783 find_id.class = class; 784 if ((header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL) { 785 pci_bus_read_config(bus, bdf, 786 PCI_SUBSYSTEM_VENDOR_ID, 787 &val, PCI_SIZE_32); 788 find_id.subvendor = val & 0xffff; 789 find_id.subdevice = val >> 16; 790 } 791 ret = pci_find_and_bind_driver(bus, &find_id, bdf, 792 &dev); 793 } 794 if (ret == -EPERM) 795 continue; 796 else if (ret) 797 return ret; 798 799 /* Update the platform data */ 800 pplat = dev_get_parent_platdata(dev); 801 pplat->devfn = PCI_MASK_BUS(bdf); 802 pplat->vendor = vendor; 803 pplat->device = device; 804 pplat->class = class; 805 } 806 807 return 0; 808 error: 809 printf("Cannot read bus configuration: %d\n", ret); 810 811 return ret; 812 } 813 814 static int decode_regions(struct pci_controller *hose, ofnode parent_node, 815 ofnode node) 816 { 817 int pci_addr_cells, addr_cells, size_cells; 818 phys_addr_t base = 0, size; 819 int cells_per_record; 820 const u32 *prop; 821 int len; 822 int i; 823 824 prop = ofnode_get_property(node, "ranges", &len); 825 if (!prop) 826 return -EINVAL; 827 pci_addr_cells = ofnode_read_simple_addr_cells(node); 828 addr_cells = ofnode_read_simple_addr_cells(parent_node); 829 size_cells = ofnode_read_simple_size_cells(node); 830 831 /* PCI addresses are always 3-cells */ 832 len /= sizeof(u32); 833 cells_per_record = pci_addr_cells + addr_cells + size_cells; 834 hose->region_count = 0; 835 debug("%s: len=%d, cells_per_record=%d\n", __func__, len, 836 cells_per_record); 837 for (i = 0; i < MAX_PCI_REGIONS; i++, len -= cells_per_record) { 838 u64 pci_addr, addr, size; 839 int space_code; 840 u32 flags; 841 int type; 842 int pos; 843 844 if (len < cells_per_record) 845 break; 846 flags = fdt32_to_cpu(prop[0]); 847 space_code = (flags >> 24) & 3; 848 pci_addr = fdtdec_get_number(prop + 1, 2); 849 prop += pci_addr_cells; 850 addr = fdtdec_get_number(prop, addr_cells); 851 prop += addr_cells; 852 size = fdtdec_get_number(prop, size_cells); 853 prop += size_cells; 854 debug("%s: region %d, pci_addr=%" PRIx64 ", addr=%" PRIx64 855 ", size=%" PRIx64 ", space_code=%d\n", __func__, 856 hose->region_count, pci_addr, addr, size, space_code); 857 if (space_code & 2) { 858 type = flags & (1U << 30) ? PCI_REGION_PREFETCH : 859 PCI_REGION_MEM; 860 } else if (space_code & 1) { 861 type = PCI_REGION_IO; 862 } else { 863 continue; 864 } 865 pos = -1; 866 for (i = 0; i < hose->region_count; i++) { 867 if (hose->regions[i].flags == type) 868 pos = i; 869 } 870 if (pos == -1) 871 pos = hose->region_count++; 872 debug(" - type=%d, pos=%d\n", type, pos); 873 pci_set_region(hose->regions + pos, pci_addr, addr, size, type); 874 } 875 876 /* Add a region for our local memory */ 877 size = gd->ram_size; 878 #ifdef CONFIG_SYS_SDRAM_BASE 879 base = CONFIG_SYS_SDRAM_BASE; 880 #endif 881 if (gd->pci_ram_top && gd->pci_ram_top < base + size) 882 size = gd->pci_ram_top - base; 883 pci_set_region(hose->regions + hose->region_count++, base, base, 884 size, PCI_REGION_MEM | PCI_REGION_SYS_MEMORY); 885 886 return 0; 887 } 888 889 static int pci_uclass_pre_probe(struct udevice *bus) 890 { 891 struct pci_controller *hose; 892 int ret; 893 894 debug("%s, bus=%d/%s, parent=%s\n", __func__, bus->seq, bus->name, 895 bus->parent->name); 896 hose = bus->uclass_priv; 897 898 /* For bridges, use the top-level PCI controller */ 899 if (!device_is_on_pci_bus(bus)) { 900 hose->ctlr = bus; 901 ret = decode_regions(hose, dev_ofnode(bus->parent), 902 dev_ofnode(bus)); 903 if (ret) { 904 debug("%s: Cannot decode regions\n", __func__); 905 return ret; 906 } 907 } else { 908 struct pci_controller *parent_hose; 909 910 parent_hose = dev_get_uclass_priv(bus->parent); 911 hose->ctlr = parent_hose->bus; 912 } 913 hose->bus = bus; 914 hose->first_busno = bus->seq; 915 hose->last_busno = bus->seq; 916 917 return 0; 918 } 919 920 static int pci_uclass_post_probe(struct udevice *bus) 921 { 922 int ret; 923 924 debug("%s: probing bus %d\n", __func__, bus->seq); 925 ret = pci_bind_bus_devices(bus); 926 if (ret) 927 return ret; 928 929 #ifdef CONFIG_PCI_PNP 930 ret = pci_auto_config_devices(bus); 931 if (ret < 0) 932 return ret; 933 #endif 934 935 #if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP) 936 /* 937 * Per Intel FSP specification, we should call FSP notify API to 938 * inform FSP that PCI enumeration has been done so that FSP will 939 * do any necessary initialization as required by the chipset's 940 * BIOS Writer's Guide (BWG). 941 * 942 * Unfortunately we have to put this call here as with driver model, 943 * the enumeration is all done on a lazy basis as needed, so until 944 * something is touched on PCI it won't happen. 945 * 946 * Note we only call this 1) after U-Boot is relocated, and 2) 947 * root bus has finished probing. 948 */ 949 if ((gd->flags & GD_FLG_RELOC) && (bus->seq == 0)) { 950 ret = fsp_init_phase_pci(); 951 if (ret) 952 return ret; 953 } 954 #endif 955 956 return 0; 957 } 958 959 static int pci_uclass_child_post_bind(struct udevice *dev) 960 { 961 struct pci_child_platdata *pplat; 962 struct fdt_pci_addr addr; 963 int ret; 964 965 if (!dev_of_valid(dev)) 966 return 0; 967 968 /* 969 * We could read vendor, device, class if available. But for now we 970 * just check the address. 971 */ 972 pplat = dev_get_parent_platdata(dev); 973 ret = ofnode_read_pci_addr(dev_ofnode(dev), FDT_PCI_SPACE_CONFIG, "reg", 974 &addr); 975 976 if (ret) { 977 if (ret != -ENOENT) 978 return -EINVAL; 979 } else { 980 /* extract the devfn from fdt_pci_addr */ 981 pplat->devfn = addr.phys_hi & 0xff00; 982 } 983 984 return 0; 985 } 986 987 static int pci_bridge_read_config(struct udevice *bus, pci_dev_t bdf, 988 uint offset, ulong *valuep, 989 enum pci_size_t size) 990 { 991 struct pci_controller *hose = bus->uclass_priv; 992 993 return pci_bus_read_config(hose->ctlr, bdf, offset, valuep, size); 994 } 995 996 static int pci_bridge_write_config(struct udevice *bus, pci_dev_t bdf, 997 uint offset, ulong value, 998 enum pci_size_t size) 999 { 1000 struct pci_controller *hose = bus->uclass_priv; 1001 1002 return pci_bus_write_config(hose->ctlr, bdf, offset, value, size); 1003 } 1004 1005 static int skip_to_next_device(struct udevice *bus, struct udevice **devp) 1006 { 1007 struct udevice *dev; 1008 int ret = 0; 1009 1010 /* 1011 * Scan through all the PCI controllers. On x86 there will only be one 1012 * but that is not necessarily true on other hardware. 1013 */ 1014 do { 1015 device_find_first_child(bus, &dev); 1016 if (dev) { 1017 *devp = dev; 1018 return 0; 1019 } 1020 ret = uclass_next_device(&bus); 1021 if (ret) 1022 return ret; 1023 } while (bus); 1024 1025 return 0; 1026 } 1027 1028 int pci_find_next_device(struct udevice **devp) 1029 { 1030 struct udevice *child = *devp; 1031 struct udevice *bus = child->parent; 1032 int ret; 1033 1034 /* First try all the siblings */ 1035 *devp = NULL; 1036 while (child) { 1037 device_find_next_child(&child); 1038 if (child) { 1039 *devp = child; 1040 return 0; 1041 } 1042 } 1043 1044 /* We ran out of siblings. Try the next bus */ 1045 ret = uclass_next_device(&bus); 1046 if (ret) 1047 return ret; 1048 1049 return bus ? skip_to_next_device(bus, devp) : 0; 1050 } 1051 1052 int pci_find_first_device(struct udevice **devp) 1053 { 1054 struct udevice *bus; 1055 int ret; 1056 1057 *devp = NULL; 1058 ret = uclass_first_device(UCLASS_PCI, &bus); 1059 if (ret) 1060 return ret; 1061 1062 return skip_to_next_device(bus, devp); 1063 } 1064 1065 ulong pci_conv_32_to_size(ulong value, uint offset, enum pci_size_t size) 1066 { 1067 switch (size) { 1068 case PCI_SIZE_8: 1069 return (value >> ((offset & 3) * 8)) & 0xff; 1070 case PCI_SIZE_16: 1071 return (value >> ((offset & 2) * 8)) & 0xffff; 1072 default: 1073 return value; 1074 } 1075 } 1076 1077 ulong pci_conv_size_to_32(ulong old, ulong value, uint offset, 1078 enum pci_size_t size) 1079 { 1080 uint off_mask; 1081 uint val_mask, shift; 1082 ulong ldata, mask; 1083 1084 switch (size) { 1085 case PCI_SIZE_8: 1086 off_mask = 3; 1087 val_mask = 0xff; 1088 break; 1089 case PCI_SIZE_16: 1090 off_mask = 2; 1091 val_mask = 0xffff; 1092 break; 1093 default: 1094 return value; 1095 } 1096 shift = (offset & off_mask) * 8; 1097 ldata = (value & val_mask) << shift; 1098 mask = val_mask << shift; 1099 value = (old & ~mask) | ldata; 1100 1101 return value; 1102 } 1103 1104 int pci_get_regions(struct udevice *dev, struct pci_region **iop, 1105 struct pci_region **memp, struct pci_region **prefp) 1106 { 1107 struct udevice *bus = pci_get_controller(dev); 1108 struct pci_controller *hose = dev_get_uclass_priv(bus); 1109 int i; 1110 1111 *iop = NULL; 1112 *memp = NULL; 1113 *prefp = NULL; 1114 for (i = 0; i < hose->region_count; i++) { 1115 switch (hose->regions[i].flags) { 1116 case PCI_REGION_IO: 1117 if (!*iop || (*iop)->size < hose->regions[i].size) 1118 *iop = hose->regions + i; 1119 break; 1120 case PCI_REGION_MEM: 1121 if (!*memp || (*memp)->size < hose->regions[i].size) 1122 *memp = hose->regions + i; 1123 break; 1124 case (PCI_REGION_MEM | PCI_REGION_PREFETCH): 1125 if (!*prefp || (*prefp)->size < hose->regions[i].size) 1126 *prefp = hose->regions + i; 1127 break; 1128 } 1129 } 1130 1131 return (*iop != NULL) + (*memp != NULL) + (*prefp != NULL); 1132 } 1133 1134 u32 dm_pci_read_bar32(struct udevice *dev, int barnum) 1135 { 1136 u32 addr; 1137 int bar; 1138 1139 bar = PCI_BASE_ADDRESS_0 + barnum * 4; 1140 dm_pci_read_config32(dev, bar, &addr); 1141 if (addr & PCI_BASE_ADDRESS_SPACE_IO) 1142 return addr & PCI_BASE_ADDRESS_IO_MASK; 1143 else 1144 return addr & PCI_BASE_ADDRESS_MEM_MASK; 1145 } 1146 1147 void dm_pci_write_bar32(struct udevice *dev, int barnum, u32 addr) 1148 { 1149 int bar; 1150 1151 bar = PCI_BASE_ADDRESS_0 + barnum * 4; 1152 dm_pci_write_config32(dev, bar, addr); 1153 } 1154 1155 static int _dm_pci_bus_to_phys(struct udevice *ctlr, 1156 pci_addr_t bus_addr, unsigned long flags, 1157 unsigned long skip_mask, phys_addr_t *pa) 1158 { 1159 struct pci_controller *hose = dev_get_uclass_priv(ctlr); 1160 struct pci_region *res; 1161 int i; 1162 1163 for (i = 0; i < hose->region_count; i++) { 1164 res = &hose->regions[i]; 1165 1166 if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0) 1167 continue; 1168 1169 if (res->flags & skip_mask) 1170 continue; 1171 1172 if (bus_addr >= res->bus_start && 1173 (bus_addr - res->bus_start) < res->size) { 1174 *pa = (bus_addr - res->bus_start + res->phys_start); 1175 return 0; 1176 } 1177 } 1178 1179 return 1; 1180 } 1181 1182 phys_addr_t dm_pci_bus_to_phys(struct udevice *dev, pci_addr_t bus_addr, 1183 unsigned long flags) 1184 { 1185 phys_addr_t phys_addr = 0; 1186 struct udevice *ctlr; 1187 int ret; 1188 1189 /* The root controller has the region information */ 1190 ctlr = pci_get_controller(dev); 1191 1192 /* 1193 * if PCI_REGION_MEM is set we do a two pass search with preference 1194 * on matches that don't have PCI_REGION_SYS_MEMORY set 1195 */ 1196 if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) { 1197 ret = _dm_pci_bus_to_phys(ctlr, bus_addr, 1198 flags, PCI_REGION_SYS_MEMORY, 1199 &phys_addr); 1200 if (!ret) 1201 return phys_addr; 1202 } 1203 1204 ret = _dm_pci_bus_to_phys(ctlr, bus_addr, flags, 0, &phys_addr); 1205 1206 if (ret) 1207 puts("pci_hose_bus_to_phys: invalid physical address\n"); 1208 1209 return phys_addr; 1210 } 1211 1212 int _dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr, 1213 unsigned long flags, unsigned long skip_mask, 1214 pci_addr_t *ba) 1215 { 1216 struct pci_region *res; 1217 struct udevice *ctlr; 1218 pci_addr_t bus_addr; 1219 int i; 1220 struct pci_controller *hose; 1221 1222 /* The root controller has the region information */ 1223 ctlr = pci_get_controller(dev); 1224 hose = dev_get_uclass_priv(ctlr); 1225 1226 for (i = 0; i < hose->region_count; i++) { 1227 res = &hose->regions[i]; 1228 1229 if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0) 1230 continue; 1231 1232 if (res->flags & skip_mask) 1233 continue; 1234 1235 bus_addr = phys_addr - res->phys_start + res->bus_start; 1236 1237 if (bus_addr >= res->bus_start && 1238 (bus_addr - res->bus_start) < res->size) { 1239 *ba = bus_addr; 1240 return 0; 1241 } 1242 } 1243 1244 return 1; 1245 } 1246 1247 pci_addr_t dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr, 1248 unsigned long flags) 1249 { 1250 pci_addr_t bus_addr = 0; 1251 int ret; 1252 1253 /* 1254 * if PCI_REGION_MEM is set we do a two pass search with preference 1255 * on matches that don't have PCI_REGION_SYS_MEMORY set 1256 */ 1257 if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) { 1258 ret = _dm_pci_phys_to_bus(dev, phys_addr, flags, 1259 PCI_REGION_SYS_MEMORY, &bus_addr); 1260 if (!ret) 1261 return bus_addr; 1262 } 1263 1264 ret = _dm_pci_phys_to_bus(dev, phys_addr, flags, 0, &bus_addr); 1265 1266 if (ret) 1267 puts("pci_hose_phys_to_bus: invalid physical address\n"); 1268 1269 return bus_addr; 1270 } 1271 1272 void *dm_pci_map_bar(struct udevice *dev, int bar, int flags) 1273 { 1274 pci_addr_t pci_bus_addr; 1275 u32 bar_response; 1276 1277 /* read BAR address */ 1278 dm_pci_read_config32(dev, bar, &bar_response); 1279 pci_bus_addr = (pci_addr_t)(bar_response & ~0xf); 1280 1281 /* 1282 * Pass "0" as the length argument to pci_bus_to_virt. The arg 1283 * isn't actualy used on any platform because u-boot assumes a static 1284 * linear mapping. In the future, this could read the BAR size 1285 * and pass that as the size if needed. 1286 */ 1287 return dm_pci_bus_to_virt(dev, pci_bus_addr, flags, 0, MAP_NOCACHE); 1288 } 1289 1290 UCLASS_DRIVER(pci) = { 1291 .id = UCLASS_PCI, 1292 .name = "pci", 1293 .flags = DM_UC_FLAG_SEQ_ALIAS, 1294 .post_bind = dm_scan_fdt_dev, 1295 .pre_probe = pci_uclass_pre_probe, 1296 .post_probe = pci_uclass_post_probe, 1297 .child_post_bind = pci_uclass_child_post_bind, 1298 .per_device_auto_alloc_size = sizeof(struct pci_controller), 1299 .per_child_platdata_auto_alloc_size = 1300 sizeof(struct pci_child_platdata), 1301 }; 1302 1303 static const struct dm_pci_ops pci_bridge_ops = { 1304 .read_config = pci_bridge_read_config, 1305 .write_config = pci_bridge_write_config, 1306 }; 1307 1308 static const struct udevice_id pci_bridge_ids[] = { 1309 { .compatible = "pci-bridge" }, 1310 { } 1311 }; 1312 1313 U_BOOT_DRIVER(pci_bridge_drv) = { 1314 .name = "pci_bridge_drv", 1315 .id = UCLASS_PCI, 1316 .of_match = pci_bridge_ids, 1317 .ops = &pci_bridge_ops, 1318 }; 1319 1320 UCLASS_DRIVER(pci_generic) = { 1321 .id = UCLASS_PCI_GENERIC, 1322 .name = "pci_generic", 1323 }; 1324 1325 static const struct udevice_id pci_generic_ids[] = { 1326 { .compatible = "pci-generic" }, 1327 { } 1328 }; 1329 1330 U_BOOT_DRIVER(pci_generic_drv) = { 1331 .name = "pci_generic_drv", 1332 .id = UCLASS_PCI_GENERIC, 1333 .of_match = pci_generic_ids, 1334 }; 1335 1336 void pci_init(void) 1337 { 1338 struct udevice *bus; 1339 1340 /* 1341 * Enumerate all known controller devices. Enumeration has the side- 1342 * effect of probing them, so PCIe devices will be enumerated too. 1343 */ 1344 for (uclass_first_device(UCLASS_PCI, &bus); 1345 bus; 1346 uclass_next_device(&bus)) { 1347 ; 1348 } 1349 } 1350