1 /* 2 * Broadcom specific AMBA 3 * Bus subsystem 4 * 5 * Licensed under the GNU/GPL. See COPYING for details. 6 */ 7 8 #include "bcma_private.h" 9 #include <linux/module.h> 10 #include <linux/mmc/sdio_func.h> 11 #include <linux/platform_device.h> 12 #include <linux/pci.h> 13 #include <linux/bcma/bcma.h> 14 #include <linux/slab.h> 15 #include <linux/of_address.h> 16 #include <linux/of_irq.h> 17 #include <linux/of_platform.h> 18 19 MODULE_DESCRIPTION("Broadcom's specific AMBA driver"); 20 MODULE_LICENSE("GPL"); 21 22 /* contains the number the next bus should get. */ 23 static unsigned int bcma_bus_next_num = 0; 24 25 /* bcma_buses_mutex locks the bcma_bus_next_num */ 26 static DEFINE_MUTEX(bcma_buses_mutex); 27 28 static int bcma_bus_match(struct device *dev, struct device_driver *drv); 29 static int bcma_device_probe(struct device *dev); 30 static int bcma_device_remove(struct device *dev); 31 static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env); 32 33 static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf) 34 { 35 struct bcma_device *core = container_of(dev, struct bcma_device, dev); 36 return sprintf(buf, "0x%03X\n", core->id.manuf); 37 } 38 static DEVICE_ATTR_RO(manuf); 39 40 static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf) 41 { 42 struct bcma_device *core = container_of(dev, struct bcma_device, dev); 43 return sprintf(buf, "0x%03X\n", core->id.id); 44 } 45 static DEVICE_ATTR_RO(id); 46 47 static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf) 48 { 49 struct bcma_device *core = container_of(dev, struct bcma_device, dev); 50 return sprintf(buf, "0x%02X\n", core->id.rev); 51 } 52 static DEVICE_ATTR_RO(rev); 53 54 static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf) 55 { 56 struct bcma_device *core = container_of(dev, struct bcma_device, dev); 57 return sprintf(buf, "0x%X\n", core->id.class); 58 } 59 static DEVICE_ATTR_RO(class); 60 61 static struct attribute *bcma_device_attrs[] = { 62 &dev_attr_manuf.attr, 63 &dev_attr_id.attr, 64 &dev_attr_rev.attr, 65 &dev_attr_class.attr, 66 NULL, 67 }; 68 ATTRIBUTE_GROUPS(bcma_device); 69 70 static struct bus_type bcma_bus_type = { 71 .name = "bcma", 72 .match = bcma_bus_match, 73 .probe = bcma_device_probe, 74 .remove = bcma_device_remove, 75 .uevent = bcma_device_uevent, 76 .dev_groups = bcma_device_groups, 77 }; 78 79 static u16 bcma_cc_core_id(struct bcma_bus *bus) 80 { 81 if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) 82 return BCMA_CORE_4706_CHIPCOMMON; 83 return BCMA_CORE_CHIPCOMMON; 84 } 85 86 struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid, 87 u8 unit) 88 { 89 struct bcma_device *core; 90 91 list_for_each_entry(core, &bus->cores, list) { 92 if (core->id.id == coreid && core->core_unit == unit) 93 return core; 94 } 95 return NULL; 96 } 97 EXPORT_SYMBOL_GPL(bcma_find_core_unit); 98 99 bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value, 100 int timeout) 101 { 102 unsigned long deadline = jiffies + timeout; 103 u32 val; 104 105 do { 106 val = bcma_read32(core, reg); 107 if ((val & mask) == value) 108 return true; 109 cpu_relax(); 110 udelay(10); 111 } while (!time_after_eq(jiffies, deadline)); 112 113 bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg); 114 115 return false; 116 } 117 118 static void bcma_release_core_dev(struct device *dev) 119 { 120 struct bcma_device *core = container_of(dev, struct bcma_device, dev); 121 if (core->io_addr) 122 iounmap(core->io_addr); 123 if (core->io_wrap) 124 iounmap(core->io_wrap); 125 kfree(core); 126 } 127 128 static bool bcma_is_core_needed_early(u16 core_id) 129 { 130 switch (core_id) { 131 case BCMA_CORE_NS_NAND: 132 case BCMA_CORE_NS_QSPI: 133 return true; 134 } 135 136 return false; 137 } 138 139 #if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS) 140 static struct device_node *bcma_of_find_child_device(struct platform_device *parent, 141 struct bcma_device *core) 142 { 143 struct device_node *node; 144 u64 size; 145 const __be32 *reg; 146 147 if (!parent || !parent->dev.of_node) 148 return NULL; 149 150 for_each_child_of_node(parent->dev.of_node, node) { 151 reg = of_get_address(node, 0, &size, NULL); 152 if (!reg) 153 continue; 154 if (of_translate_address(node, reg) == core->addr) 155 return node; 156 } 157 return NULL; 158 } 159 160 static int bcma_of_irq_parse(struct platform_device *parent, 161 struct bcma_device *core, 162 struct of_phandle_args *out_irq, int num) 163 { 164 __be32 laddr[1]; 165 int rc; 166 167 if (core->dev.of_node) { 168 rc = of_irq_parse_one(core->dev.of_node, num, out_irq); 169 if (!rc) 170 return rc; 171 } 172 173 out_irq->np = parent->dev.of_node; 174 out_irq->args_count = 1; 175 out_irq->args[0] = num; 176 177 laddr[0] = cpu_to_be32(core->addr); 178 return of_irq_parse_raw(laddr, out_irq); 179 } 180 181 static unsigned int bcma_of_get_irq(struct platform_device *parent, 182 struct bcma_device *core, int num) 183 { 184 struct of_phandle_args out_irq; 185 int ret; 186 187 if (!parent || !parent->dev.of_node) 188 return 0; 189 190 ret = bcma_of_irq_parse(parent, core, &out_irq, num); 191 if (ret) { 192 bcma_debug(core->bus, "bcma_of_get_irq() failed with rc=%d\n", 193 ret); 194 return 0; 195 } 196 197 return irq_create_of_mapping(&out_irq); 198 } 199 200 static void bcma_of_fill_device(struct platform_device *parent, 201 struct bcma_device *core) 202 { 203 struct device_node *node; 204 205 node = bcma_of_find_child_device(parent, core); 206 if (node) 207 core->dev.of_node = node; 208 209 core->irq = bcma_of_get_irq(parent, core, 0); 210 } 211 #else 212 static void bcma_of_fill_device(struct platform_device *parent, 213 struct bcma_device *core) 214 { 215 } 216 static inline unsigned int bcma_of_get_irq(struct platform_device *parent, 217 struct bcma_device *core, int num) 218 { 219 return 0; 220 } 221 #endif /* CONFIG_OF */ 222 223 unsigned int bcma_core_irq(struct bcma_device *core, int num) 224 { 225 struct bcma_bus *bus = core->bus; 226 unsigned int mips_irq; 227 228 switch (bus->hosttype) { 229 case BCMA_HOSTTYPE_PCI: 230 return bus->host_pci->irq; 231 case BCMA_HOSTTYPE_SOC: 232 if (bus->drv_mips.core && num == 0) { 233 mips_irq = bcma_core_mips_irq(core); 234 return mips_irq <= 4 ? mips_irq + 2 : 0; 235 } 236 if (bus->host_pdev) 237 return bcma_of_get_irq(bus->host_pdev, core, num); 238 return 0; 239 case BCMA_HOSTTYPE_SDIO: 240 return 0; 241 } 242 243 return 0; 244 } 245 EXPORT_SYMBOL(bcma_core_irq); 246 247 void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core) 248 { 249 core->dev.release = bcma_release_core_dev; 250 core->dev.bus = &bcma_bus_type; 251 dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index); 252 253 switch (bus->hosttype) { 254 case BCMA_HOSTTYPE_PCI: 255 core->dev.parent = &bus->host_pci->dev; 256 core->dma_dev = &bus->host_pci->dev; 257 core->irq = bus->host_pci->irq; 258 break; 259 case BCMA_HOSTTYPE_SOC: 260 core->dev.dma_mask = &core->dev.coherent_dma_mask; 261 if (bus->host_pdev) { 262 core->dma_dev = &bus->host_pdev->dev; 263 core->dev.parent = &bus->host_pdev->dev; 264 bcma_of_fill_device(bus->host_pdev, core); 265 } else { 266 core->dma_dev = &core->dev; 267 } 268 break; 269 case BCMA_HOSTTYPE_SDIO: 270 break; 271 } 272 } 273 274 struct device *bcma_bus_get_host_dev(struct bcma_bus *bus) 275 { 276 switch (bus->hosttype) { 277 case BCMA_HOSTTYPE_PCI: 278 if (bus->host_pci) 279 return &bus->host_pci->dev; 280 else 281 return NULL; 282 case BCMA_HOSTTYPE_SOC: 283 if (bus->host_pdev) 284 return &bus->host_pdev->dev; 285 else 286 return NULL; 287 case BCMA_HOSTTYPE_SDIO: 288 if (bus->host_sdio) 289 return &bus->host_sdio->dev; 290 else 291 return NULL; 292 } 293 return NULL; 294 } 295 296 void bcma_init_bus(struct bcma_bus *bus) 297 { 298 mutex_lock(&bcma_buses_mutex); 299 bus->num = bcma_bus_next_num++; 300 mutex_unlock(&bcma_buses_mutex); 301 302 INIT_LIST_HEAD(&bus->cores); 303 bus->nr_cores = 0; 304 305 bcma_detect_chip(bus); 306 } 307 308 static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core) 309 { 310 int err; 311 312 err = device_register(&core->dev); 313 if (err) { 314 bcma_err(bus, "Could not register dev for core 0x%03X\n", 315 core->id.id); 316 put_device(&core->dev); 317 return; 318 } 319 core->dev_registered = true; 320 } 321 322 static int bcma_register_devices(struct bcma_bus *bus) 323 { 324 struct bcma_device *core; 325 int err; 326 327 list_for_each_entry(core, &bus->cores, list) { 328 /* We support that cores ourself */ 329 switch (core->id.id) { 330 case BCMA_CORE_4706_CHIPCOMMON: 331 case BCMA_CORE_CHIPCOMMON: 332 case BCMA_CORE_NS_CHIPCOMMON_B: 333 case BCMA_CORE_PCI: 334 case BCMA_CORE_PCIE: 335 case BCMA_CORE_PCIE2: 336 case BCMA_CORE_MIPS_74K: 337 case BCMA_CORE_4706_MAC_GBIT_COMMON: 338 continue; 339 } 340 341 /* Early cores were already registered */ 342 if (bcma_is_core_needed_early(core->id.id)) 343 continue; 344 345 /* Only first GMAC core on BCM4706 is connected and working */ 346 if (core->id.id == BCMA_CORE_4706_MAC_GBIT && 347 core->core_unit > 0) 348 continue; 349 350 bcma_register_core(bus, core); 351 } 352 353 #ifdef CONFIG_BCMA_DRIVER_MIPS 354 if (bus->drv_cc.pflash.present) { 355 err = platform_device_register(&bcma_pflash_dev); 356 if (err) 357 bcma_err(bus, "Error registering parallel flash\n"); 358 } 359 #endif 360 361 #ifdef CONFIG_BCMA_SFLASH 362 if (bus->drv_cc.sflash.present) { 363 err = platform_device_register(&bcma_sflash_dev); 364 if (err) 365 bcma_err(bus, "Error registering serial flash\n"); 366 } 367 #endif 368 369 #ifdef CONFIG_BCMA_NFLASH 370 if (bus->drv_cc.nflash.present) { 371 err = platform_device_register(&bcma_nflash_dev); 372 if (err) 373 bcma_err(bus, "Error registering NAND flash\n"); 374 } 375 #endif 376 err = bcma_gpio_init(&bus->drv_cc); 377 if (err == -ENOTSUPP) 378 bcma_debug(bus, "GPIO driver not activated\n"); 379 else if (err) 380 bcma_err(bus, "Error registering GPIO driver: %i\n", err); 381 382 if (bus->hosttype == BCMA_HOSTTYPE_SOC) { 383 err = bcma_chipco_watchdog_register(&bus->drv_cc); 384 if (err) 385 bcma_err(bus, "Error registering watchdog driver\n"); 386 } 387 388 return 0; 389 } 390 391 void bcma_unregister_cores(struct bcma_bus *bus) 392 { 393 struct bcma_device *core, *tmp; 394 395 list_for_each_entry_safe(core, tmp, &bus->cores, list) { 396 if (!core->dev_registered) 397 continue; 398 list_del(&core->list); 399 device_unregister(&core->dev); 400 } 401 if (bus->hosttype == BCMA_HOSTTYPE_SOC) 402 platform_device_unregister(bus->drv_cc.watchdog); 403 404 /* Now noone uses internally-handled cores, we can free them */ 405 list_for_each_entry_safe(core, tmp, &bus->cores, list) { 406 list_del(&core->list); 407 kfree(core); 408 } 409 } 410 411 int bcma_bus_register(struct bcma_bus *bus) 412 { 413 int err; 414 struct bcma_device *core; 415 struct device *dev; 416 417 /* Scan for devices (cores) */ 418 err = bcma_bus_scan(bus); 419 if (err) { 420 bcma_err(bus, "Failed to scan: %d\n", err); 421 return err; 422 } 423 424 /* Early init CC core */ 425 core = bcma_find_core(bus, bcma_cc_core_id(bus)); 426 if (core) { 427 bus->drv_cc.core = core; 428 bcma_core_chipcommon_early_init(&bus->drv_cc); 429 } 430 431 /* Early init PCIE core */ 432 core = bcma_find_core(bus, BCMA_CORE_PCIE); 433 if (core) { 434 bus->drv_pci[0].core = core; 435 bcma_core_pci_early_init(&bus->drv_pci[0]); 436 } 437 438 dev = bcma_bus_get_host_dev(bus); 439 if (dev) { 440 of_platform_default_populate(dev->of_node, NULL, dev); 441 } 442 443 /* Cores providing flash access go before SPROM init */ 444 list_for_each_entry(core, &bus->cores, list) { 445 if (bcma_is_core_needed_early(core->id.id)) 446 bcma_register_core(bus, core); 447 } 448 449 /* Try to get SPROM */ 450 err = bcma_sprom_get(bus); 451 if (err == -ENOENT) { 452 bcma_err(bus, "No SPROM available\n"); 453 } else if (err) 454 bcma_err(bus, "Failed to get SPROM: %d\n", err); 455 456 /* Init CC core */ 457 core = bcma_find_core(bus, bcma_cc_core_id(bus)); 458 if (core) { 459 bus->drv_cc.core = core; 460 bcma_core_chipcommon_init(&bus->drv_cc); 461 } 462 463 /* Init CC core */ 464 core = bcma_find_core(bus, BCMA_CORE_NS_CHIPCOMMON_B); 465 if (core) { 466 bus->drv_cc_b.core = core; 467 bcma_core_chipcommon_b_init(&bus->drv_cc_b); 468 } 469 470 /* Init MIPS core */ 471 core = bcma_find_core(bus, BCMA_CORE_MIPS_74K); 472 if (core) { 473 bus->drv_mips.core = core; 474 bcma_core_mips_init(&bus->drv_mips); 475 } 476 477 /* Init PCIE core */ 478 core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0); 479 if (core) { 480 bus->drv_pci[0].core = core; 481 bcma_core_pci_init(&bus->drv_pci[0]); 482 } 483 484 /* Init PCIE core */ 485 core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1); 486 if (core) { 487 bus->drv_pci[1].core = core; 488 bcma_core_pci_init(&bus->drv_pci[1]); 489 } 490 491 /* Init PCIe Gen 2 core */ 492 core = bcma_find_core_unit(bus, BCMA_CORE_PCIE2, 0); 493 if (core) { 494 bus->drv_pcie2.core = core; 495 bcma_core_pcie2_init(&bus->drv_pcie2); 496 } 497 498 /* Init GBIT MAC COMMON core */ 499 core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON); 500 if (core) { 501 bus->drv_gmac_cmn.core = core; 502 bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn); 503 } 504 505 /* Register found cores */ 506 bcma_register_devices(bus); 507 508 bcma_info(bus, "Bus registered\n"); 509 510 return 0; 511 } 512 513 void bcma_bus_unregister(struct bcma_bus *bus) 514 { 515 int err; 516 517 err = bcma_gpio_unregister(&bus->drv_cc); 518 if (err == -EBUSY) 519 bcma_err(bus, "Some GPIOs are still in use.\n"); 520 else if (err) 521 bcma_err(bus, "Can not unregister GPIO driver: %i\n", err); 522 523 bcma_core_chipcommon_b_free(&bus->drv_cc_b); 524 525 bcma_unregister_cores(bus); 526 } 527 528 /* 529 * This is a special version of bus registration function designed for SoCs. 530 * It scans bus and performs basic initialization of main cores only. 531 * Please note it requires memory allocation, however it won't try to sleep. 532 */ 533 int __init bcma_bus_early_register(struct bcma_bus *bus) 534 { 535 int err; 536 struct bcma_device *core; 537 538 /* Scan for devices (cores) */ 539 err = bcma_bus_scan(bus); 540 if (err) { 541 bcma_err(bus, "Failed to scan bus: %d\n", err); 542 return -1; 543 } 544 545 /* Early init CC core */ 546 core = bcma_find_core(bus, bcma_cc_core_id(bus)); 547 if (core) { 548 bus->drv_cc.core = core; 549 bcma_core_chipcommon_early_init(&bus->drv_cc); 550 } 551 552 /* Early init MIPS core */ 553 core = bcma_find_core(bus, BCMA_CORE_MIPS_74K); 554 if (core) { 555 bus->drv_mips.core = core; 556 bcma_core_mips_early_init(&bus->drv_mips); 557 } 558 559 bcma_info(bus, "Early bus registered\n"); 560 561 return 0; 562 } 563 564 #ifdef CONFIG_PM 565 int bcma_bus_suspend(struct bcma_bus *bus) 566 { 567 struct bcma_device *core; 568 569 list_for_each_entry(core, &bus->cores, list) { 570 struct device_driver *drv = core->dev.driver; 571 if (drv) { 572 struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv); 573 if (adrv->suspend) 574 adrv->suspend(core); 575 } 576 } 577 return 0; 578 } 579 580 int bcma_bus_resume(struct bcma_bus *bus) 581 { 582 struct bcma_device *core; 583 584 /* Init CC core */ 585 if (bus->drv_cc.core) { 586 bus->drv_cc.setup_done = false; 587 bcma_core_chipcommon_init(&bus->drv_cc); 588 } 589 590 list_for_each_entry(core, &bus->cores, list) { 591 struct device_driver *drv = core->dev.driver; 592 if (drv) { 593 struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv); 594 if (adrv->resume) 595 adrv->resume(core); 596 } 597 } 598 599 return 0; 600 } 601 #endif 602 603 int __bcma_driver_register(struct bcma_driver *drv, struct module *owner) 604 { 605 drv->drv.name = drv->name; 606 drv->drv.bus = &bcma_bus_type; 607 drv->drv.owner = owner; 608 609 return driver_register(&drv->drv); 610 } 611 EXPORT_SYMBOL_GPL(__bcma_driver_register); 612 613 void bcma_driver_unregister(struct bcma_driver *drv) 614 { 615 driver_unregister(&drv->drv); 616 } 617 EXPORT_SYMBOL_GPL(bcma_driver_unregister); 618 619 static int bcma_bus_match(struct device *dev, struct device_driver *drv) 620 { 621 struct bcma_device *core = container_of(dev, struct bcma_device, dev); 622 struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv); 623 const struct bcma_device_id *cid = &core->id; 624 const struct bcma_device_id *did; 625 626 for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) { 627 if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) && 628 (did->id == cid->id || did->id == BCMA_ANY_ID) && 629 (did->rev == cid->rev || did->rev == BCMA_ANY_REV) && 630 (did->class == cid->class || did->class == BCMA_ANY_CLASS)) 631 return 1; 632 } 633 return 0; 634 } 635 636 static int bcma_device_probe(struct device *dev) 637 { 638 struct bcma_device *core = container_of(dev, struct bcma_device, dev); 639 struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver, 640 drv); 641 int err = 0; 642 643 if (adrv->probe) 644 err = adrv->probe(core); 645 646 return err; 647 } 648 649 static int bcma_device_remove(struct device *dev) 650 { 651 struct bcma_device *core = container_of(dev, struct bcma_device, dev); 652 struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver, 653 drv); 654 655 if (adrv->remove) 656 adrv->remove(core); 657 658 return 0; 659 } 660 661 static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env) 662 { 663 struct bcma_device *core = container_of(dev, struct bcma_device, dev); 664 665 return add_uevent_var(env, 666 "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X", 667 core->id.manuf, core->id.id, 668 core->id.rev, core->id.class); 669 } 670 671 static unsigned int bcma_bus_registered; 672 673 /* 674 * If built-in, bus has to be registered early, before any driver calls 675 * bcma_driver_register. 676 * Otherwise registering driver would trigger BUG in driver_register. 677 */ 678 static int __init bcma_init_bus_register(void) 679 { 680 int err; 681 682 if (bcma_bus_registered) 683 return 0; 684 685 err = bus_register(&bcma_bus_type); 686 if (!err) 687 bcma_bus_registered = 1; 688 689 return err; 690 } 691 #ifndef MODULE 692 fs_initcall(bcma_init_bus_register); 693 #endif 694 695 /* Main initialization has to be done with SPI/mtd/NAND/SPROM available */ 696 static int __init bcma_modinit(void) 697 { 698 int err; 699 700 err = bcma_init_bus_register(); 701 if (err) 702 return err; 703 704 err = bcma_host_soc_register_driver(); 705 if (err) { 706 pr_err("SoC host initialization failed\n"); 707 err = 0; 708 } 709 #ifdef CONFIG_BCMA_HOST_PCI 710 err = bcma_host_pci_init(); 711 if (err) { 712 pr_err("PCI host initialization failed\n"); 713 err = 0; 714 } 715 #endif 716 717 return err; 718 } 719 module_init(bcma_modinit); 720 721 static void __exit bcma_modexit(void) 722 { 723 #ifdef CONFIG_BCMA_HOST_PCI 724 bcma_host_pci_exit(); 725 #endif 726 bcma_host_soc_unregister_driver(); 727 bus_unregister(&bcma_bus_type); 728 } 729 module_exit(bcma_modexit) 730