/*
 * Broadcom specific AMBA
 * Bus subsystem
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/bcma/bcma.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");

/* contains the number the next bus should get. */
static unsigned int bcma_bus_next_num = 0;

/* bcma_buses_mutex locks the bcma_bus_next_num */
static DEFINE_MUTEX(bcma_buses_mutex);

static int bcma_bus_match(struct device *dev, struct device_driver *drv);
static int bcma_device_probe(struct device *dev);
static int bcma_device_remove(struct device *dev);
static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);

static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%03X\n", core->id.manuf);
}
static DEVICE_ATTR_RO(manuf);

static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%03X\n", core->id.id);
}
static DEVICE_ATTR_RO(id);

static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%02X\n", core->id.rev);
}
static DEVICE_ATTR_RO(rev);

static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%X\n", core->id.class);
}
static DEVICE_ATTR_RO(class);

static struct attribute *bcma_device_attrs[] = {
	&dev_attr_manuf.attr,
	&dev_attr_id.attr,
	&dev_attr_rev.attr,
	&dev_attr_class.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bcma_device);

static struct bus_type bcma_bus_type = {
	.name = "bcma",
	.match = bcma_bus_match,
	.probe = bcma_device_probe,
	.remove = bcma_device_remove,
	.uevent = bcma_device_uevent,
	.dev_groups = bcma_device_groups,
};

static u16 bcma_cc_core_id(struct bcma_bus *bus)
{
	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
		return BCMA_CORE_4706_CHIPCOMMON;
	return BCMA_CORE_CHIPCOMMON;
}

struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
					u8 unit)
{
	struct bcma_device *core;

	list_for_each_entry(core, &bus->cores, list) {
		if (core->id.id == coreid && core->core_unit == unit)
			return core;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(bcma_find_core_unit);

bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
		     int timeout)
{
	unsigned long deadline = jiffies + timeout;
	u32 val;

	do {
		val = bcma_read32(core, reg);
		if ((val & mask) == value)
			return true;
		cpu_relax();
		udelay(10);
	} while (!time_after_eq(jiffies, deadline));

	bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);

	return false;
}

static void bcma_release_core_dev(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	if (core->io_addr)
		iounmap(core->io_addr);
	if (core->io_wrap)
		iounmap(core->io_wrap);
	kfree(core);
}

static bool bcma_is_core_needed_early(u16 core_id)
{
	switch (core_id) {
	case BCMA_CORE_NS_NAND:
	case BCMA_CORE_NS_QSPI:
		return true;
	}

	return false;
}

#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
						     struct bcma_device *core)
{
	struct device_node *node;
	u64 size;
	const __be32 *reg;

	if (!parent || !parent->dev.of_node)
		return NULL;

	for_each_child_of_node(parent->dev.of_node, node) {
		reg = of_get_address(node, 0, &size, NULL);
		if (!reg)
			continue;
		if (of_translate_address(node, reg) == core->addr)
			return node;
	}
	return NULL;
}

static int bcma_of_irq_parse(struct platform_device *parent,
			     struct bcma_device *core,
			     struct of_phandle_args *out_irq, int num)
{
	__be32 laddr[1];
	int rc;

	if (core->dev.of_node) {
		rc = of_irq_parse_one(core->dev.of_node, num, out_irq);
		if (!rc)
			return rc;
	}

	out_irq->np = parent->dev.of_node;
	out_irq->args_count = 1;
	out_irq->args[0] = num;

	laddr[0] = cpu_to_be32(core->addr);
	return of_irq_parse_raw(laddr, out_irq);
}

static unsigned int bcma_of_get_irq(struct platform_device *parent,
				    struct bcma_device *core, int num)
{
	struct of_phandle_args out_irq;
	int ret;

	if (!parent || !parent->dev.of_node)
		return 0;

	ret = bcma_of_irq_parse(parent, core, &out_irq, num);
	if (ret) {
		bcma_debug(core->bus, "bcma_of_get_irq() failed with rc=%d\n",
			   ret);
		return 0;
	}

	return irq_create_of_mapping(&out_irq);
}

static void bcma_of_fill_device(struct platform_device *parent,
				struct bcma_device *core)
{
	struct device_node *node;

	node = bcma_of_find_child_device(parent, core);
	if (node)
		core->dev.of_node = node;

	core->irq = bcma_of_get_irq(parent, core, 0);
}
#else
static void bcma_of_fill_device(struct platform_device *parent,
				struct bcma_device *core)
{
}
static inline unsigned int bcma_of_get_irq(struct platform_device *parent,
					   struct bcma_device *core, int num)
{
	return 0;
}
#endif /* CONFIG_OF */

unsigned int bcma_core_irq(struct bcma_device *core, int num)
{
	struct bcma_bus *bus = core->bus;
	unsigned int mips_irq;

	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		return bus->host_pci->irq;
	case BCMA_HOSTTYPE_SOC:
		if (bus->drv_mips.core && num == 0) {
			mips_irq = bcma_core_mips_irq(core);
			return mips_irq <= 4 ? mips_irq + 2 : 0;
		}
		if (bus->host_pdev)
			return bcma_of_get_irq(bus->host_pdev, core, num);
		return 0;
	case BCMA_HOSTTYPE_SDIO:
		return 0;
	}

	return 0;
}
EXPORT_SYMBOL(bcma_core_irq);

void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
{
	core->dev.release = bcma_release_core_dev;
	core->dev.bus = &bcma_bus_type;
	dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);

	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		core->dev.parent = &bus->host_pci->dev;
		core->dma_dev = &bus->host_pci->dev;
		core->irq = bus->host_pci->irq;
		break;
	case BCMA_HOSTTYPE_SOC:
		core->dev.dma_mask = &core->dev.coherent_dma_mask;
		if (bus->host_pdev) {
			core->dma_dev = &bus->host_pdev->dev;
			core->dev.parent = &bus->host_pdev->dev;
			bcma_of_fill_device(bus->host_pdev, core);
		} else {
			core->dma_dev = &core->dev;
		}
		break;
	case BCMA_HOSTTYPE_SDIO:
		break;
	}
}

struct device *bcma_bus_get_host_dev(struct bcma_bus *bus)
{
	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		if (bus->host_pci)
			return &bus->host_pci->dev;
		else
			return NULL;
	case BCMA_HOSTTYPE_SOC:
		if (bus->host_pdev)
			return &bus->host_pdev->dev;
		else
			return NULL;
	case BCMA_HOSTTYPE_SDIO:
		if (bus->host_sdio)
			return &bus->host_sdio->dev;
		else
			return NULL;
	}
	return NULL;
}

void bcma_init_bus(struct bcma_bus *bus)
{
	mutex_lock(&bcma_buses_mutex);
	bus->num = bcma_bus_next_num++;
	mutex_unlock(&bcma_buses_mutex);

	INIT_LIST_HEAD(&bus->cores);
	bus->nr_cores = 0;

	bcma_detect_chip(bus);
}

static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
{
	int err;

	err = device_register(&core->dev);
	if (err) {
		bcma_err(bus, "Could not register dev for core 0x%03X\n",
			 core->id.id);
		put_device(&core->dev);
		return;
	}
	core->dev_registered = true;
}

static int bcma_register_devices(struct bcma_bus *bus)
{
	struct bcma_device *core;
	int err;

	list_for_each_entry(core, &bus->cores, list) {
		/* We handle these cores ourselves */
		switch (core->id.id) {
		case BCMA_CORE_4706_CHIPCOMMON:
		case BCMA_CORE_CHIPCOMMON:
		case BCMA_CORE_NS_CHIPCOMMON_B:
		case BCMA_CORE_PCI:
		case BCMA_CORE_PCIE:
		case BCMA_CORE_PCIE2:
		case BCMA_CORE_MIPS_74K:
		case BCMA_CORE_4706_MAC_GBIT_COMMON:
			continue;
		}

		/* Early cores were already registered */
		if (bcma_is_core_needed_early(core->id.id))
			continue;

		/* Only first GMAC core on BCM4706 is connected and working */
		if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
		    core->core_unit > 0)
			continue;

		bcma_register_core(bus, core);
	}

#ifdef CONFIG_BCMA_DRIVER_MIPS
	if (bus->drv_cc.pflash.present) {
		err = platform_device_register(&bcma_pflash_dev);
		if (err)
			bcma_err(bus, "Error registering parallel flash\n");
	}
#endif

#ifdef CONFIG_BCMA_SFLASH
	if (bus->drv_cc.sflash.present) {
		err = platform_device_register(&bcma_sflash_dev);
		if (err)
			bcma_err(bus, "Error registering serial flash\n");
	}
#endif

#ifdef CONFIG_BCMA_NFLASH
	if (bus->drv_cc.nflash.present) {
		err = platform_device_register(&bcma_nflash_dev);
		if (err)
			bcma_err(bus, "Error registering NAND flash\n");
	}
#endif
	err = bcma_gpio_init(&bus->drv_cc);
	if (err == -ENOTSUPP)
		bcma_debug(bus, "GPIO driver not activated\n");
	else if (err)
		bcma_err(bus, "Error registering GPIO driver: %i\n", err);

	if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
		err = bcma_chipco_watchdog_register(&bus->drv_cc);
		if (err)
			bcma_err(bus, "Error registering watchdog driver\n");
	}

	return 0;
}

void bcma_unregister_cores(struct bcma_bus *bus)
{
	struct bcma_device *core, *tmp;

	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
		if (!core->dev_registered)
			continue;
		list_del(&core->list);
		device_unregister(&core->dev);
	}
	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
		platform_device_unregister(bus->drv_cc.watchdog);

	/* Now no one uses internally-handled cores, we can free them */
	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
		list_del(&core->list);
		kfree(core);
	}
}

int bcma_bus_register(struct bcma_bus *bus)
{
	int err;
	struct bcma_device *core;
	struct device *dev;

	/* Scan for devices (cores) */
	err = bcma_bus_scan(bus);
	if (err) {
		bcma_err(bus, "Failed to scan: %d\n", err);
		return err;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Early init PCIE core */
	core = bcma_find_core(bus, BCMA_CORE_PCIE);
	if (core) {
		bus->drv_pci[0].core = core;
		bcma_core_pci_early_init(&bus->drv_pci[0]);
	}

	dev = bcma_bus_get_host_dev(bus);
	/* TODO: remove the IS_BUILTIN(CONFIG_BCMA) check when
	 * of_default_bus_match_table is exported or in some other way
	 * accessible. This is just a temporary workaround.
	 */
	if (IS_BUILTIN(CONFIG_BCMA) && dev) {
		of_platform_populate(dev->of_node, of_default_bus_match_table,
				     NULL, dev);
	}

	/* Cores providing flash access go before SPROM init */
	list_for_each_entry(core, &bus->cores, list) {
		if (bcma_is_core_needed_early(core->id.id))
			bcma_register_core(bus, core);
	}

	/* Try to get SPROM */
	err = bcma_sprom_get(bus);
	if (err == -ENOENT) {
		bcma_err(bus, "No SPROM available\n");
	} else if (err)
		bcma_err(bus, "Failed to get SPROM: %d\n", err);

	/* Init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	/* Init NS ChipCommon B core */
	core = bcma_find_core(bus, BCMA_CORE_NS_CHIPCOMMON_B);
	if (core) {
		bus->drv_cc_b.core = core;
		bcma_core_chipcommon_b_init(&bus->drv_cc_b);
	}

	/* Init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_init(&bus->drv_mips);
	}

	/* Init PCIE core (unit 0) */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
	if (core) {
		bus->drv_pci[0].core = core;
		bcma_core_pci_init(&bus->drv_pci[0]);
	}

	/* Init PCIE core (unit 1) */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
	if (core) {
		bus->drv_pci[1].core = core;
		bcma_core_pci_init(&bus->drv_pci[1]);
	}

	/* Init PCIe Gen 2 core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE2, 0);
	if (core) {
		bus->drv_pcie2.core = core;
		bcma_core_pcie2_init(&bus->drv_pcie2);
	}

	/* Init GBIT MAC COMMON core */
	core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
	if (core) {
		bus->drv_gmac_cmn.core = core;
		bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
	}

	/* Register found cores */
	bcma_register_devices(bus);

	bcma_info(bus, "Bus registered\n");

	return 0;
}

void bcma_bus_unregister(struct bcma_bus *bus)
{
	int err;

	err = bcma_gpio_unregister(&bus->drv_cc);
	if (err == -EBUSY)
		bcma_err(bus, "Some GPIOs are still in use.\n");
	else if (err)
		bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);

	bcma_core_chipcommon_b_free(&bus->drv_cc_b);

	bcma_unregister_cores(bus);
}

/*
 * This is a special version of the bus registration function designed for SoCs.
 * It scans the bus and performs basic initialization of the main cores only.
 * Please note it requires memory allocation, but it won't try to sleep.
 */
int __init bcma_bus_early_register(struct bcma_bus *bus)
{
	int err;
	struct bcma_device *core;

	/* Scan for devices (cores) */
	err = bcma_bus_scan(bus);
	if (err) {
		bcma_err(bus, "Failed to scan bus: %d\n", err);
		return -1;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Early init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_early_init(&bus->drv_mips);
	}

	bcma_info(bus, "Early bus registered\n");

	return 0;
}

#ifdef CONFIG_PM
int bcma_bus_suspend(struct bcma_bus *bus)
{
	struct bcma_device *core;

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;
		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
			if (adrv->suspend)
				adrv->suspend(core);
		}
	}
	return 0;
}

int bcma_bus_resume(struct bcma_bus *bus)
{
	struct bcma_device *core;

	/* Init CC core */
	if (bus->drv_cc.core) {
		bus->drv_cc.setup_done = false;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;
		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
			if (adrv->resume)
				adrv->resume(core);
		}
	}

	return 0;
}
#endif

int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
	drv->drv.name = drv->name;
	drv->drv.bus = &bcma_bus_type;
	drv->drv.owner = owner;

	return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(__bcma_driver_register);

void bcma_driver_unregister(struct bcma_driver *drv)
{
	driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(bcma_driver_unregister);

static int bcma_bus_match(struct device *dev, struct device_driver *drv)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
	const struct bcma_device_id *cid = &core->id;
	const struct bcma_device_id *did;

	for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) {
		if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) &&
		    (did->id == cid->id || did->id == BCMA_ANY_ID) &&
		    (did->rev == cid->rev || did->rev == BCMA_ANY_REV) &&
		    (did->class == cid->class || did->class == BCMA_ANY_CLASS))
			return 1;
	}
	return 0;
}

static int bcma_device_probe(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
						drv);
	int err = 0;

	if (adrv->probe)
		err = adrv->probe(core);

	return err;
}

static int bcma_device_remove(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
						drv);

	if (adrv->remove)
		adrv->remove(core);

	return 0;
}

static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	return add_uevent_var(env,
			      "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
			      core->id.manuf, core->id.id,
			      core->id.rev, core->id.class);
}

static int __init bcma_modinit(void)
{
	int err;

	err = bus_register(&bcma_bus_type);
	if (err)
		return err;

	err = bcma_host_soc_register_driver();
	if (err) {
		pr_err("SoC host initialization failed\n");
		err = 0;
	}
#ifdef CONFIG_BCMA_HOST_PCI
	err = bcma_host_pci_init();
	if (err) {
		pr_err("PCI host initialization failed\n");
		err = 0;
	}
#endif

	return err;
}
fs_initcall(bcma_modinit);

static void __exit bcma_modexit(void)
{
#ifdef CONFIG_BCMA_HOST_PCI
	bcma_host_pci_exit();
#endif
	bcma_host_soc_unregister_driver();
	bus_unregister(&bcma_bus_type);
}
module_exit(bcma_modexit)
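
/*
 * Illustrative sketch only, not part of the original file: how a client
 * driver would attach to cores exposed by this bus via bcma_bus_match(),
 * bcma_device_probe() and __bcma_driver_register() above. The example_*
 * identifiers and the chosen core id are hypothetical placeholders; a real
 * driver lives in its own module and uses the table macros and
 * module_bcma_driver() from <linux/bcma/bcma.h>. Guarded by #if 0 so it is
 * never compiled here.
 */
#if 0
#include <linux/bcma/bcma.h>

static const struct bcma_device_id example_bcma_tbl[] = {
	/* match any revision/class of the hypothetical target core */
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, BCMA_ANY_REV, BCMA_ANY_CLASS),
	{},
};
MODULE_DEVICE_TABLE(bcma, example_bcma_tbl);

static int example_probe(struct bcma_device *core)
{
	/* core->irq and core->dma_dev were filled in by bcma_prepare_core() */
	dev_info(&core->dev, "probed core 0x%03X (unit %d), irq %u\n",
		 core->id.id, core->core_unit, bcma_core_irq(core, 0));
	return 0;
}

static void example_remove(struct bcma_device *core)
{
	/* release per-core resources here */
}

static struct bcma_driver example_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= example_bcma_tbl,
	.probe		= example_probe,
	.remove		= example_remove,
};
module_bcma_driver(example_driver);
#endif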