/*
 * Broadcom specific AMBA
 * Bus subsystem
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/bcma/bcma.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");

/* contains the number the next bus should get. */
static unsigned int bcma_bus_next_num;

/* bcma_buses_mutex locks the bcma_bus_next_num */
static DEFINE_MUTEX(bcma_buses_mutex);

static int bcma_bus_match(struct device *dev, struct device_driver *drv);
static int bcma_device_probe(struct device *dev);
static void bcma_device_remove(struct device *dev);
static int bcma_device_uevent(const struct device *dev, struct kobj_uevent_env *env);

static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%03X\n", core->id.manuf);
}
static DEVICE_ATTR_RO(manuf);

static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%03X\n", core->id.id);
}
static DEVICE_ATTR_RO(id);

static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%02X\n", core->id.rev);
}
static DEVICE_ATTR_RO(rev);

static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%X\n", core->id.class);
}
static DEVICE_ATTR_RO(class);

static struct attribute *bcma_device_attrs[] = {
	&dev_attr_manuf.attr,
	&dev_attr_id.attr,
	&dev_attr_rev.attr,
	&dev_attr_class.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bcma_device);

static struct bus_type bcma_bus_type = {
	.name		= "bcma",
	.match		= bcma_bus_match,
	.probe		= bcma_device_probe,
	.remove		= bcma_device_remove,
	.uevent		= bcma_device_uevent,
	.dev_groups	= bcma_device_groups,
};

static u16 bcma_cc_core_id(struct bcma_bus *bus)
{
	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
		return BCMA_CORE_4706_CHIPCOMMON;
	return BCMA_CORE_CHIPCOMMON;
}

struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
					u8 unit)
{
	struct bcma_device *core;

	list_for_each_entry(core, &bus->cores, list) {
		if (core->id.id == coreid && core->core_unit == unit)
			return core;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(bcma_find_core_unit);

bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
		     int timeout)
{
	unsigned long deadline = jiffies + timeout;
	u32 val;

	do {
		val = bcma_read32(core, reg);
		if ((val & mask) == value)
			return true;
		cpu_relax();
		udelay(10);
	} while (!time_after_eq(jiffies, deadline));

	bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);

	return false;
}

static void bcma_release_core_dev(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
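
	/* Unmap any MMIO areas that were ioremapped at scan time before freeing the core */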
	if (core->io_addr)
		iounmap(core->io_addr);
	if (core->io_wrap)
		iounmap(core->io_wrap);
	kfree(core);
}

static bool bcma_is_core_needed_early(u16 core_id)
{
	switch (core_id) {
	case BCMA_CORE_NS_NAND:
	case BCMA_CORE_NS_QSPI:
		return true;
	}

	return false;
}

static struct device_node *bcma_of_find_child_device(struct device *parent,
						     struct bcma_device *core)
{
	struct device_node *node;
	u64 size;
	const __be32 *reg;

	if (!parent->of_node)
		return NULL;

	for_each_child_of_node(parent->of_node, node) {
		reg = of_get_address(node, 0, &size, NULL);
		if (!reg)
			continue;
		if (of_translate_address(node, reg) == core->addr)
			return node;
	}
	return NULL;
}

static int bcma_of_irq_parse(struct device *parent,
			     struct bcma_device *core,
			     struct of_phandle_args *out_irq, int num)
{
	__be32 laddr[1];
	int rc;

	if (core->dev.of_node) {
		rc = of_irq_parse_one(core->dev.of_node, num, out_irq);
		if (!rc)
			return rc;
	}

	out_irq->np = parent->of_node;
	out_irq->args_count = 1;
	out_irq->args[0] = num;

	laddr[0] = cpu_to_be32(core->addr);
	return of_irq_parse_raw(laddr, out_irq);
}

static unsigned int bcma_of_get_irq(struct device *parent,
				    struct bcma_device *core, int num)
{
	struct of_phandle_args out_irq;
	int ret;

	if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent->of_node)
		return 0;

	ret = bcma_of_irq_parse(parent, core, &out_irq, num);
	if (ret) {
		bcma_debug(core->bus, "bcma_of_get_irq() failed with rc=%d\n",
			   ret);
		return 0;
	}

	return irq_create_of_mapping(&out_irq);
}

static void bcma_of_fill_device(struct device *parent,
				struct bcma_device *core)
{
	struct device_node *node;

	node = bcma_of_find_child_device(parent, core);
	if (node)
		core->dev.of_node = node;

	core->irq = bcma_of_get_irq(parent, core, 0);

	of_dma_configure(&core->dev, node, false);
}

unsigned int bcma_core_irq(struct bcma_device *core, int num)
{
	struct bcma_bus *bus = core->bus;
	unsigned int mips_irq;

	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		return bus->host_pci->irq;
	case BCMA_HOSTTYPE_SOC:
		if (bus->drv_mips.core && num == 0) {
			mips_irq = bcma_core_mips_irq(core);
			return mips_irq <= 4 ?
				mips_irq + 2 : 0;
		}
		if (bus->dev)
			return bcma_of_get_irq(bus->dev, core, num);
		return 0;
	case BCMA_HOSTTYPE_SDIO:
		return 0;
	}

	return 0;
}
EXPORT_SYMBOL(bcma_core_irq);

void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
{
	device_initialize(&core->dev);
	core->dev.release = bcma_release_core_dev;
	core->dev.bus = &bcma_bus_type;
	dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
	core->dev.parent = bus->dev;
	if (bus->dev)
		bcma_of_fill_device(bus->dev, core);

	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		core->dma_dev = bus->dev;
		core->irq = bus->host_pci->irq;
		break;
	case BCMA_HOSTTYPE_SOC:
		if (IS_ENABLED(CONFIG_OF) && bus->dev) {
			core->dma_dev = bus->dev;
		} else {
			core->dev.dma_mask = &core->dev.coherent_dma_mask;
			core->dma_dev = &core->dev;
		}
		break;
	case BCMA_HOSTTYPE_SDIO:
		break;
	}
}

void bcma_init_bus(struct bcma_bus *bus)
{
	mutex_lock(&bcma_buses_mutex);
	bus->num = bcma_bus_next_num++;
	mutex_unlock(&bcma_buses_mutex);

	INIT_LIST_HEAD(&bus->cores);
	bus->nr_cores = 0;

	bcma_detect_chip(bus);
}

static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
{
	int err;

	err = device_add(&core->dev);
	if (err) {
		bcma_err(bus, "Could not register dev for core 0x%03X\n",
			 core->id.id);
		return;
	}
	core->dev_registered = true;
}

static int bcma_register_devices(struct bcma_bus *bus)
{
	struct bcma_device *core;
	int err;

	list_for_each_entry(core, &bus->cores, list) {
		/* We support these cores ourselves */
		switch (core->id.id) {
		case BCMA_CORE_4706_CHIPCOMMON:
		case BCMA_CORE_CHIPCOMMON:
		case BCMA_CORE_NS_CHIPCOMMON_B:
		case BCMA_CORE_PCI:
		case BCMA_CORE_PCIE:
		case BCMA_CORE_PCIE2:
		case BCMA_CORE_MIPS_74K:
		case BCMA_CORE_4706_MAC_GBIT_COMMON:
			continue;
		}

		/* Early cores were already registered */
		if (bcma_is_core_needed_early(core->id.id))
			continue;

		/* Only first GMAC core on BCM4706 is connected and working */
		if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
		    core->core_unit > 0)
			continue;

		bcma_register_core(bus, core);
	}

#ifdef CONFIG_BCMA_PFLASH
	if (bus->drv_cc.pflash.present) {
		err = platform_device_register(&bcma_pflash_dev);
		if (err)
			bcma_err(bus, "Error registering parallel flash\n");
	}
#endif

#ifdef CONFIG_BCMA_SFLASH
	if (bus->drv_cc.sflash.present) {
		err = platform_device_register(&bcma_sflash_dev);
		if (err)
			bcma_err(bus, "Error registering serial flash\n");
	}
#endif

#ifdef CONFIG_BCMA_NFLASH
	if (bus->drv_cc.nflash.present) {
		err = platform_device_register(&bcma_nflash_dev);
		if (err)
			bcma_err(bus, "Error registering NAND flash\n");
	}
#endif
	err = bcma_gpio_init(&bus->drv_cc);
	if (err == -ENOTSUPP)
		bcma_debug(bus, "GPIO driver not activated\n");
	else if (err) {
		bcma_err(bus, "Error registering GPIO driver: %i\n", err);
		return err;
	}

	if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
		err = bcma_chipco_watchdog_register(&bus->drv_cc);
		if (err)
			bcma_err(bus, "Error registering watchdog driver\n");
	}

	return 0;
}

void bcma_unregister_cores(struct bcma_bus *bus)
{
	struct bcma_device *core, *tmp;

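	/* First unregister the cores that were exposed as devices on the bus */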
	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
		if (!core->dev_registered)
			continue;
		list_del(&core->list);
		device_unregister(&core->dev);
	}
	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
		platform_device_unregister(bus->drv_cc.watchdog);

	/* Now that no one uses the internally-handled cores, we can free them */
	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
		list_del(&core->list);
		put_device(&core->dev);
	}
}

int bcma_bus_register(struct bcma_bus *bus)
{
	int err;
	struct bcma_device *core;

	/* Scan for devices (cores) */
	err = bcma_bus_scan(bus);
	if (err) {
		bcma_err(bus, "Failed to scan: %d\n", err);
		return err;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Early init PCIE core */
	core = bcma_find_core(bus, BCMA_CORE_PCIE);
	if (core) {
		bus->drv_pci[0].core = core;
		bcma_core_pci_early_init(&bus->drv_pci[0]);
	}

	if (bus->dev)
		of_platform_default_populate(bus->dev->of_node, NULL, bus->dev);

	/* Cores providing flash access go before SPROM init */
	list_for_each_entry(core, &bus->cores, list) {
		if (bcma_is_core_needed_early(core->id.id))
			bcma_register_core(bus, core);
	}

	/* Try to get SPROM */
	err = bcma_sprom_get(bus);
	if (err == -ENOENT) {
		bcma_err(bus, "No SPROM available\n");
	} else if (err)
		bcma_err(bus, "Failed to get SPROM: %d\n", err);

	/* Init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	/* Init CC B core */
	core = bcma_find_core(bus, BCMA_CORE_NS_CHIPCOMMON_B);
	if (core) {
		bus->drv_cc_b.core = core;
		bcma_core_chipcommon_b_init(&bus->drv_cc_b);
	}

	/* Init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_init(&bus->drv_mips);
	}

	/* Init PCIE core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
	if (core) {
		bus->drv_pci[0].core = core;
		bcma_core_pci_init(&bus->drv_pci[0]);
	}

	/* Init PCIE core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
	if (core) {
		bus->drv_pci[1].core = core;
		bcma_core_pci_init(&bus->drv_pci[1]);
	}

	/* Init PCIe Gen 2 core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE2, 0);
	if (core) {
		bus->drv_pcie2.core = core;
		bcma_core_pcie2_init(&bus->drv_pcie2);
	}

	/* Init GBIT MAC COMMON core */
	core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
	if (core) {
		bus->drv_gmac_cmn.core = core;
		bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
	}

	/* Register found cores */
	bcma_register_devices(bus);

	bcma_info(bus, "Bus registered\n");

	return 0;
}

void bcma_bus_unregister(struct bcma_bus *bus)
{
	int err;

	err = bcma_gpio_unregister(&bus->drv_cc);
	if (err == -EBUSY)
		bcma_err(bus, "Some GPIOs are still in use.\n");
	else if (err)
		bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);

	bcma_core_chipcommon_b_free(&bus->drv_cc_b);

	bcma_unregister_cores(bus);
}

/*
 * This is a special version of the bus registration function, designed for SoCs.
 * It scans the bus and performs basic initialization of the main cores only.
 * Note that it requires memory allocation, but it will not try to sleep.
 */
int __init bcma_bus_early_register(struct bcma_bus *bus)
{
	int err;
	struct bcma_device *core;

	/* Scan for devices (cores) */
	err = bcma_bus_scan(bus);
	if (err) {
		bcma_err(bus, "Failed to scan bus: %d\n", err);
		return -1;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Early init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_early_init(&bus->drv_mips);
	}

	bcma_info(bus, "Early bus registered\n");

	return 0;
}

#ifdef CONFIG_PM
int bcma_bus_suspend(struct bcma_bus *bus)
{
	struct bcma_device *core;

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;
		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
			if (adrv->suspend)
				adrv->suspend(core);
		}
	}
	return 0;
}

int bcma_bus_resume(struct bcma_bus *bus)
{
	struct bcma_device *core;

	/* Init CC core */
	if (bus->drv_cc.core) {
		bus->drv_cc.setup_done = false;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;
		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
			if (adrv->resume)
				adrv->resume(core);
		}
	}

	return 0;
}
#endif

int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
	drv->drv.name = drv->name;
	drv->drv.bus = &bcma_bus_type;
	drv->drv.owner = owner;

	return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(__bcma_driver_register);

void bcma_driver_unregister(struct bcma_driver *drv)
{
	driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(bcma_driver_unregister);

static int bcma_bus_match(struct device *dev, struct device_driver *drv)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
	const struct bcma_device_id *cid = &core->id;
	const struct bcma_device_id *did;

	for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) {
		if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) &&
		    (did->id == cid->id || did->id == BCMA_ANY_ID) &&
		    (did->rev == cid->rev || did->rev == BCMA_ANY_REV) &&
		    (did->class == cid->class || did->class == BCMA_ANY_CLASS))
			return 1;
	}
	return 0;
}

static int bcma_device_probe(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
						drv);
	int err = 0;

	get_device(dev);
	if (adrv->probe)
		err = adrv->probe(core);
	if (err)
		put_device(dev);

	return err;
}

static void bcma_device_remove(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
						drv);

	if (adrv->remove)
		adrv->remove(core);
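	/* Balance the device reference taken in bcma_device_probe() */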
	put_device(dev);
}

static int bcma_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct bcma_device *core = container_of_const(dev, struct bcma_device, dev);

	return add_uevent_var(env,
			      "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
			      core->id.manuf, core->id.id,
			      core->id.rev, core->id.class);
}

static unsigned int bcma_bus_registered;

/*
 * If built in, the bus has to be registered early, before any driver calls
 * bcma_driver_register().
 * Otherwise registering a driver would trigger a BUG in driver_register().
 */
static int __init bcma_init_bus_register(void)
{
	int err;

	if (bcma_bus_registered)
		return 0;

	err = bus_register(&bcma_bus_type);
	if (!err)
		bcma_bus_registered = 1;

	return err;
}
#ifndef MODULE
fs_initcall(bcma_init_bus_register);
#endif

/* Main initialization has to be done with SPI/mtd/NAND/SPROM available */
static int __init bcma_modinit(void)
{
	int err;

	err = bcma_init_bus_register();
	if (err)
		return err;

	err = bcma_host_soc_register_driver();
	if (err) {
		pr_err("SoC host initialization failed\n");
		err = 0;
	}
#ifdef CONFIG_BCMA_HOST_PCI
	err = bcma_host_pci_init();
	if (err) {
		pr_err("PCI host initialization failed\n");
		err = 0;
	}
#endif

	return err;
}
module_init(bcma_modinit);

static void __exit bcma_modexit(void)
{
#ifdef CONFIG_BCMA_HOST_PCI
	bcma_host_pci_exit();
#endif
	bcma_host_soc_unregister_driver();
	bus_unregister(&bcma_bus_type);
}
module_exit(bcma_modexit);
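
/*
 * Illustrative sketch only (not part of this file): roughly how a bcma
 * client driver binds to a core via an id table and the
 * bcma_driver_register()/bcma_driver_unregister() helpers built on the
 * __bcma_driver_register() export above. The example_* names are
 * hypothetical; a real driver supplies its own probe/remove logic and
 * matches its own core IDs. The empty terminator works because
 * bcma_bus_match() stops at an all-zero entry.
 *
 *	static const struct bcma_device_id example_bcma_tbl[] = {
 *		BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211,
 *			  BCMA_ANY_REV, BCMA_ANY_CLASS),
 *		{},
 *	};
 *	MODULE_DEVICE_TABLE(bcma, example_bcma_tbl);
 *
 *	static int example_probe(struct bcma_device *core)
 *	{
 *		return 0;
 *	}
 *
 *	static void example_remove(struct bcma_device *core)
 *	{
 *	}
 *
 *	static struct bcma_driver example_driver = {
 *		.name		= KBUILD_MODNAME,
 *		.id_table	= example_bcma_tbl,
 *		.probe		= example_probe,
 *		.remove		= example_remove,
 *	};
 *	module_bcma_driver(example_driver);
 */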