/*
 * Core maple bus functionality
 *
 * Copyright (C) 2007, 2008 Adrian McMenamin
 *
 * Based on 2.4 code by:
 *
 * Copyright (C) 2000-2001 YAEGASHI Takeshi
 * Copyright (C) 2001 M. R. Brown
 * Copyright (C) 2001 Paul Mundt
 *
 * and others.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/maple.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/mach/dma.h>
#include <asm/mach/sysasic.h>
#include <asm/mach/maple.h>

MODULE_AUTHOR("Yaegashi Takeshi, Paul Mundt, M. R. Brown, Adrian McMenamin");
MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
MODULE_LICENSE("GPL v2");
MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");

static void maple_dma_handler(struct work_struct *work);
static void maple_vblank_handler(struct work_struct *work);

static DECLARE_WORK(maple_dma_process, maple_dma_handler);
static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);

static LIST_HEAD(maple_waitq);
static LIST_HEAD(maple_sentq);

static DEFINE_MUTEX(maple_list_lock);

static struct maple_driver maple_dummy_driver;
static struct device maple_bus;
static int subdevice_map[MAPLE_PORTS];
static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
static unsigned long maple_pnp_time;
static int started, scanning, liststatus, fullscan;
static struct kmem_cache *maple_queue_cache;

struct maple_device_specify {
	int port;
	int unit;
};

static bool checked[MAPLE_PORTS];
static struct maple_device *baseunits[MAPLE_PORTS];

/**
 * maple_driver_register - register a device driver
 * automatically sets the driver's bus to the maple bus
 * @drv: the driver to be registered
 */
int maple_driver_register(struct device_driver *drv)
{
	if (!drv)
		return -EINVAL;
	drv->bus = &maple_bus_type;
	return driver_register(drv);
}
EXPORT_SYMBOL_GPL(maple_driver_register);

/* set hardware registers to enable next round of dma */
static void maplebus_dma_reset(void)
{
	ctrl_outl(MAPLE_MAGIC, MAPLE_RESET);
	/* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
	ctrl_outl(1, MAPLE_TRIGTYPE);
	ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED);
	ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR);
	ctrl_outl(1, MAPLE_ENABLE);
}

/**
 * maple_getcond_callback - setup handling MAPLE_COMMAND_GETCOND
 * @dev: device responding
 * @callback: handler callback
 * @interval: interval in jiffies between callbacks
 * @function: the function code for the device
 */
void maple_getcond_callback(struct maple_device *dev,
			    void (*callback) (struct mapleq *mq),
			    unsigned long interval, unsigned long function)
{
	dev->callback = callback;
	dev->interval = interval;
	dev->function = cpu_to_be32(function);
	dev->when = jiffies;
}
EXPORT_SYMBOL_GPL(maple_getcond_callback);
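/*
 * Illustrative sketch only (not part of this driver): a client driver
 * would typically hook up periodic GETCOND polling from its connect()
 * handler.  All names here are hypothetical; the interval is in
 * jiffies, so HZ asks for the device to be queried roughly once a
 * second from the VBLANK-driven command queue.
 *
 *	static void example_poll(struct mapleq *mq)
 *	{
 *		unsigned char *res = mq->recvbuf;
 *		// res[0] holds the response code, e.g. MAPLE_RESPONSE_DATATRF
 *	}
 *
 *	static int example_connect(struct maple_device *mdev)
 *	{
 *		maple_getcond_callback(mdev, example_poll, HZ,
 *				       MAPLE_FUNC_CONTROLLER);
 *		return 0;
 *	}
 */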
static int maple_dma_done(void)
{
	return (ctrl_inl(MAPLE_STATE) & 1) == 0;
}

static void maple_release_device(struct device *dev)
{
	struct maple_device *mdev;
	struct mapleq *mq;
	if (!dev)
		return;
	mdev = to_maple_dev(dev);
	mq = mdev->mq;
	if (mq) {
		if (mq->recvbufdcsp)
			kmem_cache_free(maple_queue_cache, mq->recvbufdcsp);
		kfree(mq);
		mq = NULL;
	}
	kfree(mdev);
}

/**
 * maple_add_packet - add a single instruction to the queue
 * @mq: instruction to add to waiting queue
 */
void maple_add_packet(struct mapleq *mq)
{
	mutex_lock(&maple_list_lock);
	list_add(&mq->list, &maple_waitq);
	mutex_unlock(&maple_list_lock);
}
EXPORT_SYMBOL_GPL(maple_add_packet);

static struct mapleq *maple_allocq(struct maple_device *mdev)
{
	struct mapleq *mq;

	mq = kmalloc(sizeof(*mq), GFP_KERNEL);
	if (!mq)
		return NULL;

	mq->dev = mdev;
	mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
	if (!mq->recvbufdcsp) {
		/* test the allocation itself: P2SEGADDR() of NULL is non-NULL */
		kfree(mq);
		return NULL;
	}
	mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp);

	return mq;
}

static struct maple_device *maple_alloc_dev(int port, int unit)
{
	struct maple_device *mdev;

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return NULL;

	mdev->port = port;
	mdev->unit = unit;
	mdev->mq = maple_allocq(mdev);

	if (!mdev->mq) {
		kfree(mdev);
		return NULL;
	}
	mdev->dev.bus = &maple_bus_type;
	mdev->dev.parent = &maple_bus;
	mdev->function = 0;
	return mdev;
}

static void maple_free_dev(struct maple_device *mdev)
{
	if (!mdev)
		return;
	if (mdev->mq) {
		if (mdev->mq->recvbufdcsp)
			kmem_cache_free(maple_queue_cache,
				mdev->mq->recvbufdcsp);
		kfree(mdev->mq);
	}
	kfree(mdev);
}
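/*
 * For reference, each frame that maple_build_block() below writes into
 * the DMA command block has this 32-bit word layout:
 *
 *	word 0:	transfer descriptor - port number (bits 16-17), data
 *		length in words (low bits), and a "last frame" flag in
 *		bit 31, set only on the final frame of the block
 *	word 1:	physical address the hardware writes the reply to
 *	word 2:	command code, recipient address, sender address and
 *		length, one byte each
 *	word 3+: 'length' words of command data
 */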
/* process the command queue into a maple command block:
 * the final command in the block has bit 31 of its first long set;
 * the same bit is cleared on every earlier command
 */
static void maple_build_block(struct mapleq *mq)
{
	int port, unit, from, to, len;
	unsigned long *lsendbuf = mq->sendbuf;

	port = mq->dev->port & 3;
	unit = mq->dev->unit;
	len = mq->length;
	from = port << 6;
	to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);

	*maple_lastptr &= 0x7fffffff;
	maple_lastptr = maple_sendptr;

	*maple_sendptr++ = (port << 16) | len | 0x80000000;
	*maple_sendptr++ = PHYSADDR(mq->recvbuf);
	*maple_sendptr++ =
	    mq->command | (to << 8) | (from << 16) | (len << 24);

	while (len-- > 0)
		*maple_sendptr++ = *lsendbuf++;
}

/* build up command queue */
static void maple_send(void)
{
	int i;
	int maple_packets;
	struct mapleq *mq, *nmq;

	if (!list_empty(&maple_sentq))
		return;
	if (list_empty(&maple_waitq) || !maple_dma_done())
		return;
	maple_packets = 0;
	maple_sendptr = maple_lastptr = maple_sendbuf;
	list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
		maple_build_block(mq);
		list_move(&mq->list, &maple_sentq);
		if (maple_packets++ > MAPLE_MAXPACKETS)
			break;
	}
	if (maple_packets > 0) {
		for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
			dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
	}
}

static int attach_matching_maple_driver(struct device_driver *driver,
					void *devptr)
{
	struct maple_driver *maple_drv;
	struct maple_device *mdev;

	mdev = devptr;
	maple_drv = to_maple_driver(driver);
	if (mdev->devinfo.function & be32_to_cpu(maple_drv->function)) {
		if (maple_drv->connect(mdev) == 0) {
			mdev->driver = maple_drv;
			return 1;
		}
	}
	return 0;
}

static void maple_detach_driver(struct maple_device *mdev)
{
	if (!mdev)
		return;
	if (mdev->driver) {
		if (mdev->driver->disconnect)
			mdev->driver->disconnect(mdev);
	}
	mdev->driver = NULL;
	device_unregister(&mdev->dev);
	mdev = NULL;
}
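/*
 * For orientation, the matching lifecycle seen from a client driver
 * (hypothetical names): a driver advertises the function bits it can
 * handle and is offered each device whose DEVINFO function mask
 * overlaps them; a connect() returning 0 claims the device, and
 * disconnect() is called on removal.
 *
 *	static struct maple_driver example_driver = {
 *		.function = MAPLE_FUNC_CONTROLLER,
 *		.connect = example_connect,
 *		.disconnect = example_disconnect,
 *		.drv = {
 *			.name = "example_maple_driver",
 *		},
 *	};
 *
 *	// from the client module's init; maple_driver_register() sets
 *	// drv.bus to maple_bus_type before registering
 *	maple_driver_register(&example_driver.drv);
 */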
/* process initial MAPLE_COMMAND_DEVINFO for each device or port */
static void maple_attach_driver(struct maple_device *mdev)
{
	char *p, *recvbuf;
	unsigned long function;
	int matched, retval;

	recvbuf = mdev->mq->recvbuf;
	/* copy the data as individual elements in
	 * case of memory optimisation */
	memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
	memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12);
	memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
	memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
	memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
	memcpy(&mdev->devinfo.product_licence[0], recvbuf + 52, 60);
	memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
	memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
	memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
	mdev->product_name[30] = '\0';
	memcpy(mdev->product_licence, mdev->devinfo.product_licence, 60);
	mdev->product_licence[60] = '\0';

	/* strip trailing spaces from the fixed-width name fields */
	for (p = mdev->product_name + 29; mdev->product_name <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;
	for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;

	printk(KERN_INFO "Maple device detected: %s\n",
		mdev->product_name);
	printk(KERN_INFO "Maple device: %s\n", mdev->product_licence);

	function = be32_to_cpu(mdev->devinfo.function);

	if (function > 0x200) {
		/* Do this silently - as not a real device */
		function = 0;
		mdev->driver = &maple_dummy_driver;
		sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port);
	} else {
		printk(KERN_INFO
			"Maple bus at (%d, %d): Function 0x%lX\n",
			mdev->port, mdev->unit, function);

		matched =
		    bus_for_each_drv(&maple_bus_type, NULL, mdev,
				     attach_matching_maple_driver);

		if (matched == 0) {
			/* Driver does not exist yet */
			printk(KERN_INFO "No maple driver found.\n");
			mdev->driver = &maple_dummy_driver;
		}
		sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port,
			mdev->unit, function);
	}
	mdev->function = function;
	mdev->dev.release = &maple_release_device;
	retval = device_register(&mdev->dev);
	if (retval) {
		printk(KERN_INFO
			"Maple bus: Attempt to register device"
			" (%x, %x) failed.\n",
			mdev->port, mdev->unit);
		maple_free_dev(mdev);
		mdev = NULL;
		return;
	}
}

/*
 * if a device has been registered for the given
 * port and unit then return 1 - allows identification
 * of which devices need to be attached or detached
 */
static int detach_maple_device(struct device *device, void *portptr)
{
	struct maple_device_specify *ds;
	struct maple_device *mdev;

	ds = portptr;
	mdev = to_maple_dev(device);
	if (mdev->port == ds->port && mdev->unit == ds->unit)
		return 1;
	return 0;
}

static int setup_maple_commands(struct device *device, void *ignored)
{
	struct maple_device *maple_dev = to_maple_dev(device);

	if ((maple_dev->interval > 0)
	    && time_after(jiffies, maple_dev->when)) {
		maple_dev->when = jiffies + maple_dev->interval;
		maple_dev->mq->command = MAPLE_COMMAND_GETCOND;
		maple_dev->mq->sendbuf = &maple_dev->function;
		maple_dev->mq->length = 1;
		maple_add_packet(maple_dev->mq);
		liststatus++;
	} else {
		if (time_after(jiffies, maple_pnp_time)) {
			maple_dev->mq->command = MAPLE_COMMAND_DEVINFO;
			maple_dev->mq->length = 0;
			maple_add_packet(maple_dev->mq);
			liststatus++;
		}
	}

	return 0;
}

/* VBLANK bottom half - implemented via workqueue */
static void maple_vblank_handler(struct work_struct *work)
{
	if (!maple_dma_done())
		return;
	if (!list_empty(&maple_sentq))
		return;
	ctrl_outl(0, MAPLE_ENABLE);
	liststatus = 0;
	bus_for_each_dev(&maple_bus_type, NULL, NULL,
			 setup_maple_commands);
	if (time_after(jiffies, maple_pnp_time))
		maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
	if (liststatus && list_empty(&maple_sentq)) {
		INIT_LIST_HEAD(&maple_sentq);
		maple_send();
	}
	maplebus_dma_reset();
}

/* handle devices added via hotplug, queueing them for DEVINFO */
static void maple_map_subunits(struct maple_device *mdev, int submask)
{
	int retval, k, devcheck;
	struct maple_device *mdev_add;
	struct maple_device_specify ds;

	for (k = 0; k < 5; k++) {
		ds.port = mdev->port;
		ds.unit = k + 1;
		retval =
		    bus_for_each_dev(&maple_bus_type, NULL, &ds,
				     detach_maple_device);
		if (retval) {
			submask = submask >> 1;
			continue;
		}
		devcheck = submask & 0x01;
		if (devcheck) {
			mdev_add = maple_alloc_dev(mdev->port, k + 1);
			if (!mdev_add)
				return;
			mdev_add->mq->command = MAPLE_COMMAND_DEVINFO;
			mdev_add->mq->length = 0;
			maple_add_packet(mdev_add->mq);
			scanning = 1;
		}
		submask = submask >> 1;
	}
}
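/*
 * Unit addressing, as used above and in maple_clean_submap() below:
 * within a port, bit 5 (0x20) addresses the base unit (unit 0) and
 * bits 0-4 address subunits 1-5.  A DEVINFO reply from a base unit
 * carries a 5-bit submask of attached subunits, so a submask of 0x05
 * (binary 00101) means subunits 1 and 3 are present.
 */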
/* mark a device as removed */
static void maple_clean_submap(struct maple_device *mdev)
{
	int killbit;

	killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
	killbit = ~killbit;
	killbit &= 0xFF;
	subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
}

/* handle empty port or hotplug removal */
static void maple_response_none(struct maple_device *mdev,
				struct mapleq *mq)
{
	if (mdev->unit != 0) {
		list_del(&mq->list);
		maple_clean_submap(mdev);
		printk(KERN_INFO
			"Maple bus device detaching at (%d, %d)\n",
			mdev->port, mdev->unit);
		maple_detach_driver(mdev);
		return;
	}
	if (!started || !fullscan) {
		if (checked[mdev->port] == false) {
			checked[mdev->port] = true;
			printk(KERN_INFO "No maple devices attached"
				" to port %d\n", mdev->port);
		}
		return;
	}
	maple_clean_submap(mdev);
}

/* preprocess hotplugs or scans */
static void maple_response_devinfo(struct maple_device *mdev,
				   char *recvbuf)
{
	char submask;
	if (!started || (scanning == 2) || !fullscan) {
		if ((mdev->unit == 0) && (checked[mdev->port] == false)) {
			checked[mdev->port] = true;
			maple_attach_driver(mdev);
		} else {
			if (mdev->unit != 0)
				maple_attach_driver(mdev);
		}
		return;
	}
	if (mdev->unit == 0) {
		submask = recvbuf[2] & 0x1F;
		if (submask ^ subdevice_map[mdev->port]) {
			maple_map_subunits(mdev, submask);
			subdevice_map[mdev->port] = submask;
		}
	}
}
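/*
 * A summary of the scan state shared with the DMA bottom half below:
 * - 'scanning' goes to 1 when maple_map_subunits() queues DEVINFO
 *   queries for hotplugged subunits, to 2 while those queries are in
 *   flight, and back to 0 once their replies have been processed;
 * - 'fullscan' is 1 once every port has been checked at least once;
 *   until then DEVINFO is requeued for the unchecked base units;
 * - 'started' is set after the first batch of replies is handled.
 */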
/* maple dma end bottom half - implemented via workqueue */
static void maple_dma_handler(struct work_struct *work)
{
	struct mapleq *mq, *nmq;
	struct maple_device *dev;
	char *recvbuf;
	enum maple_code code;
	int i;

	if (!maple_dma_done())
		return;
	ctrl_outl(0, MAPLE_ENABLE);
	if (!list_empty(&maple_sentq)) {
		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
			recvbuf = mq->recvbuf;
			code = recvbuf[0];
			dev = mq->dev;
			switch (code) {
			case MAPLE_RESPONSE_NONE:
				maple_response_none(dev, mq);
				break;

			case MAPLE_RESPONSE_DEVINFO:
				maple_response_devinfo(dev, recvbuf);
				break;

			case MAPLE_RESPONSE_DATATRF:
				if (dev->callback)
					dev->callback(mq);
				break;

			case MAPLE_RESPONSE_FILEERR:
			case MAPLE_RESPONSE_AGAIN:
			case MAPLE_RESPONSE_BADCMD:
			case MAPLE_RESPONSE_BADFUNC:
				printk(KERN_DEBUG
					"Maple non-fatal error 0x%X\n",
					code);
				break;

			case MAPLE_RESPONSE_ALLINFO:
				printk(KERN_DEBUG
					"Maple - extended device information"
					" not supported\n");
				break;

			case MAPLE_RESPONSE_OK:
				break;

			default:
				break;
			}
		}
		INIT_LIST_HEAD(&maple_sentq);
		if (scanning == 1) {
			maple_send();
			scanning = 2;
		} else
			scanning = 0;

		if (!fullscan) {
			fullscan = 1;
			for (i = 0; i < MAPLE_PORTS; i++) {
				if (checked[i] == false) {
					fullscan = 0;
					dev = baseunits[i];
					dev->mq->command =
						MAPLE_COMMAND_DEVINFO;
					dev->mq->length = 0;
					maple_add_packet(dev->mq);
				}
			}
		}
		if (started == 0)
			started = 1;
	}
	maplebus_dma_reset();
}

static irqreturn_t maplebus_dma_interrupt(int irq, void *dev_id)
{
	/* Load everything into the bottom half */
	schedule_work(&maple_dma_process);
	return IRQ_HANDLED;
}

static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id)
{
	schedule_work(&maple_vblank_process);
	return IRQ_HANDLED;
}

static int maple_set_dma_interrupt_handler(void)
{
	return request_irq(HW_EVENT_MAPLE_DMA, maplebus_dma_interrupt,
		IRQF_SHARED, "maple bus DMA", &maple_dummy_driver);
}

static int maple_set_vblank_interrupt_handler(void)
{
	return request_irq(HW_EVENT_VSYNC, maplebus_vblank_interrupt,
		IRQF_SHARED, "maple bus VBLANK", &maple_dummy_driver);
}

static int maple_get_dma_buffer(void)
{
	maple_sendbuf =
	    (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      MAPLE_DMA_PAGES);
	if (!maple_sendbuf)
		return -ENOMEM;
	return 0;
}

static int match_maple_bus_driver(struct device *devptr,
				  struct device_driver *drvptr)
{
	struct maple_driver *maple_drv;
	struct maple_device *maple_dev;

	maple_drv = container_of(drvptr, struct maple_driver, drv);
	maple_dev = container_of(devptr, struct maple_device, dev);
	/* Trap empty port case */
	if (maple_dev->devinfo.function == 0xFFFFFFFF)
		return 0;
	else if (maple_dev->devinfo.function &
		 be32_to_cpu(maple_drv->function))
		return 1;
	return 0;
}

static int maple_bus_uevent(struct device *dev,
			    struct kobj_uevent_env *env)
{
	return 0;
}

static void maple_bus_release(struct device *dev)
{
}

static struct maple_driver maple_dummy_driver = {
	.drv = {
		.name = "maple_dummy_driver",
		.bus = &maple_bus_type,
	},
};

struct bus_type maple_bus_type = {
	.name = "maple",
	.match = match_maple_bus_driver,
	.uevent = maple_bus_uevent,
};
EXPORT_SYMBOL_GPL(maple_bus_type);

static struct device maple_bus = {
	.bus_id = "maple",
	.release = maple_bus_release,
};
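/*
 * Bring-up below runs in dependency order - bus device, bus type,
 * fallback driver, DMA buffer, the two interrupt handlers, the
 * receive-buffer cache, then one DEVINFO probe per port - and the
 * cleanup labels unwind whatever succeeded, in reverse order.
 */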
static int __init maple_bus_init(void)
{
	int retval, i;
	struct maple_device *mdev[MAPLE_PORTS];
	ctrl_outl(0, MAPLE_STATE);

	retval = device_register(&maple_bus);
	if (retval)
		goto cleanup;

	retval = bus_register(&maple_bus_type);
	if (retval)
		goto cleanup_device;

	retval = driver_register(&maple_dummy_driver.drv);
	if (retval)
		goto cleanup_bus;

	/* allocate memory for maple bus dma */
	retval = maple_get_dma_buffer();
	if (retval) {
		printk(KERN_INFO
			"Maple bus: Failed to allocate Maple DMA buffers\n");
		goto cleanup_basic;
	}

	/* set up DMA interrupt handler */
	retval = maple_set_dma_interrupt_handler();
	if (retval) {
		printk(KERN_INFO
			"Maple bus: Failed to grab maple DMA IRQ\n");
		goto cleanup_dma;
	}

	/* set up VBLANK interrupt handler */
	retval = maple_set_vblank_interrupt_handler();
	if (retval) {
		printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n");
		goto cleanup_irq;
	}

	maple_queue_cache =
	    kmem_cache_create("maple_queue_cache", 0x400, 0,
			      SLAB_POISON|SLAB_HWCACHE_ALIGN, NULL);

	if (!maple_queue_cache) {
		retval = -ENOMEM;
		goto cleanup_bothirqs;
	}

	/* setup maple ports */
	for (i = 0; i < MAPLE_PORTS; i++) {
		checked[i] = false;
		mdev[i] = maple_alloc_dev(i, 0);
		baseunits[i] = mdev[i];
		if (!mdev[i]) {
			while (i-- > 0)
				maple_free_dev(mdev[i]);
			retval = -ENOMEM;
			goto cleanup_cache;
		}
		mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO;
		mdev[i]->mq->length = 0;
		maple_add_packet(mdev[i]->mq);
		subdevice_map[i] = 0;
	}

	/* setup maplebus hardware */
	maplebus_dma_reset();
	/* initial detection */
	maple_send();
	maple_pnp_time = jiffies;
	printk(KERN_INFO "Maple bus core now registered.\n");

	return 0;

cleanup_cache:
	kmem_cache_destroy(maple_queue_cache);

cleanup_bothirqs:
	/* dev_id must match the cookie passed to request_irq() */
	free_irq(HW_EVENT_VSYNC, &maple_dummy_driver);

cleanup_irq:
	free_irq(HW_EVENT_MAPLE_DMA, &maple_dummy_driver);

cleanup_dma:
	free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);

cleanup_basic:
	driver_unregister(&maple_dummy_driver.drv);

cleanup_bus:
	bus_unregister(&maple_bus_type);

cleanup_device:
	device_unregister(&maple_bus);

cleanup:
	printk(KERN_INFO "Maple bus registration failed\n");
	return retval;
}
/* Push init to later to ensure hardware gets detected */
fs_initcall(maple_bus_init);