/*
 * Device driver for the Apple Desktop Bus
 * and the /dev/adb device on Macintoshes.
 *
 * Copyright (C) 1996 Paul Mackerras.
 *
 * Modified to declare controllers as structures, add client
 * notification of bus reset, and handle PowerBook sleep, by
 * Benjamin Herrenschmidt.
 *
 * To do:
 *
 * - /sys/bus/adb to list the devices and their info
 * - extend /dev/adb to allow userland to receive the
 *   flow of auto-polling data from a given device
 * - move bus probe to a kernel thread
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/adb.h>
#include <linux/cuda.h>
#include <linux/pmu.h>
#include <linux/notifier.h>
#include <linux/wait.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>

#include <linux/uaccess.h>
#ifdef CONFIG_PPC
#include <asm/prom.h>
#include <asm/machdep.h>
#endif


EXPORT_SYMBOL(adb_client_list);

extern struct adb_driver via_macii_driver;
extern struct adb_driver via_cuda_driver;
extern struct adb_driver adb_iop_driver;
extern struct adb_driver via_pmu_driver;
extern struct adb_driver macio_adb_driver;

static DEFINE_MUTEX(adb_mutex);
static struct adb_driver *adb_driver_list[] = {
#ifdef CONFIG_ADB_MACII
	&via_macii_driver,
#endif
#ifdef CONFIG_ADB_CUDA
	&via_cuda_driver,
#endif
#ifdef CONFIG_ADB_IOP
	&adb_iop_driver,
#endif
#if defined(CONFIG_ADB_PMU) || defined(CONFIG_ADB_PMU68K)
	&via_pmu_driver,
#endif
#ifdef CONFIG_ADB_MACIO
	&macio_adb_driver,
#endif
	NULL
};

static struct class *adb_dev_class;

static struct adb_driver *adb_controller;
BLOCKING_NOTIFIER_HEAD(adb_client_list);
static int adb_got_sleep;
static int adb_inited;
static DEFINE_SEMAPHORE(adb_probe_mutex);
static int sleepy_trackpad;
static int autopoll_devs;
int __adb_probe_sync;

static int adb_scan_bus(void);
static int do_adb_reset_bus(void);
static void adbdev_init(void);
static int try_handler_change(int, int);

static struct adb_handler {
	void (*handler)(unsigned char *, int, int);
	int original_address;
	int handler_id;
	int busy;
} adb_handler[16];

/*
 * The adb_handler_mutex mutex protects all accesses to the original_address
 * and handler_id fields of adb_handler[i] for all i, and changes to the
 * handler field.
 * Accesses to the handler field are protected by the adb_handler_lock
 * rwlock.  It is held while a handler is looked up; together with the
 * per-entry busy flag it ensures that by the time adb_unregister returns,
 * the old handler is no longer being called.
 */
static DEFINE_MUTEX(adb_handler_mutex);
static DEFINE_RWLOCK(adb_handler_lock);

#if 0
static void printADBreply(struct adb_request *req)
{
	int i;

	printk("adb reply (%d)", req->reply_len);
	for (i = 0; i < req->reply_len; i++)
		printk(" %x", req->reply[i]);
	printk("\n");
}
#endif

static int adb_scan_bus(void)
{
	int i, highFree = 0, noMovement;
	int devmask = 0;
	struct adb_request req;

	/* assumes adb_handler[] is all zeroes at this point */
	for (i = 1; i < 16; i++) {
		/* see if there is anything at address i */
		adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
			    (i << 4) | 0xf);
		if (req.reply_len > 1)
			/* one or more devices at this address */
			adb_handler[i].original_address = i;
		else if (i > highFree)
			highFree = i;
	}

	/* Note we reset noMovement to 0 each time we move a device */
	for (noMovement = 1; noMovement < 2 && highFree > 0; noMovement++) {
		for (i = 1; i < 16; i++) {
			if (adb_handler[i].original_address == 0)
				continue;
			/*
			 * Send a "talk register 3" command to address i
			 * to provoke a collision if there is more than
			 * one device at this address.
			 */
			adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
				    (i << 4) | 0xf);
			/*
			 * Move the device(s) which didn't detect a
			 * collision to address `highFree'.  Hopefully
			 * this only moves one device.
			 */
			adb_request(&req, NULL, ADBREQ_SYNC, 3,
				    (i << 4) | 0xb, (highFree | 0x60), 0xfe);
			/*
			 * See if anybody actually moved.  This is suggested
			 * by HW TechNote 01:
			 *
			 * http://developer.apple.com/technotes/hw/hw_01.html
			 */
			adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
				    (highFree << 4) | 0xf);
			if (req.reply_len <= 1)
				continue;
			/*
			 * Test whether there are any device(s) left
			 * at address i.
			 */
			adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
				    (i << 4) | 0xf);
			if (req.reply_len > 1) {
				/*
				 * There are still one or more devices
				 * left at address i.  Register the one(s)
				 * we moved to `highFree', and find a new
				 * value for highFree.
				 */
				adb_handler[highFree].original_address =
					adb_handler[i].original_address;
				while (highFree > 0 &&
				       adb_handler[highFree].original_address)
					highFree--;
				if (highFree <= 0)
					break;

				noMovement = 0;
			} else {
				/*
				 * No devices left at address i; move the
				 * one(s) we moved to `highFree' back to i.
				 */
				adb_request(&req, NULL, ADBREQ_SYNC, 3,
					    (highFree << 4) | 0xb,
					    (i | 0x60), 0xfe);
			}
		}
	}

	/* Now fill in the handler_id field of the adb_handler entries. */
	printk(KERN_DEBUG "adb devices:");
	for (i = 1; i < 16; i++) {
		if (adb_handler[i].original_address == 0)
			continue;
		adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
			    (i << 4) | 0xf);
		adb_handler[i].handler_id = req.reply[2];
		printk(" [%d]: %d %x", i, adb_handler[i].original_address,
		       adb_handler[i].handler_id);
		devmask |= 1 << i;
	}
	printk("\n");
	return devmask;
}

/*
 * This kernel task handles ADB probing.  It dies once probing is
 * completed.
 */
static int
adb_probe_task(void *x)
{
	printk(KERN_INFO "adb: starting probe task...\n");
	do_adb_reset_bus();
	printk(KERN_INFO "adb: finished probe task...\n");

	up(&adb_probe_mutex);

	return 0;
}

static void
__adb_probe_task(struct work_struct *bullshit)
{
	kthread_run(adb_probe_task, NULL, "kadbprobe");
}

static DECLARE_WORK(adb_reset_work, __adb_probe_task);

int
adb_reset_bus(void)
{
	if (__adb_probe_sync) {
		do_adb_reset_bus();
		return 0;
	}

	down(&adb_probe_mutex);
	schedule_work(&adb_reset_work);
	return 0;
}

#ifdef CONFIG_PM
/*
 * notify clients before sleep
 */
static int __adb_suspend(struct platform_device *dev, pm_message_t state)
{
	adb_got_sleep = 1;
	/* We need to get a lock on the probe thread */
	down(&adb_probe_mutex);
	/* Stop autopoll */
	if (adb_controller->autopoll)
		adb_controller->autopoll(0);
	blocking_notifier_call_chain(&adb_client_list, ADB_MSG_POWERDOWN, NULL);

	return 0;
}

static int adb_suspend(struct device *dev)
{
	return __adb_suspend(to_platform_device(dev), PMSG_SUSPEND);
}

static int adb_freeze(struct device *dev)
{
	return __adb_suspend(to_platform_device(dev), PMSG_FREEZE);
}

static int adb_poweroff(struct device *dev)
{
	return __adb_suspend(to_platform_device(dev), PMSG_HIBERNATE);
}

/*
 * reset bus after sleep
 */
static int __adb_resume(struct platform_device *dev)
{
	adb_got_sleep = 0;
	up(&adb_probe_mutex);
	adb_reset_bus();

	return 0;
}

static int adb_resume(struct device *dev)
{
	return __adb_resume(to_platform_device(dev));
}
#endif /* CONFIG_PM */

static int __init adb_init(void)
{
	struct adb_driver *driver;
	int i;

#ifdef CONFIG_PPC32
	if (!machine_is(chrp) && !machine_is(powermac))
		return 0;
#endif
#ifdef CONFIG_MAC
	if (!MACH_IS_MAC)
		return 0;
#endif

	/* xmon may do early-init */
	if (adb_inited)
		return 0;
	adb_inited = 1;

	adb_controller = NULL;

	i = 0;
	while ((driver = adb_driver_list[i++]) != NULL) {
		if (!driver->probe()) {
			adb_controller = driver;
			break;
		}
	}
	if (adb_controller != NULL && adb_controller->init &&
	    adb_controller->init())
		adb_controller = NULL;
	if (adb_controller == NULL) {
		printk(KERN_WARNING "Warning: no ADB interface detected\n");
	} else {
#ifdef CONFIG_PPC
		if (of_machine_is_compatible("AAPL,PowerBook1998") ||
		    of_machine_is_compatible("PowerBook1,1"))
			sleepy_trackpad = 1;
#endif /* CONFIG_PPC */

		adbdev_init();
		adb_reset_bus();
	}
	return 0;
}

device_initcall(adb_init);

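/*
 * Rough summary of the probe flow wired up above (descriptive only, no
 * new behaviour): adb_init() picks the first controller whose probe()
 * succeeds, then calls adb_reset_bus().  Unless __adb_probe_sync is set,
 * adb_reset_bus() takes adb_probe_mutex and defers the real work to the
 * "kadbprobe" thread, which runs do_adb_reset_bus() below and releases
 * the mutex once the bus has been re-scanned.
 */
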
static int
do_adb_reset_bus(void)
{
	int ret;

	if (adb_controller == NULL)
		return -ENXIO;

	if (adb_controller->autopoll)
		adb_controller->autopoll(0);

	blocking_notifier_call_chain(&adb_client_list,
				     ADB_MSG_PRE_RESET, NULL);

	if (sleepy_trackpad) {
		/* Let the trackpad settle down */
		msleep(500);
	}

	mutex_lock(&adb_handler_mutex);
	write_lock_irq(&adb_handler_lock);
	memset(adb_handler, 0, sizeof(adb_handler));
	write_unlock_irq(&adb_handler_lock);

	/* That one is still a bit synchronous, oh well... */
	if (adb_controller->reset_bus)
		ret = adb_controller->reset_bus();
	else
		ret = 0;

	if (sleepy_trackpad) {
		/* Let the trackpad settle down */
		msleep(1500);
	}

	if (!ret) {
		autopoll_devs = adb_scan_bus();
		if (adb_controller->autopoll)
			adb_controller->autopoll(autopoll_devs);
	}
	mutex_unlock(&adb_handler_mutex);

	blocking_notifier_call_chain(&adb_client_list,
				     ADB_MSG_POST_RESET, NULL);

	return ret;
}

void
adb_poll(void)
{
	if ((adb_controller == NULL) || (adb_controller->poll == NULL))
		return;
	adb_controller->poll();
}
EXPORT_SYMBOL(adb_poll);

static void adb_sync_req_done(struct adb_request *req)
{
	struct completion *comp = req->arg;

	complete(comp);
}

int
adb_request(struct adb_request *req, void (*done)(struct adb_request *),
	    int flags, int nbytes, ...)
{
	va_list list;
	int i;
	int rc;
	struct completion comp;

	if ((adb_controller == NULL) || (adb_controller->send_request == NULL))
		return -ENXIO;
	if (nbytes < 1)
		return -EINVAL;

	req->nbytes = nbytes + 1;
	req->done = done;
	req->reply_expected = flags & ADBREQ_REPLY;
	req->data[0] = ADB_PACKET;
	va_start(list, nbytes);
	for (i = 0; i < nbytes; ++i)
		req->data[i + 1] = va_arg(list, int);
	va_end(list);

	if (flags & ADBREQ_NOSEND)
		return 0;

	/* Synchronous requests block using an on-stack completion */
	if (flags & ADBREQ_SYNC) {
		WARN_ON(done);
		req->done = adb_sync_req_done;
		req->arg = &comp;
		init_completion(&comp);
	}

	rc = adb_controller->send_request(req, 0);

	if ((flags & ADBREQ_SYNC) && !rc && !req->complete)
		wait_for_completion(&comp);

	return rc;
}
EXPORT_SYMBOL(adb_request);

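/*
 * Illustrative use of adb_request() (a sketch only; device address 2 is
 * just an example): a synchronous "Talk register 3" that waits for the
 * reply to land in req.reply[]:
 *
 *	struct adb_request req;
 *
 *	adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
 *		    ADB_READREG(2, 3));
 *	if (req.reply_len > 2)
 *		pr_debug("handler id %x\n", req.reply[2]);
 *
 * With ADBREQ_SYNC the caller passes done == NULL and adb_request()
 * installs its own completion-based callback, as implemented above.
 */
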
/* Ultimately this should return the number of devices with
   the given default id.
   And it does that now.  Note: changed behaviour: this function
   will now register if default_id _and_ handler_id both match,
   but handler_id can be left as 0 to match on default_id alone.
   When handler_id is set, this function will try to adjust
   the handler_id if it doesn't match. */
int
adb_register(int default_id, int handler_id, struct adb_ids *ids,
	     void (*handler)(unsigned char *, int, int))
{
	int i;

	mutex_lock(&adb_handler_mutex);
	ids->nids = 0;
	for (i = 1; i < 16; i++) {
		if ((adb_handler[i].original_address == default_id) &&
		    (!handler_id || (handler_id == adb_handler[i].handler_id) ||
		     try_handler_change(i, handler_id))) {
			if (adb_handler[i].handler != 0) {
				printk(KERN_ERR
				       "Two handlers for ADB device %d\n",
				       default_id);
				continue;
			}
			write_lock_irq(&adb_handler_lock);
			adb_handler[i].handler = handler;
			write_unlock_irq(&adb_handler_lock);
			ids->id[ids->nids++] = i;
		}
	}
	mutex_unlock(&adb_handler_mutex);
	return ids->nids;
}
EXPORT_SYMBOL(adb_register);

int
adb_unregister(int index)
{
	int ret = -ENODEV;

	mutex_lock(&adb_handler_mutex);
	write_lock_irq(&adb_handler_lock);
	if (adb_handler[index].handler) {
		while (adb_handler[index].busy) {
			write_unlock_irq(&adb_handler_lock);
			yield();
			write_lock_irq(&adb_handler_lock);
		}
		ret = 0;
		adb_handler[index].handler = NULL;
	}
	write_unlock_irq(&adb_handler_lock);
	mutex_unlock(&adb_handler_mutex);
	return ret;
}
EXPORT_SYMBOL(adb_unregister);

void
adb_input(unsigned char *buf, int nb, int autopoll)
{
	int i, id;
	static int dump_adb_input;
	unsigned long flags;

	void (*handler)(unsigned char *, int, int);

	/* We skip keystrokes and mouse moves when the sleep process
	 * has been started.  Autopoll is already stopped, but this is
	 * an extra safeguard. */
	if (adb_got_sleep)
		return;

	id = buf[0] >> 4;
	if (dump_adb_input) {
		printk(KERN_INFO "adb packet: ");
		for (i = 0; i < nb; ++i)
			printk(" %x", buf[i]);
		printk(", id = %d\n", id);
	}
	write_lock_irqsave(&adb_handler_lock, flags);
	handler = adb_handler[id].handler;
	if (handler != NULL)
		adb_handler[id].busy = 1;
	write_unlock_irqrestore(&adb_handler_lock, flags);
	if (handler != NULL) {
		(*handler)(buf, nb, autopoll);
		wmb();
		adb_handler[id].busy = 0;
	}
}

/* Try to change handler to new_id.  Will return 1 if successful. */
static int try_handler_change(int address, int new_id)
{
	struct adb_request req;

	if (adb_handler[address].handler_id == new_id)
		return 1;
	adb_request(&req, NULL, ADBREQ_SYNC, 3,
		    ADB_WRITEREG(address, 3), address | 0x20, new_id);
	adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
		    ADB_READREG(address, 3));
	if (req.reply_len < 2)
		return 0;
	if (req.reply[2] != new_id)
		return 0;
	adb_handler[address].handler_id = req.reply[2];

	return 1;
}

int
adb_try_handler_change(int address, int new_id)
{
	int ret;

	mutex_lock(&adb_handler_mutex);
	ret = try_handler_change(address, new_id);
	mutex_unlock(&adb_handler_mutex);
	return ret;
}
EXPORT_SYMBOL(adb_try_handler_change);

int
adb_get_infos(int address, int *original_address, int *handler_id)
{
	mutex_lock(&adb_handler_mutex);
	*original_address = adb_handler[address].original_address;
	*handler_id = adb_handler[address].handler_id;
	mutex_unlock(&adb_handler_mutex);

	return (*original_address != 0);
}


/*
 * /dev/adb device driver.
 */

#define ADB_MAJOR 56		/* major number for /dev/adb */

struct adbdev_state {
	spinlock_t	lock;
	atomic_t	n_pending;
	struct adb_request *completed;
	wait_queue_head_t wait_queue;
	int		inuse;
};

static void adb_write_done(struct adb_request *req)
{
	struct adbdev_state *state = (struct adbdev_state *) req->arg;
	unsigned long flags;

	if (!req->complete) {
		req->reply_len = 0;
		req->complete = 1;
	}
	spin_lock_irqsave(&state->lock, flags);
	atomic_dec(&state->n_pending);
	if (!state->inuse) {
		kfree(req);
		if (atomic_read(&state->n_pending) == 0) {
			spin_unlock_irqrestore(&state->lock, flags);
			kfree(state);
			return;
		}
	} else {
		struct adb_request **ap = &state->completed;

		while (*ap != NULL)
			ap = &(*ap)->next;
		req->next = NULL;
		*ap = req;
		wake_up_interruptible(&state->wait_queue);
	}
	spin_unlock_irqrestore(&state->lock, flags);
}

static int
do_adb_query(struct adb_request *req)
{
	int ret = -EINVAL;

	switch (req->data[1]) {
	case ADB_QUERY_GETDEVINFO:
		if (req->nbytes < 3)
			break;
		mutex_lock(&adb_handler_mutex);
		req->reply[0] = adb_handler[req->data[2]].original_address;
		req->reply[1] = adb_handler[req->data[2]].handler_id;
		mutex_unlock(&adb_handler_mutex);
		req->complete = 1;
		req->reply_len = 2;
		adb_write_done(req);
		ret = 0;
		break;
	}
	return ret;
}

static int adb_open(struct inode *inode, struct file *file)
{
	struct adbdev_state *state;
	int ret = 0;

	mutex_lock(&adb_mutex);
	if (iminor(inode) > 0 || adb_controller == NULL) {
		ret = -ENXIO;
		goto out;
	}
	state = kmalloc(sizeof(struct adbdev_state), GFP_KERNEL);
	if (state == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	file->private_data = state;
	spin_lock_init(&state->lock);
	atomic_set(&state->n_pending, 0);
	state->completed = NULL;
	init_waitqueue_head(&state->wait_queue);
	state->inuse = 1;

out:
	mutex_unlock(&adb_mutex);
	return ret;
}

static int adb_release(struct inode *inode, struct file *file)
{
	struct adbdev_state *state = file->private_data;
	unsigned long flags;

	mutex_lock(&adb_mutex);
	if (state) {
		file->private_data = NULL;
		spin_lock_irqsave(&state->lock, flags);
		if (atomic_read(&state->n_pending) == 0
		    && state->completed == NULL) {
			spin_unlock_irqrestore(&state->lock, flags);
			kfree(state);
		} else {
			state->inuse = 0;
			spin_unlock_irqrestore(&state->lock, flags);
		}
	}
	mutex_unlock(&adb_mutex);
	return 0;
}

static ssize_t adb_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	int ret = 0;
	struct adbdev_state *state = file->private_data;
	struct adb_request *req;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	if (count < 2)
		return -EINVAL;
	if (count > sizeof(req->reply))
		count = sizeof(req->reply);

	req = NULL;
	spin_lock_irqsave(&state->lock, flags);
	add_wait_queue(&state->wait_queue, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	for (;;) {
		req = state->completed;
		if (req != NULL)
			state->completed = req->next;
		else if (atomic_read(&state->n_pending) == 0)
			ret = -EIO;
		if (req != NULL || ret != 0)
			break;

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		spin_unlock_irqrestore(&state->lock, flags);
		schedule();
		spin_lock_irqsave(&state->lock, flags);
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&state->wait_queue, &wait);
	spin_unlock_irqrestore(&state->lock, flags);

	if (ret)
		return ret;

	ret = req->reply_len;
	if (ret > count)
		ret = count;
	if (ret > 0 && copy_to_user(buf, req->reply, ret))
		ret = -EFAULT;

	kfree(req);
	return ret;
}

static ssize_t adb_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	int ret;
	struct adbdev_state *state = file->private_data;
	struct adb_request *req;

	if (count < 2 || count > sizeof(req->data))
		return -EINVAL;
	if (adb_controller == NULL)
		return -ENXIO;

	req = kmalloc(sizeof(struct adb_request), GFP_KERNEL);
	if (req == NULL)
		return -ENOMEM;

	req->nbytes = count;
	req->done = adb_write_done;
	req->arg = (void *) state;
	req->complete = 0;

	ret = -EFAULT;
	if (copy_from_user(req->data, buf, count))
		goto out;

	atomic_inc(&state->n_pending);

	/* If a probe is in progress or we are sleeping, wait for it to complete */
	down(&adb_probe_mutex);

	/* Queries are special requests sent to the ADB driver itself */
	if (req->data[0] == ADB_QUERY) {
		if (count > 1)
			ret = do_adb_query(req);
		else
			ret = -EINVAL;
		up(&adb_probe_mutex);
	}
	/* Special case for ADB_BUSRESET request, all others are sent to
	   the controller */
	else if ((req->data[0] == ADB_PACKET) && (count > 1)
		 && (req->data[1] == ADB_BUSRESET)) {
		ret = do_adb_reset_bus();
		up(&adb_probe_mutex);
		atomic_dec(&state->n_pending);
		if (ret == 0)
			ret = count;
		goto out;
	} else {
		req->reply_expected = ((req->data[1] & 0xc) == 0xc);
		if (adb_controller && adb_controller->send_request)
			ret = adb_controller->send_request(req, 0);
		else
			ret = -ENXIO;
		up(&adb_probe_mutex);
	}

	if (ret != 0) {
		atomic_dec(&state->n_pending);
		goto out;
	}
	return count;

out:
	kfree(req);
	return ret;
}

static const struct file_operations adb_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= adb_read,
	.write		= adb_write,
	.open		= adb_open,
	.release	= adb_release,
};

#ifdef CONFIG_PM
static const struct dev_pm_ops adb_dev_pm_ops = {
	.suspend	= adb_suspend,
	.resume		= adb_resume,
	/* Hibernate hooks */
	.freeze		= adb_freeze,
	.thaw		= adb_resume,
	.poweroff	= adb_poweroff,
	.restore	= adb_resume,
};
#endif

static struct platform_driver adb_pfdrv = {
	.driver = {
		.name = "adb",
#ifdef CONFIG_PM
		.pm = &adb_dev_pm_ops,
#endif
	},
};

static struct platform_device adb_pfdev = {
	.name = "adb",
};

static int __init
adb_dummy_probe(struct platform_device *dev)
{
	if (dev == &adb_pfdev)
		return 0;
	return -ENODEV;
}

static void __init
adbdev_init(void)
{
	if (register_chrdev(ADB_MAJOR, "adb", &adb_fops)) {
		printk(KERN_ERR "adb: unable to get major %d\n", ADB_MAJOR);
		return;
	}

	adb_dev_class = class_create(THIS_MODULE, "adb");
	if (IS_ERR(adb_dev_class))
		return;
	device_create(adb_dev_class, NULL, MKDEV(ADB_MAJOR, 0), NULL, "adb");

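	/*
	 * Register a matching platform device/driver pair so that the
	 * dev_pm_ops defined above (under CONFIG_PM) are invoked across
	 * suspend/resume; adb_dummy_probe() only ever binds to adb_pfdev.
	 */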
	platform_device_register(&adb_pfdev);
	platform_driver_probe(&adb_pfdrv, adb_dummy_probe);
}