/*
 * The Serio abstraction module
 *
 * Copyright (c) 1999-2004 Vojtech Pavlik
 * Copyright (c) 2004 Dmitry Torokhov
 * Copyright (c) 2003 Daniele Bellucci
 */

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Should you need to contact me, the author, you can do so either by
 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/serio.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Serio abstraction core");
MODULE_LICENSE("GPL");

/*
 * serio_mutex protects the entire serio subsystem and is taken every time
 * a serio port or driver is registered or unregistered.
 */
static DEFINE_MUTEX(serio_mutex);

static LIST_HEAD(serio_list);

static void serio_add_port(struct serio *serio);
static int serio_reconnect_port(struct serio *serio);
static void serio_disconnect_port(struct serio *serio);
static void serio_reconnect_subtree(struct serio *serio);
static void serio_attach_driver(struct serio_driver *drv);

static int serio_connect_driver(struct serio *serio, struct serio_driver *drv)
{
	int retval;

	mutex_lock(&serio->drv_mutex);
	retval = drv->connect(serio, drv);
	mutex_unlock(&serio->drv_mutex);

	return retval;
}

static int serio_reconnect_driver(struct serio *serio)
{
	int retval = -1;

	mutex_lock(&serio->drv_mutex);
	if (serio->drv && serio->drv->reconnect)
		retval = serio->drv->reconnect(serio);
	mutex_unlock(&serio->drv_mutex);

	return retval;
}

static void serio_disconnect_driver(struct serio *serio)
{
	mutex_lock(&serio->drv_mutex);
	if (serio->drv)
		serio->drv->disconnect(serio);
	mutex_unlock(&serio->drv_mutex);
}

static int serio_match_port(const struct serio_device_id *ids, struct serio *serio)
{
	while (ids->type || ids->proto) {
		if ((ids->type == SERIO_ANY || ids->type == serio->id.type) &&
		    (ids->proto == SERIO_ANY || ids->proto == serio->id.proto) &&
		    (ids->extra == SERIO_ANY || ids->extra == serio->id.extra) &&
		    (ids->id == SERIO_ANY || ids->id == serio->id.id))
			return 1;
		ids++;
	}
	return 0;
}

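/*
 * For illustration only (all "example_*" names are hypothetical): a serio
 * driver advertises the ports it can handle through an id table which
 * serio_match_port() above scans until the all-zero sentinel entry, with
 * SERIO_ANY acting as a wildcard in any field:
 *
 *	static const struct serio_device_id example_ids[] = {
 *		{ .type = SERIO_8042, .proto = SERIO_ANY,
 *		  .id = SERIO_ANY, .extra = SERIO_ANY },
 *		{ 0 }
 *	};
 *
 *	static struct serio_driver example_drv = {
 *		.driver		= { .name = "example" },
 *		.description	= "Example serio driver",
 *		.id_table	= example_ids,
 *		.interrupt	= example_interrupt,
 *		.connect	= example_connect,
 *		.disconnect	= example_disconnect,
 *	};
 *	module_serio_driver(example_drv);
 *
 * connect() is expected to call serio_open() and disconnect() to call
 * serio_close(); see the driver operations further down in this file.
 */
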
/*
 * Basic serio -> driver core mappings
 */

static int serio_bind_driver(struct serio *serio, struct serio_driver *drv)
{
	int error;

	if (serio_match_port(drv->id_table, serio)) {

		serio->dev.driver = &drv->driver;
		if (serio_connect_driver(serio, drv)) {
			serio->dev.driver = NULL;
			return -ENODEV;
		}

		error = device_bind_driver(&serio->dev);
		if (error) {
			dev_warn(&serio->dev,
				 "device_bind_driver() failed for %s (%s) and %s, error: %d\n",
				 serio->phys, serio->name,
				 drv->description, error);
			serio_disconnect_driver(serio);
			serio->dev.driver = NULL;
			return error;
		}
	}
	return 0;
}

static void serio_find_driver(struct serio *serio)
{
	int error;

	error = device_attach(&serio->dev);
	if (error < 0)
		dev_warn(&serio->dev,
			 "device_attach() failed for %s (%s), error: %d\n",
			 serio->phys, serio->name, error);
}


/*
 * Serio event processing.
 */

enum serio_event_type {
	SERIO_RESCAN_PORT,
	SERIO_RECONNECT_PORT,
	SERIO_RECONNECT_SUBTREE,
	SERIO_REGISTER_PORT,
	SERIO_ATTACH_DRIVER,
};

struct serio_event {
	enum serio_event_type type;
	void *object;
	struct module *owner;
	struct list_head node;
};

static DEFINE_SPINLOCK(serio_event_lock);	/* protects serio_event_list */
static LIST_HEAD(serio_event_list);

static struct serio_event *serio_get_event(void)
{
	struct serio_event *event = NULL;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	if (!list_empty(&serio_event_list)) {
		event = list_first_entry(&serio_event_list,
					 struct serio_event, node);
		list_del_init(&event->node);
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
	return event;
}

static void serio_free_event(struct serio_event *event)
{
	module_put(event->owner);
	kfree(event);
}

static void serio_remove_duplicate_events(void *object,
					  enum serio_event_type type)
{
	struct serio_event *e, *next;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry_safe(e, next, &serio_event_list, node) {
		if (object == e->object) {
			/*
			 * If this event is of a different type we should not
			 * look further - we only suppress duplicate events
			 * that were sent back-to-back.
			 */
			if (type != e->type)
				break;

			list_del_init(&e->node);
			serio_free_event(e);
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
}

static void serio_handle_event(struct work_struct *work)
{
	struct serio_event *event;

	mutex_lock(&serio_mutex);

	while ((event = serio_get_event())) {

		switch (event->type) {

		case SERIO_REGISTER_PORT:
			serio_add_port(event->object);
			break;

		case SERIO_RECONNECT_PORT:
			serio_reconnect_port(event->object);
			break;

		case SERIO_RESCAN_PORT:
			serio_disconnect_port(event->object);
			serio_find_driver(event->object);
			break;

		case SERIO_RECONNECT_SUBTREE:
			serio_reconnect_subtree(event->object);
			break;

		case SERIO_ATTACH_DRIVER:
			serio_attach_driver(event->object);
			break;
		}

		serio_remove_duplicate_events(event->object, event->type);
		serio_free_event(event);
	}

	mutex_unlock(&serio_mutex);
}

static DECLARE_WORK(serio_event_work, serio_handle_event);

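/*
 * serio_queue_event() may be called from atomic context: the event is
 * allocated with GFP_ATOMIC under serio_event_lock and the real work is
 * deferred to serio_event_work, i.e. serio_handle_event() running on
 * system_long_wq with serio_mutex held.
 */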
static int serio_queue_event(void *object, struct module *owner,
			     enum serio_event_type event_type)
{
	unsigned long flags;
	struct serio_event *event;
	int retval = 0;

	spin_lock_irqsave(&serio_event_lock, flags);

	/*
	 * Scan the event list for other events for the same serio port,
	 * starting with the most recent one. If an identical event is
	 * already queued we do not need to add a new one. If the most
	 * recent event is of a different type we need to add this event
	 * and should not look further, because we need to preserve the
	 * sequence of distinct events.
	 */
	list_for_each_entry_reverse(event, &serio_event_list, node) {
		if (event->object == object) {
			if (event->type == event_type)
				goto out;
			break;
		}
	}

	event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC);
	if (!event) {
		pr_err("Not enough memory to queue event %d\n", event_type);
		retval = -ENOMEM;
		goto out;
	}

	if (!try_module_get(owner)) {
		pr_warning("Can't get module reference, dropping event %d\n",
			   event_type);
		kfree(event);
		retval = -EINVAL;
		goto out;
	}

	event->type = event_type;
	event->object = object;
	event->owner = owner;

	list_add_tail(&event->node, &serio_event_list);
	queue_work(system_long_wq, &serio_event_work);

out:
	spin_unlock_irqrestore(&serio_event_lock, flags);
	return retval;
}

/*
 * Remove all events that have been submitted for a given
 * object, be it a serio port or a driver.
 */
static void serio_remove_pending_events(void *object)
{
	struct serio_event *event, *next;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry_safe(event, next, &serio_event_list, node) {
		if (event->object == object) {
			list_del_init(&event->node);
			serio_free_event(event);
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
}

/*
 * Locate a child serio port (if any) that has not been fully registered yet.
 *
 * Children are registered by the driver's connect() handler, so there can't
 * be a grandchild pending registration together with a child.
 */
static struct serio *serio_get_pending_child(struct serio *parent)
{
	struct serio_event *event;
	struct serio *serio, *child = NULL;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry(event, &serio_event_list, node) {
		if (event->type == SERIO_REGISTER_PORT) {
			serio = event->object;
			if (serio->parent == parent) {
				child = serio;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
	return child;
}

/*
 * Serio port operations
 */

static ssize_t serio_show_description(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%s\n", serio->name);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);

	return sprintf(buf, "serio:ty%02Xpr%02Xid%02Xex%02X\n",
		       serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.type);
}

static ssize_t proto_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.proto);
}

static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.id);
}

static ssize_t extra_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.extra);
}

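/*
 * drvctl is a write-only sysfs attribute of every serio port. Writing
 * "none" unbinds the current driver, "reconnect" reconnects the whole
 * subtree, "rescan" disconnects the port and lets the driver core look
 * for a driver again, and any other string is treated as the name of a
 * driver to bind explicitly. For example (the path is illustrative):
 *
 *	echo -n rescan > /sys/bus/serio/devices/serio0/drvctl
 */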
static ssize_t drvctl_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct serio *serio = to_serio_port(dev);
	struct device_driver *drv;
	int error;

	error = mutex_lock_interruptible(&serio_mutex);
	if (error)
		return error;

	if (!strncmp(buf, "none", count)) {
		serio_disconnect_port(serio);
	} else if (!strncmp(buf, "reconnect", count)) {
		serio_reconnect_subtree(serio);
	} else if (!strncmp(buf, "rescan", count)) {
		serio_disconnect_port(serio);
		serio_find_driver(serio);
		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
	} else if ((drv = driver_find(buf, &serio_bus)) != NULL) {
		serio_disconnect_port(serio);
		error = serio_bind_driver(serio, to_serio_driver(drv));
		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
	} else {
		error = -EINVAL;
	}

	mutex_unlock(&serio_mutex);

	return error ? error : count;
}

static ssize_t serio_show_bind_mode(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%s\n", serio->manual_bind ? "manual" : "auto");
}

static ssize_t serio_set_bind_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct serio *serio = to_serio_port(dev);
	int retval;

	retval = count;
	if (!strncmp(buf, "manual", count)) {
		serio->manual_bind = true;
	} else if (!strncmp(buf, "auto", count)) {
		serio->manual_bind = false;
	} else {
		retval = -EINVAL;
	}

	return retval;
}

static ssize_t firmware_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);

	return sprintf(buf, "%s\n", serio->firmware_id);
}

static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(proto);
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(extra);

static struct attribute *serio_device_id_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_proto.attr,
	&dev_attr_id.attr,
	&dev_attr_extra.attr,
	NULL
};

static struct attribute_group serio_id_attr_group = {
	.name	= "id",
	.attrs	= serio_device_id_attrs,
};

static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_WO(drvctl);
static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
static DEVICE_ATTR_RO(firmware_id);

static struct attribute *serio_device_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_description.attr,
	&dev_attr_drvctl.attr,
	&dev_attr_bind_mode.attr,
	&dev_attr_firmware_id.attr,
	NULL
};

static struct attribute_group serio_device_attr_group = {
	.attrs	= serio_device_attrs,
};

static const struct attribute_group *serio_device_attr_groups[] = {
	&serio_id_attr_group,
	&serio_device_attr_group,
	NULL
};

static void serio_release_port(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	kfree(serio);
	module_put(THIS_MODULE);
}

/*
 * Prepare serio port for registration.
 */
static void serio_init_port(struct serio *serio)
{
	static atomic_t serio_no = ATOMIC_INIT(-1);

	__module_get(THIS_MODULE);

	INIT_LIST_HEAD(&serio->node);
	INIT_LIST_HEAD(&serio->child_node);
	INIT_LIST_HEAD(&serio->children);
	spin_lock_init(&serio->lock);
	mutex_init(&serio->drv_mutex);
	device_initialize(&serio->dev);
	dev_set_name(&serio->dev, "serio%lu",
		     (unsigned long)atomic_inc_return(&serio_no));
	serio->dev.bus = &serio_bus;
	serio->dev.release = serio_release_port;
	serio->dev.groups = serio_device_attr_groups;
	if (serio->parent) {
		serio->dev.parent = &serio->parent->dev;
		serio->depth = serio->parent->depth + 1;
	} else
		serio->depth = 0;
	lockdep_set_subclass(&serio->lock, serio->depth);
}

/*
 * Complete serio port registration.
 * The driver core will attempt to find an appropriate driver for the port.
 */
static void serio_add_port(struct serio *serio)
{
	struct serio *parent = serio->parent;
	int error;

	if (parent) {
		serio_pause_rx(parent);
		list_add_tail(&serio->child_node, &parent->children);
		serio_continue_rx(parent);
	}

	list_add_tail(&serio->node, &serio_list);

	if (serio->start)
		serio->start(serio);

	error = device_add(&serio->dev);
	if (error)
		dev_err(&serio->dev,
			"device_add() failed for %s (%s), error: %d\n",
			serio->phys, serio->name, error);
}

/*
 * serio_destroy_port() completes the unregistration process and removes
 * the port from the system.
 */
static void serio_destroy_port(struct serio *serio)
{
	struct serio *child;

	while ((child = serio_get_pending_child(serio)) != NULL) {
		serio_remove_pending_events(child);
		put_device(&child->dev);
	}

	if (serio->stop)
		serio->stop(serio);

	if (serio->parent) {
		serio_pause_rx(serio->parent);
		list_del_init(&serio->child_node);
		serio_continue_rx(serio->parent);
		serio->parent = NULL;
	}

	if (device_is_registered(&serio->dev))
		device_del(&serio->dev);

	list_del_init(&serio->node);
	serio_remove_pending_events(serio);
	put_device(&serio->dev);
}

/*
 * Reconnect serio port (re-initialize attached device).
 * If reconnect fails (old device is no longer attached or
 * there was no device to begin with) we do a full rescan in
 * the hope of finding a driver for the port.
 */
static int serio_reconnect_port(struct serio *serio)
{
	int error = serio_reconnect_driver(serio);

	if (error) {
		serio_disconnect_port(serio);
		serio_find_driver(serio);
	}

	return error;
}

/*
 * Reconnect serio port and all its children (re-initialize attached
 * devices).
 */
static void serio_reconnect_subtree(struct serio *root)
{
	struct serio *s = root;
	int error;

	do {
		error = serio_reconnect_port(s);
		if (!error) {
			/*
			 * Reconnect was successful, move on to do the
			 * first child.
			 */
			if (!list_empty(&s->children)) {
				s = list_first_entry(&s->children,
						     struct serio, child_node);
				continue;
			}
		}

		/*
		 * Either it was a leaf node or reconnect failed and it
		 * became a leaf node. Continue reconnecting starting with
		 * the next sibling of the parent node.
		 */
		while (s != root) {
			struct serio *parent = s->parent;

			if (!list_is_last(&s->child_node, &parent->children)) {
				s = list_entry(s->child_node.next,
					       struct serio, child_node);
				break;
			}

			s = parent;
		}
	} while (s != root);
}

/*
 * serio_disconnect_port() unbinds a port from its driver. As a side effect
 * all child ports are unbound and destroyed.
 */
static void serio_disconnect_port(struct serio *serio)
{
	struct serio *s = serio;

	/*
	 * Child ports should be disconnected and destroyed
	 * first; we traverse the tree in depth-first order.
	 */
	while (!list_empty(&serio->children)) {

		/* Locate a leaf */
		while (!list_empty(&s->children))
			s = list_first_entry(&s->children,
					     struct serio, child_node);

		/*
		 * Prune this leaf node unless it is the one we
		 * started with.
		 */
		if (s != serio) {
			struct serio *parent = s->parent;

			device_release_driver(&s->dev);
			serio_destroy_port(s);

			s = parent;
		}
	}

	/*
	 * OK, no children left, now disconnect this port.
	 */
	device_release_driver(&serio->dev);
}

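/*
 * The functions below form the exported port API. As a rough sketch
 * (hypothetical names), a port driver that has detected a new port does
 * something like:
 *
 *	struct serio *serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
 *
 *	serio->id.type = SERIO_8042;
 *	serio->write = example_write;
 *	serio->open = example_open;
 *	serio->close = example_close;
 *	strlcpy(serio->name, "Example port", sizeof(serio->name));
 *	strlcpy(serio->phys, "example/serio0", sizeof(serio->phys));
 *	serio_register_port(serio);
 *
 * serio_register_port() expands to __serio_register_port(serio,
 * THIS_MODULE); registration itself is asynchronous - it only queues a
 * SERIO_REGISTER_PORT event and serio_add_port() runs later from the
 * event workqueue.
 */
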
void serio_rescan(struct serio *serio)
{
	serio_queue_event(serio, NULL, SERIO_RESCAN_PORT);
}
EXPORT_SYMBOL(serio_rescan);

void serio_reconnect(struct serio *serio)
{
	serio_queue_event(serio, NULL, SERIO_RECONNECT_SUBTREE);
}
EXPORT_SYMBOL(serio_reconnect);

/*
 * Submits a register request to kseriod for subsequent execution.
 * Note that port registration is always asynchronous.
 */
void __serio_register_port(struct serio *serio, struct module *owner)
{
	serio_init_port(serio);
	serio_queue_event(serio, owner, SERIO_REGISTER_PORT);
}
EXPORT_SYMBOL(__serio_register_port);

/*
 * Synchronously unregisters a serio port.
 */
void serio_unregister_port(struct serio *serio)
{
	mutex_lock(&serio_mutex);
	serio_disconnect_port(serio);
	serio_destroy_port(serio);
	mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_port);

/*
 * Safely unregisters child ports if they are present.
 */
void serio_unregister_child_port(struct serio *serio)
{
	struct serio *s, *next;

	mutex_lock(&serio_mutex);
	list_for_each_entry_safe(s, next, &serio->children, child_node) {
		serio_disconnect_port(s);
		serio_destroy_port(s);
	}
	mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_child_port);


/*
 * Serio driver operations
 */

static ssize_t description_show(struct device_driver *drv, char *buf)
{
	struct serio_driver *driver = to_serio_driver(drv);
	return sprintf(buf, "%s\n", driver->description ? driver->description : "(none)");
}
static DRIVER_ATTR_RO(description);

static ssize_t bind_mode_show(struct device_driver *drv, char *buf)
{
	struct serio_driver *serio_drv = to_serio_driver(drv);
	return sprintf(buf, "%s\n", serio_drv->manual_bind ? "manual" : "auto");
}

static ssize_t bind_mode_store(struct device_driver *drv, const char *buf, size_t count)
{
	struct serio_driver *serio_drv = to_serio_driver(drv);
	int retval;

	retval = count;
	if (!strncmp(buf, "manual", count)) {
		serio_drv->manual_bind = true;
	} else if (!strncmp(buf, "auto", count)) {
		serio_drv->manual_bind = false;
	} else {
		retval = -EINVAL;
	}

	return retval;
}
static DRIVER_ATTR_RW(bind_mode);

static struct attribute *serio_driver_attrs[] = {
	&driver_attr_description.attr,
	&driver_attr_bind_mode.attr,
	NULL,
};
ATTRIBUTE_GROUPS(serio_driver);

static int serio_driver_probe(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);
	struct serio_driver *drv = to_serio_driver(dev->driver);

	return serio_connect_driver(serio, drv);
}

static int serio_driver_remove(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_disconnect_driver(serio);
	return 0;
}

static void serio_cleanup(struct serio *serio)
{
	mutex_lock(&serio->drv_mutex);
	if (serio->drv && serio->drv->cleanup)
		serio->drv->cleanup(serio);
	mutex_unlock(&serio->drv_mutex);
}

static void serio_shutdown(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_cleanup(serio);
}

static void serio_attach_driver(struct serio_driver *drv)
{
	int error;

	error = driver_attach(&drv->driver);
	if (error)
		pr_warning("driver_attach() failed for %s with error %d\n",
			   drv->driver.name, error);
}

int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name)
{
	bool manual_bind = drv->manual_bind;
	int error;

	drv->driver.bus = &serio_bus;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;

	/*
	 * Temporarily disable automatic binding because probing
	 * takes a long time and we are better off doing it in kseriod.
	 */
	drv->manual_bind = true;

	error = driver_register(&drv->driver);
	if (error) {
		pr_err("driver_register() failed for %s, error: %d\n",
			drv->driver.name, error);
		return error;
	}

	/*
	 * Restore the original bind mode and let kseriod bind the
	 * driver to free ports.
	 */
	if (!manual_bind) {
		drv->manual_bind = false;
		error = serio_queue_event(drv, NULL, SERIO_ATTACH_DRIVER);
		if (error) {
			driver_unregister(&drv->driver);
			return error;
		}
	}

	return 0;
}
EXPORT_SYMBOL(__serio_register_driver);

void serio_unregister_driver(struct serio_driver *drv)
{
	struct serio *serio;

	mutex_lock(&serio_mutex);

	drv->manual_bind = true;	/* so serio_find_driver ignores it */
	serio_remove_pending_events(drv);

start_over:
	list_for_each_entry(serio, &serio_list, node) {
		if (serio->drv == drv) {
			serio_disconnect_port(serio);
			serio_find_driver(serio);
			/* we could've deleted some ports, restart */
			goto start_over;
		}
	}

	driver_unregister(&drv->driver);
	mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_driver);

static void serio_set_drv(struct serio *serio, struct serio_driver *drv)
{
	serio_pause_rx(serio);
	serio->drv = drv;
	serio_continue_rx(serio);
}

static int serio_bus_match(struct device *dev, struct device_driver *drv)
{
	struct serio *serio = to_serio_port(dev);
	struct serio_driver *serio_drv = to_serio_driver(drv);

	if (serio->manual_bind || serio_drv->manual_bind)
		return 0;

	return serio_match_port(serio_drv->id_table, serio);
}

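/*
 * The uevent handler below exports the port id to userspace both as
 * individual SERIO_* variables and as a MODALIAS string of the form
 * "serio:tyXXprXXidXXexXX"; MODULE_DEVICE_TABLE(serio, ...) entries in
 * drivers produce aliases in the same format, which is what allows udev
 * to autoload the matching driver module when a port appears.
 */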
#define SERIO_ADD_UEVENT_VAR(fmt, val...)				\
	do {								\
		int err = add_uevent_var(env, fmt, val);		\
		if (err)						\
			return err;					\
	} while (0)

static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct serio *serio;

	if (!dev)
		return -ENODEV;

	serio = to_serio_port(dev);

	SERIO_ADD_UEVENT_VAR("SERIO_TYPE=%02x", serio->id.type);
	SERIO_ADD_UEVENT_VAR("SERIO_PROTO=%02x", serio->id.proto);
	SERIO_ADD_UEVENT_VAR("SERIO_ID=%02x", serio->id.id);
	SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra);

	SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X",
			     serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);

	if (serio->firmware_id[0])
		SERIO_ADD_UEVENT_VAR("SERIO_FIRMWARE_ID=%s",
				     serio->firmware_id);

	return 0;
}
#undef SERIO_ADD_UEVENT_VAR

#ifdef CONFIG_PM
static int serio_suspend(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_cleanup(serio);

	return 0;
}

static int serio_resume(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	/*
	 * Driver reconnect can take a while, so better let kseriod
	 * deal with it.
	 */
	serio_queue_event(serio, NULL, SERIO_RECONNECT_PORT);

	return 0;
}

static const struct dev_pm_ops serio_pm_ops = {
	.suspend	= serio_suspend,
	.resume		= serio_resume,
	.poweroff	= serio_suspend,
	.restore	= serio_resume,
};
#endif /* CONFIG_PM */

/* called from serio_driver->connect/disconnect methods under serio_mutex */
int serio_open(struct serio *serio, struct serio_driver *drv)
{
	serio_set_drv(serio, drv);

	if (serio->open && serio->open(serio)) {
		serio_set_drv(serio, NULL);
		return -1;
	}
	return 0;
}
EXPORT_SYMBOL(serio_open);

/* called from serio_driver->connect/disconnect methods under serio_mutex */
void serio_close(struct serio *serio)
{
	if (serio->close)
		serio->close(serio);

	serio_set_drv(serio, NULL);
}
EXPORT_SYMBOL(serio_close);

irqreturn_t serio_interrupt(struct serio *serio,
			    unsigned char data, unsigned int dfl)
{
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	spin_lock_irqsave(&serio->lock, flags);

	if (likely(serio->drv)) {
		ret = serio->drv->interrupt(serio, data, dfl);
	} else if (!dfl && device_is_registered(&serio->dev)) {
		serio_rescan(serio);
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&serio->lock, flags);

	return ret;
}
EXPORT_SYMBOL(serio_interrupt);

struct bus_type serio_bus = {
	.name		= "serio",
	.drv_groups	= serio_driver_groups,
	.match		= serio_bus_match,
	.uevent		= serio_uevent,
	.probe		= serio_driver_probe,
	.remove		= serio_driver_remove,
	.shutdown	= serio_shutdown,
#ifdef CONFIG_PM
	.pm		= &serio_pm_ops,
#endif
};
EXPORT_SYMBOL(serio_bus);

static int __init serio_init(void)
{
	int error;

	error = bus_register(&serio_bus);
	if (error) {
		pr_err("Failed to register serio bus, error: %d\n", error);
		return error;
	}

	return 0;
}

static void __exit serio_exit(void)
{
	bus_unregister(&serio_bus);

	/*
	 * There should not be any outstanding events but work may
	 * still be scheduled so simply cancel it.
	 */
	cancel_work_sync(&serio_event_work);
}

subsys_initcall(serio_init);
module_exit(serio_exit);