/*
 * The Serio abstraction module
 *
 * Copyright (c) 1999-2004 Vojtech Pavlik
 * Copyright (c) 2004 Dmitry Torokhov
 * Copyright (c) 2003 Daniele Bellucci
 */

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/serio.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Serio abstraction core");
MODULE_LICENSE("GPL");

/*
 * serio_mutex protects the entire serio subsystem and is taken every time
 * a serio port or driver is registered or unregistered.
 */
static DEFINE_MUTEX(serio_mutex);

static LIST_HEAD(serio_list);

static void serio_add_port(struct serio *serio);
static int serio_reconnect_port(struct serio *serio);
static void serio_disconnect_port(struct serio *serio);
static void serio_reconnect_subtree(struct serio *serio);
static void serio_attach_driver(struct serio_driver *drv);

static int serio_connect_driver(struct serio *serio, struct serio_driver *drv)
{
	int retval;

	mutex_lock(&serio->drv_mutex);
	retval = drv->connect(serio, drv);
	mutex_unlock(&serio->drv_mutex);

	return retval;
}

static int serio_reconnect_driver(struct serio *serio)
{
	int retval = -1;

	mutex_lock(&serio->drv_mutex);
	if (serio->drv && serio->drv->reconnect)
		retval = serio->drv->reconnect(serio);
	mutex_unlock(&serio->drv_mutex);

	return retval;
}

static void serio_disconnect_driver(struct serio *serio)
{
	mutex_lock(&serio->drv_mutex);
	if (serio->drv)
		serio->drv->disconnect(serio);
	mutex_unlock(&serio->drv_mutex);
}

static int serio_match_port(const struct serio_device_id *ids, struct serio *serio)
{
	while (ids->type || ids->proto) {
		if ((ids->type == SERIO_ANY || ids->type == serio->id.type) &&
		    (ids->proto == SERIO_ANY || ids->proto == serio->id.proto) &&
		    (ids->extra == SERIO_ANY || ids->extra == serio->id.extra) &&
		    (ids->id == SERIO_ANY || ids->id == serio->id.id))
			return 1;
		ids++;
	}
	return 0;
}

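/*
 * Illustration only (not part of this module's logic): a client driver's
 * id_table is an array of struct serio_device_id terminated by an entry
 * whose .type and .proto are both zero.  SERIO_ANY acts as a wildcard in
 * any field, so a hypothetical table such as the one below would match
 * every port of type SERIO_8042 regardless of protocol, id or extra byte:
 *
 *	static const struct serio_device_id example_serio_ids[] = {
 *		{
 *			.type	= SERIO_8042,
 *			.proto	= SERIO_ANY,
 *			.id	= SERIO_ANY,
 *			.extra	= SERIO_ANY,
 *		},
 *		{ 0 }
 *	};
 *
 * The "example_" name is made up for this sketch.
 */
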
/*
 * Basic serio -> driver core mappings
 */

static int serio_bind_driver(struct serio *serio, struct serio_driver *drv)
{
	int error;

	if (serio_match_port(drv->id_table, serio)) {

		serio->dev.driver = &drv->driver;
		if (serio_connect_driver(serio, drv)) {
			serio->dev.driver = NULL;
			return -ENODEV;
		}

		error = device_bind_driver(&serio->dev);
		if (error) {
			dev_warn(&serio->dev,
				 "device_bind_driver() failed for %s (%s) and %s, error: %d\n",
				 serio->phys, serio->name,
				 drv->description, error);
			serio_disconnect_driver(serio);
			serio->dev.driver = NULL;
			return error;
		}
	}
	return 0;
}

static void serio_find_driver(struct serio *serio)
{
	int error;

	error = device_attach(&serio->dev);
	if (error < 0 && error != -EPROBE_DEFER)
		dev_warn(&serio->dev,
			 "device_attach() failed for %s (%s), error: %d\n",
			 serio->phys, serio->name, error);
}


/*
 * Serio event processing.
 */

enum serio_event_type {
	SERIO_RESCAN_PORT,
	SERIO_RECONNECT_PORT,
	SERIO_RECONNECT_SUBTREE,
	SERIO_REGISTER_PORT,
	SERIO_ATTACH_DRIVER,
};

struct serio_event {
	enum serio_event_type type;
	void *object;
	struct module *owner;
	struct list_head node;
};

static DEFINE_SPINLOCK(serio_event_lock);	/* protects serio_event_list */
static LIST_HEAD(serio_event_list);

static struct serio_event *serio_get_event(void)
{
	struct serio_event *event = NULL;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	if (!list_empty(&serio_event_list)) {
		event = list_first_entry(&serio_event_list,
					 struct serio_event, node);
		list_del_init(&event->node);
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
	return event;
}

static void serio_free_event(struct serio_event *event)
{
	module_put(event->owner);
	kfree(event);
}

static void serio_remove_duplicate_events(void *object,
					  enum serio_event_type type)
{
	struct serio_event *e, *next;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry_safe(e, next, &serio_event_list, node) {
		if (object == e->object) {
			/*
			 * If this event is of a different type we should not
			 * look further - we only suppress duplicate events
			 * that were sent back-to-back.
			 */
			if (type != e->type)
				break;

			list_del_init(&e->node);
			serio_free_event(e);
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
}

static void serio_handle_event(struct work_struct *work)
{
	struct serio_event *event;

	mutex_lock(&serio_mutex);

	while ((event = serio_get_event())) {

		switch (event->type) {

		case SERIO_REGISTER_PORT:
			serio_add_port(event->object);
			break;

		case SERIO_RECONNECT_PORT:
			serio_reconnect_port(event->object);
			break;

		case SERIO_RESCAN_PORT:
			serio_disconnect_port(event->object);
			serio_find_driver(event->object);
			break;

		case SERIO_RECONNECT_SUBTREE:
			serio_reconnect_subtree(event->object);
			break;

		case SERIO_ATTACH_DRIVER:
			serio_attach_driver(event->object);
			break;
		}

		serio_remove_duplicate_events(event->object, event->type);
		serio_free_event(event);
	}

	mutex_unlock(&serio_mutex);
}

static DECLARE_WORK(serio_event_work, serio_handle_event);

static int serio_queue_event(void *object, struct module *owner,
			     enum serio_event_type event_type)
{
	unsigned long flags;
	struct serio_event *event;
	int retval = 0;

	spin_lock_irqsave(&serio_event_lock, flags);

	/*
	 * Scan the event list for other events for the same serio port,
	 * starting with the most recent one.  If the event is the same we
	 * do not need to add a new one.  If the event is of a different
	 * type we need to add this event and should not look further
	 * because we need to preserve the sequence of distinct events.
	 */
	list_for_each_entry_reverse(event, &serio_event_list, node) {
		if (event->object == object) {
			if (event->type == event_type)
				goto out;
			break;
		}
	}

	event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC);
	if (!event) {
		pr_err("Not enough memory to queue event %d\n", event_type);
		retval = -ENOMEM;
		goto out;
	}

	if (!try_module_get(owner)) {
		pr_warn("Can't get module reference, dropping event %d\n",
			event_type);
		kfree(event);
		retval = -EINVAL;
		goto out;
	}

	event->type = event_type;
	event->object = object;
	event->owner = owner;

	list_add_tail(&event->node, &serio_event_list);
	queue_work(system_long_wq, &serio_event_work);

out:
	spin_unlock_irqrestore(&serio_event_lock, flags);
	return retval;
}

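/*
 * Worked illustration of the de-duplication above (hypothetical caller,
 * not part of any code path here): queueing the same event type for a
 * port twice in a row collapses into a single entry, while a different
 * event type for the same port starts a new entry so ordering of distinct
 * events is preserved.  Assuming a port with nothing pending, calling the
 * serio_rescan()/serio_reconnect() helpers defined later in this file as
 *
 *	serio_rescan(serio);	-- queues SERIO_RESCAN_PORT
 *	serio_rescan(serio);	-- coalesced with the pending rescan
 *	serio_reconnect(serio);	-- queues SERIO_RECONNECT_SUBTREE
 *
 * leaves two events on serio_event_list: one rescan followed by one
 * reconnect-subtree.
 */
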
/*
 * Remove all events that have been submitted for a given
 * object, be it serio port or driver.
 */
static void serio_remove_pending_events(void *object)
{
	struct serio_event *event, *next;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry_safe(event, next, &serio_event_list, node) {
		if (event->object == object) {
			list_del_init(&event->node);
			serio_free_event(event);
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
}

/*
 * Locate child serio port (if any) that has not been fully registered yet.
 *
 * Children are registered by driver's connect() handler so there can't be a
 * grandchild pending registration together with a child.
 */
static struct serio *serio_get_pending_child(struct serio *parent)
{
	struct serio_event *event;
	struct serio *serio, *child = NULL;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry(event, &serio_event_list, node) {
		if (event->type == SERIO_REGISTER_PORT) {
			serio = event->object;
			if (serio->parent == parent) {
				child = serio;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
	return child;
}

/*
 * Serio port operations
 */

static ssize_t serio_show_description(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%s\n", serio->name);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);

	return sprintf(buf, "serio:ty%02Xpr%02Xid%02Xex%02X\n",
		       serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.type);
}

static ssize_t proto_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.proto);
}

static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.id);
}

static ssize_t extra_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.extra);
}

static ssize_t drvctl_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct serio *serio = to_serio_port(dev);
	struct device_driver *drv;
	int error;

	error = mutex_lock_interruptible(&serio_mutex);
	if (error)
		return error;

	if (!strncmp(buf, "none", count)) {
		serio_disconnect_port(serio);
	} else if (!strncmp(buf, "reconnect", count)) {
		serio_reconnect_subtree(serio);
	} else if (!strncmp(buf, "rescan", count)) {
		serio_disconnect_port(serio);
		serio_find_driver(serio);
		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
	} else if ((drv = driver_find(buf, &serio_bus)) != NULL) {
		serio_disconnect_port(serio);
		error = serio_bind_driver(serio, to_serio_driver(drv));
		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
	} else {
		error = -EINVAL;
	}

	mutex_unlock(&serio_mutex);

	return error ? error : count;
}

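/*
 * Userspace view (illustration only): a write to the per-port "drvctl"
 * attribute takes one of the branches above.  Accepted values are "none"
 * (unbind the port), "reconnect", "rescan", or the name of a registered
 * serio driver to bind explicitly.  A hypothetical userspace sketch,
 * assuming the port happens to be named serio0:
 *
 *	int fd = open("/sys/bus/serio/devices/serio0/drvctl", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "rescan", 6);
 *		close(fd);
 *	}
 */
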
static ssize_t serio_show_bind_mode(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%s\n", serio->manual_bind ? "manual" : "auto");
}

static ssize_t serio_set_bind_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct serio *serio = to_serio_port(dev);
	int retval;

	retval = count;
	if (!strncmp(buf, "manual", count)) {
		serio->manual_bind = true;
	} else if (!strncmp(buf, "auto", count)) {
		serio->manual_bind = false;
	} else {
		retval = -EINVAL;
	}

	return retval;
}

static ssize_t firmware_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);

	return sprintf(buf, "%s\n", serio->firmware_id);
}

static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(proto);
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(extra);

static struct attribute *serio_device_id_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_proto.attr,
	&dev_attr_id.attr,
	&dev_attr_extra.attr,
	NULL
};

static const struct attribute_group serio_id_attr_group = {
	.name	= "id",
	.attrs	= serio_device_id_attrs,
};

static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_WO(drvctl);
static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
static DEVICE_ATTR_RO(firmware_id);

static struct attribute *serio_device_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_description.attr,
	&dev_attr_drvctl.attr,
	&dev_attr_bind_mode.attr,
	&dev_attr_firmware_id.attr,
	NULL
};

static const struct attribute_group serio_device_attr_group = {
	.attrs	= serio_device_attrs,
};

static const struct attribute_group *serio_device_attr_groups[] = {
	&serio_id_attr_group,
	&serio_device_attr_group,
	NULL
};

static void serio_release_port(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	kfree(serio);
	module_put(THIS_MODULE);
}

/*
 * Prepare serio port for registration.
 */
static void serio_init_port(struct serio *serio)
{
	static atomic_t serio_no = ATOMIC_INIT(-1);

	__module_get(THIS_MODULE);

	INIT_LIST_HEAD(&serio->node);
	INIT_LIST_HEAD(&serio->child_node);
	INIT_LIST_HEAD(&serio->children);
	spin_lock_init(&serio->lock);
	mutex_init(&serio->drv_mutex);
	device_initialize(&serio->dev);
	dev_set_name(&serio->dev, "serio%lu",
		     (unsigned long)atomic_inc_return(&serio_no));
	serio->dev.bus = &serio_bus;
	serio->dev.release = serio_release_port;
	serio->dev.groups = serio_device_attr_groups;
	if (serio->parent) {
		serio->dev.parent = &serio->parent->dev;
		serio->depth = serio->parent->depth + 1;
	} else
		serio->depth = 0;
	lockdep_set_subclass(&serio->lock, serio->depth);
}

/*
 * Complete serio port registration.
 * The driver core will attempt to find an appropriate driver for the port.
 */
static void serio_add_port(struct serio *serio)
{
	struct serio *parent = serio->parent;
	int error;

	if (parent) {
		serio_pause_rx(parent);
		list_add_tail(&serio->child_node, &parent->children);
		serio_continue_rx(parent);
	}

	list_add_tail(&serio->node, &serio_list);

	if (serio->start)
		serio->start(serio);

	error = device_add(&serio->dev);
	if (error)
		dev_err(&serio->dev,
			"device_add() failed for %s (%s), error: %d\n",
			serio->phys, serio->name, error);
}

/*
 * serio_destroy_port() completes the unregistration process and removes
 * the port from the system.
 */
static void serio_destroy_port(struct serio *serio)
{
	struct serio *child;

	while ((child = serio_get_pending_child(serio)) != NULL) {
		serio_remove_pending_events(child);
		put_device(&child->dev);
	}

	if (serio->stop)
		serio->stop(serio);

	if (serio->parent) {
		serio_pause_rx(serio->parent);
		list_del_init(&serio->child_node);
		serio_continue_rx(serio->parent);
		serio->parent = NULL;
	}

	if (device_is_registered(&serio->dev))
		device_del(&serio->dev);

	list_del_init(&serio->node);
	serio_remove_pending_events(serio);
	put_device(&serio->dev);
}

/*
 * Reconnect serio port (re-initialize attached device).
 * If reconnect fails (old device is no longer attached or
 * there was no device to begin with) we do a full rescan in
 * the hope of finding a driver for the port.
 */
static int serio_reconnect_port(struct serio *serio)
{
	int error = serio_reconnect_driver(serio);

	if (error) {
		serio_disconnect_port(serio);
		serio_find_driver(serio);
	}

	return error;
}

/*
 * Reconnect serio port and all its children (re-initialize attached
 * devices).
 */
static void serio_reconnect_subtree(struct serio *root)
{
	struct serio *s = root;
	int error;

	do {
		error = serio_reconnect_port(s);
		if (!error) {
			/*
			 * Reconnect was successful, move on to do the
			 * first child.
			 */
			if (!list_empty(&s->children)) {
				s = list_first_entry(&s->children,
						     struct serio, child_node);
				continue;
			}
		}

		/*
		 * Either it was a leaf node or reconnect failed and it
		 * became a leaf node. Continue reconnecting starting with
		 * the next sibling of the parent node.
		 */
		while (s != root) {
			struct serio *parent = s->parent;

			if (!list_is_last(&s->child_node, &parent->children)) {
				s = list_entry(s->child_node.next,
					       struct serio, child_node);
				break;
			}

			s = parent;
		}
	} while (s != root);
}

/*
 * serio_disconnect_port() unbinds a port from its driver. As a side effect
 * all child ports are unbound and destroyed.
 */
static void serio_disconnect_port(struct serio *serio)
{
	struct serio *s = serio;

	/*
	 * Child ports should be disconnected and destroyed
	 * first; we traverse the tree in depth-first order.
	 */
	while (!list_empty(&serio->children)) {

		/* Locate a leaf */
		while (!list_empty(&s->children))
			s = list_first_entry(&s->children,
					     struct serio, child_node);

		/*
		 * Prune this leaf node unless it is the one we
		 * started with.
		 */
		if (s != serio) {
			struct serio *parent = s->parent;

			device_release_driver(&s->dev);
			serio_destroy_port(s);

			s = parent;
		}
	}

	/*
	 * OK, no children left, now disconnect this port.
	 */
	device_release_driver(&serio->dev);
}

void serio_rescan(struct serio *serio)
{
	serio_queue_event(serio, NULL, SERIO_RESCAN_PORT);
}
EXPORT_SYMBOL(serio_rescan);

void serio_reconnect(struct serio *serio)
{
	serio_queue_event(serio, NULL, SERIO_RECONNECT_SUBTREE);
}
EXPORT_SYMBOL(serio_reconnect);

/*
 * Submits a register request to kseriod for subsequent execution.
 * Note that port registration is always asynchronous.
 */
void __serio_register_port(struct serio *serio, struct module *owner)
{
	serio_init_port(serio);
	serio_queue_event(serio, owner, SERIO_REGISTER_PORT);
}
EXPORT_SYMBOL(__serio_register_port);

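/*
 * Readers' sketch (hypothetical, not part of this module): port drivers
 * normally call the serio_register_port() wrapper from <linux/serio.h>
 * rather than __serio_register_port() directly; the wrapper simply passes
 * THIS_MODULE as the owner.  Allocation and setup typically look roughly
 * like this (all "example_" names are made up for illustration):
 *
 *	struct serio *serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
 *	if (!serio)
 *		return -ENOMEM;
 *
 *	serio->id.type = SERIO_8042;
 *	strlcpy(serio->name, "Example port", sizeof(serio->name));
 *	strlcpy(serio->phys, "example/serio0", sizeof(serio->phys));
 *	serio->write = example_write;	// sends one byte to the device
 *	serio->port_data = example_port;
 *
 *	serio_register_port(serio);	// asynchronous, handled by kseriod
 *
 * The matching teardown is serio_unregister_port(), which is synchronous.
 */
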
/*
 * Synchronously unregisters a serio port.
 */
void serio_unregister_port(struct serio *serio)
{
	mutex_lock(&serio_mutex);
	serio_disconnect_port(serio);
	serio_destroy_port(serio);
	mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_port);

/*
 * Safely unregisters child ports if they are present.
 */
void serio_unregister_child_port(struct serio *serio)
{
	struct serio *s, *next;

	mutex_lock(&serio_mutex);
	list_for_each_entry_safe(s, next, &serio->children, child_node) {
		serio_disconnect_port(s);
		serio_destroy_port(s);
	}
	mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_child_port);


/*
 * Serio driver operations
 */

static ssize_t description_show(struct device_driver *drv, char *buf)
{
	struct serio_driver *driver = to_serio_driver(drv);
	return sprintf(buf, "%s\n", driver->description ? driver->description : "(none)");
}
static DRIVER_ATTR_RO(description);

static ssize_t bind_mode_show(struct device_driver *drv, char *buf)
{
	struct serio_driver *serio_drv = to_serio_driver(drv);
	return sprintf(buf, "%s\n", serio_drv->manual_bind ? "manual" : "auto");
}

static ssize_t bind_mode_store(struct device_driver *drv, const char *buf, size_t count)
{
	struct serio_driver *serio_drv = to_serio_driver(drv);
	int retval;

	retval = count;
	if (!strncmp(buf, "manual", count)) {
		serio_drv->manual_bind = true;
	} else if (!strncmp(buf, "auto", count)) {
		serio_drv->manual_bind = false;
	} else {
		retval = -EINVAL;
	}

	return retval;
}
static DRIVER_ATTR_RW(bind_mode);

static struct attribute *serio_driver_attrs[] = {
	&driver_attr_description.attr,
	&driver_attr_bind_mode.attr,
	NULL,
};
ATTRIBUTE_GROUPS(serio_driver);

static int serio_driver_probe(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);
	struct serio_driver *drv = to_serio_driver(dev->driver);

	return serio_connect_driver(serio, drv);
}

static int serio_driver_remove(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_disconnect_driver(serio);
	return 0;
}

static void serio_cleanup(struct serio *serio)
{
	mutex_lock(&serio->drv_mutex);
	if (serio->drv && serio->drv->cleanup)
		serio->drv->cleanup(serio);
	mutex_unlock(&serio->drv_mutex);
}

static void serio_shutdown(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_cleanup(serio);
}

static void serio_attach_driver(struct serio_driver *drv)
{
	int error;

	error = driver_attach(&drv->driver);
	if (error)
		pr_warn("driver_attach() failed for %s with error %d\n",
			drv->driver.name, error);
}

int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name)
{
	bool manual_bind = drv->manual_bind;
	int error;

	drv->driver.bus = &serio_bus;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;

	/*
	 * Temporarily disable automatic binding because probing
	 * takes a long time and we are better off doing it in kseriod.
	 */
	drv->manual_bind = true;

	error = driver_register(&drv->driver);
	if (error) {
		pr_err("driver_register() failed for %s, error: %d\n",
			drv->driver.name, error);
		return error;
	}

	/*
	 * Restore the original bind mode and let kseriod bind the
	 * driver to free ports.
	 */
	if (!manual_bind) {
		drv->manual_bind = false;
		error = serio_queue_event(drv, NULL, SERIO_ATTACH_DRIVER);
		if (error) {
			driver_unregister(&drv->driver);
			return error;
		}
	}

	return 0;
}
EXPORT_SYMBOL(__serio_register_driver);

void serio_unregister_driver(struct serio_driver *drv)
{
	struct serio *serio;

	mutex_lock(&serio_mutex);

	drv->manual_bind = true;	/* so serio_find_driver ignores it */
	serio_remove_pending_events(drv);

start_over:
	list_for_each_entry(serio, &serio_list, node) {
		if (serio->drv == drv) {
			serio_disconnect_port(serio);
			serio_find_driver(serio);
			/* we could've deleted some ports, restart */
			goto start_over;
		}
	}

	driver_unregister(&drv->driver);
	mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_driver);

static void serio_set_drv(struct serio *serio, struct serio_driver *drv)
{
	serio_pause_rx(serio);
	serio->drv = drv;
	serio_continue_rx(serio);
}

static int serio_bus_match(struct device *dev, struct device_driver *drv)
{
	struct serio *serio = to_serio_port(dev);
	struct serio_driver *serio_drv = to_serio_driver(drv);

	if (serio->manual_bind || serio_drv->manual_bind)
		return 0;

	return serio_match_port(serio_drv->id_table, serio);
}

#define SERIO_ADD_UEVENT_VAR(fmt, val...)				\
	do {								\
		int err = add_uevent_var(env, fmt, val);		\
		if (err)						\
			return err;					\
	} while (0)

static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct serio *serio;

	if (!dev)
		return -ENODEV;

	serio = to_serio_port(dev);

	SERIO_ADD_UEVENT_VAR("SERIO_TYPE=%02x", serio->id.type);
	SERIO_ADD_UEVENT_VAR("SERIO_PROTO=%02x", serio->id.proto);
	SERIO_ADD_UEVENT_VAR("SERIO_ID=%02x", serio->id.id);
	SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra);

	SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X",
			     serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);

	if (serio->firmware_id[0])
		SERIO_ADD_UEVENT_VAR("SERIO_FIRMWARE_ID=%s",
				     serio->firmware_id);

	return 0;
}
#undef SERIO_ADD_UEVENT_VAR

#ifdef CONFIG_PM
static int serio_suspend(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_cleanup(serio);

	return 0;
}

static int serio_resume(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);
	int error = -ENOENT;

	mutex_lock(&serio->drv_mutex);
	if (serio->drv && serio->drv->fast_reconnect) {
		error = serio->drv->fast_reconnect(serio);
		if (error && error != -ENOENT)
			dev_warn(dev, "fast reconnect failed with error %d\n",
				 error);
	}
	mutex_unlock(&serio->drv_mutex);

	if (error) {
		/*
		 * Driver reconnect can take a while, so better let
		 * kseriod deal with it.
		 */
		serio_queue_event(serio, NULL, SERIO_RECONNECT_PORT);
	}

	return 0;
}

static const struct dev_pm_ops serio_pm_ops = {
	.suspend	= serio_suspend,
	.resume		= serio_resume,
	.poweroff	= serio_suspend,
	.restore	= serio_resume,
};
#endif /* CONFIG_PM */

/* called from serio_driver->connect/disconnect methods under serio_mutex */
int serio_open(struct serio *serio, struct serio_driver *drv)
{
	serio_set_drv(serio, drv);

	if (serio->open && serio->open(serio)) {
		serio_set_drv(serio, NULL);
		return -1;
	}
	return 0;
}
EXPORT_SYMBOL(serio_open);

/* called from serio_driver->connect/disconnect methods under serio_mutex */
void serio_close(struct serio *serio)
{
	if (serio->close)
		serio->close(serio);

	serio_set_drv(serio, NULL);
}
EXPORT_SYMBOL(serio_close);

irqreturn_t serio_interrupt(struct serio *serio,
			    unsigned char data, unsigned int dfl)
{
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	spin_lock_irqsave(&serio->lock, flags);

	if (likely(serio->drv)) {
		ret = serio->drv->interrupt(serio, data, dfl);
	} else if (!dfl && device_is_registered(&serio->dev)) {
		serio_rescan(serio);
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&serio->lock, flags);

	return ret;
}
EXPORT_SYMBOL(serio_interrupt);

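/*
 * Readers' sketch (hypothetical, not part of this module): a port driver
 * typically calls serio_interrupt() from its own hardware interrupt
 * handler, passing any error indication in dfl using the flags from
 * <linux/serio.h> (e.g. SERIO_PARITY, SERIO_TIMEOUT, SERIO_FRAME).  All
 * "example_" names below are made up for illustration:
 *
 *	static irqreturn_t example_port_isr(int irq, void *dev_id)
 *	{
 *		struct example_port *port = dev_id;
 *		unsigned char c = example_read_data(port);
 *		unsigned int dfl = example_had_parity_error(port) ?
 *					SERIO_PARITY : 0;
 *
 *		serio_interrupt(port->serio, c, dfl);
 *		return IRQ_HANDLED;
 *	}
 */
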
struct bus_type serio_bus = {
	.name		= "serio",
	.drv_groups	= serio_driver_groups,
	.match		= serio_bus_match,
	.uevent		= serio_uevent,
	.probe		= serio_driver_probe,
	.remove		= serio_driver_remove,
	.shutdown	= serio_shutdown,
#ifdef CONFIG_PM
	.pm		= &serio_pm_ops,
#endif
};
EXPORT_SYMBOL(serio_bus);

static int __init serio_init(void)
{
	int error;

	error = bus_register(&serio_bus);
	if (error) {
		pr_err("Failed to register serio bus, error: %d\n", error);
		return error;
	}

	return 0;
}

static void __exit serio_exit(void)
{
	bus_unregister(&serio_bus);

	/*
	 * There should not be any outstanding events but work may
	 * still be scheduled so simply cancel it.
	 */
	cancel_work_sync(&serio_event_work);
}

subsys_initcall(serio_init);
module_exit(serio_exit);
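
/*
 * Closing sketch for readers (hypothetical, not part of this module): a
 * minimal client driver registers on the bus above via the
 * serio_register_driver()/module_serio_driver() helpers from
 * <linux/serio.h>, which feed __serio_register_driver() with THIS_MODULE
 * and KBUILD_MODNAME.  connect() claims the port with serio_open() and
 * disconnect() releases it with serio_close().  It reuses the hypothetical
 * example_serio_ids table sketched earlier; every "example_" name is made
 * up for illustration.
 *
 *	static irqreturn_t example_interrupt(struct serio *serio,
 *					     unsigned char data, unsigned int flags)
 *	{
 *		// consume one byte received from the port
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int example_connect(struct serio *serio, struct serio_driver *drv)
 *	{
 *		return serio_open(serio, drv);
 *	}
 *
 *	static void example_disconnect(struct serio *serio)
 *	{
 *		serio_close(serio);
 *	}
 *
 *	static struct serio_driver example_drv = {
 *		.driver		= { .name = "example" },
 *		.description	= "Example serio client",
 *		.id_table	= example_serio_ids,
 *		.interrupt	= example_interrupt,
 *		.connect	= example_connect,
 *		.disconnect	= example_disconnect,
 *	};
 *	MODULE_DEVICE_TABLE(serio, example_serio_ids);
 *	module_serio_driver(example_drv);
 */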