/*
 * bios-less APM driver for ARM Linux
 *  Jamey Hicks <jamey@crl.dec.com>
 *  adapted from the APM BIOS driver for Linux by Stephen Rothwell (sfr@linuxcare.com)
 *
 * APM 1.2 Reference:
 *   Intel Corporation, Microsoft Corporation. Advanced Power Management
 *   (APM) BIOS Interface Specification, Revision 1.2, February 1996.
 *
 * This document is available from Microsoft at:
 *   http://www.microsoft.com/whdc/archive/amp_12.mspx
 */
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/apm_bios.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/apm-emulation.h>
#include <linux/freezer.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/delay.h>

/*
 * One option can be changed at boot time as follows:
 *	apm=on/off			enable/disable APM
 */

/*
 * Maximum number of events stored
 */
#define APM_MAX_EVENTS		16

struct apm_queue {
	unsigned int		event_head;
	unsigned int		event_tail;
	apm_event_t		events[APM_MAX_EVENTS];
};

/*
 * thread states (for threads using a writable /dev/apm_bios fd):
 *
 * SUSPEND_NONE:	nothing happening
 * SUSPEND_PENDING:	suspend event queued for thread and pending to be read
 * SUSPEND_READ:	suspend event read, pending acknowledgement
 * SUSPEND_ACKED:	acknowledgement received from thread (via ioctl),
 *			waiting for resume
 * SUSPEND_ACKTO:	acknowledgement timeout
 * SUSPEND_DONE:	thread had acked suspend and is now notified of
 *			resume
 *
 * SUSPEND_WAIT:	this thread invoked suspend and is waiting for resume
 *
 * A thread migrates in one of three paths:
 *	NONE -1-> PENDING -2-> READ -3-> ACKED -4-> DONE -5-> NONE
 *				    -6-> ACKTO -7-> NONE
 *	NONE -8-> WAIT -9-> NONE
 *
 * While in PENDING or READ, the thread is accounted for in the
 * suspend_acks_pending counter.
 *
 * The transitions are invoked as follows:
 *	1: suspend event is signalled from the core PM code
 *	2: the suspend event is read from the fd by the userspace thread
 *	3: userspace thread issues the APM_IOC_SUSPEND ioctl (as ack)
 *	4: core PM code signals that we have resumed
 *	5: APM_IOC_SUSPEND ioctl returns
 *
 *	6: the notifier invoked from the core PM code timed out waiting
 *	   for all relevant threads to enter ACKED state and puts those
 *	   that haven't into ACKTO
 *	7: those threads issue the APM_IOC_SUSPEND ioctl too late and
 *	   get an error
 *
 *	8: userspace thread issues the APM_IOC_SUSPEND ioctl (to suspend),
 *	   ioctl code invokes pm_suspend()
 *	9: pm_suspend() returns indicating resume
 */
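/*
 * Illustrative sketch (not part of this driver) of the userspace side of
 * transitions 1-5 above, assuming a daemon that opened /dev/apm_bios
 * read-write.  apm_event_t and the APM_* constants come from
 * <linux/apm_bios.h>; error handling is omitted:
 *
 *	int fd = open("/dev/apm_bios", O_RDWR);
 *	apm_event_t ev;
 *
 *	for (;;) {
 *		read(fd, &ev, sizeof(ev));		// 2: read the event
 *		if (ev == APM_USER_SUSPEND || ev == APM_SYS_SUSPEND)
 *			// 3: acknowledge; the ioctl only returns once the
 *			// system has resumed (transitions 4 and 5)
 *			ioctl(fd, APM_IOC_SUSPEND, 0);
 *	}
 */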
enum apm_suspend_state {
	SUSPEND_NONE,
	SUSPEND_PENDING,
	SUSPEND_READ,
	SUSPEND_ACKED,
	SUSPEND_ACKTO,
	SUSPEND_WAIT,
	SUSPEND_DONE,
};

/*
 * The per-file APM data
 */
struct apm_user {
	struct list_head	list;

	unsigned int		suser: 1;
	unsigned int		writer: 1;
	unsigned int		reader: 1;

	int			suspend_result;
	enum apm_suspend_state	suspend_state;

	struct apm_queue	queue;
};

/*
 * Local variables
 */
static atomic_t suspend_acks_pending = ATOMIC_INIT(0);
static atomic_t userspace_notification_inhibit = ATOMIC_INIT(0);
static int apm_disabled;
static struct task_struct *kapmd_tsk;

static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);

/*
 * This is a list of everyone who has opened /dev/apm_bios
 */
static DECLARE_RWSEM(user_list_lock);
static LIST_HEAD(apm_user_list);

/*
 * kapmd info.  kapmd provides us a process context to handle
 * "APM" events within - specifically necessary if we're going
 * to be suspending the system.
 */
static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait);
static DEFINE_SPINLOCK(kapmd_queue_lock);
static struct apm_queue kapmd_queue;

static DEFINE_MUTEX(state_lock);

static const char driver_version[] = "1.13";	/* no spaces */

/*
 * Compatibility cruft until the IPAQ people move over to the new
 * interface.
 */
static void __apm_get_power_status(struct apm_power_info *info)
{
}

/*
 * This allows machines to provide their own "apm get power status" function.
 */
void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status;
EXPORT_SYMBOL(apm_get_power_status);

/*
 * APM event queue management.
 */
static inline int queue_empty(struct apm_queue *q)
{
	return q->event_head == q->event_tail;
}

static inline apm_event_t queue_get_event(struct apm_queue *q)
{
	q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
	return q->events[q->event_tail];
}

static void queue_add_event(struct apm_queue *q, apm_event_t event)
{
	q->event_head = (q->event_head + 1) % APM_MAX_EVENTS;
	if (q->event_head == q->event_tail) {
		static int notified;

		if (notified++ == 0)
			printk(KERN_ERR "apm: an event queue overflowed\n");
		q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
	}
	q->events[q->event_head] = event;
}

static void queue_event(apm_event_t event)
{
	struct apm_user *as;

	down_read(&user_list_lock);
	list_for_each_entry(as, &apm_user_list, list) {
		if (as->reader)
			queue_add_event(&as->queue, event);
	}
	up_read(&user_list_lock);
	wake_up_interruptible(&apm_waitqueue);
}

static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
{
	struct apm_user *as = fp->private_data;
	apm_event_t event;
	int i = count, ret = 0;

	if (count < sizeof(apm_event_t))
		return -EINVAL;

	if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK)
		return -EAGAIN;

	wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue));

	while ((i >= sizeof(event)) && !queue_empty(&as->queue)) {
		event = queue_get_event(&as->queue);

		ret = -EFAULT;
		if (copy_to_user(buf, &event, sizeof(event)))
			break;

		mutex_lock(&state_lock);
		if (as->suspend_state == SUSPEND_PENDING &&
		    (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND))
			as->suspend_state = SUSPEND_READ;
		mutex_unlock(&state_lock);

		buf += sizeof(event);
		i -= sizeof(event);
	}

	if (i < count)
		ret = count - i;

	return ret;
}

static __poll_t apm_poll(struct file *fp, poll_table *wait)
{
	struct apm_user *as = fp->private_data;

	poll_wait(fp, &apm_waitqueue, wait);
	return queue_empty(&as->queue) ? 0 : EPOLLIN | EPOLLRDNORM;
}

/*
 * apm_ioctl - handle APM ioctl
 *
 * APM_IOC_SUSPEND
 *   This IOCTL is overloaded, and performs two functions.  It is used to:
 *	- initiate a suspend
 *	- acknowledge a suspend read from /dev/apm_bios.
 *   Only when everyone who has opened /dev/apm_bios with write permission
 *   has acknowledged does the actual suspend happen.
 */
static long
apm_ioctl(struct file *filp, u_int cmd, u_long arg)
{
	struct apm_user *as = filp->private_data;
	int err = -EINVAL;

	if (!as->suser || !as->writer)
		return -EPERM;

	switch (cmd) {
	case APM_IOC_SUSPEND:
		mutex_lock(&state_lock);

		as->suspend_result = -EINTR;

		switch (as->suspend_state) {
		case SUSPEND_READ:
			/*
			 * If we read a suspend command from /dev/apm_bios,
			 * then the corresponding APM_IOC_SUSPEND ioctl is
			 * interpreted as an acknowledge.
			 */
			as->suspend_state = SUSPEND_ACKED;
			atomic_dec(&suspend_acks_pending);
			mutex_unlock(&state_lock);

			/*
			 * suspend_acks_pending changed, the notifier needs to
			 * be woken up for this
			 */
			wake_up(&apm_suspend_waitqueue);

			/*
			 * Wait for the suspend/resume to complete.  If there
			 * are pending acknowledges, we wait here for them.
			 * wait_event_freezable() is interruptible and a
			 * pending signal can cause busy looping.  We aren't
			 * doing anything critical, so chill a bit on each
			 * iteration.
			 */
			while (wait_event_freezable(apm_suspend_waitqueue,
					as->suspend_state != SUSPEND_ACKED))
				msleep(10);
			break;
		case SUSPEND_ACKTO:
			as->suspend_result = -ETIMEDOUT;
			mutex_unlock(&state_lock);
			break;
		default:
			as->suspend_state = SUSPEND_WAIT;
			mutex_unlock(&state_lock);

			/*
			 * Otherwise it is a request to suspend the system.
			 * Just invoke pm_suspend(); we'll handle it from
			 * there via the notifier.
			 */
			as->suspend_result = pm_suspend(PM_SUSPEND_MEM);
		}

		mutex_lock(&state_lock);
		err = as->suspend_result;
		as->suspend_state = SUSPEND_NONE;
		mutex_unlock(&state_lock);
		break;
	}

	return err;
}

static int apm_release(struct inode *inode, struct file *filp)
{
	struct apm_user *as = filp->private_data;

	filp->private_data = NULL;

	down_write(&user_list_lock);
	list_del(&as->list);
	up_write(&user_list_lock);

	/*
	 * We are now unhooked from the chain.  As far as new
	 * events are concerned, we no longer exist.
	 */
	mutex_lock(&state_lock);
	if (as->suspend_state == SUSPEND_PENDING ||
	    as->suspend_state == SUSPEND_READ)
		atomic_dec(&suspend_acks_pending);
	mutex_unlock(&state_lock);

	wake_up(&apm_suspend_waitqueue);

	kfree(as);
	return 0;
}

static int apm_open(struct inode *inode, struct file *filp)
{
	struct apm_user *as;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (as) {
		/*
		 * XXX - this is a tiny bit broken, when we consider BSD
		 * process accounting.  If the device is opened by root, we
		 * instantly flag that we used superuser privs.  Who knows,
		 * we might close the device immediately without doing a
		 * privileged operation -- cevans
		 */
		as->suser = capable(CAP_SYS_ADMIN);
		as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE;
		as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ;

		down_write(&user_list_lock);
		list_add(&as->list, &apm_user_list);
		up_write(&user_list_lock);

		filp->private_data = as;
	}

	return as ? 0 : -ENOMEM;
}

static const struct file_operations apm_bios_fops = {
	.owner		= THIS_MODULE,
	.read		= apm_read,
	.poll		= apm_poll,
	.unlocked_ioctl	= apm_ioctl,
	.open		= apm_open,
	.release	= apm_release,
	.llseek		= noop_llseek,
};

static struct miscdevice apm_device = {
	.minor		= APM_MINOR_DEV,
	.name		= "apm_bios",
	.fops		= &apm_bios_fops
};

#ifdef CONFIG_PROC_FS
/*
 * Arguments, with symbols from linux/apm_bios.h.
 *
 *   0) Linux driver version (this will change if format changes)
 *   1) APM BIOS Version.  Usually 1.0, 1.1 or 1.2.
 *   2) APM flags from APM Installation Check (0x00):
 *	bit 0: APM_16_BIT_SUPPORT
 *	bit 1: APM_32_BIT_SUPPORT
 *	bit 2: APM_IDLE_SLOWS_CLOCK
 *	bit 3: APM_BIOS_DISABLED
 *	bit 4: APM_BIOS_DISENGAGED
 *   3) AC line status
 *	0x00: Off-line
 *	0x01: On-line
 *	0x02: On backup power (BIOS >= 1.1 only)
 *	0xff: Unknown
 *   4) Battery status
 *	0x00: High
 *	0x01: Low
 *	0x02: Critical
 *	0x03: Charging
 *	0x04: Selected battery not present (BIOS >= 1.2 only)
 *	0xff: Unknown
 *   5) Battery flag
 *	bit 0: High
 *	bit 1: Low
 *	bit 2: Critical
 *	bit 3: Charging
 *	bit 7: No system battery
 *	0xff: Unknown
 *   6) Remaining battery life (percentage of charge):
 *	0-100: valid
 *	-1: Unknown
 *   7) Remaining battery life (time units):
 *	Number of remaining minutes or seconds
 *	-1: Unknown
 *   8) min = minutes; sec = seconds
 */
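/*
 * Example of the resulting /proc/apm line (values are illustrative):
 *
 *	1.13 1.2 0x02 0x01 0x03 0x08 89% 120 min
 *
 * i.e. driver version 1.13, APM 1.2, APM_32_BIT_SUPPORT, AC on-line,
 * battery charging, 89% charge, 120 minutes remaining.  Without a
 * machine-specific apm_get_power_status hook the status fields stay at
 * their 0xff/-1 "unknown" defaults.
 */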
static int proc_apm_show(struct seq_file *m, void *v)
{
	struct apm_power_info info;
	char *units;

	info.ac_line_status = 0xff;
	info.battery_status = 0xff;
	info.battery_flag = 0xff;
	info.battery_life = -1;
	info.time = -1;
	info.units = -1;

	if (apm_get_power_status)
		apm_get_power_status(&info);

	switch (info.units) {
	default:	units = "?";	break;
	case 0:		units = "min";	break;
	case 1:		units = "sec";	break;
	}

	seq_printf(m, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
		   driver_version, APM_32_BIT_SUPPORT,
		   info.ac_line_status, info.battery_status,
		   info.battery_flag, info.battery_life,
		   info.time, units);

	return 0;
}

static int proc_apm_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_apm_show, NULL);
}

static const struct file_operations apm_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= proc_apm_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif

static int kapmd(void *arg)
{
	do {
		apm_event_t event;

		wait_event_interruptible(kapmd_wait,
			!queue_empty(&kapmd_queue) || kthread_should_stop());

		if (kthread_should_stop())
			break;

		spin_lock_irq(&kapmd_queue_lock);
		event = 0;
		if (!queue_empty(&kapmd_queue))
			event = queue_get_event(&kapmd_queue);
		spin_unlock_irq(&kapmd_queue_lock);

		switch (event) {
		case 0:
			break;

		case APM_LOW_BATTERY:
		case APM_POWER_STATUS_CHANGE:
			queue_event(event);
			break;

		case APM_USER_SUSPEND:
		case APM_SYS_SUSPEND:
			pm_suspend(PM_SUSPEND_MEM);
			break;

		case APM_CRITICAL_SUSPEND:
			atomic_inc(&userspace_notification_inhibit);
			pm_suspend(PM_SUSPEND_MEM);
			atomic_dec(&userspace_notification_inhibit);
			break;
		}
	} while (1);

	return 0;
}

static int apm_suspend_notifier(struct notifier_block *nb,
				unsigned long event,
				void *dummy)
{
	struct apm_user *as;
	int err;
	unsigned long apm_event;

	/* short-cut emergency suspends */
	if (atomic_read(&userspace_notification_inhibit))
		return NOTIFY_DONE;

	switch (event) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		apm_event = (event == PM_SUSPEND_PREPARE) ?
			APM_USER_SUSPEND : APM_USER_HIBERNATION;
		/*
		 * Queue an event to all "writer" users that we want
		 * to suspend and need their ack.
		 */
		mutex_lock(&state_lock);
		down_read(&user_list_lock);

		list_for_each_entry(as, &apm_user_list, list) {
			if (as->suspend_state != SUSPEND_WAIT && as->reader &&
			    as->writer && as->suser) {
				as->suspend_state = SUSPEND_PENDING;
				atomic_inc(&suspend_acks_pending);
				queue_add_event(&as->queue, apm_event);
			}
		}

		up_read(&user_list_lock);
		mutex_unlock(&state_lock);
		wake_up_interruptible(&apm_waitqueue);

		/*
		 * Wait for the suspend_acks_pending variable to drop to
		 * zero, meaning everybody acked the suspend event (or the
		 * process was killed.)
		 *
		 * If the app won't answer within a short while we assume it
		 * locked up and ignore it.
		 */
		err = wait_event_interruptible_timeout(
			apm_suspend_waitqueue,
			atomic_read(&suspend_acks_pending) == 0,
			5*HZ);

		/* timed out */
		if (err == 0) {
			/*
			 * Move anybody who timed out to "ack timeout" state.
			 *
			 * Userspace could do the ACK right after we time out
			 * but before we enter the locked section here, but
			 * that's fine.
			 */
			mutex_lock(&state_lock);
			down_read(&user_list_lock);
			list_for_each_entry(as, &apm_user_list, list) {
				if (as->suspend_state == SUSPEND_PENDING ||
				    as->suspend_state == SUSPEND_READ) {
					as->suspend_state = SUSPEND_ACKTO;
					atomic_dec(&suspend_acks_pending);
				}
			}
			up_read(&user_list_lock);
			mutex_unlock(&state_lock);
		}

		/* let suspend proceed */
		if (err >= 0)
			return NOTIFY_OK;

		/* interrupted by signal */
		return notifier_from_errno(err);

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		apm_event = (event == PM_POST_SUSPEND) ?
			APM_NORMAL_RESUME : APM_HIBERNATION_RESUME;
		/*
		 * Anyone on the APM queues will think we're still suspended.
		 * Send a message so everyone knows we're now awake again.
		 */
		queue_event(apm_event);

		/*
		 * Finally, wake up anyone who is sleeping on the suspend.
		 */
		mutex_lock(&state_lock);
		down_read(&user_list_lock);
		list_for_each_entry(as, &apm_user_list, list) {
			if (as->suspend_state == SUSPEND_ACKED) {
				/*
				 * TODO: maybe grab error code, needs core
				 * changes to push the error to the notifier
				 * chain (could use the second parameter if
				 * implemented)
				 */
				as->suspend_result = 0;
				as->suspend_state = SUSPEND_DONE;
			}
		}
		up_read(&user_list_lock);
		mutex_unlock(&state_lock);

		wake_up(&apm_suspend_waitqueue);
		return NOTIFY_OK;

	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block apm_notif_block = {
	.notifier_call = apm_suspend_notifier,
};

static int __init apm_init(void)
{
	int ret;

	if (apm_disabled) {
		printk(KERN_NOTICE "apm: disabled on user request.\n");
		return -ENODEV;
	}

	kapmd_tsk = kthread_create(kapmd, NULL, "kapmd");
	if (IS_ERR(kapmd_tsk)) {
		ret = PTR_ERR(kapmd_tsk);
		kapmd_tsk = NULL;
		goto out;
	}
	wake_up_process(kapmd_tsk);

#ifdef CONFIG_PROC_FS
	proc_create("apm", 0, NULL, &apm_proc_fops);
#endif

	ret = misc_register(&apm_device);
	if (ret)
		goto out_stop;

	ret = register_pm_notifier(&apm_notif_block);
	if (ret)
		goto out_unregister;

	return 0;

 out_unregister:
	misc_deregister(&apm_device);
 out_stop:
	remove_proc_entry("apm", NULL);
	kthread_stop(kapmd_tsk);
 out:
	return ret;
}

static void __exit apm_exit(void)
{
	unregister_pm_notifier(&apm_notif_block);
	misc_deregister(&apm_device);
	remove_proc_entry("apm", NULL);

	kthread_stop(kapmd_tsk);
}

module_init(apm_init);
module_exit(apm_exit);

MODULE_AUTHOR("Stephen Rothwell");
MODULE_DESCRIPTION("Advanced Power Management");
MODULE_LICENSE("GPL");

#ifndef MODULE
static int __init apm_setup(char *str)
{
	while ((str != NULL) && (*str != '\0')) {
		if (strncmp(str, "off", 3) == 0)
			apm_disabled = 1;
		if (strncmp(str, "on", 2) == 0)
			apm_disabled = 0;
		str = strchr(str, ',');
		if (str != NULL)
			str += strspn(str, ", \t");
	}
	return 1;
}

__setup("apm=", apm_setup);
#endif

/**
 * apm_queue_event - queue an APM event for kapmd
 * @event: APM event
 *
 * Queue an APM event for kapmd to process and ultimately take the
 * appropriate action.  Only a subset of events are handled:
 *   %APM_LOW_BATTERY
 *   %APM_POWER_STATUS_CHANGE
 *   %APM_USER_SUSPEND
 *   %APM_SYS_SUSPEND
 *   %APM_CRITICAL_SUSPEND
 */
void apm_queue_event(apm_event_t event)
{
	unsigned long flags;

	spin_lock_irqsave(&kapmd_queue_lock, flags);
	queue_add_event(&kapmd_queue, event);
	spin_unlock_irqrestore(&kapmd_queue_lock, flags);

	wake_up_interruptible(&kapmd_wait);
}
EXPORT_SYMBOL(apm_queue_event);
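/*
 * Illustrative sketch (not part of this file) of how machine-specific code
 * is expected to hook into this driver: assign apm_get_power_status to
 * report battery state, and call apm_queue_event() when a power event
 * occurs.  Everything prefixed "mymachine_" below is a made-up name:
 *
 *	static void mymachine_get_power_status(struct apm_power_info *info)
 *	{
 *		info->ac_line_status = mymachine_on_ac() ? 0x01 : 0x00;
 *		info->battery_life = mymachine_battery_percent();
 *		info->units = 0;	// remaining time reported in minutes
 *	}
 *
 *	static int __init mymachine_apm_hook_init(void)
 *	{
 *		apm_get_power_status = mymachine_get_power_status;
 *		return 0;
 *	}
 *
 *	// and, e.g. from a battery-monitor interrupt handler or workqueue:
 *	apm_queue_event(APM_LOW_BATTERY);
 */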