1 /* 2 * RTC subsystem, interface functions 3 * 4 * Copyright (C) 2005 Tower Technologies 5 * Author: Alessandro Zummo <a.zummo@towertech.it> 6 * 7 * based on arch/arm/common/rtctime.c 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 as 11 * published by the Free Software Foundation. 12 */ 13 14 #include <linux/rtc.h> 15 #include <linux/sched.h> 16 #include <linux/module.h> 17 #include <linux/log2.h> 18 #include <linux/workqueue.h> 19 20 static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer); 21 static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer); 22 23 static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm) 24 { 25 int err; 26 if (!rtc->ops) 27 err = -ENODEV; 28 else if (!rtc->ops->read_time) 29 err = -EINVAL; 30 else { 31 memset(tm, 0, sizeof(struct rtc_time)); 32 err = rtc->ops->read_time(rtc->dev.parent, tm); 33 if (err < 0) { 34 dev_dbg(&rtc->dev, "read_time: fail to read: %d\n", 35 err); 36 return err; 37 } 38 39 err = rtc_valid_tm(tm); 40 if (err < 0) 41 dev_dbg(&rtc->dev, "read_time: rtc_time isn't valid\n"); 42 } 43 return err; 44 } 45 46 int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm) 47 { 48 int err; 49 50 err = mutex_lock_interruptible(&rtc->ops_lock); 51 if (err) 52 return err; 53 54 err = __rtc_read_time(rtc, tm); 55 mutex_unlock(&rtc->ops_lock); 56 return err; 57 } 58 EXPORT_SYMBOL_GPL(rtc_read_time); 59 60 int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm) 61 { 62 int err; 63 64 err = rtc_valid_tm(tm); 65 if (err != 0) 66 return err; 67 68 err = mutex_lock_interruptible(&rtc->ops_lock); 69 if (err) 70 return err; 71 72 if (!rtc->ops) 73 err = -ENODEV; 74 else if (rtc->ops->set_time) 75 err = rtc->ops->set_time(rtc->dev.parent, tm); 76 else if (rtc->ops->set_mmss64) { 77 time64_t secs64 = rtc_tm_to_time64(tm); 78 79 err = rtc->ops->set_mmss64(rtc->dev.parent, secs64); 80 } else if (rtc->ops->set_mmss) { 81 time64_t secs64 = rtc_tm_to_time64(tm); 82 err = rtc->ops->set_mmss(rtc->dev.parent, secs64); 83 } else 84 err = -EINVAL; 85 86 pm_stay_awake(rtc->dev.parent); 87 mutex_unlock(&rtc->ops_lock); 88 /* A timer might have just expired */ 89 schedule_work(&rtc->irqwork); 90 return err; 91 } 92 EXPORT_SYMBOL_GPL(rtc_set_time); 93 94 static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 95 { 96 int err; 97 98 err = mutex_lock_interruptible(&rtc->ops_lock); 99 if (err) 100 return err; 101 102 if (rtc->ops == NULL) 103 err = -ENODEV; 104 else if (!rtc->ops->read_alarm) 105 err = -EINVAL; 106 else { 107 memset(alarm, 0, sizeof(struct rtc_wkalrm)); 108 err = rtc->ops->read_alarm(rtc->dev.parent, alarm); 109 } 110 111 mutex_unlock(&rtc->ops_lock); 112 return err; 113 } 114 115 int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 116 { 117 int err; 118 struct rtc_time before, now; 119 int first_time = 1; 120 time64_t t_now, t_alm; 121 enum { none, day, month, year } missing = none; 122 unsigned days; 123 124 /* The lower level RTC driver may return -1 in some fields, 125 * creating invalid alarm->time values, for reasons like: 126 * 127 * - The hardware may not be capable of filling them in; 128 * many alarms match only on time-of-day fields, not 129 * day/month/year calendar data. 130 * 131 * - Some hardware uses illegal values as "wildcard" match 132 * values, which non-Linux firmware (like a BIOS) may try 133 * to set up as e.g. "alarm 15 minutes after each hour". 134 * Linux uses only oneshot alarms. 135 * 136 * When we see that here, we deal with it by using values from 137 * a current RTC timestamp for any missing (-1) values. The 138 * RTC driver prevents "periodic alarm" modes. 139 * 140 * But this can be racey, because some fields of the RTC timestamp 141 * may have wrapped in the interval since we read the RTC alarm, 142 * which would lead to us inserting inconsistent values in place 143 * of the -1 fields. 144 * 145 * Reading the alarm and timestamp in the reverse sequence 146 * would have the same race condition, and not solve the issue. 147 * 148 * So, we must first read the RTC timestamp, 149 * then read the RTC alarm value, 150 * and then read a second RTC timestamp. 151 * 152 * If any fields of the second timestamp have changed 153 * when compared with the first timestamp, then we know 154 * our timestamp may be inconsistent with that used by 155 * the low-level rtc_read_alarm_internal() function. 156 * 157 * So, when the two timestamps disagree, we just loop and do 158 * the process again to get a fully consistent set of values. 159 * 160 * This could all instead be done in the lower level driver, 161 * but since more than one lower level RTC implementation needs it, 162 * then it's probably best best to do it here instead of there.. 163 */ 164 165 /* Get the "before" timestamp */ 166 err = rtc_read_time(rtc, &before); 167 if (err < 0) 168 return err; 169 do { 170 if (!first_time) 171 memcpy(&before, &now, sizeof(struct rtc_time)); 172 first_time = 0; 173 174 /* get the RTC alarm values, which may be incomplete */ 175 err = rtc_read_alarm_internal(rtc, alarm); 176 if (err) 177 return err; 178 179 /* full-function RTCs won't have such missing fields */ 180 if (rtc_valid_tm(&alarm->time) == 0) 181 return 0; 182 183 /* get the "after" timestamp, to detect wrapped fields */ 184 err = rtc_read_time(rtc, &now); 185 if (err < 0) 186 return err; 187 188 /* note that tm_sec is a "don't care" value here: */ 189 } while ( before.tm_min != now.tm_min 190 || before.tm_hour != now.tm_hour 191 || before.tm_mon != now.tm_mon 192 || before.tm_year != now.tm_year); 193 194 /* Fill in the missing alarm fields using the timestamp; we 195 * know there's at least one since alarm->time is invalid. 196 */ 197 if (alarm->time.tm_sec == -1) 198 alarm->time.tm_sec = now.tm_sec; 199 if (alarm->time.tm_min == -1) 200 alarm->time.tm_min = now.tm_min; 201 if (alarm->time.tm_hour == -1) 202 alarm->time.tm_hour = now.tm_hour; 203 204 /* For simplicity, only support date rollover for now */ 205 if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) { 206 alarm->time.tm_mday = now.tm_mday; 207 missing = day; 208 } 209 if ((unsigned)alarm->time.tm_mon >= 12) { 210 alarm->time.tm_mon = now.tm_mon; 211 if (missing == none) 212 missing = month; 213 } 214 if (alarm->time.tm_year == -1) { 215 alarm->time.tm_year = now.tm_year; 216 if (missing == none) 217 missing = year; 218 } 219 220 /* with luck, no rollover is needed */ 221 t_now = rtc_tm_to_time64(&now); 222 t_alm = rtc_tm_to_time64(&alarm->time); 223 if (t_now < t_alm) 224 goto done; 225 226 switch (missing) { 227 228 /* 24 hour rollover ... if it's now 10am Monday, an alarm that 229 * that will trigger at 5am will do so at 5am Tuesday, which 230 * could also be in the next month or year. This is a common 231 * case, especially for PCs. 232 */ 233 case day: 234 dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day"); 235 t_alm += 24 * 60 * 60; 236 rtc_time64_to_tm(t_alm, &alarm->time); 237 break; 238 239 /* Month rollover ... if it's the 31th, an alarm on the 3rd will 240 * be next month. An alarm matching on the 30th, 29th, or 28th 241 * may end up in the month after that! Many newer PCs support 242 * this type of alarm. 243 */ 244 case month: 245 dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month"); 246 do { 247 if (alarm->time.tm_mon < 11) 248 alarm->time.tm_mon++; 249 else { 250 alarm->time.tm_mon = 0; 251 alarm->time.tm_year++; 252 } 253 days = rtc_month_days(alarm->time.tm_mon, 254 alarm->time.tm_year); 255 } while (days < alarm->time.tm_mday); 256 break; 257 258 /* Year rollover ... easy except for leap years! */ 259 case year: 260 dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year"); 261 do { 262 alarm->time.tm_year++; 263 } while (!is_leap_year(alarm->time.tm_year + 1900) 264 && rtc_valid_tm(&alarm->time) != 0); 265 break; 266 267 default: 268 dev_warn(&rtc->dev, "alarm rollover not handled\n"); 269 } 270 271 done: 272 err = rtc_valid_tm(&alarm->time); 273 274 if (err) { 275 dev_warn(&rtc->dev, "invalid alarm value: %d-%d-%d %d:%d:%d\n", 276 alarm->time.tm_year + 1900, alarm->time.tm_mon + 1, 277 alarm->time.tm_mday, alarm->time.tm_hour, alarm->time.tm_min, 278 alarm->time.tm_sec); 279 } 280 281 return err; 282 } 283 284 int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 285 { 286 int err; 287 288 err = mutex_lock_interruptible(&rtc->ops_lock); 289 if (err) 290 return err; 291 if (rtc->ops == NULL) 292 err = -ENODEV; 293 else if (!rtc->ops->read_alarm) 294 err = -EINVAL; 295 else { 296 memset(alarm, 0, sizeof(struct rtc_wkalrm)); 297 alarm->enabled = rtc->aie_timer.enabled; 298 alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires); 299 } 300 mutex_unlock(&rtc->ops_lock); 301 302 return err; 303 } 304 EXPORT_SYMBOL_GPL(rtc_read_alarm); 305 306 static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 307 { 308 struct rtc_time tm; 309 time64_t now, scheduled; 310 int err; 311 312 err = rtc_valid_tm(&alarm->time); 313 if (err) 314 return err; 315 scheduled = rtc_tm_to_time64(&alarm->time); 316 317 /* Make sure we're not setting alarms in the past */ 318 err = __rtc_read_time(rtc, &tm); 319 if (err) 320 return err; 321 now = rtc_tm_to_time64(&tm); 322 if (scheduled <= now) 323 return -ETIME; 324 /* 325 * XXX - We just checked to make sure the alarm time is not 326 * in the past, but there is still a race window where if 327 * the is alarm set for the next second and the second ticks 328 * over right here, before we set the alarm. 329 */ 330 331 if (!rtc->ops) 332 err = -ENODEV; 333 else if (!rtc->ops->set_alarm) 334 err = -EINVAL; 335 else 336 err = rtc->ops->set_alarm(rtc->dev.parent, alarm); 337 338 return err; 339 } 340 341 int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 342 { 343 int err; 344 345 err = rtc_valid_tm(&alarm->time); 346 if (err != 0) 347 return err; 348 349 err = mutex_lock_interruptible(&rtc->ops_lock); 350 if (err) 351 return err; 352 if (rtc->aie_timer.enabled) 353 rtc_timer_remove(rtc, &rtc->aie_timer); 354 355 rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); 356 rtc->aie_timer.period = ktime_set(0, 0); 357 if (alarm->enabled) 358 err = rtc_timer_enqueue(rtc, &rtc->aie_timer); 359 360 mutex_unlock(&rtc->ops_lock); 361 return err; 362 } 363 EXPORT_SYMBOL_GPL(rtc_set_alarm); 364 365 /* Called once per device from rtc_device_register */ 366 int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 367 { 368 int err; 369 struct rtc_time now; 370 371 err = rtc_valid_tm(&alarm->time); 372 if (err != 0) 373 return err; 374 375 err = rtc_read_time(rtc, &now); 376 if (err) 377 return err; 378 379 err = mutex_lock_interruptible(&rtc->ops_lock); 380 if (err) 381 return err; 382 383 rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); 384 rtc->aie_timer.period = ktime_set(0, 0); 385 386 /* Alarm has to be enabled & in the futrure for us to enqueue it */ 387 if (alarm->enabled && (rtc_tm_to_ktime(now).tv64 < 388 rtc->aie_timer.node.expires.tv64)) { 389 390 rtc->aie_timer.enabled = 1; 391 timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node); 392 } 393 mutex_unlock(&rtc->ops_lock); 394 return err; 395 } 396 EXPORT_SYMBOL_GPL(rtc_initialize_alarm); 397 398 399 400 int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled) 401 { 402 int err = mutex_lock_interruptible(&rtc->ops_lock); 403 if (err) 404 return err; 405 406 if (rtc->aie_timer.enabled != enabled) { 407 if (enabled) 408 err = rtc_timer_enqueue(rtc, &rtc->aie_timer); 409 else 410 rtc_timer_remove(rtc, &rtc->aie_timer); 411 } 412 413 if (err) 414 /* nothing */; 415 else if (!rtc->ops) 416 err = -ENODEV; 417 else if (!rtc->ops->alarm_irq_enable) 418 err = -EINVAL; 419 else 420 err = rtc->ops->alarm_irq_enable(rtc->dev.parent, enabled); 421 422 mutex_unlock(&rtc->ops_lock); 423 return err; 424 } 425 EXPORT_SYMBOL_GPL(rtc_alarm_irq_enable); 426 427 int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) 428 { 429 int err = mutex_lock_interruptible(&rtc->ops_lock); 430 if (err) 431 return err; 432 433 #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL 434 if (enabled == 0 && rtc->uie_irq_active) { 435 mutex_unlock(&rtc->ops_lock); 436 return rtc_dev_update_irq_enable_emul(rtc, 0); 437 } 438 #endif 439 /* make sure we're changing state */ 440 if (rtc->uie_rtctimer.enabled == enabled) 441 goto out; 442 443 if (rtc->uie_unsupported) { 444 err = -EINVAL; 445 goto out; 446 } 447 448 if (enabled) { 449 struct rtc_time tm; 450 ktime_t now, onesec; 451 452 __rtc_read_time(rtc, &tm); 453 onesec = ktime_set(1, 0); 454 now = rtc_tm_to_ktime(tm); 455 rtc->uie_rtctimer.node.expires = ktime_add(now, onesec); 456 rtc->uie_rtctimer.period = ktime_set(1, 0); 457 err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer); 458 } else 459 rtc_timer_remove(rtc, &rtc->uie_rtctimer); 460 461 out: 462 mutex_unlock(&rtc->ops_lock); 463 #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL 464 /* 465 * Enable emulation if the driver did not provide 466 * the update_irq_enable function pointer or if returned 467 * -EINVAL to signal that it has been configured without 468 * interrupts or that are not available at the moment. 469 */ 470 if (err == -EINVAL) 471 err = rtc_dev_update_irq_enable_emul(rtc, enabled); 472 #endif 473 return err; 474 475 } 476 EXPORT_SYMBOL_GPL(rtc_update_irq_enable); 477 478 479 /** 480 * rtc_handle_legacy_irq - AIE, UIE and PIE event hook 481 * @rtc: pointer to the rtc device 482 * 483 * This function is called when an AIE, UIE or PIE mode interrupt 484 * has occurred (or been emulated). 485 * 486 * Triggers the registered irq_task function callback. 487 */ 488 void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode) 489 { 490 unsigned long flags; 491 492 /* mark one irq of the appropriate mode */ 493 spin_lock_irqsave(&rtc->irq_lock, flags); 494 rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF|mode); 495 spin_unlock_irqrestore(&rtc->irq_lock, flags); 496 497 /* call the task func */ 498 spin_lock_irqsave(&rtc->irq_task_lock, flags); 499 if (rtc->irq_task) 500 rtc->irq_task->func(rtc->irq_task->private_data); 501 spin_unlock_irqrestore(&rtc->irq_task_lock, flags); 502 503 wake_up_interruptible(&rtc->irq_queue); 504 kill_fasync(&rtc->async_queue, SIGIO, POLL_IN); 505 } 506 507 508 /** 509 * rtc_aie_update_irq - AIE mode rtctimer hook 510 * @private: pointer to the rtc_device 511 * 512 * This functions is called when the aie_timer expires. 513 */ 514 void rtc_aie_update_irq(void *private) 515 { 516 struct rtc_device *rtc = (struct rtc_device *)private; 517 rtc_handle_legacy_irq(rtc, 1, RTC_AF); 518 } 519 520 521 /** 522 * rtc_uie_update_irq - UIE mode rtctimer hook 523 * @private: pointer to the rtc_device 524 * 525 * This functions is called when the uie_timer expires. 526 */ 527 void rtc_uie_update_irq(void *private) 528 { 529 struct rtc_device *rtc = (struct rtc_device *)private; 530 rtc_handle_legacy_irq(rtc, 1, RTC_UF); 531 } 532 533 534 /** 535 * rtc_pie_update_irq - PIE mode hrtimer hook 536 * @timer: pointer to the pie mode hrtimer 537 * 538 * This function is used to emulate PIE mode interrupts 539 * using an hrtimer. This function is called when the periodic 540 * hrtimer expires. 541 */ 542 enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer) 543 { 544 struct rtc_device *rtc; 545 ktime_t period; 546 int count; 547 rtc = container_of(timer, struct rtc_device, pie_timer); 548 549 period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq); 550 count = hrtimer_forward_now(timer, period); 551 552 rtc_handle_legacy_irq(rtc, count, RTC_PF); 553 554 return HRTIMER_RESTART; 555 } 556 557 /** 558 * rtc_update_irq - Triggered when a RTC interrupt occurs. 559 * @rtc: the rtc device 560 * @num: how many irqs are being reported (usually one) 561 * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF 562 * Context: any 563 */ 564 void rtc_update_irq(struct rtc_device *rtc, 565 unsigned long num, unsigned long events) 566 { 567 if (unlikely(IS_ERR_OR_NULL(rtc))) 568 return; 569 570 pm_stay_awake(rtc->dev.parent); 571 schedule_work(&rtc->irqwork); 572 } 573 EXPORT_SYMBOL_GPL(rtc_update_irq); 574 575 static int __rtc_match(struct device *dev, const void *data) 576 { 577 const char *name = data; 578 579 if (strcmp(dev_name(dev), name) == 0) 580 return 1; 581 return 0; 582 } 583 584 struct rtc_device *rtc_class_open(const char *name) 585 { 586 struct device *dev; 587 struct rtc_device *rtc = NULL; 588 589 dev = class_find_device(rtc_class, NULL, name, __rtc_match); 590 if (dev) 591 rtc = to_rtc_device(dev); 592 593 if (rtc) { 594 if (!try_module_get(rtc->owner)) { 595 put_device(dev); 596 rtc = NULL; 597 } 598 } 599 600 return rtc; 601 } 602 EXPORT_SYMBOL_GPL(rtc_class_open); 603 604 void rtc_class_close(struct rtc_device *rtc) 605 { 606 module_put(rtc->owner); 607 put_device(&rtc->dev); 608 } 609 EXPORT_SYMBOL_GPL(rtc_class_close); 610 611 int rtc_irq_register(struct rtc_device *rtc, struct rtc_task *task) 612 { 613 int retval = -EBUSY; 614 615 if (task == NULL || task->func == NULL) 616 return -EINVAL; 617 618 /* Cannot register while the char dev is in use */ 619 if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags)) 620 return -EBUSY; 621 622 spin_lock_irq(&rtc->irq_task_lock); 623 if (rtc->irq_task == NULL) { 624 rtc->irq_task = task; 625 retval = 0; 626 } 627 spin_unlock_irq(&rtc->irq_task_lock); 628 629 clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags); 630 631 return retval; 632 } 633 EXPORT_SYMBOL_GPL(rtc_irq_register); 634 635 void rtc_irq_unregister(struct rtc_device *rtc, struct rtc_task *task) 636 { 637 spin_lock_irq(&rtc->irq_task_lock); 638 if (rtc->irq_task == task) 639 rtc->irq_task = NULL; 640 spin_unlock_irq(&rtc->irq_task_lock); 641 } 642 EXPORT_SYMBOL_GPL(rtc_irq_unregister); 643 644 static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled) 645 { 646 /* 647 * We always cancel the timer here first, because otherwise 648 * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); 649 * when we manage to start the timer before the callback 650 * returns HRTIMER_RESTART. 651 * 652 * We cannot use hrtimer_cancel() here as a running callback 653 * could be blocked on rtc->irq_task_lock and hrtimer_cancel() 654 * would spin forever. 655 */ 656 if (hrtimer_try_to_cancel(&rtc->pie_timer) < 0) 657 return -1; 658 659 if (enabled) { 660 ktime_t period = ktime_set(0, NSEC_PER_SEC / rtc->irq_freq); 661 662 hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL); 663 } 664 return 0; 665 } 666 667 /** 668 * rtc_irq_set_state - enable/disable 2^N Hz periodic IRQs 669 * @rtc: the rtc device 670 * @task: currently registered with rtc_irq_register() 671 * @enabled: true to enable periodic IRQs 672 * Context: any 673 * 674 * Note that rtc_irq_set_freq() should previously have been used to 675 * specify the desired frequency of periodic IRQ task->func() callbacks. 676 */ 677 int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled) 678 { 679 int err = 0; 680 unsigned long flags; 681 682 retry: 683 spin_lock_irqsave(&rtc->irq_task_lock, flags); 684 if (rtc->irq_task != NULL && task == NULL) 685 err = -EBUSY; 686 else if (rtc->irq_task != task) 687 err = -EACCES; 688 else { 689 if (rtc_update_hrtimer(rtc, enabled) < 0) { 690 spin_unlock_irqrestore(&rtc->irq_task_lock, flags); 691 cpu_relax(); 692 goto retry; 693 } 694 rtc->pie_enabled = enabled; 695 } 696 spin_unlock_irqrestore(&rtc->irq_task_lock, flags); 697 return err; 698 } 699 EXPORT_SYMBOL_GPL(rtc_irq_set_state); 700 701 /** 702 * rtc_irq_set_freq - set 2^N Hz periodic IRQ frequency for IRQ 703 * @rtc: the rtc device 704 * @task: currently registered with rtc_irq_register() 705 * @freq: positive frequency with which task->func() will be called 706 * Context: any 707 * 708 * Note that rtc_irq_set_state() is used to enable or disable the 709 * periodic IRQs. 710 */ 711 int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq) 712 { 713 int err = 0; 714 unsigned long flags; 715 716 if (freq <= 0 || freq > RTC_MAX_FREQ) 717 return -EINVAL; 718 retry: 719 spin_lock_irqsave(&rtc->irq_task_lock, flags); 720 if (rtc->irq_task != NULL && task == NULL) 721 err = -EBUSY; 722 else if (rtc->irq_task != task) 723 err = -EACCES; 724 else { 725 rtc->irq_freq = freq; 726 if (rtc->pie_enabled && rtc_update_hrtimer(rtc, 1) < 0) { 727 spin_unlock_irqrestore(&rtc->irq_task_lock, flags); 728 cpu_relax(); 729 goto retry; 730 } 731 } 732 spin_unlock_irqrestore(&rtc->irq_task_lock, flags); 733 return err; 734 } 735 EXPORT_SYMBOL_GPL(rtc_irq_set_freq); 736 737 /** 738 * rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue 739 * @rtc rtc device 740 * @timer timer being added. 741 * 742 * Enqueues a timer onto the rtc devices timerqueue and sets 743 * the next alarm event appropriately. 744 * 745 * Sets the enabled bit on the added timer. 746 * 747 * Must hold ops_lock for proper serialization of timerqueue 748 */ 749 static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) 750 { 751 timer->enabled = 1; 752 timerqueue_add(&rtc->timerqueue, &timer->node); 753 if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) { 754 struct rtc_wkalrm alarm; 755 int err; 756 alarm.time = rtc_ktime_to_tm(timer->node.expires); 757 alarm.enabled = 1; 758 err = __rtc_set_alarm(rtc, &alarm); 759 if (err == -ETIME) { 760 pm_stay_awake(rtc->dev.parent); 761 schedule_work(&rtc->irqwork); 762 } else if (err) { 763 timerqueue_del(&rtc->timerqueue, &timer->node); 764 timer->enabled = 0; 765 return err; 766 } 767 } 768 return 0; 769 } 770 771 static void rtc_alarm_disable(struct rtc_device *rtc) 772 { 773 if (!rtc->ops || !rtc->ops->alarm_irq_enable) 774 return; 775 776 rtc->ops->alarm_irq_enable(rtc->dev.parent, false); 777 } 778 779 /** 780 * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue 781 * @rtc rtc device 782 * @timer timer being removed. 783 * 784 * Removes a timer onto the rtc devices timerqueue and sets 785 * the next alarm event appropriately. 786 * 787 * Clears the enabled bit on the removed timer. 788 * 789 * Must hold ops_lock for proper serialization of timerqueue 790 */ 791 static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer) 792 { 793 struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue); 794 timerqueue_del(&rtc->timerqueue, &timer->node); 795 timer->enabled = 0; 796 if (next == &timer->node) { 797 struct rtc_wkalrm alarm; 798 int err; 799 next = timerqueue_getnext(&rtc->timerqueue); 800 if (!next) { 801 rtc_alarm_disable(rtc); 802 return; 803 } 804 alarm.time = rtc_ktime_to_tm(next->expires); 805 alarm.enabled = 1; 806 err = __rtc_set_alarm(rtc, &alarm); 807 if (err == -ETIME) { 808 pm_stay_awake(rtc->dev.parent); 809 schedule_work(&rtc->irqwork); 810 } 811 } 812 } 813 814 /** 815 * rtc_timer_do_work - Expires rtc timers 816 * @rtc rtc device 817 * @timer timer being removed. 818 * 819 * Expires rtc timers. Reprograms next alarm event if needed. 820 * Called via worktask. 821 * 822 * Serializes access to timerqueue via ops_lock mutex 823 */ 824 void rtc_timer_do_work(struct work_struct *work) 825 { 826 struct rtc_timer *timer; 827 struct timerqueue_node *next; 828 ktime_t now; 829 struct rtc_time tm; 830 831 struct rtc_device *rtc = 832 container_of(work, struct rtc_device, irqwork); 833 834 mutex_lock(&rtc->ops_lock); 835 again: 836 __rtc_read_time(rtc, &tm); 837 now = rtc_tm_to_ktime(tm); 838 while ((next = timerqueue_getnext(&rtc->timerqueue))) { 839 if (next->expires.tv64 > now.tv64) 840 break; 841 842 /* expire timer */ 843 timer = container_of(next, struct rtc_timer, node); 844 timerqueue_del(&rtc->timerqueue, &timer->node); 845 timer->enabled = 0; 846 if (timer->task.func) 847 timer->task.func(timer->task.private_data); 848 849 /* Re-add/fwd periodic timers */ 850 if (ktime_to_ns(timer->period)) { 851 timer->node.expires = ktime_add(timer->node.expires, 852 timer->period); 853 timer->enabled = 1; 854 timerqueue_add(&rtc->timerqueue, &timer->node); 855 } 856 } 857 858 /* Set next alarm */ 859 if (next) { 860 struct rtc_wkalrm alarm; 861 int err; 862 int retry = 3; 863 864 alarm.time = rtc_ktime_to_tm(next->expires); 865 alarm.enabled = 1; 866 reprogram: 867 err = __rtc_set_alarm(rtc, &alarm); 868 if (err == -ETIME) 869 goto again; 870 else if (err) { 871 if (retry-- > 0) 872 goto reprogram; 873 874 timer = container_of(next, struct rtc_timer, node); 875 timerqueue_del(&rtc->timerqueue, &timer->node); 876 timer->enabled = 0; 877 dev_err(&rtc->dev, "__rtc_set_alarm: err=%d\n", err); 878 goto again; 879 } 880 } else 881 rtc_alarm_disable(rtc); 882 883 pm_relax(rtc->dev.parent); 884 mutex_unlock(&rtc->ops_lock); 885 } 886 887 888 /* rtc_timer_init - Initializes an rtc_timer 889 * @timer: timer to be intiialized 890 * @f: function pointer to be called when timer fires 891 * @data: private data passed to function pointer 892 * 893 * Kernel interface to initializing an rtc_timer. 894 */ 895 void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data) 896 { 897 timerqueue_init(&timer->node); 898 timer->enabled = 0; 899 timer->task.func = f; 900 timer->task.private_data = data; 901 } 902 903 /* rtc_timer_start - Sets an rtc_timer to fire in the future 904 * @ rtc: rtc device to be used 905 * @ timer: timer being set 906 * @ expires: time at which to expire the timer 907 * @ period: period that the timer will recur 908 * 909 * Kernel interface to set an rtc_timer 910 */ 911 int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer, 912 ktime_t expires, ktime_t period) 913 { 914 int ret = 0; 915 mutex_lock(&rtc->ops_lock); 916 if (timer->enabled) 917 rtc_timer_remove(rtc, timer); 918 919 timer->node.expires = expires; 920 timer->period = period; 921 922 ret = rtc_timer_enqueue(rtc, timer); 923 924 mutex_unlock(&rtc->ops_lock); 925 return ret; 926 } 927 928 /* rtc_timer_cancel - Stops an rtc_timer 929 * @ rtc: rtc device to be used 930 * @ timer: timer being set 931 * 932 * Kernel interface to cancel an rtc_timer 933 */ 934 void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer) 935 { 936 mutex_lock(&rtc->ops_lock); 937 if (timer->enabled) 938 rtc_timer_remove(rtc, timer); 939 mutex_unlock(&rtc->ops_lock); 940 } 941 942 943