// SPDX-License-Identifier: GPL-2.0
/*
 * RTC subsystem, interface functions
 *
 * Copyright (C) 2005 Tower Technologies
 * Author: Alessandro Zummo <a.zummo@towertech.it>
 *
 * based on arch/arm/common/rtctime.c
 */

#include <linux/rtc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/log2.h>
#include <linux/workqueue.h>

#define CREATE_TRACE_POINTS
#include <trace/events/rtc.h>

static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);

/*
 * Translate a time just read from the hardware into the expanded range:
 * add rtc->offset_secs unless the value falls in the region where the
 * expanded range overlaps the hardware's native range.
 */
static void rtc_add_offset(struct rtc_device *rtc, struct rtc_time *tm)
{
        time64_t secs;

        if (!rtc->offset_secs)
                return;

        secs = rtc_tm_to_time64(tm);

        /*
         * Time values read from the RTC device are always within the
         * hardware's original valid range.  Values falling in the region
         * where the expanded range overlaps the original range must be
         * reported as-is, so no offset is added there.
         */
        if ((rtc->start_secs > rtc->range_min && secs >= rtc->start_secs) ||
            (rtc->start_secs < rtc->range_min &&
             secs <= (rtc->start_secs + rtc->range_max - rtc->range_min)))
                return;

        rtc_time64_to_tm(secs + rtc->offset_secs, tm);
}

/*
 * Inverse of rtc_add_offset(): map a time from the expanded range back
 * into the hardware's native range before handing it to the driver.
 */
static void rtc_subtract_offset(struct rtc_device *rtc, struct rtc_time *tm)
{
        time64_t secs;

        if (!rtc->offset_secs)
                return;

        secs = rtc_tm_to_time64(tm);

        /*
         * If the time value being set already lies within the valid range
         * of the RTC hardware, there is no need to subtract the offset.
         * Otherwise subtract the offset to make the value valid for the
         * hardware.
         */
        if (secs >= rtc->range_min && secs <= rtc->range_max)
                return;

        rtc_time64_to_tm(secs - rtc->offset_secs, tm);
}

/*
 * Check that @tm lies within the range this RTC can represent
 * (the expanded range when start_secs was set).  Returns 0 or -ERANGE.
 * range_min == range_max means "range unknown" and disables the check.
 */
static int rtc_valid_range(struct rtc_device *rtc, struct rtc_time *tm)
{
        if (rtc->range_min != rtc->range_max) {
                time64_t time = rtc_tm_to_time64(tm);
                time64_t range_min = rtc->set_start_time ? rtc->start_secs :
                        rtc->range_min;
                time64_t range_max = rtc->set_start_time ?
                        (rtc->start_secs + rtc->range_max - rtc->range_min) :
                        rtc->range_max;

                if (time < range_min || time > range_max)
                        return -ERANGE;
        }

        return 0;
}

/* Read the current time from the driver; caller must hold ops_lock. */
static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
{
        int err;

        if (!rtc->ops) {
                err = -ENODEV;
        } else if (!rtc->ops->read_time) {
                err = -EINVAL;
        } else {
                memset(tm, 0, sizeof(struct rtc_time));
                err = rtc->ops->read_time(rtc->dev.parent, tm);
                if (err < 0) {
                        dev_dbg(&rtc->dev, "read_time: fail to read: %d\n",
                                err);
                        return err;
                }

                rtc_add_offset(rtc, tm);

                err = rtc_valid_tm(tm);
                if (err < 0)
                        dev_dbg(&rtc->dev, "read_time: rtc_time isn't valid\n");
        }
        return err;
}

int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
{
        int err;

        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;

        err = __rtc_read_time(rtc, tm);
        mutex_unlock(&rtc->ops_lock);

        trace_rtc_read_time(rtc_tm_to_time64(tm), err);
        return err;
}
EXPORT_SYMBOL_GPL(rtc_read_time);

int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
{
        int err;

        err = rtc_valid_tm(tm);
        if (err != 0)
                return err;

        err = rtc_valid_range(rtc, tm);
        if (err)
                return err;

        /* NOTE: modifies *tm in place when an offset is in use */
        rtc_subtract_offset(rtc, tm);

        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;

        if (!rtc->ops)
                err = -ENODEV;
        else if (rtc->ops->set_time)
                err = rtc->ops->set_time(rtc->dev.parent, tm);
        else
                err = -EINVAL;

        pm_stay_awake(rtc->dev.parent);
        mutex_unlock(&rtc->ops_lock);
        /* A timer might have just expired */
        schedule_work(&rtc->irqwork);

        trace_rtc_set_time(rtc_tm_to_time64(tm), err);
        return err;
}
EXPORT_SYMBOL_GPL(rtc_set_time);

/*
 * Fetch the raw alarm value from the driver.  All time fields are
 * pre-set to -1 so the driver only has to fill in what the hardware
 * actually stores; missing fields are fixed up in __rtc_read_alarm().
 */
static int rtc_read_alarm_internal(struct rtc_device *rtc,
                                   struct rtc_wkalrm *alarm)
{
        int err;

        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;

        if (!rtc->ops) {
                err = -ENODEV;
        } else if (!rtc->ops->read_alarm) {
                err = -EINVAL;
        } else {
                alarm->enabled = 0;
                alarm->pending = 0;
                alarm->time.tm_sec = -1;
                alarm->time.tm_min = -1;
                alarm->time.tm_hour = -1;
                alarm->time.tm_mday = -1;
                alarm->time.tm_mon = -1;
                alarm->time.tm_year = -1;
                alarm->time.tm_wday = -1;
                alarm->time.tm_yday = -1;
                alarm->time.tm_isdst = -1;
                err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
        }

        mutex_unlock(&rtc->ops_lock);

        trace_rtc_read_alarm(rtc_tm_to_time64(&alarm->time), err);
        return err;
}

int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
        int err;
        struct rtc_time before, now;
        int first_time = 1;
        time64_t t_now, t_alm;
        enum { none, day, month, year } missing = none;
        unsigned int days;

        /* The lower level RTC driver may return -1 in some fields,
         * creating invalid alarm->time values, for reasons like:
         *
         * - The hardware may not be capable of filling them in;
         *   many alarms match only on time-of-day fields, not
         *   day/month/year calendar data.
         *
         * - Some hardware uses illegal values as "wildcard" match
         *   values, which non-Linux firmware (like a BIOS) may try
         *   to set up as e.g. "alarm 15 minutes after each hour".
         *   Linux uses only oneshot alarms.
         *
         * When we see that here, we deal with it by using values from
         * a current RTC timestamp for any missing (-1) values.  The
         * RTC driver prevents "periodic alarm" modes.
         *
         * But this can be racy, because some fields of the RTC timestamp
         * may have wrapped in the interval since we read the RTC alarm,
         * which would lead to us inserting inconsistent values in place
         * of the -1 fields.
         *
         * Reading the alarm and timestamp in the reverse sequence
         * would have the same race condition, and not solve the issue.
         *
         * So, we must first read the RTC timestamp,
         * then read the RTC alarm value,
         * and then read a second RTC timestamp.
         *
         * If any fields of the second timestamp have changed
         * when compared with the first timestamp, then we know
         * our timestamp may be inconsistent with that used by
         * the low-level rtc_read_alarm_internal() function.
         *
         * So, when the two timestamps disagree, we just loop and do
         * the process again to get a fully consistent set of values.
         *
         * This could all instead be done in the lower level driver,
         * but since more than one lower level RTC implementation needs it,
         * then it's probably best to do it here instead of there..
         */

        /* Get the "before" timestamp */
        err = rtc_read_time(rtc, &before);
        if (err < 0)
                return err;
        do {
                if (!first_time)
                        memcpy(&before, &now, sizeof(struct rtc_time));
                first_time = 0;

                /* get the RTC alarm values, which may be incomplete */
                err = rtc_read_alarm_internal(rtc, alarm);
                if (err)
                        return err;

                /* full-function RTCs won't have such missing fields */
                if (rtc_valid_tm(&alarm->time) == 0) {
                        rtc_add_offset(rtc, &alarm->time);
                        return 0;
                }

                /* get the "after" timestamp, to detect wrapped fields */
                err = rtc_read_time(rtc, &now);
                if (err < 0)
                        return err;

                /* note that tm_sec is a "don't care" value here: */
        } while (before.tm_min != now.tm_min ||
                 before.tm_hour != now.tm_hour ||
                 before.tm_mon != now.tm_mon ||
                 before.tm_year != now.tm_year);

        /* Fill in the missing alarm fields using the timestamp; we
         * know there's at least one since alarm->time is invalid.
         */
        if (alarm->time.tm_sec == -1)
                alarm->time.tm_sec = now.tm_sec;
        if (alarm->time.tm_min == -1)
                alarm->time.tm_min = now.tm_min;
        if (alarm->time.tm_hour == -1)
                alarm->time.tm_hour = now.tm_hour;

        /* For simplicity, only support date rollover for now */
        if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) {
                alarm->time.tm_mday = now.tm_mday;
                missing = day;
        }
        if ((unsigned int)alarm->time.tm_mon >= 12) {
                alarm->time.tm_mon = now.tm_mon;
                if (missing == none)
                        missing = month;
        }
        if (alarm->time.tm_year == -1) {
                alarm->time.tm_year = now.tm_year;
                if (missing == none)
                        missing = year;
        }

        /* Can't proceed if alarm is still invalid after replacing
         * missing fields.
         */
        err = rtc_valid_tm(&alarm->time);
        if (err)
                goto done;

        /* with luck, no rollover is needed */
        t_now = rtc_tm_to_time64(&now);
        t_alm = rtc_tm_to_time64(&alarm->time);
        if (t_now < t_alm)
                goto done;

        switch (missing) {
        /* 24 hour rollover ... if it's now 10am Monday, an alarm that
         * will trigger at 5am will do so at 5am Tuesday, which
         * could also be in the next month or year.  This is a common
         * case, especially for PCs.
         */
        case day:
                dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
                t_alm += 24 * 60 * 60;
                rtc_time64_to_tm(t_alm, &alarm->time);
                break;

        /* Month rollover ... if it's the 31st, an alarm on the 3rd will
         * be next month.  An alarm matching on the 30th, 29th, or 28th
         * may end up in the month after that!  Many newer PCs support
         * this type of alarm.
         */
        case month:
                dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
                do {
                        if (alarm->time.tm_mon < 11) {
                                alarm->time.tm_mon++;
                        } else {
                                alarm->time.tm_mon = 0;
                                alarm->time.tm_year++;
                        }
                        days = rtc_month_days(alarm->time.tm_mon,
                                              alarm->time.tm_year);
                } while (days < alarm->time.tm_mday);
                break;

        /* Year rollover ... easy except for leap years!  A Feb 29 alarm
         * must be pushed out to the next leap year.
         */
        case year:
                dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
                do {
                        alarm->time.tm_year++;
                } while (!is_leap_year(alarm->time.tm_year + 1900) &&
                         rtc_valid_tm(&alarm->time) != 0);
                break;

        default:
                dev_warn(&rtc->dev, "alarm rollover not handled\n");
        }

        err = rtc_valid_tm(&alarm->time);

done:
        if (err)
                dev_warn(&rtc->dev, "invalid alarm value: %ptR\n",
                         &alarm->time);

        return err;
}

int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
        int err;

        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;
        if (!rtc->ops) {
                err = -ENODEV;
        } else if (!rtc->ops->read_alarm) {
                err = -EINVAL;
        } else {
                /* report the alarm tracked by the core (aie_timer),
                 * not the raw hardware state */
                memset(alarm, 0, sizeof(struct rtc_wkalrm));
                alarm->enabled = rtc->aie_timer.enabled;
                alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
        }
        mutex_unlock(&rtc->ops_lock);

        trace_rtc_read_alarm(rtc_tm_to_time64(&alarm->time), err);
        return err;
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);

/* Program the hardware alarm; caller must hold ops_lock.
 * Returns -ETIME when the requested time is already in the past.
 */
static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
        struct rtc_time tm;
        time64_t now, scheduled;
        int err;

        err = rtc_valid_tm(&alarm->time);
        if (err)
                return err;

        scheduled = rtc_tm_to_time64(&alarm->time);

        /* Make sure we're not setting alarms in the past */
        err = __rtc_read_time(rtc, &tm);
        if (err)
                return err;
        now = rtc_tm_to_time64(&tm);
        if (scheduled <= now)
                return -ETIME;
        /*
         * XXX - We just checked to make sure the alarm time is not
         * in the past, but there is still a race window where if
         * the alarm is set for the next second and the second ticks
         * over right here, before we set the alarm.
         */

        rtc_subtract_offset(rtc, &alarm->time);

        if (!rtc->ops)
                err = -ENODEV;
        else if (!rtc->ops->set_alarm)
                err = -EINVAL;
        else
                err = rtc->ops->set_alarm(rtc->dev.parent, alarm);

        trace_rtc_set_alarm(rtc_tm_to_time64(&alarm->time), err);
        return err;
}

int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
        int err;

        if (!rtc->ops)
                return -ENODEV;
        else if (!rtc->ops->set_alarm)
                return -EINVAL;

        err = rtc_valid_tm(&alarm->time);
        if (err != 0)
                return err;

        err = rtc_valid_range(rtc, &alarm->time);
        if (err)
                return err;

        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;
        if (rtc->aie_timer.enabled)
                rtc_timer_remove(rtc, &rtc->aie_timer);

        rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
        rtc->aie_timer.period = 0;
        if (alarm->enabled)
                /* hardware is programmed by the timerqueue machinery */
                err = rtc_timer_enqueue(rtc, &rtc->aie_timer);

        mutex_unlock(&rtc->ops_lock);

        return err;
}
EXPORT_SYMBOL_GPL(rtc_set_alarm);

/* Called once per device from rtc_device_register */
int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
        int err;
        struct rtc_time now;

        err = rtc_valid_tm(&alarm->time);
        if (err != 0)
                return err;

        err = rtc_read_time(rtc, &now);
        if (err)
                return err;

        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;

        rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
        rtc->aie_timer.period = 0;

        /* Alarm has to be enabled & in the future for us to enqueue it */
        if (alarm->enabled && (rtc_tm_to_ktime(now) <
                               rtc->aie_timer.node.expires)) {
                rtc->aie_timer.enabled = 1;
                timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
                trace_rtc_timer_enqueue(&rtc->aie_timer);
        }
        mutex_unlock(&rtc->ops_lock);
        return err;
}
EXPORT_SYMBOL_GPL(rtc_initialize_alarm);

int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
        int err;

        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;

        if (rtc->aie_timer.enabled != enabled) {
                if (enabled)
                        err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
                else
                        rtc_timer_remove(rtc, &rtc->aie_timer);
        }

        if (err)
                /* nothing */;
        else if (!rtc->ops)
                err = -ENODEV;
        else if (!rtc->ops->alarm_irq_enable)
                err = -EINVAL;
        else
                err = rtc->ops->alarm_irq_enable(rtc->dev.parent, enabled);

        mutex_unlock(&rtc->ops_lock);

        trace_rtc_alarm_irq_enable(enabled, err);
        return err;
}
EXPORT_SYMBOL_GPL(rtc_alarm_irq_enable);

int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
        int err;

        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
        if (enabled == 0 && rtc->uie_irq_active) {
                mutex_unlock(&rtc->ops_lock);
                return rtc_dev_update_irq_enable_emul(rtc, 0);
        }
#endif
        /* make sure we're changing state */
        if (rtc->uie_rtctimer.enabled == enabled)
                goto out;

        if (rtc->uie_unsupported) {
                err = -EINVAL;
                goto out;
        }

        if (enabled) {
                struct rtc_time tm;
                ktime_t now, onesec;

                /* emulate UIE with a 1 Hz periodic rtc_timer */
                __rtc_read_time(rtc, &tm);
                onesec = ktime_set(1, 0);
                now = rtc_tm_to_ktime(tm);
                rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
                rtc->uie_rtctimer.period = ktime_set(1, 0);
                err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
        } else {
                rtc_timer_remove(rtc, &rtc->uie_rtctimer);
        }

out:
        mutex_unlock(&rtc->ops_lock);
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
        /*
         * Enable emulation if the driver returned -EINVAL to signal that it
         * has been configured without interrupts or they are not available at
         * the moment.
         */
        if (err == -EINVAL)
                err = rtc_dev_update_irq_enable_emul(rtc, enabled);
#endif
        return err;
}
EXPORT_SYMBOL_GPL(rtc_update_irq_enable);

/**
 * rtc_handle_legacy_irq - AIE, UIE and PIE event hook
 * @rtc: pointer to the rtc device
 * @num: number of IRQs of this mode being reported (folded into irq_data)
 * @mode: one of RTC_AF, RTC_UF, RTC_PF identifying the interrupt type
 *
 * This function is called when an AIE, UIE or PIE mode interrupt
 * has occurred (or been emulated).
 *
 */
void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
{
        unsigned long flags;

        /* mark one irq of the appropriate mode */
        spin_lock_irqsave(&rtc->irq_lock, flags);
        rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF | mode);
        spin_unlock_irqrestore(&rtc->irq_lock, flags);

        wake_up_interruptible(&rtc->irq_queue);
        kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
}

/**
 * rtc_aie_update_irq - AIE mode rtctimer hook
 * @rtc: pointer to the rtc_device
 *
 * This function is called when the aie_timer expires.
 */
void rtc_aie_update_irq(struct rtc_device *rtc)
{
        rtc_handle_legacy_irq(rtc, 1, RTC_AF);
}

/**
 * rtc_uie_update_irq - UIE mode rtctimer hook
 * @rtc: pointer to the rtc_device
 *
 * This function is called when the uie_timer expires.
 */
void rtc_uie_update_irq(struct rtc_device *rtc)
{
        rtc_handle_legacy_irq(rtc, 1, RTC_UF);
}

/**
 * rtc_pie_update_irq - PIE mode hrtimer hook
 * @timer: pointer to the pie mode hrtimer
 *
 * This function is used to emulate PIE mode interrupts
 * using an hrtimer. This function is called when the periodic
 * hrtimer expires.
 */
enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
{
        struct rtc_device *rtc;
        ktime_t period;
        u64 count;

        rtc = container_of(timer, struct rtc_device, pie_timer);

        /* forward past now; count is how many periods elapsed (missed IRQs
         * are coalesced into a single report with num == count)
         */
        period = NSEC_PER_SEC / rtc->irq_freq;
        count = hrtimer_forward_now(timer, period);

        rtc_handle_legacy_irq(rtc, count, RTC_PF);

        return HRTIMER_RESTART;
}

/**
 * rtc_update_irq - Triggered when a RTC interrupt occurs.
 * @rtc: the rtc device
 * @num: how many irqs are being reported (usually one)
 * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
 * Context: any
 */
void rtc_update_irq(struct rtc_device *rtc,
                    unsigned long num, unsigned long events)
{
        if (IS_ERR_OR_NULL(rtc))
                return;

        /* @num/@events are unused here; the worker re-derives the state */
        pm_stay_awake(rtc->dev.parent);
        schedule_work(&rtc->irqwork);
}
EXPORT_SYMBOL_GPL(rtc_update_irq);

/* class_find_device() match callback: compare device name with @data */
static int __rtc_match(struct device *dev, const void *data)
{
        const char *name = data;

        if (strcmp(dev_name(dev), name) == 0)
                return 1;
        return 0;
}

struct rtc_device *rtc_class_open(const char *name)
{
        struct device *dev;
        struct rtc_device *rtc = NULL;

        dev = class_find_device(rtc_class, NULL, name, __rtc_match);
        if (dev)
                rtc = to_rtc_device(dev);

        if (rtc) {
                /* drop the device reference if the module is going away */
                if (!try_module_get(rtc->owner)) {
                        put_device(dev);
                        rtc = NULL;
                }
        }

        return rtc;
}
EXPORT_SYMBOL_GPL(rtc_class_open);

void rtc_class_close(struct rtc_device *rtc)
{
        module_put(rtc->owner);
        put_device(&rtc->dev);
}
EXPORT_SYMBOL_GPL(rtc_class_close);

static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled)
{
        /*
         * We always cancel the timer here first, because otherwise
         * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
         * when we manage to start the timer before the callback
         * returns HRTIMER_RESTART.
         *
         * We cannot use hrtimer_cancel() here as a running callback
         * could be blocked on rtc->irq_task_lock and hrtimer_cancel()
         * would spin forever.
         */
        if (hrtimer_try_to_cancel(&rtc->pie_timer) < 0)
                return -1;

        if (enabled) {
                ktime_t period = NSEC_PER_SEC / rtc->irq_freq;

                hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
        }
        return 0;
}

/**
 * rtc_irq_set_state - enable/disable 2^N Hz periodic IRQs
 * @rtc: the rtc device
 * @enabled: true to enable periodic IRQs
 * Context: any
 *
 * Note that rtc_irq_set_freq() should previously have been used to
 * specify the desired frequency of periodic IRQ.
 */
int rtc_irq_set_state(struct rtc_device *rtc, int enabled)
{
        int err = 0;

        /* retry until the cancel in rtc_update_hrtimer() succeeds */
        while (rtc_update_hrtimer(rtc, enabled) < 0)
                cpu_relax();

        rtc->pie_enabled = enabled;

        trace_rtc_irq_set_state(enabled, err);
        return err;
}

/**
 * rtc_irq_set_freq - set 2^N Hz periodic IRQ frequency for IRQ
 * @rtc: the rtc device
 * @freq: positive frequency
 * Context: any
 *
 * Note that rtc_irq_set_state() is used to enable or disable the
 * periodic IRQs.
 */
int rtc_irq_set_freq(struct rtc_device *rtc, int freq)
{
        int err = 0;

        if (freq <= 0 || freq > RTC_MAX_FREQ)
                return -EINVAL;

        rtc->irq_freq = freq;
        /* restart the hrtimer with the new period if PIE is active */
        while (rtc->pie_enabled && rtc_update_hrtimer(rtc, 1) < 0)
                cpu_relax();

        trace_rtc_irq_set_freq(freq, err);
        return err;
}

/**
 * rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue
 * @rtc: rtc device
 * @timer: timer being added.
 *
 * Enqueues a timer onto the rtc devices timerqueue and sets
 * the next alarm event appropriately.
 *
 * Sets the enabled bit on the added timer.
 *
 * Must hold ops_lock for proper serialization of timerqueue
 */
static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
{
        struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
        struct rtc_time tm;
        ktime_t now;

        timer->enabled = 1;
        __rtc_read_time(rtc, &tm);
        now = rtc_tm_to_ktime(tm);

        /* Skip over expired timers: they are about to be processed by the
         * irq worker, so the new timer must be compared against the first
         * still-pending entry, not against an already-expired one.
         */
        while (next) {
                if (next->expires >= now)
                        break;
                next = timerqueue_iterate_next(next);
        }

        timerqueue_add(&rtc->timerqueue, &timer->node);
        trace_rtc_timer_enqueue(timer);
        /* reprogram the hardware only if the new timer is now the soonest */
        if (!next || ktime_before(timer->node.expires, next->expires)) {
                struct rtc_wkalrm alarm;
                int err;

                alarm.time = rtc_ktime_to_tm(timer->node.expires);
                alarm.enabled = 1;
                err = __rtc_set_alarm(rtc, &alarm);
                if (err == -ETIME) {
                        /* already expired: let the worker fire it */
                        pm_stay_awake(rtc->dev.parent);
                        schedule_work(&rtc->irqwork);
                } else if (err) {
                        /* undo the enqueue on any other failure */
                        timerqueue_del(&rtc->timerqueue, &timer->node);
                        trace_rtc_timer_dequeue(timer);
                        timer->enabled = 0;
                        return err;
                }
        }
        return 0;
}

/* Best-effort disable of the hardware alarm interrupt (if supported). */
static void rtc_alarm_disable(struct rtc_device *rtc)
{
        if (!rtc->ops || !rtc->ops->alarm_irq_enable)
                return;

        rtc->ops->alarm_irq_enable(rtc->dev.parent, false);
        trace_rtc_alarm_irq_enable(0, 0);
}

/**
 * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
 * @rtc: rtc device
 * @timer: timer being removed.
 *
 * Removes a timer from the rtc devices timerqueue and sets
 * the next alarm event appropriately.
 *
 * Clears the enabled bit on the removed timer.
 *
 * Must hold ops_lock for proper serialization of timerqueue
 */
static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
{
        struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);

        timerqueue_del(&rtc->timerqueue, &timer->node);
        trace_rtc_timer_dequeue(timer);
        timer->enabled = 0;
        /* if we removed the head, the hardware alarm must be reprogrammed */
        if (next == &timer->node) {
                struct rtc_wkalrm alarm;
                int err;

                next = timerqueue_getnext(&rtc->timerqueue);
                if (!next) {
                        /* queue empty: no alarm needed any more */
                        rtc_alarm_disable(rtc);
                        return;
                }
                alarm.time = rtc_ktime_to_tm(next->expires);
                alarm.enabled = 1;
                err = __rtc_set_alarm(rtc, &alarm);
                if (err == -ETIME) {
                        /* new head already expired: let the worker fire it */
                        pm_stay_awake(rtc->dev.parent);
                        schedule_work(&rtc->irqwork);
                }
        }
}

/**
 * rtc_timer_do_work - Expires rtc timers
 * @work: the irqwork work_struct embedded in a struct rtc_device
 *
 * Expires rtc timers. Reprograms next alarm event if needed.
 * Called via worktask.
 *
 * Serializes access to timerqueue via ops_lock mutex
 */
void rtc_timer_do_work(struct work_struct *work)
{
        struct rtc_timer *timer;
        struct timerqueue_node *next;
        ktime_t now;
        struct rtc_time tm;

        struct rtc_device *rtc =
                container_of(work, struct rtc_device, irqwork);

        mutex_lock(&rtc->ops_lock);
again:
        __rtc_read_time(rtc, &tm);
        now = rtc_tm_to_ktime(tm);
        /* fire every timer whose expiry is not in the future */
        while ((next = timerqueue_getnext(&rtc->timerqueue))) {
                if (next->expires > now)
                        break;

                /* expire timer */
                timer = container_of(next, struct rtc_timer, node);
                timerqueue_del(&rtc->timerqueue, &timer->node);
                trace_rtc_timer_dequeue(timer);
                timer->enabled = 0;
                if (timer->func)
                        timer->func(timer->rtc);

                trace_rtc_timer_fired(timer);
                /* Re-add/fwd periodic timers */
                if (ktime_to_ns(timer->period)) {
                        timer->node.expires = ktime_add(timer->node.expires,
                                                        timer->period);
                        timer->enabled = 1;
                        timerqueue_add(&rtc->timerqueue, &timer->node);
                        trace_rtc_timer_enqueue(timer);
                }
        }

        /* Set next alarm */
        if (next) {
                struct rtc_wkalrm alarm;
                int err;
                int retry = 3;

                alarm.time = rtc_ktime_to_tm(next->expires);
                alarm.enabled = 1;
reprogram:
                err = __rtc_set_alarm(rtc, &alarm);
                if (err == -ETIME) {
                        /* head expired while reprogramming: process it */
                        goto again;
                } else if (err) {
                        if (retry-- > 0)
                                goto reprogram;

                        /* give up on this timer so the queue can progress */
                        timer = container_of(next, struct rtc_timer, node);
                        timerqueue_del(&rtc->timerqueue, &timer->node);
                        trace_rtc_timer_dequeue(timer);
                        timer->enabled = 0;
                        dev_err(&rtc->dev, "__rtc_set_alarm: err=%d\n", err);
                        goto again;
                }
        } else {
                rtc_alarm_disable(rtc);
        }

        pm_relax(rtc->dev.parent);
        mutex_unlock(&rtc->ops_lock);
}

/* rtc_timer_init - Initializes an rtc_timer
 * @timer: timer to be initialized
 * @f: function pointer to be called when timer fires
 * @rtc: pointer to the rtc_device
 *
 * Kernel interface to initializing an rtc_timer.
953 */ 954 void rtc_timer_init(struct rtc_timer *timer, void (*f)(struct rtc_device *r), 955 struct rtc_device *rtc) 956 { 957 timerqueue_init(&timer->node); 958 timer->enabled = 0; 959 timer->func = f; 960 timer->rtc = rtc; 961 } 962 963 /* rtc_timer_start - Sets an rtc_timer to fire in the future 964 * @ rtc: rtc device to be used 965 * @ timer: timer being set 966 * @ expires: time at which to expire the timer 967 * @ period: period that the timer will recur 968 * 969 * Kernel interface to set an rtc_timer 970 */ 971 int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer, 972 ktime_t expires, ktime_t period) 973 { 974 int ret = 0; 975 976 mutex_lock(&rtc->ops_lock); 977 if (timer->enabled) 978 rtc_timer_remove(rtc, timer); 979 980 timer->node.expires = expires; 981 timer->period = period; 982 983 ret = rtc_timer_enqueue(rtc, timer); 984 985 mutex_unlock(&rtc->ops_lock); 986 return ret; 987 } 988 989 /* rtc_timer_cancel - Stops an rtc_timer 990 * @ rtc: rtc device to be used 991 * @ timer: timer being set 992 * 993 * Kernel interface to cancel an rtc_timer 994 */ 995 void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer) 996 { 997 mutex_lock(&rtc->ops_lock); 998 if (timer->enabled) 999 rtc_timer_remove(rtc, timer); 1000 mutex_unlock(&rtc->ops_lock); 1001 } 1002 1003 /** 1004 * rtc_read_offset - Read the amount of rtc offset in parts per billion 1005 * @ rtc: rtc device to be used 1006 * @ offset: the offset in parts per billion 1007 * 1008 * see below for details. 1009 * 1010 * Kernel interface to read rtc clock offset 1011 * Returns 0 on success, or a negative number on error. 
1012 * If read_offset() is not implemented for the rtc, return -EINVAL 1013 */ 1014 int rtc_read_offset(struct rtc_device *rtc, long *offset) 1015 { 1016 int ret; 1017 1018 if (!rtc->ops) 1019 return -ENODEV; 1020 1021 if (!rtc->ops->read_offset) 1022 return -EINVAL; 1023 1024 mutex_lock(&rtc->ops_lock); 1025 ret = rtc->ops->read_offset(rtc->dev.parent, offset); 1026 mutex_unlock(&rtc->ops_lock); 1027 1028 trace_rtc_read_offset(*offset, ret); 1029 return ret; 1030 } 1031 1032 /** 1033 * rtc_set_offset - Adjusts the duration of the average second 1034 * @ rtc: rtc device to be used 1035 * @ offset: the offset in parts per billion 1036 * 1037 * Some rtc's allow an adjustment to the average duration of a second 1038 * to compensate for differences in the actual clock rate due to temperature, 1039 * the crystal, capacitor, etc. 1040 * 1041 * The adjustment applied is as follows: 1042 * t = t0 * (1 + offset * 1e-9) 1043 * where t0 is the measured length of 1 RTC second with offset = 0 1044 * 1045 * Kernel interface to adjust an rtc clock offset. 1046 * Return 0 on success, or a negative number on error. 1047 * If the rtc offset is not setable (or not implemented), return -EINVAL 1048 */ 1049 int rtc_set_offset(struct rtc_device *rtc, long offset) 1050 { 1051 int ret; 1052 1053 if (!rtc->ops) 1054 return -ENODEV; 1055 1056 if (!rtc->ops->set_offset) 1057 return -EINVAL; 1058 1059 mutex_lock(&rtc->ops_lock); 1060 ret = rtc->ops->set_offset(rtc->dev.parent, offset); 1061 mutex_unlock(&rtc->ops_lock); 1062 1063 trace_rtc_set_offset(offset, ret); 1064 return ret; 1065 } 1066