xref: /openbmc/linux/drivers/rtc/interface.c (revision 2113852b)
/*
 * RTC subsystem, interface functions
 *
 * Copyright (C) 2005 Tower Technologies
 * Author: Alessandro Zummo <a.zummo@towertech.it>
 *
 * based on arch/arm/common/rtctime.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/rtc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/log2.h>
#include <linux/workqueue.h>

static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);

static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
{
	int err;
	if (!rtc->ops)
		err = -ENODEV;
	else if (!rtc->ops->read_time)
		err = -EINVAL;
	else {
		memset(tm, 0, sizeof(struct rtc_time));
		err = rtc->ops->read_time(rtc->dev.parent, tm);
	}
	return err;
}

int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	err = __rtc_read_time(rtc, tm);
	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_read_time);

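/*
 * Usage sketch (illustrative only, not part of this file): a kernel caller
 * that already holds a struct rtc_device pointer can read the time and
 * convert it to seconds. The "rtc" pointer and the error handling shown
 * here are assumptions made for the example.
 *
 *	struct rtc_time tm;
 *	unsigned long secs;
 *	int err;
 *
 *	err = rtc_read_time(rtc, &tm);
 *	if (!err)
 *		err = rtc_tm_to_time(&tm, &secs);
 *	if (err)
 *		pr_debug("reading RTC time failed: %d\n", err);
 */
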
int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
{
	int err;

	err = rtc_valid_tm(tm);
	if (err != 0)
		return err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	if (!rtc->ops)
		err = -ENODEV;
	else if (rtc->ops->set_time)
		err = rtc->ops->set_time(rtc->dev.parent, tm);
	else if (rtc->ops->set_mmss) {
		unsigned long secs;
		err = rtc_tm_to_time(tm, &secs);
		if (err == 0)
			err = rtc->ops->set_mmss(rtc->dev.parent, secs);
	} else
		err = -EINVAL;

	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_set_time);

int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	if (!rtc->ops)
		err = -ENODEV;
	else if (rtc->ops->set_mmss)
		err = rtc->ops->set_mmss(rtc->dev.parent, secs);
	else if (rtc->ops->read_time && rtc->ops->set_time) {
		struct rtc_time new, old;

		err = rtc->ops->read_time(rtc->dev.parent, &old);
		if (err == 0) {
			rtc_time_to_tm(secs, &new);

			/*
			 * Avoid writing when we're going to change the day
			 * of the month; we will retry in the next minute.
			 * This basically means that the RTC must not drift
			 * by more than 1 minute in 11 minutes.
			 */
			if (!((old.tm_hour == 23 && old.tm_min == 59) ||
				(new.tm_hour == 23 && new.tm_min == 59)))
				err = rtc->ops->set_time(rtc->dev.parent,
						&new);
		}
	}
	else
		err = -EINVAL;

	mutex_unlock(&rtc->ops_lock);

	return err;
}
EXPORT_SYMBOL_GPL(rtc_set_mmss);

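/*
 * Usage sketch (illustrative only, not taken from this file): a periodic
 * sync of the RTC from system time could look roughly like the following.
 * The "rtc" pointer and the use of get_seconds() are assumptions for the
 * example; the point is that a skipped write near a day rollover (see the
 * comment above) is simply retried on the next sync.
 *
 *	int err = rtc_set_mmss(rtc, get_seconds());
 *
 *	if (err)
 *		pr_debug("RTC sync failed (%d), retrying later\n", err);
 */
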
static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	if (rtc->ops == NULL)
		err = -ENODEV;
	else if (!rtc->ops->read_alarm)
		err = -EINVAL;
	else {
		memset(alarm, 0, sizeof(struct rtc_wkalrm));
		err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
	}

	mutex_unlock(&rtc->ops_lock);
	return err;
}

int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;
	struct rtc_time before, now;
	int first_time = 1;
	unsigned long t_now, t_alm;
	enum { none, day, month, year } missing = none;
	unsigned days;

	/* The lower level RTC driver may return -1 in some fields,
	 * creating invalid alarm->time values, for reasons like:
	 *
	 *   - The hardware may not be capable of filling them in;
	 *     many alarms match only on time-of-day fields, not
	 *     day/month/year calendar data.
	 *
	 *   - Some hardware uses illegal values as "wildcard" match
	 *     values, which non-Linux firmware (like a BIOS) may try
	 *     to set up as e.g. "alarm 15 minutes after each hour".
	 *     Linux uses only oneshot alarms.
	 *
	 * When we see that here, we deal with it by using values from
	 * a current RTC timestamp for any missing (-1) values.  The
	 * RTC driver prevents "periodic alarm" modes.
	 *
	 * But this can be racy, because some fields of the RTC timestamp
	 * may have wrapped in the interval since we read the RTC alarm,
	 * which would lead to us inserting inconsistent values in place
	 * of the -1 fields.
	 *
	 * Reading the alarm and timestamp in the reverse sequence
	 * would have the same race condition, and not solve the issue.
	 *
	 * So, we must first read the RTC timestamp,
	 * then read the RTC alarm value,
	 * and then read a second RTC timestamp.
	 *
	 * If any fields of the second timestamp have changed
	 * when compared with the first timestamp, then we know
	 * our timestamp may be inconsistent with that used by
	 * the low-level rtc_read_alarm_internal() function.
	 *
	 * So, when the two timestamps disagree, we just loop and do
	 * the process again to get a fully consistent set of values.
	 *
	 * This could all instead be done in the lower level driver,
	 * but since more than one lower level RTC implementation needs it,
	 * it's probably best to do it here instead of there.
	 */

	/* Get the "before" timestamp */
	err = rtc_read_time(rtc, &before);
	if (err < 0)
		return err;
	do {
		if (!first_time)
			memcpy(&before, &now, sizeof(struct rtc_time));
		first_time = 0;

		/* get the RTC alarm values, which may be incomplete */
		err = rtc_read_alarm_internal(rtc, alarm);
		if (err)
			return err;

		/* full-function RTCs won't have such missing fields */
		if (rtc_valid_tm(&alarm->time) == 0)
			return 0;

		/* get the "after" timestamp, to detect wrapped fields */
		err = rtc_read_time(rtc, &now);
		if (err < 0)
			return err;

		/* note that tm_sec is a "don't care" value here: */
	} while (   before.tm_min   != now.tm_min
		 || before.tm_hour  != now.tm_hour
		 || before.tm_mon   != now.tm_mon
		 || before.tm_year  != now.tm_year);

	/* Fill in the missing alarm fields using the timestamp; we
	 * know there's at least one since alarm->time is invalid.
	 */
	if (alarm->time.tm_sec == -1)
		alarm->time.tm_sec = now.tm_sec;
	if (alarm->time.tm_min == -1)
		alarm->time.tm_min = now.tm_min;
	if (alarm->time.tm_hour == -1)
		alarm->time.tm_hour = now.tm_hour;

	/* For simplicity, only support date rollover for now */
	if (alarm->time.tm_mday == -1) {
		alarm->time.tm_mday = now.tm_mday;
		missing = day;
	}
	if (alarm->time.tm_mon == -1) {
		alarm->time.tm_mon = now.tm_mon;
		if (missing == none)
			missing = month;
	}
	if (alarm->time.tm_year == -1) {
		alarm->time.tm_year = now.tm_year;
		if (missing == none)
			missing = year;
	}

	/* with luck, no rollover is needed */
	rtc_tm_to_time(&now, &t_now);
	rtc_tm_to_time(&alarm->time, &t_alm);
	if (t_now < t_alm)
		goto done;

	switch (missing) {

	/* 24 hour rollover ... if it's now 10am Monday, an alarm that
	 * will trigger at 5am will do so at 5am Tuesday, which
	 * could also be in the next month or year.  This is a common
	 * case, especially for PCs.
	 */
	case day:
		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
		t_alm += 24 * 60 * 60;
		rtc_time_to_tm(t_alm, &alarm->time);
		break;

	/* Month rollover ... if it's the 31st, an alarm on the 3rd will
	 * be next month.  An alarm matching on the 30th, 29th, or 28th
	 * may end up in the month after that!  Many newer PCs support
	 * this type of alarm.
	 */
	case month:
		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
		do {
			if (alarm->time.tm_mon < 11)
				alarm->time.tm_mon++;
			else {
				alarm->time.tm_mon = 0;
				alarm->time.tm_year++;
			}
			days = rtc_month_days(alarm->time.tm_mon,
					alarm->time.tm_year);
		} while (days < alarm->time.tm_mday);
		break;

	/* Year rollover ... easy except for leap years! */
	case year:
		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
		do {
			alarm->time.tm_year++;
		} while (rtc_valid_tm(&alarm->time) != 0);
		break;

	default:
		dev_warn(&rtc->dev, "alarm rollover not handled\n");
	}

done:
	return 0;
}

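/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * lower-level read_alarm callback that only knows the time-of-day match
 * fields would leave the calendar fields at -1, which __rtc_read_alarm()
 * above then fills in from a consistent RTC timestamp. The foo_* register
 * helpers and constants are made up for the example.
 *
 *	static int foo_rtc_read_alarm(struct device *dev,
 *				      struct rtc_wkalrm *alrm)
 *	{
 *		alrm->time.tm_sec  = foo_read_reg(dev, FOO_ALARM_SEC);
 *		alrm->time.tm_min  = foo_read_reg(dev, FOO_ALARM_MIN);
 *		alrm->time.tm_hour = foo_read_reg(dev, FOO_ALARM_HOUR);
 *		alrm->time.tm_mday = -1;	// hardware has no date match
 *		alrm->time.tm_mon  = -1;
 *		alrm->time.tm_year = -1;
 *		alrm->enabled = foo_read_reg(dev, FOO_ALARM_CTRL) & FOO_AIE;
 *		return 0;
 *	}
 */
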
int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;
	if (rtc->ops == NULL)
		err = -ENODEV;
	else if (!rtc->ops->read_alarm)
		err = -EINVAL;
	else {
		memset(alarm, 0, sizeof(struct rtc_wkalrm));
		alarm->enabled = rtc->aie_timer.enabled;
		alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
	}
	mutex_unlock(&rtc->ops_lock);

	return err;
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);

static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	struct rtc_time tm;
	long now, scheduled;
	int err;

	err = rtc_valid_tm(&alarm->time);
	if (err)
		return err;
	rtc_tm_to_time(&alarm->time, &scheduled);

	/* Make sure we're not setting alarms in the past */
	err = __rtc_read_time(rtc, &tm);
	rtc_tm_to_time(&tm, &now);
	if (scheduled <= now)
		return -ETIME;
	/*
	 * XXX - We just checked to make sure the alarm time is not
	 * in the past, but there is still a race window: if the alarm
	 * is set for the next second, the second may tick over right
	 * here, before we actually program the alarm.
	 */

	if (!rtc->ops)
		err = -ENODEV;
	else if (!rtc->ops->set_alarm)
		err = -EINVAL;
	else
		err = rtc->ops->set_alarm(rtc->dev.parent, alarm);

	return err;
}

int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;

	err = rtc_valid_tm(&alarm->time);
	if (err != 0)
		return err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;
	if (rtc->aie_timer.enabled) {
		rtc_timer_remove(rtc, &rtc->aie_timer);
	}
	rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
	rtc->aie_timer.period = ktime_set(0, 0);
	if (alarm->enabled) {
		err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
	}
	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_set_alarm);

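/*
 * Usage sketch (illustrative only): a kernel caller arming a one-shot alarm
 * ten minutes from now. The "rtc" pointer is an assumption for the example;
 * rtc_read_time(), rtc_tm_to_time(), rtc_time_to_tm() and rtc_set_alarm()
 * are the interfaces defined in this file and the RTC core headers.
 *
 *	struct rtc_wkalrm alrm;
 *	struct rtc_time tm;
 *	unsigned long now;
 *
 *	rtc_read_time(rtc, &tm);
 *	rtc_tm_to_time(&tm, &now);
 *	rtc_time_to_tm(now + 10 * 60, &alrm.time);
 *	alrm.enabled = 1;
 *	if (rtc_set_alarm(rtc, &alrm))
 *		pr_debug("could not set RTC alarm\n");
 */
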
/* Called once per device from rtc_device_register */
int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;

	err = rtc_valid_tm(&alarm->time);
	if (err != 0)
		return err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
	rtc->aie_timer.period = ktime_set(0, 0);
	if (alarm->enabled) {
		rtc->aie_timer.enabled = 1;
		timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
	}
	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_initialize_alarm);

int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
	int err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	if (rtc->aie_timer.enabled != enabled) {
		if (enabled)
			err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
		else
			rtc_timer_remove(rtc, &rtc->aie_timer);
	}

	if (err)
		/* nothing */;
	else if (!rtc->ops)
		err = -ENODEV;
	else if (!rtc->ops->alarm_irq_enable)
		err = -EINVAL;
	else
		err = rtc->ops->alarm_irq_enable(rtc->dev.parent, enabled);

	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_alarm_irq_enable);

int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
	int err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
	if (enabled == 0 && rtc->uie_irq_active) {
		mutex_unlock(&rtc->ops_lock);
		return rtc_dev_update_irq_enable_emul(rtc, 0);
	}
#endif
	/* make sure we're changing state */
	if (rtc->uie_rtctimer.enabled == enabled)
		goto out;

	if (enabled) {
		struct rtc_time tm;
		ktime_t now, onesec;

		__rtc_read_time(rtc, &tm);
		onesec = ktime_set(1, 0);
		now = rtc_tm_to_ktime(tm);
		rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
		rtc->uie_rtctimer.period = ktime_set(1, 0);
		err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
	} else
		rtc_timer_remove(rtc, &rtc->uie_rtctimer);

out:
	mutex_unlock(&rtc->ops_lock);
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
	/*
	 * Enable emulation if the driver did not provide
	 * the update_irq_enable function pointer or if it returned
	 * -EINVAL to signal that it has been configured without
	 * interrupts or that they are not available at the moment.
	 */
	if (err == -EINVAL)
		err = rtc_dev_update_irq_enable_emul(rtc, enabled);
#endif
	return err;
}
EXPORT_SYMBOL_GPL(rtc_update_irq_enable);

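/*
 * Note (illustrative, not taken from this file): with update IRQs enabled,
 * uie_rtctimer is a periodic rtc_timer with a one second period, so
 * rtc_timer_do_work() re-arms it every second and rtc_uie_update_irq()
 * reports one RTC_UF event per second. A kernel caller would simply do:
 *
 *	err = rtc_update_irq_enable(rtc, 1);	// start 1 Hz update events
 *	...
 *	err = rtc_update_irq_enable(rtc, 0);	// stop them again
 *
 * where "rtc" is assumed to be a valid struct rtc_device pointer.
 */
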

/**
 * rtc_handle_legacy_irq - AIE, UIE and PIE event hook
 * @rtc: pointer to the rtc device
 * @num: number of IRQs being reported (usually one)
 * @mode: one of RTC_AF, RTC_UF or RTC_PF
 *
 * This function is called when an AIE, UIE or PIE mode interrupt
 * has occurred (or been emulated).
 *
 * Triggers the registered irq_task function callback.
 */
void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
{
	unsigned long flags;

	/* mark one irq of the appropriate mode */
	spin_lock_irqsave(&rtc->irq_lock, flags);
	rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF|mode);
	spin_unlock_irqrestore(&rtc->irq_lock, flags);

	/* call the task func */
	spin_lock_irqsave(&rtc->irq_task_lock, flags);
	if (rtc->irq_task)
		rtc->irq_task->func(rtc->irq_task->private_data);
	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);

	wake_up_interruptible(&rtc->irq_queue);
	kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
}

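/*
 * Note on the irq_data encoding above (illustrative sketch): the low byte
 * holds RTC_IRQF plus the mode flags and the upper bits accumulate the
 * event count, so a reader decodes it as in the following; the variable
 * names are assumptions.
 *
 *	unsigned long data = rtc->irq_data;
 *	unsigned long count = data >> 8;
 *	unsigned long flags = data & 0xff;	// RTC_IRQF | RTC_AF/UF/PF
 */
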

/**
 * rtc_aie_update_irq - AIE mode rtctimer hook
 * @private: pointer to the rtc_device
 *
 * This function is called when the aie_timer expires.
 */
void rtc_aie_update_irq(void *private)
{
	struct rtc_device *rtc = (struct rtc_device *)private;
	rtc_handle_legacy_irq(rtc, 1, RTC_AF);
}

/**
 * rtc_uie_update_irq - UIE mode rtctimer hook
 * @private: pointer to the rtc_device
 *
 * This function is called when the uie_rtctimer expires.
 */
void rtc_uie_update_irq(void *private)
{
	struct rtc_device *rtc = (struct rtc_device *)private;
	rtc_handle_legacy_irq(rtc, 1, RTC_UF);
}

/**
 * rtc_pie_update_irq - PIE mode hrtimer hook
 * @timer: pointer to the pie mode hrtimer
 *
 * This function is used to emulate PIE mode interrupts
 * using an hrtimer. This function is called when the periodic
 * hrtimer expires.
 */
enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
{
	struct rtc_device *rtc;
	ktime_t period;
	int count;
	rtc = container_of(timer, struct rtc_device, pie_timer);

	period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
	count = hrtimer_forward_now(timer, period);

	rtc_handle_legacy_irq(rtc, count, RTC_PF);

	return HRTIMER_RESTART;
}

/**
 * rtc_update_irq - Triggered when an RTC interrupt occurs.
 * @rtc: the rtc device
 * @num: how many irqs are being reported (usually one)
 * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
 * Context: any
 */
void rtc_update_irq(struct rtc_device *rtc,
		unsigned long num, unsigned long events)
{
	schedule_work(&rtc->irqwork);
}
EXPORT_SYMBOL_GPL(rtc_update_irq);

static int __rtc_match(struct device *dev, void *data)
{
	char *name = (char *)data;

	if (strcmp(dev_name(dev), name) == 0)
		return 1;
	return 0;
}

struct rtc_device *rtc_class_open(char *name)
{
	struct device *dev;
	struct rtc_device *rtc = NULL;

	dev = class_find_device(rtc_class, NULL, name, __rtc_match);
	if (dev)
		rtc = to_rtc_device(dev);

	if (rtc) {
		if (!try_module_get(rtc->owner)) {
			put_device(dev);
			rtc = NULL;
		}
	}

	return rtc;
}
EXPORT_SYMBOL_GPL(rtc_class_open);

void rtc_class_close(struct rtc_device *rtc)
{
	module_put(rtc->owner);
	put_device(&rtc->dev);
}
EXPORT_SYMBOL_GPL(rtc_class_close);

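/*
 * Usage sketch (illustrative only): a subsystem that wants to use an RTC by
 * name opens it, uses it, and drops the reference again. The device name
 * "rtc0" is an assumption made for the example.
 *
 *	struct rtc_device *rtc = rtc_class_open("rtc0");
 *	struct rtc_time tm;
 *
 *	if (!rtc)
 *		return -ENODEV;
 *	if (rtc_read_time(rtc, &tm) == 0)
 *		dev_info(&rtc->dev, "%04d-%02d-%02d read from RTC\n",
 *			 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
 *	rtc_class_close(rtc);
 */
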
int rtc_irq_register(struct rtc_device *rtc, struct rtc_task *task)
{
	int retval = -EBUSY;

	if (task == NULL || task->func == NULL)
		return -EINVAL;

	/* Cannot register while the char dev is in use */
	if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
		return -EBUSY;

	spin_lock_irq(&rtc->irq_task_lock);
	if (rtc->irq_task == NULL) {
		rtc->irq_task = task;
		retval = 0;
	}
	spin_unlock_irq(&rtc->irq_task_lock);

	clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);

	return retval;
}
EXPORT_SYMBOL_GPL(rtc_irq_register);

void rtc_irq_unregister(struct rtc_device *rtc, struct rtc_task *task)
{
	spin_lock_irq(&rtc->irq_task_lock);
	if (rtc->irq_task == task)
		rtc->irq_task = NULL;
	spin_unlock_irq(&rtc->irq_task_lock);
}
EXPORT_SYMBOL_GPL(rtc_irq_unregister);

static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled)
{
	/*
	 * We always cancel the timer here first, because otherwise
	 * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
	 * when we manage to start the timer before the callback
	 * returns HRTIMER_RESTART.
	 *
	 * We cannot use hrtimer_cancel() here as a running callback
	 * could be blocked on rtc->irq_task_lock and hrtimer_cancel()
	 * would spin forever.
	 */
	if (hrtimer_try_to_cancel(&rtc->pie_timer) < 0)
		return -1;

	if (enabled) {
		ktime_t period = ktime_set(0, NSEC_PER_SEC / rtc->irq_freq);

		hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
	}
	return 0;
}

/**
 * rtc_irq_set_state - enable/disable 2^N Hz periodic IRQs
 * @rtc: the rtc device
 * @task: currently registered with rtc_irq_register()
 * @enabled: true to enable periodic IRQs
 * Context: any
 *
 * Note that rtc_irq_set_freq() should previously have been used to
 * specify the desired frequency of periodic IRQ task->func() callbacks.
 */
int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled)
{
	int err = 0;
	unsigned long flags;

retry:
	spin_lock_irqsave(&rtc->irq_task_lock, flags);
	if (rtc->irq_task != NULL && task == NULL)
		err = -EBUSY;
	if (rtc->irq_task != task)
		err = -EACCES;
	if (!err) {
		if (rtc_update_hrtimer(rtc, enabled) < 0) {
			spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
			cpu_relax();
			goto retry;
		}
		rtc->pie_enabled = enabled;
	}
	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_state);

/**
 * rtc_irq_set_freq - set 2^N Hz periodic IRQ frequency for IRQ
 * @rtc: the rtc device
 * @task: currently registered with rtc_irq_register()
 * @freq: positive frequency with which task->func() will be called
 * Context: any
 *
 * Note that rtc_irq_set_state() is used to enable or disable the
 * periodic IRQs.
 */
int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
{
	int err = 0;
	unsigned long flags;

	if (freq <= 0 || freq > RTC_MAX_FREQ)
		return -EINVAL;
retry:
	spin_lock_irqsave(&rtc->irq_task_lock, flags);
	if (rtc->irq_task != NULL && task == NULL)
		err = -EBUSY;
	if (rtc->irq_task != task)
		err = -EACCES;
	if (!err) {
		rtc->irq_freq = freq;
		if (rtc->pie_enabled && rtc_update_hrtimer(rtc, 1) < 0) {
			spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
			cpu_relax();
			goto retry;
		}
	}
	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_freq);

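/*
 * Usage sketch (illustrative only): a legacy-style user of periodic IRQs
 * registers a task, picks a 2^N frequency and then enables the interrupts.
 * The callback, the task object and the 64 Hz frequency are assumptions
 * made for the example.
 *
 *	static void my_periodic_cb(void *private_data)
 *	{
 *		// runs (approximately) irq_freq times per second
 *	}
 *
 *	static struct rtc_task my_task = {
 *		.func = my_periodic_cb,
 *	};
 *
 *	err = rtc_irq_register(rtc, &my_task);
 *	if (!err)
 *		err = rtc_irq_set_freq(rtc, &my_task, 64);
 *	if (!err)
 *		err = rtc_irq_set_state(rtc, &my_task, 1);
 */
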
/**
 * rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue
 * @rtc: rtc device
 * @timer: timer being added.
 *
 * Enqueues a timer onto the rtc device's timerqueue and sets
 * the next alarm event appropriately.
 *
 * Sets the enabled bit on the added timer.
 *
 * Must hold ops_lock for proper serialization of the timerqueue.
 */
static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
{
	timer->enabled = 1;
	timerqueue_add(&rtc->timerqueue, &timer->node);
	if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
		struct rtc_wkalrm alarm;
		int err;
		alarm.time = rtc_ktime_to_tm(timer->node.expires);
		alarm.enabled = 1;
		err = __rtc_set_alarm(rtc, &alarm);
		if (err == -ETIME)
			schedule_work(&rtc->irqwork);
		else if (err) {
			timerqueue_del(&rtc->timerqueue, &timer->node);
			timer->enabled = 0;
			return err;
		}
	}
	return 0;
}

/**
 * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
 * @rtc: rtc device
 * @timer: timer being removed.
 *
 * Removes a timer from the rtc device's timerqueue and sets
 * the next alarm event appropriately.
 *
 * Clears the enabled bit on the removed timer.
 *
 * Must hold ops_lock for proper serialization of the timerqueue.
 */
static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
{
	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
	timerqueue_del(&rtc->timerqueue, &timer->node);
	timer->enabled = 0;
	if (next == &timer->node) {
		struct rtc_wkalrm alarm;
		int err;
		next = timerqueue_getnext(&rtc->timerqueue);
		if (!next)
			return;
		alarm.time = rtc_ktime_to_tm(next->expires);
		alarm.enabled = 1;
		err = __rtc_set_alarm(rtc, &alarm);
		if (err == -ETIME)
			schedule_work(&rtc->irqwork);
	}
}

/**
 * rtc_timer_do_work - Expires rtc timers
 * @work: the irqwork work_struct embedded in the rtc_device
 *
 * Expires rtc timers. Reprograms the next alarm event if needed.
 * Called via the irqwork work queue item.
 *
 * Serializes access to the timerqueue via the ops_lock mutex.
 */
void rtc_timer_do_work(struct work_struct *work)
{
	struct rtc_timer *timer;
	struct timerqueue_node *next;
	ktime_t now;
	struct rtc_time tm;

	struct rtc_device *rtc =
		container_of(work, struct rtc_device, irqwork);

	mutex_lock(&rtc->ops_lock);
again:
	__rtc_read_time(rtc, &tm);
	now = rtc_tm_to_ktime(tm);
	while ((next = timerqueue_getnext(&rtc->timerqueue))) {
		if (next->expires.tv64 > now.tv64)
			break;

		/* expire timer */
		timer = container_of(next, struct rtc_timer, node);
		timerqueue_del(&rtc->timerqueue, &timer->node);
		timer->enabled = 0;
		if (timer->task.func)
			timer->task.func(timer->task.private_data);

		/* Re-add/fwd periodic timers */
		if (ktime_to_ns(timer->period)) {
			timer->node.expires = ktime_add(timer->node.expires,
							timer->period);
			timer->enabled = 1;
			timerqueue_add(&rtc->timerqueue, &timer->node);
		}
	}

	/* Set next alarm */
	if (next) {
		struct rtc_wkalrm alarm;
		int err;
		alarm.time = rtc_ktime_to_tm(next->expires);
		alarm.enabled = 1;
		err = __rtc_set_alarm(rtc, &alarm);
		if (err == -ETIME)
			goto again;
	}

	mutex_unlock(&rtc->ops_lock);
}

/* rtc_timer_init - Initializes an rtc_timer
 * @timer: timer to be initialized
 * @f: function pointer to be called when timer fires
 * @data: private data passed to function pointer
 *
 * Kernel interface to initializing an rtc_timer.
 */
void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data)
{
	timerqueue_init(&timer->node);
	timer->enabled = 0;
	timer->task.func = f;
	timer->task.private_data = data;
}

/* rtc_timer_start - Sets an rtc_timer to fire in the future
 * @rtc: rtc device to be used
 * @timer: timer being set
 * @expires: time at which the timer expires
 * @period: period at which the timer recurs (zero for a one-shot timer)
 *
 * Kernel interface to set an rtc_timer
 */
int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
			ktime_t expires, ktime_t period)
{
	int ret = 0;
	mutex_lock(&rtc->ops_lock);
	if (timer->enabled)
		rtc_timer_remove(rtc, timer);

	timer->node.expires = expires;
	timer->period = period;

	ret = rtc_timer_enqueue(rtc, timer);

	mutex_unlock(&rtc->ops_lock);
	return ret;
}

/* rtc_timer_cancel - Stops an rtc_timer
 * @rtc: rtc device to be used
 * @timer: timer being cancelled
 *
 * Kernel interface to cancel an rtc_timer
 */
int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer)
{
	int ret = 0;
	mutex_lock(&rtc->ops_lock);
	if (timer->enabled)
		rtc_timer_remove(rtc, timer);
	mutex_unlock(&rtc->ops_lock);
	return ret;
}
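
/*
 * Usage sketch (illustrative only): arming a one-shot rtc_timer to fire ten
 * seconds from the current RTC time. The callback, the timer object and the
 * "rtc" pointer are assumptions made for the example.
 *
 *	static void my_timer_fn(void *private_data)
 *	{
 *		pr_debug("rtc timer fired\n");
 *	}
 *
 *	static struct rtc_timer my_timer;
 *	struct rtc_time tm;
 *	ktime_t expires;
 *
 *	rtc_timer_init(&my_timer, my_timer_fn, NULL);
 *	rtc_read_time(rtc, &tm);
 *	expires = ktime_add(rtc_tm_to_ktime(tm), ktime_set(10, 0));
 *	rtc_timer_start(rtc, &my_timer, expires, ktime_set(0, 0));
 *	...
 *	rtc_timer_cancel(rtc, &my_timer);
 */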