xref: /openbmc/linux/drivers/rtc/interface.c (revision e109ebd1)
/*
 * RTC subsystem, interface functions
 *
 * Copyright (C) 2005 Tower Technologies
 * Author: Alessandro Zummo <a.zummo@towertech.it>
 *
 * based on arch/arm/common/rtctime.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/rtc.h>

int rtc_read_time(struct class_device *class_dev, struct rtc_time *tm)
{
	int err;
	struct rtc_device *rtc = to_rtc_device(class_dev);

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return -EBUSY;

	if (!rtc->ops)
		err = -ENODEV;
	else if (!rtc->ops->read_time)
		err = -EINVAL;
	else {
		memset(tm, 0, sizeof(struct rtc_time));
		err = rtc->ops->read_time(class_dev->dev, tm);
	}

	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_read_time);

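/*
 * Example use of rtc_read_time() above (an illustrative sketch, not part of
 * the original file): a kernel user already holding the RTC's class device,
 * e.g. one returned by rtc_class_open() below, could read the current time
 * like this.  The variable name "rtc_cdev" is hypothetical.
 *
 *	struct rtc_time tm;
 *	int err = rtc_read_time(rtc_cdev, &tm);
 *
 *	if (err == 0)
 *		printk(KERN_INFO "rtc: %04d-%02d-%02d %02d:%02d:%02d\n",
 *			tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
 *			tm.tm_hour, tm.tm_min, tm.tm_sec);
 */
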
int rtc_set_time(struct class_device *class_dev, struct rtc_time *tm)
{
	int err;
	struct rtc_device *rtc = to_rtc_device(class_dev);

	err = rtc_valid_tm(tm);
	if (err != 0)
		return err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return -EBUSY;

	if (!rtc->ops)
		err = -ENODEV;
	else if (!rtc->ops->set_time)
		err = -EINVAL;
	else
		err = rtc->ops->set_time(class_dev->dev, tm);

	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_set_time);

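/*
 * Example use of rtc_set_time() above (illustrative sketch): the caller
 * fills in a broken-down time using the usual struct rtc_time conventions
 * (tm_year counts from 1900, tm_mon from 0); rtc_set_time() itself rejects
 * invalid fields via rtc_valid_tm().  The date is arbitrary and "rtc_cdev"
 * is hypothetical.
 *
 *	struct rtc_time tm;
 *	int err;
 *
 *	rtc_time_to_tm(mktime(2006, 6, 1, 12, 0, 0), &tm);
 *	err = rtc_set_time(rtc_cdev, &tm);
 */
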
int rtc_set_mmss(struct class_device *class_dev, unsigned long secs)
{
	int err;
	struct rtc_device *rtc = to_rtc_device(class_dev);

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return -EBUSY;

	if (!rtc->ops)
		err = -ENODEV;
	else if (rtc->ops->set_mmss)
		err = rtc->ops->set_mmss(class_dev->dev, secs);
	else if (rtc->ops->read_time && rtc->ops->set_time) {
		struct rtc_time new, old;

		err = rtc->ops->read_time(class_dev->dev, &old);
		if (err == 0) {
			rtc_time_to_tm(secs, &new);

			/*
			 * Avoid writing when we're going to change the day
			 * of the month; we will retry in the next minute.
			 * This basically means that the RTC must not drift
			 * by more than 1 minute in 11 minutes.
			 */
			if (!((old.tm_hour == 23 && old.tm_min == 59) ||
				(new.tm_hour == 23 && new.tm_min == 59)))
				err = rtc->ops->set_time(class_dev->dev, &new);
		}
	} else
		err = -EINVAL;

	mutex_unlock(&rtc->ops_lock);

	return err;
}
EXPORT_SYMBOL_GPL(rtc_set_mmss);

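/*
 * Example use of rtc_set_mmss() above (illustrative sketch): this call is
 * meant for periodic resynchronisation of the RTC from system time, e.g.
 *
 *	int err = rtc_set_mmss(rtc_cdev, get_seconds());
 *
 * The write is skipped near a day rollover (23:59) and simply retried on
 * the next sync, which is why the comment above assumes the RTC drifts by
 * less than a minute between roughly 11-minute syncs.
 */
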
int rtc_read_alarm(struct class_device *class_dev, struct rtc_wkalrm *alarm)
{
	int err;
	struct rtc_device *rtc = to_rtc_device(class_dev);

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return -EBUSY;

	if (rtc->ops == NULL)
		err = -ENODEV;
	else if (!rtc->ops->read_alarm)
		err = -EINVAL;
	else {
		memset(alarm, 0, sizeof(struct rtc_wkalrm));
		err = rtc->ops->read_alarm(class_dev->dev, alarm);
	}

	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);

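/*
 * Example use of rtc_read_alarm() above (illustrative sketch): query the
 * alarm currently programmed into the hardware.  "rtc_cdev" is
 * hypothetical.
 *
 *	struct rtc_wkalrm alrm;
 *	int err;
 *
 *	err = rtc_read_alarm(rtc_cdev, &alrm);
 *	if (err == 0 && alrm.enabled)
 *		... the alarm will fire at alrm.time ...
 */
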
int rtc_set_alarm(struct class_device *class_dev, struct rtc_wkalrm *alarm)
{
	int err;
	struct rtc_device *rtc = to_rtc_device(class_dev);

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return -EBUSY;

	if (!rtc->ops)
		err = -ENODEV;
	else if (!rtc->ops->set_alarm)
		err = -EINVAL;
	else
		err = rtc->ops->set_alarm(class_dev->dev, alarm);

	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_set_alarm);

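/*
 * Example use of rtc_set_alarm() above (illustrative sketch): program an
 * alarm five minutes from the current RTC time.  Rollover handling relies
 * on the rtc_tm_to_time()/rtc_time_to_tm() helpers from rtc-lib; the other
 * names are hypothetical.
 *
 *	struct rtc_wkalrm alrm;
 *	unsigned long now;
 *	int err;
 *
 *	err = rtc_read_time(rtc_cdev, &alrm.time);
 *	if (err == 0) {
 *		rtc_tm_to_time(&alrm.time, &now);
 *		rtc_time_to_tm(now + 5 * 60, &alrm.time);
 *		alrm.enabled = 1;
 *		err = rtc_set_alarm(rtc_cdev, &alrm);
 *	}
 */
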
/**
 * rtc_update_irq - report RTC periodic, alarm, and/or update irqs
 * @class_dev: the rtc's class device
 * @num: how many irqs are being reported (usually one)
 * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
 * Context: in_interrupt(), irqs blocked
 */
void rtc_update_irq(struct class_device *class_dev,
		unsigned long num, unsigned long events)
{
	struct rtc_device *rtc = to_rtc_device(class_dev);

	spin_lock(&rtc->irq_lock);
	rtc->irq_data = (rtc->irq_data + (num << 8)) | events;
	spin_unlock(&rtc->irq_lock);

	spin_lock(&rtc->irq_task_lock);
	if (rtc->irq_task)
		rtc->irq_task->func(rtc->irq_task->private_data);
	spin_unlock(&rtc->irq_task_lock);

	wake_up_interruptible(&rtc->irq_queue);
	kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
}
EXPORT_SYMBOL_GPL(rtc_update_irq);

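/*
 * Example use of rtc_update_irq() above (illustrative sketch): an RTC chip
 * driver calls this from its interrupt handler once it has decoded the
 * hardware status register.  The fragment below would live in such a
 * handler; "chip_status" and "FOO_ALARM_IRQ" are hypothetical.
 *
 *	if (chip_status & FOO_ALARM_IRQ)
 *		rtc_update_irq(class_dev, 1, RTC_IRQF | RTC_AF);
 */
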
struct class_device *rtc_class_open(char *name)
{
	struct class_device *class_dev = NULL,
				*class_dev_tmp;

	down(&rtc_class->sem);
	list_for_each_entry(class_dev_tmp, &rtc_class->children, node) {
		if (strncmp(class_dev_tmp->class_id, name, BUS_ID_SIZE) == 0) {
			class_dev = class_device_get(class_dev_tmp);
			break;
		}
	}

	if (class_dev) {
		if (!try_module_get(to_rtc_device(class_dev)->owner))
			class_dev = NULL;
	}
	up(&rtc_class->sem);

	return class_dev;
}
EXPORT_SYMBOL_GPL(rtc_class_open);

void rtc_class_close(struct class_device *class_dev)
{
	module_put(to_rtc_device(class_dev)->owner);
	class_device_put(class_dev);
}
EXPORT_SYMBOL_GPL(rtc_class_close);

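/*
 * Example use of rtc_class_open()/rtc_class_close() above (illustrative
 * sketch): in-kernel users look an RTC up by its class id and must drop
 * the reference when done.  "rtc0" is the typical name of the first
 * registered RTC; any registered device name works.
 *
 *	struct rtc_time tm;
 *	struct class_device *class_dev = rtc_class_open("rtc0");
 *
 *	if (class_dev) {
 *		if (rtc_read_time(class_dev, &tm) == 0) {
 *			... use tm ...
 *		}
 *		rtc_class_close(class_dev);
 *	}
 */
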
int rtc_irq_register(struct class_device *class_dev, struct rtc_task *task)
{
	int retval = -EBUSY;
	struct rtc_device *rtc = to_rtc_device(class_dev);

	if (task == NULL || task->func == NULL)
		return -EINVAL;

	spin_lock_irq(&rtc->irq_task_lock);
	if (rtc->irq_task == NULL) {
		rtc->irq_task = task;
		retval = 0;
	}
	spin_unlock_irq(&rtc->irq_task_lock);

	return retval;
}
EXPORT_SYMBOL_GPL(rtc_irq_register);

void rtc_irq_unregister(struct class_device *class_dev, struct rtc_task *task)
{
	struct rtc_device *rtc = to_rtc_device(class_dev);

	spin_lock_irq(&rtc->irq_task_lock);
	if (rtc->irq_task == task)
		rtc->irq_task = NULL;
	spin_unlock_irq(&rtc->irq_task_lock);
}
EXPORT_SYMBOL_GPL(rtc_irq_unregister);

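/*
 * Example use of rtc_irq_register()/rtc_irq_unregister() above
 * (illustrative sketch): only one rtc_task may be bound to an RTC at a
 * time, so a second registration fails with -EBUSY until the first task is
 * removed.  The callback and data below are hypothetical.
 *
 *	static struct rtc_task foo_task = {
 *		.func		= foo_rtc_callback,
 *		.private_data	= &foo_data,
 *	};
 *
 *	err = rtc_irq_register(class_dev, &foo_task);
 *	...
 *	rtc_irq_unregister(class_dev, &foo_task);
 */
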
int rtc_irq_set_state(struct class_device *class_dev, struct rtc_task *task, int enabled)
{
	int err = 0;
	unsigned long flags;
	struct rtc_device *rtc = to_rtc_device(class_dev);

	if (rtc->ops->irq_set_state == NULL)
		return -ENXIO;

	spin_lock_irqsave(&rtc->irq_task_lock, flags);
	if (rtc->irq_task != task)
		err = -ENXIO;
	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);

	if (err == 0)
		err = rtc->ops->irq_set_state(class_dev->dev, enabled);

	return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_state);

int rtc_irq_set_freq(struct class_device *class_dev, struct rtc_task *task, int freq)
{
	int err = 0;
	unsigned long flags;
	struct rtc_device *rtc = to_rtc_device(class_dev);

	if (rtc->ops->irq_set_freq == NULL)
		return -ENXIO;

	spin_lock_irqsave(&rtc->irq_task_lock, flags);
	if (rtc->irq_task != task)
		err = -ENXIO;
	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);

	if (err == 0) {
		err = rtc->ops->irq_set_freq(class_dev->dev, freq);
		if (err == 0)
			rtc->irq_freq = freq;
	}
	return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
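
/*
 * Example use of rtc_irq_set_freq()/rtc_irq_set_state() above (illustrative
 * sketch): a task registered with rtc_irq_register() can request periodic
 * interrupts, provided the underlying driver implements irq_set_freq and
 * irq_set_state.  The 8 Hz rate is arbitrary and "foo_task" is the
 * hypothetical task from the rtc_irq_register() example.
 *
 *	err = rtc_irq_set_freq(class_dev, &foo_task, 8);
 *	if (err == 0)
 *		err = rtc_irq_set_state(class_dev, &foo_task, 1);
 *	...
 *	rtc_irq_set_state(class_dev, &foo_task, 0);
 */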
277