/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/* Protection for unbind operations */
static DEFINE_MUTEX(clockevents_mutex);

struct ce_unbind {
	struct clock_event_device *ce;
	int res;
};

static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
			bool ismax)
{
	u64 clc = (u64) latch << evt->shift;
	u64 rnd;

	if (unlikely(!evt->mult)) {
		evt->mult = 1;
		WARN_ON(1);
	}
	rnd = (u64) evt->mult - 1;

	/*
	 * Upper bound sanity check. If the backwards conversion does
	 * not equal latch, we know that the above shift overflowed.
	 */
	if ((clc >> evt->shift) != (u64)latch)
		clc = ~0ULL;

	/*
	 * Scaled math oddities:
	 *
	 * For mult <= (1 << shift) we can safely add mult - 1 to
	 * prevent integer rounding loss. So the backwards conversion
	 * from nsec to device ticks will be correct.
	 *
	 * For mult > (1 << shift), i.e. device frequency is > 1GHz we
	 * need to be careful. Adding mult - 1 will result in a value
	 * which when converted back to device ticks can be larger
	 * than latch by up to (mult - 1) >> shift. For the min_delta
	 * calculation we still want to apply this in order to stay
	 * above the minimum device ticks limit. For the upper limit
	 * we would end up with a latch value larger than the upper
	 * limit of the device, so we omit the add to stay below the
	 * device upper boundary.
	 *
	 * Also omit the add if it would overflow the u64 boundary.
	 */
	if ((~0ULL - clc > rnd) &&
	    (!ismax || evt->mult <= (1ULL << evt->shift)))
		clc += rnd;

	do_div(clc, evt->mult);

	/* Deltas less than 1usec are pointless noise */
	return clc > 1000 ? clc : 1000;
}

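/*
 * Worked example of the conversion above (illustrative numbers, not
 * from any particular device): assume evt->mult = 3 and
 * evt->shift = 1, i.e. roughly a 1.5GHz device. For latch = 10 and
 * ismax = true:
 *
 *	clc = 10 << 1 = 20;	// latch scaled up by shift
 *	rnd = 3 - 1 = 2;	// rounding term, mult - 1
 *	// mult (3) > 1 << shift (2) and ismax is true, so the
 *	// rounding term is NOT added, keeping the result below the
 *	// device upper boundary.
 *	do_div(clc, 3);		// clc = 6 ns
 *	return 1000;		// clamped: deltas < 1usec are noise
 */
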
/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch:	value to convert
 * @evt:	pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bounds checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
	return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);

static int __clockevents_set_state(struct clock_event_device *dev,
				   enum clock_event_state state)
{
	/* Transition with legacy set_mode() callback */
	if (dev->set_mode) {
		/* Legacy callback doesn't support new modes */
		if (state > CLOCK_EVT_STATE_ONESHOT)
			return -ENOSYS;
		/*
		 * 'clock_event_state' and 'clock_event_mode' have 1-to-1
		 * mapping until *_ONESHOT, and so a simple cast will work.
		 */
		dev->set_mode((enum clock_event_mode)state, dev);
		dev->mode = (enum clock_event_mode)state;
		return 0;
	}

	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
		return 0;

	/* Transition with new state-specific callbacks */
	switch (state) {
	case CLOCK_EVT_STATE_DETACHED:
		/*
		 * This is an internal state, which is guaranteed to go from
		 * SHUTDOWN to DETACHED. No driver interaction required.
		 */
		return 0;

	case CLOCK_EVT_STATE_SHUTDOWN:
		return dev->set_state_shutdown(dev);

	case CLOCK_EVT_STATE_PERIODIC:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
			return -ENOSYS;
		return dev->set_state_periodic(dev);

	case CLOCK_EVT_STATE_ONESHOT:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
			return -ENOSYS;
		return dev->set_state_oneshot(dev);

	default:
		return -ENOSYS;
	}
}

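/*
 * Minimal sketch of the state-specific callbacks a driver would hand
 * to the core (hypothetical device; the foo_* names and register
 * macros are invented for illustration):
 *
 *	static int foo_timer_shutdown(struct clock_event_device *ced)
 *	{
 *		writel(0, FOO_TIMER_CTRL);		// stop the counter
 *		return 0;
 *	}
 *
 *	static int foo_timer_set_oneshot(struct clock_event_device *ced)
 *	{
 *		writel(FOO_ONESHOT_EN, FOO_TIMER_CTRL);	// arm one-shot mode
 *		return 0;
 *	}
 *
 * The core invokes these through __clockevents_set_state() above; a
 * non-zero return value aborts the state change.
 */
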
/**
 * clockevents_set_state - set the operating state of a clock event device
 * @dev:	device to modify
 * @state:	new state
 *
 * Must be called with interrupts disabled!
 */
void clockevents_set_state(struct clock_event_device *dev,
			   enum clock_event_state state)
{
	if (dev->state != state) {
		if (__clockevents_set_state(dev, state))
			return;

		dev->state = state;

		/*
		 * A nsec2cyc multiplier of 0 is invalid and we'd crash
		 * on it, so fix it up and emit a warning:
		 */
		if (state == CLOCK_EVT_STATE_ONESHOT) {
			if (unlikely(!dev->mult)) {
				dev->mult = 1;
				WARN_ON(1);
			}
		}
	}
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev:	device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
	clockevents_set_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
	dev->next_event.tv64 = KTIME_MAX;
}

/**
 * clockevents_tick_resume - Resume the tick device before using it again
 * @dev:	device to resume
 */
int clockevents_tick_resume(struct clock_event_device *dev)
{
	int ret = 0;

	if (dev->set_mode) {
		dev->set_mode(CLOCK_EVT_MODE_RESUME, dev);
		dev->mode = CLOCK_EVT_MODE_RESUME;
	} else if (dev->tick_resume) {
		ret = dev->tick_resume(dev);
	}

	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffy */
#define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev:	device to increase the minimum delta
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
	/* Nothing to do if we already reached the limit */
	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
		printk_deferred(KERN_WARNING
				"CE: Reprogramming failure. Giving up\n");
		dev->next_event.tv64 = KTIME_MAX;
		return -ETIME;
	}

	if (dev->min_delta_ns < 5000)
		dev->min_delta_ns = 5000;
	else
		dev->min_delta_ns += dev->min_delta_ns >> 1;

	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
		dev->min_delta_ns = MIN_DELTA_LIMIT;

	printk_deferred(KERN_WARNING
			"CE: %s increased min_delta_ns to %llu nsec\n",
			dev->name ? dev->name : "?",
			(unsigned long long) dev->min_delta_ns);
	return 0;
}

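/*
 * Example progression (illustrative, assuming HZ = 100 so
 * MIN_DELTA_LIMIT is 10ms): starting from min_delta_ns = 1000,
 * repeated calls yield
 *
 *	1000 -> 5000 -> 7500 -> 11250 -> ... -> 10000000 (capped)
 *
 * i.e. a jump to the 5usec floor, then a 1.5x backoff per call until
 * the one-jiffy limit is hit; the call after that returns -ETIME.
 */
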
/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;
	int i;

	for (i = 0;;) {
		delta = dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), delta);

		if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
			return 0;

		dev->retries++;
		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
		if (dev->set_next_event((unsigned long) clc, dev) == 0)
			return 0;

		if (++i > 2) {
			/*
			 * We tried 3 times to program the device with the
			 * given min_delta_ns. Try to increase the minimum
			 * delta; if that fails as well, get out of here.
			 */
			if (clockevents_increase_min_delta(dev))
				return -ETIME;
			i = 0;
		}
	}
}

#else  /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;

	delta = dev->min_delta_ns;
	dev->next_event = ktime_add_ns(ktime_get(), delta);

	if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
		return 0;

	dev->retries++;
	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	return dev->set_next_event((unsigned long) clc, dev);
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev:	device to program
 * @expires:	absolute expiry time (monotonic clock)
 * @force:	program minimum delay if expires cannot be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
			      bool force)
{
	unsigned long long clc;
	int64_t delta;
	int rc;

	if (unlikely(expires.tv64 < 0)) {
		WARN_ON_ONCE(1);
		return -ETIME;
	}

	dev->next_event = expires;

	if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
		return 0;

	/* Shortcut for clockevent devices that can deal with ktime. */
	if (dev->features & CLOCK_EVT_FEAT_KTIME)
		return dev->set_next_ktime(expires, dev);

	delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
	if (delta <= 0)
		return force ? clockevents_program_min_delta(dev) : -ETIME;

	delta = min(delta, (int64_t) dev->max_delta_ns);
	delta = max(delta, (int64_t) dev->min_delta_ns);

	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	rc = dev->set_next_event((unsigned long) clc, dev);

	return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}

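/*
 * Worked example of the ns -> device-ticks conversion above
 * (illustrative values): for a device configured with
 * mult = 4294967 and shift = 32, i.e. roughly a 1MHz timer, a 1ms
 * delta becomes
 *
 *	clc = (1000000 * 4294967) >> 32	// ~1000 ticks (999 after
 *					// integer truncation)
 *
 * which is what set_next_event() receives. The min()/max() clamps
 * keep clc within the range the hardware counter can hold.
 */
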
/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
	struct clock_event_device *dev;

	while (!list_empty(&clockevents_released)) {
		dev = list_entry(clockevents_released.next,
				 struct clock_event_device, list);
		list_del(&dev->list);
		list_add(&dev->list, &clockevent_devices);
		tick_check_new_device(dev);
	}
}

/*
 * Try to install a replacement clock event device
 */
static int clockevents_replace(struct clock_event_device *ced)
{
	struct clock_event_device *dev, *newdev = NULL;

	list_for_each_entry(dev, &clockevent_devices, list) {
		if (dev == ced || dev->state != CLOCK_EVT_STATE_DETACHED)
			continue;

		if (!tick_check_replacement(newdev, dev))
			continue;

		if (!try_module_get(dev->owner))
			continue;

		if (newdev)
			module_put(newdev->owner);
		newdev = dev;
	}
	if (newdev) {
		tick_install_replacement(newdev);
		list_del_init(&ced->list);
	}
	return newdev ? 0 : -EBUSY;
}

/*
 * Called with clockevents_mutex and clockevents_lock held
 */
static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
{
	/* Fast track. Device is unused */
	if (ced->state == CLOCK_EVT_STATE_DETACHED) {
		list_del_init(&ced->list);
		return 0;
	}

	return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
}

/*
 * SMP function call to unbind a device
 */
static void __clockevents_unbind(void *arg)
{
	struct ce_unbind *cu = arg;
	int res;

	raw_spin_lock(&clockevents_lock);
	res = __clockevents_try_unbind(cu->ce, smp_processor_id());
	if (res == -EAGAIN)
		res = clockevents_replace(cu->ce);
	cu->res = res;
	raw_spin_unlock(&clockevents_lock);
}

/*
 * Issues smp function call to unbind a per cpu device. Called with
 * clockevents_mutex held.
 */
static int clockevents_unbind(struct clock_event_device *ced, int cpu)
{
	struct ce_unbind cu = { .ce = ced, .res = -ENODEV };

	smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
	return cu.res;
}

/*
 * Unbind a clockevents device.
 */
int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
{
	int ret;

	mutex_lock(&clockevents_mutex);
	ret = clockevents_unbind(ced, cpu);
	mutex_unlock(&clockevents_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(clockevents_unbind_device);

/* Sanity check of state transition callbacks */
static int clockevents_sanity_check(struct clock_event_device *dev)
{
	/* Legacy set_mode() callback */
	if (dev->set_mode) {
		/* We shouldn't be supporting new modes now */
		WARN_ON(dev->set_state_periodic || dev->set_state_oneshot ||
			dev->set_state_shutdown || dev->tick_resume);

		BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
		return 0;
	}

	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
		return 0;

	/* New state-specific callbacks */
	if (!dev->set_state_shutdown)
		return -EINVAL;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !dev->set_state_periodic)
		return -EINVAL;

	if ((dev->features & CLOCK_EVT_FEAT_ONESHOT) &&
	    !dev->set_state_oneshot)
		return -EINVAL;

	return 0;
}

/**
 * clockevents_register_device - register a clock event device
 * @dev:	device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	BUG_ON(clockevents_sanity_check(dev));

	/* Initialize state to DETACHED */
	dev->state = CLOCK_EVT_STATE_DETACHED;

	if (!dev->cpumask) {
		WARN_ON(num_possible_cpus() > 1);
		dev->cpumask = cpumask_of(smp_processor_id());
	}

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	tick_check_new_device(dev);
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

void clockevents_config(struct clock_event_device *dev, u32 freq)
{
	u64 sec;

	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return;

	/*
	 * Calculate the maximum number of seconds we can sleep. Limit
	 * to 10 minutes for hardware which can program more than
	 * 32bit ticks so we still get reasonable conversion values.
	 */
	sec = dev->max_delta_ticks;
	do_div(sec, freq);
	if (!sec)
		sec = 1;
	else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
		sec = 600;

	clockevents_calc_mult_shift(dev, freq, sec);
	dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
	dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}

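/*
 * Example (illustrative numbers): a 24MHz timer with a 32bit counter
 * has max_delta_ticks = 0xffffffff, so sec = 0xffffffff / 24000000,
 * i.e. ~178 seconds of maximum sleep. clockevents_calc_mult_shift()
 * then picks mult/shift such that ns * mult >> shift approximates
 * ns * 24e6 / 1e9 without overflowing u64 across that range, and the
 * min/max_delta_ns bounds are derived from the tick limits via
 * cev_delta2ns().
 */
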
/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev:	device to register
 * @freq:	The clock frequency
 * @min_delta:	The minimum clock ticks to program in oneshot mode
 * @max_delta:	The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
				     u32 freq, unsigned long min_delta,
				     unsigned long max_delta)
{
	dev->min_delta_ticks = min_delta;
	dev->max_delta_ticks = max_delta;
	clockevents_config(dev, freq);
	clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);

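/*
 * Typical driver usage, sketched with a hypothetical device (the
 * foo_* names and the 24MHz/32bit parameters are invented for
 * illustration):
 *
 *	static struct clock_event_device foo_clockevent = {
 *		.name			= "foo-timer",
 *		.features		= CLOCK_EVT_FEAT_ONESHOT,
 *		.set_state_shutdown	= foo_timer_shutdown,
 *		.set_state_oneshot	= foo_timer_set_oneshot,
 *		.set_next_event		= foo_timer_set_next_event,
 *		.cpumask		= cpu_possible_mask,
 *	};
 *
 *	clockevents_config_and_register(&foo_clockevent, 24000000,
 *					0xf, 0xffffffff);
 *
 * i.e. configure for a 24MHz clock with a minimum program distance of
 * 15 ticks and a 32bit wide counter, then register.
 */
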
int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	clockevents_config(dev, freq);

	if (dev->state == CLOCK_EVT_STATE_ONESHOT)
		return clockevents_program_event(dev, dev->next_event, false);

	if (dev->state == CLOCK_EVT_STATE_PERIODIC)
		return __clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC);

	return 0;
}

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev:	device to modify
 * @freq:	new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events. If called for the broadcast device the core takes
 * care of serialization.
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = tick_broadcast_update_freq(dev, freq);
	if (ret == -ENODEV)
		ret = __clockevents_update_freq(dev, freq);
	local_irq_restore(flags);
	return ret;
}

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old:	device to release (can be NULL)
 * @new:	device to request (can be NULL)
 *
 * Called from the notifier chain. clockevents_lock is held already
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	unsigned long flags;

	local_irq_save(flags);
	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		module_put(old->owner);
		clockevents_set_state(old, CLOCK_EVT_STATE_DETACHED);
		list_del(&old->list);
		list_add(&old->list, &clockevents_released);
	}

	if (new) {
		BUG_ON(new->state != CLOCK_EVT_STATE_DETACHED);
		clockevents_shutdown(new);
	}
	local_irq_restore(flags);
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
	struct clock_event_device *dev;

	list_for_each_entry_reverse(dev, &clockevent_devices, list)
		if (dev->suspend)
			dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
	struct clock_event_device *dev;

	list_for_each_entry(dev, &clockevent_devices, list)
		if (dev->resume)
			dev->resume(dev);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS
/**
 * clockevents_notify - notification about relevant events
 * Returns 0 on success, any other value on error
 */
int clockevents_notify(unsigned long reason, void *arg)
{
	struct clock_event_device *dev, *tmp;
	unsigned long flags;
	int cpu, ret = 0;

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	switch (reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		tick_broadcast_on_off(reason, arg);
		break;

	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
		ret = tick_broadcast_oneshot_control(reason);
		break;

	case CLOCK_EVT_NOTIFY_CPU_DYING:
		tick_handover_do_timer(arg);
		break;

	case CLOCK_EVT_NOTIFY_SUSPEND:
		tick_suspend();
		tick_suspend_broadcast();
		break;

	case CLOCK_EVT_NOTIFY_RESUME:
		tick_resume();
		break;

	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		tick_shutdown_broadcast_oneshot(arg);
		tick_shutdown_broadcast(arg);
		tick_shutdown(arg);
		/*
		 * Unregister the clock event devices which were
		 * released from the users in the notify chain.
		 */
		list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
			list_del(&dev->list);
		/*
		 * Now check whether the CPU has left unused per cpu devices
		 */
		cpu = *((int *)arg);
		list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
			if (cpumask_test_cpu(cpu, dev->cpumask) &&
			    cpumask_weight(dev->cpumask) == 1 &&
			    !tick_is_broadcast_device(dev)) {
				BUG_ON(dev->state != CLOCK_EVT_STATE_DETACHED);
				list_del(&dev->list);
			}
		}
		break;
	default:
		break;
	}
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(clockevents_notify);

#ifdef CONFIG_SYSFS
struct bus_type clockevents_subsys = {
	.name		= "clockevents",
	.dev_name	= "clockevent",
};

static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);

static ssize_t sysfs_show_current_tick_dev(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct tick_device *td;
	ssize_t count = 0;

	raw_spin_lock_irq(&clockevents_lock);
	td = tick_get_tick_dev(dev);
	if (td && td->evtdev)
		count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name);
	raw_spin_unlock_irq(&clockevents_lock);
	return count;
}
static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL);

/* We don't support the abomination of removable broadcast devices */
static ssize_t sysfs_unbind_tick_dev(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	char name[CS_NAME_LEN];
	ssize_t ret = sysfs_get_uname(buf, name, count);
	struct clock_event_device *ce;

	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clockevents_mutex);
	raw_spin_lock_irq(&clockevents_lock);
	list_for_each_entry(ce, &clockevent_devices, list) {
		if (!strcmp(ce->name, name)) {
			ret = __clockevents_try_unbind(ce, dev->id);
			break;
		}
	}
	raw_spin_unlock_irq(&clockevents_lock);
	/*
	 * We hold clockevents_mutex, so ce can't go away
	 */
	if (ret == -EAGAIN)
		ret = clockevents_unbind(ce, dev->id);
	mutex_unlock(&clockevents_mutex);
	return ret ? ret : count;
}
static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev);

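/*
 * Userspace view of the attributes above (a sketch; "foo-timer" is a
 * made-up device name and the exact sysfs paths depend on how
 * subsys_system_register() lays out the hierarchy):
 *
 *	# cat /sys/devices/system/clockevents/clockevent0/current_device
 *	foo-timer
 *	# echo foo-timer > \
 *		/sys/devices/system/clockevents/clockevent0/unbind_device
 *
 * The write unbinds the named device from CPU0, provided a suitable
 * replacement exists; otherwise it fails with -EBUSY.
 */
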
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = {
	.init_name	= "broadcast",
	.id		= 0,
	.bus		= &clockevents_subsys,
};

static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return dev == &tick_bc_dev ? tick_get_broadcast_device() :
		&per_cpu(tick_cpu_device, dev->id);
}

static __init int tick_broadcast_init_sysfs(void)
{
	int err = device_register(&tick_bc_dev);

	if (!err)
		err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
	return err;
}
#else
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif

static int __init tick_init_sysfs(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct device *dev = &per_cpu(tick_percpu_dev, cpu);
		int err;

		dev->id = cpu;
		dev->bus = &clockevents_subsys;
		err = device_register(dev);
		if (!err)
			err = device_create_file(dev, &dev_attr_current_device);
		if (!err)
			err = device_create_file(dev, &dev_attr_unbind_device);
		if (err)
			return err;
	}
	return tick_broadcast_init_sysfs();
}

static int __init clockevents_init_sysfs(void)
{
	int err = subsys_system_register(&clockevents_subsys, NULL);

	if (!err)
		err = tick_init_sysfs();
	return err;
}
device_initcall(clockevents_init_sysfs);
#endif /* SYSFS */

#endif /* GENERIC_CLOCK_EVENTS */
836