xref: /openbmc/linux/kernel/time/clockevents.c (revision 03e13cf5)
1d316c57fSThomas Gleixner /*
2d316c57fSThomas Gleixner  * linux/kernel/time/clockevents.c
3d316c57fSThomas Gleixner  *
4d316c57fSThomas Gleixner  * This file contains functions which manage clock event devices.
5d316c57fSThomas Gleixner  *
6d316c57fSThomas Gleixner  * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
7d316c57fSThomas Gleixner  * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
8d316c57fSThomas Gleixner  * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
9d316c57fSThomas Gleixner  *
10d316c57fSThomas Gleixner  * This code is licenced under the GPL version 2. For details see
11d316c57fSThomas Gleixner  * kernel-base/COPYING.
12d316c57fSThomas Gleixner  */
13d316c57fSThomas Gleixner 
14d316c57fSThomas Gleixner #include <linux/clockchips.h>
15d316c57fSThomas Gleixner #include <linux/hrtimer.h>
16d316c57fSThomas Gleixner #include <linux/init.h>
17d316c57fSThomas Gleixner #include <linux/module.h>
18d316c57fSThomas Gleixner #include <linux/smp.h>
19501f8670SThomas Gleixner #include <linux/device.h>
20d316c57fSThomas Gleixner 
218e1a928aSH Hartley Sweeten #include "tick-internal.h"
228e1a928aSH Hartley Sweeten 
23d316c57fSThomas Gleixner /* The registered clock event devices */
24d316c57fSThomas Gleixner static LIST_HEAD(clockevent_devices);
25d316c57fSThomas Gleixner static LIST_HEAD(clockevents_released);
26d316c57fSThomas Gleixner /* Protection for the above */
27b5f91da0SThomas Gleixner static DEFINE_RAW_SPINLOCK(clockevents_lock);
2803e13cf5SThomas Gleixner /* Protection for unbind operations */
2903e13cf5SThomas Gleixner static DEFINE_MUTEX(clockevents_mutex);
3003e13cf5SThomas Gleixner 
/* Argument bundle for the cross-CPU unbind call: device in, result out */
3103e13cf5SThomas Gleixner struct ce_unbind {
3203e13cf5SThomas Gleixner 	struct clock_event_device *ce;
3303e13cf5SThomas Gleixner 	int res;
3403e13cf5SThomas Gleixner };
35d316c57fSThomas Gleixner 
36d316c57fSThomas Gleixner /**
37d316c57fSThomas Gleixner  * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
38d316c57fSThomas Gleixner  * @latch:	value to convert
39d316c57fSThomas Gleixner  * @evt:	pointer to clock event device descriptor
40d316c57fSThomas Gleixner  *
41d316c57fSThomas Gleixner  * Math helper, returns latch value converted to nanoseconds (bound checked)
42d316c57fSThomas Gleixner  */
4397813f2fSJon Hunter u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
44d316c57fSThomas Gleixner {
4597813f2fSJon Hunter 	u64 clc = (u64) latch << evt->shift;
46d316c57fSThomas Gleixner 
	/* A mult of 0 would make do_div() crash; patch it up and warn */
4745fe4fe1SIngo Molnar 	if (unlikely(!evt->mult)) {
4845fe4fe1SIngo Molnar 		evt->mult = 1;
4945fe4fe1SIngo Molnar 		WARN_ON(1);
5045fe4fe1SIngo Molnar 	}
5145fe4fe1SIngo Molnar 
52d316c57fSThomas Gleixner 	do_div(clc, evt->mult);
	/* Clamp the result: at least 1usec, at most KTIME_MAX */
53d316c57fSThomas Gleixner 	if (clc < 1000)
54d316c57fSThomas Gleixner 		clc = 1000;
5597813f2fSJon Hunter 	if (clc > KTIME_MAX)
5697813f2fSJon Hunter 		clc = KTIME_MAX;
57d316c57fSThomas Gleixner 
5897813f2fSJon Hunter 	return clc;
59d316c57fSThomas Gleixner }
60c81fc2c3SMagnus Damm EXPORT_SYMBOL_GPL(clockevent_delta2ns);
61d316c57fSThomas Gleixner 
62d316c57fSThomas Gleixner /**
63d316c57fSThomas Gleixner  * clockevents_set_mode - set the operating mode of a clock event device
64d316c57fSThomas Gleixner  * @dev:	device to modify
65d316c57fSThomas Gleixner  * @mode:	new mode
66d316c57fSThomas Gleixner  *
67d316c57fSThomas Gleixner  * Must be called with interrupts disabled !
68d316c57fSThomas Gleixner  */
69d316c57fSThomas Gleixner void clockevents_set_mode(struct clock_event_device *dev,
70d316c57fSThomas Gleixner 				 enum clock_event_mode mode)
71d316c57fSThomas Gleixner {
	/* Only call into the device when the mode actually changes */
72d316c57fSThomas Gleixner 	if (dev->mode != mode) {
73d316c57fSThomas Gleixner 		dev->set_mode(mode, dev);
74d316c57fSThomas Gleixner 		dev->mode = mode;
752d68259dSMagnus Damm 
762d68259dSMagnus Damm 		/*
772d68259dSMagnus Damm 		 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
782d68259dSMagnus Damm 		 * on it, so fix it up and emit a warning:
792d68259dSMagnus Damm 		 */
802d68259dSMagnus Damm 		if (mode == CLOCK_EVT_MODE_ONESHOT) {
812d68259dSMagnus Damm 			if (unlikely(!dev->mult)) {
822d68259dSMagnus Damm 				dev->mult = 1;
832d68259dSMagnus Damm 				WARN_ON(1);
842d68259dSMagnus Damm 			}
852d68259dSMagnus Damm 		}
86d316c57fSThomas Gleixner 	}
87d316c57fSThomas Gleixner }
88d316c57fSThomas Gleixner 
89d316c57fSThomas Gleixner /**
902344abbcSThomas Gleixner  * clockevents_shutdown - shutdown the device and clear next_event
912344abbcSThomas Gleixner  * @dev:	device to shutdown
922344abbcSThomas Gleixner  */
932344abbcSThomas Gleixner void clockevents_shutdown(struct clock_event_device *dev)
942344abbcSThomas Gleixner {
952344abbcSThomas Gleixner 	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
	/* KTIME_MAX in next_event means "no event pending" */
962344abbcSThomas Gleixner 	dev->next_event.tv64 = KTIME_MAX;
972344abbcSThomas Gleixner }
982344abbcSThomas Gleixner 
99d1748302SMartin Schwidefsky #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
100d1748302SMartin Schwidefsky 
101d1748302SMartin Schwidefsky /* Limit min_delta to a jiffie */
102d1748302SMartin Schwidefsky #define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)
103d1748302SMartin Schwidefsky 
104d1748302SMartin Schwidefsky /**
105d1748302SMartin Schwidefsky  * clockevents_increase_min_delta - raise minimum delta of a clock event device
106d1748302SMartin Schwidefsky  * @dev:       device to increase the minimum delta
107d1748302SMartin Schwidefsky  *
108d1748302SMartin Schwidefsky  * Returns 0 on success, -ETIME when the minimum delta reached the limit.
109d1748302SMartin Schwidefsky  */
110d1748302SMartin Schwidefsky static int clockevents_increase_min_delta(struct clock_event_device *dev)
111d1748302SMartin Schwidefsky {
112d1748302SMartin Schwidefsky 	/* Nothing to do if we already reached the limit */
113d1748302SMartin Schwidefsky 	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
114d1748302SMartin Schwidefsky 		printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n");
115d1748302SMartin Schwidefsky 		dev->next_event.tv64 = KTIME_MAX;
116d1748302SMartin Schwidefsky 		return -ETIME;
117d1748302SMartin Schwidefsky 	}
118d1748302SMartin Schwidefsky 
	/* Grow by 50% per call, starting from a 5 usec floor */
119d1748302SMartin Schwidefsky 	if (dev->min_delta_ns < 5000)
120d1748302SMartin Schwidefsky 		dev->min_delta_ns = 5000;
121d1748302SMartin Schwidefsky 	else
122d1748302SMartin Schwidefsky 		dev->min_delta_ns += dev->min_delta_ns >> 1;
123d1748302SMartin Schwidefsky 
124d1748302SMartin Schwidefsky 	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
125d1748302SMartin Schwidefsky 		dev->min_delta_ns = MIN_DELTA_LIMIT;
126d1748302SMartin Schwidefsky 
127d1748302SMartin Schwidefsky 	printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
128d1748302SMartin Schwidefsky 	       dev->name ? dev->name : "?",
129d1748302SMartin Schwidefsky 	       (unsigned long long) dev->min_delta_ns);
130d1748302SMartin Schwidefsky 	return 0;
131d1748302SMartin Schwidefsky }
132d1748302SMartin Schwidefsky 
133d1748302SMartin Schwidefsky /**
134d1748302SMartin Schwidefsky  * clockevents_program_min_delta - Set clock event device to the minimum delay.
135d1748302SMartin Schwidefsky  * @dev:	device to program
136d1748302SMartin Schwidefsky  *
137d1748302SMartin Schwidefsky  * Returns 0 on success, -ETIME when the retry loop failed.
138d1748302SMartin Schwidefsky  */
139d1748302SMartin Schwidefsky static int clockevents_program_min_delta(struct clock_event_device *dev)
140d1748302SMartin Schwidefsky {
141d1748302SMartin Schwidefsky 	unsigned long long clc;
142d1748302SMartin Schwidefsky 	int64_t delta;
143d1748302SMartin Schwidefsky 	int i;
144d1748302SMartin Schwidefsky 
	/* Keep retrying until programming succeeds or min_delta hits the limit */
145d1748302SMartin Schwidefsky 	for (i = 0;;) {
146d1748302SMartin Schwidefsky 		delta = dev->min_delta_ns;
147d1748302SMartin Schwidefsky 		dev->next_event = ktime_add_ns(ktime_get(), delta);
148d1748302SMartin Schwidefsky 
149d1748302SMartin Schwidefsky 		if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
150d1748302SMartin Schwidefsky 			return 0;
151d1748302SMartin Schwidefsky 
152d1748302SMartin Schwidefsky 		dev->retries++;
153d1748302SMartin Schwidefsky 		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
154d1748302SMartin Schwidefsky 		if (dev->set_next_event((unsigned long) clc, dev) == 0)
155d1748302SMartin Schwidefsky 			return 0;
156d1748302SMartin Schwidefsky 
157d1748302SMartin Schwidefsky 		if (++i > 2) {
158d1748302SMartin Schwidefsky 			/*
159d1748302SMartin Schwidefsky 			 * We tried 3 times to program the device with the
160d1748302SMartin Schwidefsky 			 * given min_delta_ns. Try to increase the minimum
161d1748302SMartin Schwidefsky 			 * delta, if that fails as well get out of here.
162d1748302SMartin Schwidefsky 			 */
163d1748302SMartin Schwidefsky 			if (clockevents_increase_min_delta(dev))
164d1748302SMartin Schwidefsky 				return -ETIME;
165d1748302SMartin Schwidefsky 			i = 0;
166d1748302SMartin Schwidefsky 		}
167d1748302SMartin Schwidefsky 	}
168d1748302SMartin Schwidefsky }
169d1748302SMartin Schwidefsky 
170d1748302SMartin Schwidefsky #else  /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */
171d1748302SMartin Schwidefsky 
172d1748302SMartin Schwidefsky /**
173d1748302SMartin Schwidefsky  * clockevents_program_min_delta - Set clock event device to the minimum delay.
174d1748302SMartin Schwidefsky  * @dev:	device to program
175d1748302SMartin Schwidefsky  *
176d1748302SMartin Schwidefsky  * Returns 0 on success, -ETIME when the retry loop failed.
177d1748302SMartin Schwidefsky  */
178d1748302SMartin Schwidefsky static int clockevents_program_min_delta(struct clock_event_device *dev)
179d1748302SMartin Schwidefsky {
180d1748302SMartin Schwidefsky 	unsigned long long clc;
181d1748302SMartin Schwidefsky 	int64_t delta;
182d1748302SMartin Schwidefsky 
	/* Single attempt; without MIN_ADJUST a failure is returned as-is */
183d1748302SMartin Schwidefsky 	delta = dev->min_delta_ns;
184d1748302SMartin Schwidefsky 	dev->next_event = ktime_add_ns(ktime_get(), delta);
185d1748302SMartin Schwidefsky 
186d1748302SMartin Schwidefsky 	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
187d1748302SMartin Schwidefsky 		return 0;
188d1748302SMartin Schwidefsky 
189d1748302SMartin Schwidefsky 	dev->retries++;
190d1748302SMartin Schwidefsky 	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
191d1748302SMartin Schwidefsky 	return dev->set_next_event((unsigned long) clc, dev);
192d1748302SMartin Schwidefsky }
193d1748302SMartin Schwidefsky 
194d1748302SMartin Schwidefsky #endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */
195d1748302SMartin Schwidefsky 
1962344abbcSThomas Gleixner /**
197d316c57fSThomas Gleixner  * clockevents_program_event - Reprogram the clock event device.
198d1748302SMartin Schwidefsky  * @dev:	device to program
199d316c57fSThomas Gleixner  * @expires:	absolute expiry time (monotonic clock)
200d1748302SMartin Schwidefsky  * @force:	program minimum delay if expires can not be set
201d316c57fSThomas Gleixner  *
202d316c57fSThomas Gleixner  * Returns 0 on success, -ETIME when the event is in the past.
203d316c57fSThomas Gleixner  */
204d316c57fSThomas Gleixner int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
205d1748302SMartin Schwidefsky 			      bool force)
206d316c57fSThomas Gleixner {
207d316c57fSThomas Gleixner 	unsigned long long clc;
208d316c57fSThomas Gleixner 	int64_t delta;
209d1748302SMartin Schwidefsky 	int rc;
210d316c57fSThomas Gleixner 
	/* Negative expiry times are a caller bug */
211167b1de3SThomas Gleixner 	if (unlikely(expires.tv64 < 0)) {
212167b1de3SThomas Gleixner 		WARN_ON_ONCE(1);
213167b1de3SThomas Gleixner 		return -ETIME;
214167b1de3SThomas Gleixner 	}
215167b1de3SThomas Gleixner 
216d316c57fSThomas Gleixner 	dev->next_event = expires;
217d316c57fSThomas Gleixner 
218d316c57fSThomas Gleixner 	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
219d316c57fSThomas Gleixner 		return 0;
220d316c57fSThomas Gleixner 
22165516f8aSMartin Schwidefsky 	/* Shortcut for clockevent devices that can deal with ktime. */
22265516f8aSMartin Schwidefsky 	if (dev->features & CLOCK_EVT_FEAT_KTIME)
22365516f8aSMartin Schwidefsky 		return dev->set_next_ktime(expires, dev);
22465516f8aSMartin Schwidefsky 
	/* Event already in the past: fail or fall back to the minimum delta */
225d1748302SMartin Schwidefsky 	delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
226d1748302SMartin Schwidefsky 	if (delta <= 0)
227d1748302SMartin Schwidefsky 		return force ? clockevents_program_min_delta(dev) : -ETIME;
228d316c57fSThomas Gleixner 
	/* Clamp the delta into the device's programmable range */
229d1748302SMartin Schwidefsky 	delta = min(delta, (int64_t) dev->max_delta_ns);
230d1748302SMartin Schwidefsky 	delta = max(delta, (int64_t) dev->min_delta_ns);
231d316c57fSThomas Gleixner 
232d1748302SMartin Schwidefsky 	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
233d1748302SMartin Schwidefsky 	rc = dev->set_next_event((unsigned long) clc, dev);
234d1748302SMartin Schwidefsky 
235d1748302SMartin Schwidefsky 	return (rc && force) ? clockevents_program_min_delta(dev) : rc;
236d316c57fSThomas Gleixner }
237d316c57fSThomas Gleixner 
238d316c57fSThomas Gleixner /*
2393eb05676SLi Zefan  * Called after a notify add to make devices available which were
240d316c57fSThomas Gleixner  * released from the notifier call.
241d316c57fSThomas Gleixner  */
242d316c57fSThomas Gleixner static void clockevents_notify_released(void)
243d316c57fSThomas Gleixner {
244d316c57fSThomas Gleixner 	struct clock_event_device *dev;
245d316c57fSThomas Gleixner 
246d316c57fSThomas Gleixner 	while (!list_empty(&clockevents_released)) {
247d316c57fSThomas Gleixner 		dev = list_entry(clockevents_released.next,
248d316c57fSThomas Gleixner 				 struct clock_event_device, list);
249d316c57fSThomas Gleixner 		list_del(&dev->list);
250d316c57fSThomas Gleixner 		list_add(&dev->list, &clockevent_devices);
		/* Re-run tick device selection for the returned device */
2517172a286SThomas Gleixner 		tick_check_new_device(dev);
252d316c57fSThomas Gleixner 	}
253d316c57fSThomas Gleixner }
254d316c57fSThomas Gleixner 
25503e13cf5SThomas Gleixner /*
25603e13cf5SThomas Gleixner  * Try to install a replacement clock event device
25703e13cf5SThomas Gleixner  */
25803e13cf5SThomas Gleixner static int clockevents_replace(struct clock_event_device *ced)
25903e13cf5SThomas Gleixner {
26003e13cf5SThomas Gleixner 	struct clock_event_device *dev, *newdev = NULL;
26103e13cf5SThomas Gleixner 
	/* Scan for the best currently unused candidate other than @ced */
26203e13cf5SThomas Gleixner 	list_for_each_entry(dev, &clockevent_devices, list) {
26303e13cf5SThomas Gleixner 		if (dev == ced || dev->mode != CLOCK_EVT_MODE_UNUSED)
26403e13cf5SThomas Gleixner 			continue;
26503e13cf5SThomas Gleixner 
26603e13cf5SThomas Gleixner 		if (!tick_check_replacement(newdev, dev))
26703e13cf5SThomas Gleixner 			continue;
26803e13cf5SThomas Gleixner 
		/* Pin the candidate's module while we hold a reference to it */
26903e13cf5SThomas Gleixner 		if (!try_module_get(dev->owner))
27003e13cf5SThomas Gleixner 			continue;
27103e13cf5SThomas Gleixner 
		/* Drop the reference on the previously best candidate */
27203e13cf5SThomas Gleixner 		if (newdev)
27303e13cf5SThomas Gleixner 			module_put(newdev->owner);
27403e13cf5SThomas Gleixner 		newdev = dev;
27503e13cf5SThomas Gleixner 	}
27603e13cf5SThomas Gleixner 	if (newdev) {
27703e13cf5SThomas Gleixner 		tick_install_replacement(newdev);
27803e13cf5SThomas Gleixner 		list_del_init(&ced->list);
27903e13cf5SThomas Gleixner 	}
28003e13cf5SThomas Gleixner 	return newdev ? 0 : -EBUSY;
28103e13cf5SThomas Gleixner }
28203e13cf5SThomas Gleixner 
28303e13cf5SThomas Gleixner /*
28403e13cf5SThomas Gleixner  * Called with clockevents_mutex and clockevents_lock held
28503e13cf5SThomas Gleixner  */
28603e13cf5SThomas Gleixner static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
28703e13cf5SThomas Gleixner {
28803e13cf5SThomas Gleixner 	/* Fast track. Device is unused */
28903e13cf5SThomas Gleixner 	if (ced->mode == CLOCK_EVT_MODE_UNUSED) {
29003e13cf5SThomas Gleixner 		list_del_init(&ced->list);
29103e13cf5SThomas Gleixner 		return 0;
29203e13cf5SThomas Gleixner 	}
29303e13cf5SThomas Gleixner 
	/* -EAGAIN: device is this CPU's tick device, caller may try to replace it */
29403e13cf5SThomas Gleixner 	return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
29503e13cf5SThomas Gleixner }
29603e13cf5SThomas Gleixner 
29703e13cf5SThomas Gleixner /*
29803e13cf5SThomas Gleixner  * SMP function call to unbind a device
29903e13cf5SThomas Gleixner  */
30003e13cf5SThomas Gleixner static void __clockevents_unbind(void *arg)
30103e13cf5SThomas Gleixner {
30203e13cf5SThomas Gleixner 	struct ce_unbind *cu = arg;
30303e13cf5SThomas Gleixner 	int res;
30403e13cf5SThomas Gleixner 
30503e13cf5SThomas Gleixner 	raw_spin_lock(&clockevents_lock);
30603e13cf5SThomas Gleixner 	res = __clockevents_try_unbind(cu->ce, smp_processor_id());
	/* Device is the active tick device here; try to install a replacement */
30703e13cf5SThomas Gleixner 	if (res == -EAGAIN)
30803e13cf5SThomas Gleixner 		res = clockevents_replace(cu->ce);
30903e13cf5SThomas Gleixner 	cu->res = res;
31003e13cf5SThomas Gleixner 	raw_spin_unlock(&clockevents_lock);
31103e13cf5SThomas Gleixner }
31203e13cf5SThomas Gleixner 
31303e13cf5SThomas Gleixner /*
31403e13cf5SThomas Gleixner  * Issues smp function call to unbind a per cpu device. Called with
31503e13cf5SThomas Gleixner  * clockevents_mutex held.
31603e13cf5SThomas Gleixner  */
31703e13cf5SThomas Gleixner static int clockevents_unbind(struct clock_event_device *ced, int cpu)
31803e13cf5SThomas Gleixner {
31903e13cf5SThomas Gleixner 	struct ce_unbind cu = { .ce = ced, .res = -ENODEV };
32003e13cf5SThomas Gleixner 
	/* Run the unbind on the target CPU and wait (last arg 1) for the result */
32103e13cf5SThomas Gleixner 	smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
32203e13cf5SThomas Gleixner 	return cu.res;
32303e13cf5SThomas Gleixner }
32403e13cf5SThomas Gleixner 
32503e13cf5SThomas Gleixner /*
32603e13cf5SThomas Gleixner  * Unbind a clockevents device.
32703e13cf5SThomas Gleixner  */
32803e13cf5SThomas Gleixner int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
32903e13cf5SThomas Gleixner {
33003e13cf5SThomas Gleixner 	int ret;
33103e13cf5SThomas Gleixner 
	/* Serialize against concurrent unbind/sysfs operations */
33203e13cf5SThomas Gleixner 	mutex_lock(&clockevents_mutex);
33303e13cf5SThomas Gleixner 	ret = clockevents_unbind(ced, cpu);
33403e13cf5SThomas Gleixner 	mutex_unlock(&clockevents_mutex);
33503e13cf5SThomas Gleixner 	return ret;
33603e13cf5SThomas Gleixner }
/*
 * Export the public entry point. The previous line exported the static
 * helper clockevents_unbind(), which is file-local and cannot be exported;
 * modpost/linking fails on it.
 */
33703e13cf5SThomas Gleixner EXPORT_SYMBOL_GPL(clockevents_unbind_device);
33803e13cf5SThomas Gleixner 
339d316c57fSThomas Gleixner /**
340d316c57fSThomas Gleixner  * clockevents_register_device - register a clock event device
341d316c57fSThomas Gleixner  * @dev:	device to register
342d316c57fSThomas Gleixner  */
343d316c57fSThomas Gleixner void clockevents_register_device(struct clock_event_device *dev)
344d316c57fSThomas Gleixner {
345f833bab8SSuresh Siddha 	unsigned long flags;
346f833bab8SSuresh Siddha 
347d316c57fSThomas Gleixner 	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
	/* UP drivers may leave cpumask unset; default to the current CPU */
3481b054b67SThomas Gleixner 	if (!dev->cpumask) {
3491b054b67SThomas Gleixner 		WARN_ON(num_possible_cpus() > 1);
3501b054b67SThomas Gleixner 		dev->cpumask = cpumask_of(smp_processor_id());
3511b054b67SThomas Gleixner 	}
352320ab2b0SRusty Russell 
353b5f91da0SThomas Gleixner 	raw_spin_lock_irqsave(&clockevents_lock, flags);
354d316c57fSThomas Gleixner 
355d316c57fSThomas Gleixner 	list_add(&dev->list, &clockevent_devices);
3567172a286SThomas Gleixner 	tick_check_new_device(dev);
	/* Pick up any devices released by the tick code during the check */
357d316c57fSThomas Gleixner 	clockevents_notify_released();
358d316c57fSThomas Gleixner 
359b5f91da0SThomas Gleixner 	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
360d316c57fSThomas Gleixner }
361c81fc2c3SMagnus Damm EXPORT_SYMBOL_GPL(clockevents_register_device);
362d316c57fSThomas Gleixner 
/* Derive mult/shift and the min/max ns deltas for the given frequency */
363e5400321SMagnus Damm void clockevents_config(struct clock_event_device *dev, u32 freq)
36457f0fcbeSThomas Gleixner {
365c0e299b1SThomas Gleixner 	u64 sec;
36657f0fcbeSThomas Gleixner 
	/* Only oneshot capable devices need the conversion values */
36757f0fcbeSThomas Gleixner 	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
36857f0fcbeSThomas Gleixner 		return;
36957f0fcbeSThomas Gleixner 
37057f0fcbeSThomas Gleixner 	/*
37157f0fcbeSThomas Gleixner 	 * Calculate the maximum number of seconds we can sleep. Limit
37257f0fcbeSThomas Gleixner 	 * to 10 minutes for hardware which can program more than
37357f0fcbeSThomas Gleixner 	 * 32bit ticks so we still get reasonable conversion values.
37457f0fcbeSThomas Gleixner 	 */
37557f0fcbeSThomas Gleixner 	sec = dev->max_delta_ticks;
37657f0fcbeSThomas Gleixner 	do_div(sec, freq);
37757f0fcbeSThomas Gleixner 	if (!sec)
37857f0fcbeSThomas Gleixner 		sec = 1;
37957f0fcbeSThomas Gleixner 	else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
38057f0fcbeSThomas Gleixner 		sec = 600;
38157f0fcbeSThomas Gleixner 
38257f0fcbeSThomas Gleixner 	clockevents_calc_mult_shift(dev, freq, sec);
38357f0fcbeSThomas Gleixner 	dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
38457f0fcbeSThomas Gleixner 	dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
38557f0fcbeSThomas Gleixner }
38657f0fcbeSThomas Gleixner 
38757f0fcbeSThomas Gleixner /**
38857f0fcbeSThomas Gleixner  * clockevents_config_and_register - Configure and register a clock event device
38957f0fcbeSThomas Gleixner  * @dev:	device to register
39057f0fcbeSThomas Gleixner  * @freq:	The clock frequency
39157f0fcbeSThomas Gleixner  * @min_delta:	The minimum clock ticks to program in oneshot mode
39257f0fcbeSThomas Gleixner  * @max_delta:	The maximum clock ticks to program in oneshot mode
39357f0fcbeSThomas Gleixner  *
39457f0fcbeSThomas Gleixner  * min/max_delta can be 0 for devices which do not support oneshot mode.
39557f0fcbeSThomas Gleixner  */
39657f0fcbeSThomas Gleixner void clockevents_config_and_register(struct clock_event_device *dev,
39757f0fcbeSThomas Gleixner 				     u32 freq, unsigned long min_delta,
39857f0fcbeSThomas Gleixner 				     unsigned long max_delta)
39957f0fcbeSThomas Gleixner {
	/* Store the tick limits first; clockevents_config() converts them to ns */
40057f0fcbeSThomas Gleixner 	dev->min_delta_ticks = min_delta;
40157f0fcbeSThomas Gleixner 	dev->max_delta_ticks = max_delta;
40257f0fcbeSThomas Gleixner 	clockevents_config(dev, freq);
40357f0fcbeSThomas Gleixner 	clockevents_register_device(dev);
40457f0fcbeSThomas Gleixner }
405c35ef95cSShawn Guo EXPORT_SYMBOL_GPL(clockevents_config_and_register);
40657f0fcbeSThomas Gleixner 
40780b816b7SThomas Gleixner /**
40880b816b7SThomas Gleixner  * clockevents_update_freq - Update frequency and reprogram a clock event device.
40980b816b7SThomas Gleixner  * @dev:	device to modify
41080b816b7SThomas Gleixner  * @freq:	new device frequency
41180b816b7SThomas Gleixner  *
41280b816b7SThomas Gleixner  * Reconfigure and reprogram a clock event device in oneshot
41380b816b7SThomas Gleixner  * mode. Must be called on the cpu for which the device delivers per
41480b816b7SThomas Gleixner  * cpu timer events with interrupts disabled!  Returns 0 on success,
41580b816b7SThomas Gleixner  * -ETIME when the event is in the past.
41680b816b7SThomas Gleixner  */
41780b816b7SThomas Gleixner int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
41880b816b7SThomas Gleixner {
41980b816b7SThomas Gleixner 	clockevents_config(dev, freq);
42080b816b7SThomas Gleixner 
	/* Nothing to reprogram unless the device is in oneshot mode */
42180b816b7SThomas Gleixner 	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
42280b816b7SThomas Gleixner 		return 0;
42380b816b7SThomas Gleixner 
	/* Reprogram the pending event with the new mult/shift; don't force */
424d1748302SMartin Schwidefsky 	return clockevents_program_event(dev, dev->next_event, false);
42580b816b7SThomas Gleixner }
42680b816b7SThomas Gleixner 
427d316c57fSThomas Gleixner /*
428d316c57fSThomas Gleixner  * Noop handler when we shut down an event device
429d316c57fSThomas Gleixner  */
4307c1e7689SVenkatesh Pallipadi void clockevents_handle_noop(struct clock_event_device *dev)
431d316c57fSThomas Gleixner {
	/* Intentionally empty: events from a shut down device are discarded */
432d316c57fSThomas Gleixner }
433d316c57fSThomas Gleixner 
434d316c57fSThomas Gleixner /**
435d316c57fSThomas Gleixner  * clockevents_exchange_device - release and request clock devices
436d316c57fSThomas Gleixner  * @old:	device to release (can be NULL)
437d316c57fSThomas Gleixner  * @new:	device to request (can be NULL)
438d316c57fSThomas Gleixner  *
439d316c57fSThomas Gleixner  * Called from the notifier chain. clockevents_lock is held already
440d316c57fSThomas Gleixner  */
441d316c57fSThomas Gleixner void clockevents_exchange_device(struct clock_event_device *old,
442d316c57fSThomas Gleixner 				 struct clock_event_device *new)
443d316c57fSThomas Gleixner {
444d316c57fSThomas Gleixner 	unsigned long flags;
445d316c57fSThomas Gleixner 
	/* clockevents_lock is already held by the caller; only irqs off here */
446d316c57fSThomas Gleixner 	local_irq_save(flags);
447d316c57fSThomas Gleixner 	/*
448d316c57fSThomas Gleixner 	 * Caller releases a clock event device. We queue it into the
449d316c57fSThomas Gleixner 	 * released list and do a notify add later.
450d316c57fSThomas Gleixner 	 */
451d316c57fSThomas Gleixner 	if (old) {
452ccf33d68SThomas Gleixner 		module_put(old->owner);
453d316c57fSThomas Gleixner 		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
454d316c57fSThomas Gleixner 		list_del(&old->list);
455d316c57fSThomas Gleixner 		list_add(&old->list, &clockevents_released);
456d316c57fSThomas Gleixner 	}
457d316c57fSThomas Gleixner 
	/* The new device starts shut down; the tick code programs it later */
458d316c57fSThomas Gleixner 	if (new) {
459d316c57fSThomas Gleixner 		BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
4602344abbcSThomas Gleixner 		clockevents_shutdown(new);
461d316c57fSThomas Gleixner 	}
462d316c57fSThomas Gleixner 	local_irq_restore(flags);
463d316c57fSThomas Gleixner }
464d316c57fSThomas Gleixner 
465adc78e6bSRafael J. Wysocki /**
466adc78e6bSRafael J. Wysocki  * clockevents_suspend - suspend clock devices
467adc78e6bSRafael J. Wysocki  */
468adc78e6bSRafael J. Wysocki void clockevents_suspend(void)
469adc78e6bSRafael J. Wysocki {
470adc78e6bSRafael J. Wysocki 	struct clock_event_device *dev;
471adc78e6bSRafael J. Wysocki 
	/* Suspend in reverse registration order; resume walks forward */
472adc78e6bSRafael J. Wysocki 	list_for_each_entry_reverse(dev, &clockevent_devices, list)
473adc78e6bSRafael J. Wysocki 		if (dev->suspend)
474adc78e6bSRafael J. Wysocki 			dev->suspend(dev);
475adc78e6bSRafael J. Wysocki }
476adc78e6bSRafael J. Wysocki 
477adc78e6bSRafael J. Wysocki /**
478adc78e6bSRafael J. Wysocki  * clockevents_resume - resume clock devices
479adc78e6bSRafael J. Wysocki  */
480adc78e6bSRafael J. Wysocki void clockevents_resume(void)
481adc78e6bSRafael J. Wysocki {
482adc78e6bSRafael J. Wysocki 	struct clock_event_device *dev;
483adc78e6bSRafael J. Wysocki 
	/* Resume callbacks are optional; skip devices without one */
484adc78e6bSRafael J. Wysocki 	list_for_each_entry(dev, &clockevent_devices, list)
485adc78e6bSRafael J. Wysocki 		if (dev->resume)
486adc78e6bSRafael J. Wysocki 			dev->resume(dev);
487adc78e6bSRafael J. Wysocki }
488adc78e6bSRafael J. Wysocki 
489de68d9b1SThomas Gleixner #ifdef CONFIG_GENERIC_CLOCKEVENTS
490d316c57fSThomas Gleixner /**
491d316c57fSThomas Gleixner  * clockevents_notify - notification about relevant events
492d316c57fSThomas Gleixner  */
493d316c57fSThomas Gleixner void clockevents_notify(unsigned long reason, void *arg)
494d316c57fSThomas Gleixner {
495bb6eddf7SThomas Gleixner 	struct clock_event_device *dev, *tmp;
496f833bab8SSuresh Siddha 	unsigned long flags;
497bb6eddf7SThomas Gleixner 	int cpu;
4980b858e6fSLi Zefan 
499b5f91da0SThomas Gleixner 	raw_spin_lock_irqsave(&clockevents_lock, flags);
500d316c57fSThomas Gleixner 
	/* All notifications run under clockevents_lock with interrupts off */
501d316c57fSThomas Gleixner 	switch (reason) {
5028c53daf6SThomas Gleixner 	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
5038c53daf6SThomas Gleixner 	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
5048c53daf6SThomas Gleixner 	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
5058c53daf6SThomas Gleixner 		tick_broadcast_on_off(reason, arg);
5068c53daf6SThomas Gleixner 		break;
5078c53daf6SThomas Gleixner 
5088c53daf6SThomas Gleixner 	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
5098c53daf6SThomas Gleixner 	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
5108c53daf6SThomas Gleixner 		tick_broadcast_oneshot_control(reason);
5118c53daf6SThomas Gleixner 		break;
5128c53daf6SThomas Gleixner 
5138c53daf6SThomas Gleixner 	case CLOCK_EVT_NOTIFY_CPU_DYING:
5148c53daf6SThomas Gleixner 		tick_handover_do_timer(arg);
5158c53daf6SThomas Gleixner 		break;
5168c53daf6SThomas Gleixner 
5178c53daf6SThomas Gleixner 	case CLOCK_EVT_NOTIFY_SUSPEND:
5188c53daf6SThomas Gleixner 		tick_suspend();
5198c53daf6SThomas Gleixner 		tick_suspend_broadcast();
5208c53daf6SThomas Gleixner 		break;
5218c53daf6SThomas Gleixner 
5228c53daf6SThomas Gleixner 	case CLOCK_EVT_NOTIFY_RESUME:
5238c53daf6SThomas Gleixner 		tick_resume();
5248c53daf6SThomas Gleixner 		break;
5258c53daf6SThomas Gleixner 
526d316c57fSThomas Gleixner 	case CLOCK_EVT_NOTIFY_CPU_DEAD:
5278c53daf6SThomas Gleixner 		tick_shutdown_broadcast_oneshot(arg);
5288c53daf6SThomas Gleixner 		tick_shutdown_broadcast(arg);
5298c53daf6SThomas Gleixner 		tick_shutdown(arg);
530d316c57fSThomas Gleixner 		/*
531d316c57fSThomas Gleixner 		 * Unregister the clock event devices which were
532d316c57fSThomas Gleixner 		 * released from the users in the notify chain.
533d316c57fSThomas Gleixner 		 */
534bb6eddf7SThomas Gleixner 		list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
535bb6eddf7SThomas Gleixner 			list_del(&dev->list);
536bb6eddf7SThomas Gleixner 		/*
537bb6eddf7SThomas Gleixner 		 * Now check whether the CPU has left unused per cpu devices
538bb6eddf7SThomas Gleixner 		 */
539bb6eddf7SThomas Gleixner 		cpu = *((int *)arg);
540bb6eddf7SThomas Gleixner 		list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
			/* Drop devices exclusive to the dead CPU (not the broadcast one) */
541bb6eddf7SThomas Gleixner 			if (cpumask_test_cpu(cpu, dev->cpumask) &&
542ea9d8e3fSXiaotian Feng 			    cpumask_weight(dev->cpumask) == 1 &&
543ea9d8e3fSXiaotian Feng 			    !tick_is_broadcast_device(dev)) {
544bb6eddf7SThomas Gleixner 				BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
545bb6eddf7SThomas Gleixner 				list_del(&dev->list);
546bb6eddf7SThomas Gleixner 			}
547bb6eddf7SThomas Gleixner 		}
548d316c57fSThomas Gleixner 		break;
549d316c57fSThomas Gleixner 	default:
550d316c57fSThomas Gleixner 		break;
551d316c57fSThomas Gleixner 	}
552b5f91da0SThomas Gleixner 	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
553d316c57fSThomas Gleixner }
554d316c57fSThomas Gleixner EXPORT_SYMBOL_GPL(clockevents_notify);
555501f8670SThomas Gleixner 
556501f8670SThomas Gleixner #ifdef CONFIG_SYSFS
/* sysfs bus for clockevent devices (/sys/devices/system/clockevent) */
557501f8670SThomas Gleixner struct bus_type clockevents_subsys = {
558501f8670SThomas Gleixner 	.name		= "clockevents",
559501f8670SThomas Gleixner 	.dev_name       = "clockevent",
560501f8670SThomas Gleixner };
561501f8670SThomas Gleixner 
562501f8670SThomas Gleixner static DEFINE_PER_CPU(struct device, tick_percpu_dev);
563501f8670SThomas Gleixner static struct tick_device *tick_get_tick_dev(struct device *dev);
564501f8670SThomas Gleixner 
/* sysfs read: show the name of the tick device backing this sysfs device */
565501f8670SThomas Gleixner static ssize_t sysfs_show_current_tick_dev(struct device *dev,
566501f8670SThomas Gleixner 					   struct device_attribute *attr,
567501f8670SThomas Gleixner 					   char *buf)
568501f8670SThomas Gleixner {
569501f8670SThomas Gleixner 	struct tick_device *td;
570501f8670SThomas Gleixner 	ssize_t count = 0;
571501f8670SThomas Gleixner 
	/* Hold clockevents_lock so evtdev cannot be exchanged while we read it */
572501f8670SThomas Gleixner 	raw_spin_lock_irq(&clockevents_lock);
573501f8670SThomas Gleixner 	td = tick_get_tick_dev(dev);
574501f8670SThomas Gleixner 	if (td && td->evtdev)
575501f8670SThomas Gleixner 		count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name);
576501f8670SThomas Gleixner 	raw_spin_unlock_irq(&clockevents_lock);
577501f8670SThomas Gleixner 	return count;
578501f8670SThomas Gleixner }
579501f8670SThomas Gleixner static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL);
580501f8670SThomas Gleixner 
58103e13cf5SThomas Gleixner /* We don't support the abomination of removable broadcast devices */
/* sysfs write: unbind the named clockevent device from this CPU */
58203e13cf5SThomas Gleixner static ssize_t sysfs_unbind_tick_dev(struct device *dev,
58303e13cf5SThomas Gleixner 				     struct device_attribute *attr,
58403e13cf5SThomas Gleixner 				     const char *buf, size_t count)
58503e13cf5SThomas Gleixner {
58603e13cf5SThomas Gleixner 	char name[CS_NAME_LEN];
	/*
	 * Must be signed: sysfs_get_uname() reports failure as a negative
	 * errno. With the previous size_t type the "ret < 0" check below
	 * was always false and errors were silently ignored.
	 */
58703e13cf5SThomas Gleixner 	ssize_t ret = sysfs_get_uname(buf, name, count);
58803e13cf5SThomas Gleixner 	struct clock_event_device *ce;
58903e13cf5SThomas Gleixner 
59003e13cf5SThomas Gleixner 	if (ret < 0)
59103e13cf5SThomas Gleixner 		return ret;
59203e13cf5SThomas Gleixner 
59303e13cf5SThomas Gleixner 	ret = -ENODEV;
59403e13cf5SThomas Gleixner 	mutex_lock(&clockevents_mutex);
59503e13cf5SThomas Gleixner 	raw_spin_lock_irq(&clockevents_lock);
59603e13cf5SThomas Gleixner 	list_for_each_entry(ce, &clockevent_devices, list) {
59703e13cf5SThomas Gleixner 		if (!strcmp(ce->name, name)) {
59803e13cf5SThomas Gleixner 			ret = __clockevents_try_unbind(ce, dev->id);
59903e13cf5SThomas Gleixner 			break;
60003e13cf5SThomas Gleixner 		}
60103e13cf5SThomas Gleixner 	}
60203e13cf5SThomas Gleixner 	raw_spin_unlock_irq(&clockevents_lock);
60303e13cf5SThomas Gleixner 	/*
60403e13cf5SThomas Gleixner 	 * We hold clockevents_mutex, so ce can't go away
60503e13cf5SThomas Gleixner 	 */
	/* In use as tick device: needs the cross-CPU unbind + replace path */
60603e13cf5SThomas Gleixner 	if (ret == -EAGAIN)
60703e13cf5SThomas Gleixner 		ret = clockevents_unbind(ce, dev->id);
60803e13cf5SThomas Gleixner 	mutex_unlock(&clockevents_mutex);
60903e13cf5SThomas Gleixner 	return ret ? ret : count;
61003e13cf5SThomas Gleixner }
61103e13cf5SThomas Gleixner static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev);
61203e13cf5SThomas Gleixner 
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
/* Sysfs node for the (non-removable) broadcast tick device */
static struct device tick_bc_dev = {
	.init_name	= "broadcast",
	.id		= 0,
	.bus		= &clockevents_subsys,
};

/*
 * Resolve a sysfs device to its tick device: the broadcast node maps to
 * the broadcast tick device, anything else to the per-cpu tick device
 * selected by dev->id (set to the cpu number in tick_init_sysfs()).
 */
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return dev == &tick_bc_dev ? tick_get_broadcast_device() :
		&per_cpu(tick_cpu_device, dev->id);
}

/* Register the broadcast sysfs node and its current_device attribute */
static __init int tick_broadcast_init_sysfs(void)
{
	int err = device_register(&tick_bc_dev);

	if (!err)
		err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
	return err;
}
#else
/* No broadcast support: only per-cpu tick devices exist */
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif
641501f8670SThomas Gleixner 
642501f8670SThomas Gleixner static int __init tick_init_sysfs(void)
643501f8670SThomas Gleixner {
644501f8670SThomas Gleixner 	int cpu;
645501f8670SThomas Gleixner 
646501f8670SThomas Gleixner 	for_each_possible_cpu(cpu) {
647501f8670SThomas Gleixner 		struct device *dev = &per_cpu(tick_percpu_dev, cpu);
648501f8670SThomas Gleixner 		int err;
649501f8670SThomas Gleixner 
650501f8670SThomas Gleixner 		dev->id = cpu;
651501f8670SThomas Gleixner 		dev->bus = &clockevents_subsys;
652501f8670SThomas Gleixner 		err = device_register(dev);
653501f8670SThomas Gleixner 		if (!err)
654501f8670SThomas Gleixner 			err = device_create_file(dev, &dev_attr_current_device);
65503e13cf5SThomas Gleixner 		if (!err)
65603e13cf5SThomas Gleixner 			err = device_create_file(dev, &dev_attr_unbind_device);
657501f8670SThomas Gleixner 		if (err)
658501f8670SThomas Gleixner 			return err;
659501f8670SThomas Gleixner 	}
660501f8670SThomas Gleixner 	return tick_broadcast_init_sysfs();
661501f8670SThomas Gleixner }
662501f8670SThomas Gleixner 
663501f8670SThomas Gleixner static int __init clockevents_init_sysfs(void)
664501f8670SThomas Gleixner {
665501f8670SThomas Gleixner 	int err = subsys_system_register(&clockevents_subsys, NULL);
666501f8670SThomas Gleixner 
667501f8670SThomas Gleixner 	if (!err)
668501f8670SThomas Gleixner 		err = tick_init_sysfs();
669501f8670SThomas Gleixner 	return err;
670501f8670SThomas Gleixner }
671501f8670SThomas Gleixner device_initcall(clockevents_init_sysfs);
672501f8670SThomas Gleixner #endif /* SYSFS */
673501f8670SThomas Gleixner 
674501f8670SThomas Gleixner #endif /* GENERIC_CLOCK_EVENTS */
675