xref: /openbmc/linux/kernel/time/clockevents.c (revision ea9d8e3f)
/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysdev.h>
#include <linux/tick.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);

/* Notification for clock events */
static RAW_NOTIFIER_HEAD(clockevents_chain);

/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch:	value to convert
 * @evt:	pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
	u64 clc = (u64) latch << evt->shift;

	if (unlikely(!evt->mult)) {
		evt->mult = 1;
		WARN_ON(1);
	}

	do_div(clc, evt->mult);
	if (clc < 1000)
		clc = 1000;
	if (clc > KTIME_MAX)
		clc = KTIME_MAX;

	return clc;
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);

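/*
 * Example (illustrative sketch, hypothetical values): for a 32-bit counter,
 * a clock event driver typically bounds the programmable range like this,
 * once mult and shift have been set up:
 *
 *	dev->max_delta_ns = clockevent_delta2ns(0xffffffff, dev);
 *	dev->min_delta_ns = clockevent_delta2ns(0xf, dev);
 */
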
/**
 * clockevents_set_mode - set the operating mode of a clock event device
 * @dev:	device to modify
 * @mode:	new mode
 *
 * Must be called with interrupts disabled!
 */
void clockevents_set_mode(struct clock_event_device *dev,
				 enum clock_event_mode mode)
{
	if (dev->mode != mode) {
		dev->set_mode(mode, dev);
		dev->mode = mode;

		/*
		 * A nsec2cyc multiplier of 0 is invalid and we'd crash
		 * on it, so fix it up and emit a warning:
		 */
		if (mode == CLOCK_EVT_MODE_ONESHOT) {
			if (unlikely(!dev->mult)) {
				dev->mult = 1;
				WARN_ON(1);
			}
		}
	}
}

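/*
 * Example (illustrative sketch): before programming a single expiry, the
 * tick layer switches a device into oneshot mode roughly like this; the
 * expiry value below is hypothetical.
 *
 *	clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
 *	clockevents_program_event(dev, next_event, ktime_get());
 */
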
/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev:	device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
	dev->next_event.tv64 = KTIME_MAX;
}

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev:	device to program
 * @expires:	absolute expiry time (monotonic clock)
 * @now:	current time
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
			      ktime_t now)
{
	unsigned long long clc;
	int64_t delta;

	if (unlikely(expires.tv64 < 0)) {
		WARN_ON_ONCE(1);
		return -ETIME;
	}

	delta = ktime_to_ns(ktime_sub(expires, now));

	if (delta <= 0)
		return -ETIME;

	dev->next_event = expires;

	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
		return 0;

	if (delta > dev->max_delta_ns)
		delta = dev->max_delta_ns;
	if (delta < dev->min_delta_ns)
		delta = dev->min_delta_ns;

	clc = delta * dev->mult;
	clc >>= dev->shift;

	return dev->set_next_event((unsigned long) clc, dev);
}

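/*
 * Worked example (illustrative, hypothetical numbers): for a 1 MHz device
 * with shift = 32, mult = div_sc(1000000, NSEC_PER_SEC, 32) ~= 4294967.
 * Programming an event 1 ms (1000000 ns) out gives
 *
 *	clc = (1000000 * 4294967) >> 32 ~= 1000 device cycles,
 *
 * i.e. cycles = delta_ns * mult >> shift, the inverse of the
 * clockevent_delta2ns() conversion above.
 */
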
/**
 * clockevents_register_notifier - register a clock events change listener
 * @nb:	notifier block to register
 */
int clockevents_register_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&clockevents_lock, flags);
	ret = raw_notifier_chain_register(&clockevents_chain, nb);
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);

	return ret;
}

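/*
 * Example (illustrative sketch): a listener registers a callback which
 * receives the CLOCK_EVT_NOTIFY_* reason codes. The callback and block
 * names below are hypothetical.
 *
 *	static int hypothetical_clockevent_notify(struct notifier_block *nb,
 *						  unsigned long reason, void *dev)
 *	{
 *		switch (reason) {
 *		case CLOCK_EVT_NOTIFY_ADD:
 *			// a new clock_event_device was registered
 *			break;
 *		default:
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block hypothetical_nb = {
 *		.notifier_call = hypothetical_clockevent_notify,
 *	};
 *
 *	clockevents_register_notifier(&hypothetical_nb);
 */
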
/*
 * Notify about a clock event change. Called with clockevents_lock
 * held.
 */
static void clockevents_do_notify(unsigned long reason, void *dev)
{
	raw_notifier_call_chain(&clockevents_chain, reason, dev);
}

/*
 * Called after a notify add to make available those devices which were
 * released from within the notifier call.
 */
static void clockevents_notify_released(void)
{
	struct clock_event_device *dev;

	while (!list_empty(&clockevents_released)) {
		dev = list_entry(clockevents_released.next,
				 struct clock_event_device, list);
		list_del(&dev->list);
		list_add(&dev->list, &clockevent_devices);
		clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
	}
}

/**
 * clockevents_register_device - register a clock event device
 * @dev:	device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
	BUG_ON(!dev->cpumask);

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

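/*
 * Example (illustrative sketch, hypothetical driver): a timer driver fills
 * in a struct clock_event_device and registers it. The field values, the
 * callback names and TIMER_RATE are made up for illustration.
 *
 *	static struct clock_event_device hypothetical_clockevent = {
 *		.name		= "hypothetical-timer",
 *		.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 *		.shift		= 32,
 *		.rating		= 300,
 *		.set_next_event	= hypothetical_set_next_event,
 *		.set_mode	= hypothetical_set_mode,
 *	};
 *
 *	static void __init hypothetical_timer_init(void)
 *	{
 *		hypothetical_clockevent.mult =
 *			div_sc(TIMER_RATE, NSEC_PER_SEC, 32);
 *		hypothetical_clockevent.max_delta_ns =
 *			clockevent_delta2ns(0xffffffff, &hypothetical_clockevent);
 *		hypothetical_clockevent.min_delta_ns =
 *			clockevent_delta2ns(0xf, &hypothetical_clockevent);
 *		hypothetical_clockevent.cpumask = cpumask_of(0);
 *
 *		clockevents_register_device(&hypothetical_clockevent);
 *	}
 */
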
/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old:	device to release (can be NULL)
 * @new:	device to request (can be NULL)
 *
 * Called from the notifier chain. clockevents_lock is held already
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	unsigned long flags;

	local_irq_save(flags);
	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
		list_del(&old->list);
		list_add(&old->list, &clockevents_released);
	}

	if (new) {
		BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
		clockevents_shutdown(new);
	}
	local_irq_restore(flags);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS
/**
 * clockevents_notify - notification about relevant events
 * @reason:	reason code (CLOCK_EVT_NOTIFY_*)
 * @arg:	reason dependent argument
 */
void clockevents_notify(unsigned long reason, void *arg)
{
	struct clock_event_device *dev, *tmp;
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&clockevents_lock, flags);
	clockevents_do_notify(reason, arg);

	switch (reason) {
	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		/*
		 * Unregister the clock event devices which were
		 * released from the users in the notify chain.
		 */
		list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
			list_del(&dev->list);
		/*
		 * Now check whether the CPU has left unused per cpu devices
		 */
		cpu = *((int *)arg);
		list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
			if (cpumask_test_cpu(cpu, dev->cpumask) &&
			    cpumask_weight(dev->cpumask) == 1 &&
			    !tick_is_broadcast_device(dev)) {
				BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
				list_del(&dev->list);
			}
		}
		break;
	default:
		break;
	}
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_notify);
#endif
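
/*
 * Example (illustrative sketch): CPU hotplug and PM code calls
 * clockevents_notify() with a reason code and a reason-specific argument,
 * e.g. when tearing down a CPU; the CPU number below is hypothetical.
 *
 *	int cpu = 1;
 *
 *	clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &cpu);
 *	clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
 */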