xref: /openbmc/linux/kernel/time/tick-common.c (revision 906568c9c668ff994f4078932ec6ae1e3950d1af)
1*906568c9SThomas Gleixner /*
2*906568c9SThomas Gleixner  * linux/kernel/time/tick-common.c
3*906568c9SThomas Gleixner  *
4*906568c9SThomas Gleixner  * This file contains the base functions to manage periodic tick
5*906568c9SThomas Gleixner  * related events.
6*906568c9SThomas Gleixner  *
7*906568c9SThomas Gleixner  * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
8*906568c9SThomas Gleixner  * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
9*906568c9SThomas Gleixner  * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
10*906568c9SThomas Gleixner  *
11*906568c9SThomas Gleixner  * This code is licenced under the GPL version 2. For details see
12*906568c9SThomas Gleixner  * kernel-base/COPYING.
13*906568c9SThomas Gleixner  */
14*906568c9SThomas Gleixner #include <linux/cpu.h>
15*906568c9SThomas Gleixner #include <linux/err.h>
16*906568c9SThomas Gleixner #include <linux/hrtimer.h>
17*906568c9SThomas Gleixner #include <linux/irq.h>
18*906568c9SThomas Gleixner #include <linux/percpu.h>
19*906568c9SThomas Gleixner #include <linux/profile.h>
20*906568c9SThomas Gleixner #include <linux/sched.h>
21*906568c9SThomas Gleixner #include <linux/tick.h>
22*906568c9SThomas Gleixner 
/*
 * Tick devices: per-cpu state tracking which clock event device
 * drives the periodic tick on each CPU.
 */
static DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
 * Tick next event: keeps track of the tick time
 */
static ktime_t tick_next_period;	/* absolute expiry time of the next tick */
static ktime_t tick_period;		/* tick interval, set to NSEC_PER_SEC / HZ */
static int tick_do_timer_cpu = -1;	/* CPU owning the do_timer() duty; -1 = not yet assigned */
static DEFINE_SPINLOCK(tick_device_lock);	/* serializes tick device setup/teardown */
34*906568c9SThomas Gleixner 
/*
 * Periodic tick: global timekeeping (on one designated CPU) plus
 * per-cpu process accounting and profiling.
 */
static void tick_periodic(int cpu)
{
	/*
	 * Only the CPU that owns the do_timer() duty advances jiffies
	 * and the global tick bookkeeping; all other CPUs skip to the
	 * per-cpu work below.
	 */
	if (tick_do_timer_cpu == cpu) {
		write_seqlock(&xtime_lock);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
		write_sequnlock(&xtime_lock);
	}

	/* Per-cpu work, executed on every CPU's tick */
	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}
53*906568c9SThomas Gleixner 
/*
 * Event handler for periodic ticks.
 *
 * Called from the clock event device's interrupt; runs the tick work
 * and, for devices without hardware periodic mode, reprograms the
 * next expiry by hand.
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();

	tick_periodic(cpu);

	/* Hardware-periodic devices re-arm themselves; nothing more to do */
	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return;
	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode:
	 *
	 * If programming fails the requested expiry already lies in the
	 * past, i.e. a tick was missed — account for it by running
	 * tick_periodic() again and retry one period further out.
	 */
	for (;;) {
		ktime_t next = ktime_add(dev->next_event, tick_period);

		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
		tick_periodic(cpu);
	}
}
77*906568c9SThomas Gleixner 
/*
 * Setup the device for a periodic tick.
 *
 * Devices with hardware periodic support are simply switched to
 * periodic mode; others are put into oneshot mode and programmed
 * manually for the next tick boundary.
 */
void tick_setup_periodic(struct clock_event_device *dev)
{
	dev->event_handler = tick_handle_periodic;

	if (dev->features & CLOCK_EVT_FEAT_PERIODIC) {
		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
	} else {
		unsigned long seq;
		ktime_t next;

		/* Snapshot tick_next_period consistently vs. the writer side */
		do {
			seq = read_seqbegin(&xtime_lock);
			next = tick_next_period;
		} while (read_seqretry(&xtime_lock, seq));

		clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);

		/*
		 * Program the first event; if the time has already
		 * passed, step forward one period and try again.
		 */
		for (;;) {
			if (!clockevents_program_event(dev, next, ktime_get()))
				return;
			next = ktime_add(next, tick_period);
		}
	}
}
105*906568c9SThomas Gleixner 
106*906568c9SThomas Gleixner /*
107*906568c9SThomas Gleixner  * Setup the tick device
108*906568c9SThomas Gleixner  */
109*906568c9SThomas Gleixner static void tick_setup_device(struct tick_device *td,
110*906568c9SThomas Gleixner 			      struct clock_event_device *newdev, int cpu,
111*906568c9SThomas Gleixner 			      cpumask_t cpumask)
112*906568c9SThomas Gleixner {
113*906568c9SThomas Gleixner 	ktime_t next_event;
114*906568c9SThomas Gleixner 	void (*handler)(struct clock_event_device *) = NULL;
115*906568c9SThomas Gleixner 
116*906568c9SThomas Gleixner 	/*
117*906568c9SThomas Gleixner 	 * First device setup ?
118*906568c9SThomas Gleixner 	 */
119*906568c9SThomas Gleixner 	if (!td->evtdev) {
120*906568c9SThomas Gleixner 		/*
121*906568c9SThomas Gleixner 		 * If no cpu took the do_timer update, assign it to
122*906568c9SThomas Gleixner 		 * this cpu:
123*906568c9SThomas Gleixner 		 */
124*906568c9SThomas Gleixner 		if (tick_do_timer_cpu == -1) {
125*906568c9SThomas Gleixner 			tick_do_timer_cpu = cpu;
126*906568c9SThomas Gleixner 			tick_next_period = ktime_get();
127*906568c9SThomas Gleixner 			tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
128*906568c9SThomas Gleixner 		}
129*906568c9SThomas Gleixner 
130*906568c9SThomas Gleixner 		/*
131*906568c9SThomas Gleixner 		 * Startup in periodic mode first.
132*906568c9SThomas Gleixner 		 */
133*906568c9SThomas Gleixner 		td->mode = TICKDEV_MODE_PERIODIC;
134*906568c9SThomas Gleixner 	} else {
135*906568c9SThomas Gleixner 		handler = td->evtdev->event_handler;
136*906568c9SThomas Gleixner 		next_event = td->evtdev->next_event;
137*906568c9SThomas Gleixner 	}
138*906568c9SThomas Gleixner 
139*906568c9SThomas Gleixner 	td->evtdev = newdev;
140*906568c9SThomas Gleixner 
141*906568c9SThomas Gleixner 	/*
142*906568c9SThomas Gleixner 	 * When the device is not per cpu, pin the interrupt to the
143*906568c9SThomas Gleixner 	 * current cpu:
144*906568c9SThomas Gleixner 	 */
145*906568c9SThomas Gleixner 	if (!cpus_equal(newdev->cpumask, cpumask))
146*906568c9SThomas Gleixner 		irq_set_affinity(newdev->irq, cpumask);
147*906568c9SThomas Gleixner 
148*906568c9SThomas Gleixner 	if (td->mode == TICKDEV_MODE_PERIODIC)
149*906568c9SThomas Gleixner 		tick_setup_periodic(newdev, 0);
150*906568c9SThomas Gleixner }
151*906568c9SThomas Gleixner 
152*906568c9SThomas Gleixner /*
153*906568c9SThomas Gleixner  * Check, if the new registered device should be used.
154*906568c9SThomas Gleixner  */
155*906568c9SThomas Gleixner static int tick_check_new_device(struct clock_event_device *newdev)
156*906568c9SThomas Gleixner {
157*906568c9SThomas Gleixner 	struct clock_event_device *curdev;
158*906568c9SThomas Gleixner 	struct tick_device *td;
159*906568c9SThomas Gleixner 	int cpu, ret = NOTIFY_OK;
160*906568c9SThomas Gleixner 	unsigned long flags;
161*906568c9SThomas Gleixner 	cpumask_t cpumask;
162*906568c9SThomas Gleixner 
163*906568c9SThomas Gleixner 	spin_lock_irqsave(&tick_device_lock, flags);
164*906568c9SThomas Gleixner 
165*906568c9SThomas Gleixner 	cpu = smp_processor_id();
166*906568c9SThomas Gleixner 	if (!cpu_isset(cpu, newdev->cpumask))
167*906568c9SThomas Gleixner 		goto out;
168*906568c9SThomas Gleixner 
169*906568c9SThomas Gleixner 	td = &per_cpu(tick_cpu_device, cpu);
170*906568c9SThomas Gleixner 	curdev = td->evtdev;
171*906568c9SThomas Gleixner 	cpumask = cpumask_of_cpu(cpu);
172*906568c9SThomas Gleixner 
173*906568c9SThomas Gleixner 	/* cpu local device ? */
174*906568c9SThomas Gleixner 	if (!cpus_equal(newdev->cpumask, cpumask)) {
175*906568c9SThomas Gleixner 
176*906568c9SThomas Gleixner 		/*
177*906568c9SThomas Gleixner 		 * If the cpu affinity of the device interrupt can not
178*906568c9SThomas Gleixner 		 * be set, ignore it.
179*906568c9SThomas Gleixner 		 */
180*906568c9SThomas Gleixner 		if (!irq_can_set_affinity(newdev->irq))
181*906568c9SThomas Gleixner 			goto out_bc;
182*906568c9SThomas Gleixner 
183*906568c9SThomas Gleixner 		/*
184*906568c9SThomas Gleixner 		 * If we have a cpu local device already, do not replace it
185*906568c9SThomas Gleixner 		 * by a non cpu local device
186*906568c9SThomas Gleixner 		 */
187*906568c9SThomas Gleixner 		if (curdev && cpus_equal(curdev->cpumask, cpumask))
188*906568c9SThomas Gleixner 			goto out_bc;
189*906568c9SThomas Gleixner 	}
190*906568c9SThomas Gleixner 
191*906568c9SThomas Gleixner 	/*
192*906568c9SThomas Gleixner 	 * If we have an active device, then check the rating and the oneshot
193*906568c9SThomas Gleixner 	 * feature.
194*906568c9SThomas Gleixner 	 */
195*906568c9SThomas Gleixner 	if (curdev) {
196*906568c9SThomas Gleixner 		/*
197*906568c9SThomas Gleixner 		 * Check the rating
198*906568c9SThomas Gleixner 		 */
199*906568c9SThomas Gleixner 		if (curdev->rating >= newdev->rating)
200*906568c9SThomas Gleixner 			goto out;
201*906568c9SThomas Gleixner 	}
202*906568c9SThomas Gleixner 
203*906568c9SThomas Gleixner 	/*
204*906568c9SThomas Gleixner 	 * Replace the eventually existing device by the new
205*906568c9SThomas Gleixner 	 * device.
206*906568c9SThomas Gleixner 	 */
207*906568c9SThomas Gleixner 	clockevents_exchange_device(curdev, newdev);
208*906568c9SThomas Gleixner 	tick_setup_device(td, newdev, cpu, cpumask);
209*906568c9SThomas Gleixner 	ret = NOTIFY_STOP;
210*906568c9SThomas Gleixner 
211*906568c9SThomas Gleixner out:
212*906568c9SThomas Gleixner 	spin_unlock_irqrestore(&tick_device_lock, flags);
213*906568c9SThomas Gleixner 	return ret;
214*906568c9SThomas Gleixner }
215*906568c9SThomas Gleixner 
216*906568c9SThomas Gleixner /*
217*906568c9SThomas Gleixner  * Shutdown an event device on a given cpu:
218*906568c9SThomas Gleixner  *
219*906568c9SThomas Gleixner  * This is called on a life CPU, when a CPU is dead. So we cannot
220*906568c9SThomas Gleixner  * access the hardware device itself.
221*906568c9SThomas Gleixner  * We just set the mode and remove it from the lists.
222*906568c9SThomas Gleixner  */
223*906568c9SThomas Gleixner static void tick_shutdown(unsigned int *cpup)
224*906568c9SThomas Gleixner {
225*906568c9SThomas Gleixner 	struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
226*906568c9SThomas Gleixner 	struct clock_event_device *dev = td->evtdev;
227*906568c9SThomas Gleixner 	unsigned long flags;
228*906568c9SThomas Gleixner 
229*906568c9SThomas Gleixner 	spin_lock_irqsave(&tick_device_lock, flags);
230*906568c9SThomas Gleixner 	td->mode = TICKDEV_MODE_PERIODIC;
231*906568c9SThomas Gleixner 	if (dev) {
232*906568c9SThomas Gleixner 		/*
233*906568c9SThomas Gleixner 		 * Prevent that the clock events layer tries to call
234*906568c9SThomas Gleixner 		 * the set mode function!
235*906568c9SThomas Gleixner 		 */
236*906568c9SThomas Gleixner 		dev->mode = CLOCK_EVT_MODE_UNUSED;
237*906568c9SThomas Gleixner 		clockevents_exchange_device(dev, NULL);
238*906568c9SThomas Gleixner 		td->evtdev = NULL;
239*906568c9SThomas Gleixner 	}
240*906568c9SThomas Gleixner 	spin_unlock_irqrestore(&tick_device_lock, flags);
241*906568c9SThomas Gleixner }
242*906568c9SThomas Gleixner 
243*906568c9SThomas Gleixner /*
244*906568c9SThomas Gleixner  * Notification about clock event devices
245*906568c9SThomas Gleixner  */
246*906568c9SThomas Gleixner static int tick_notify(struct notifier_block *nb, unsigned long reason,
247*906568c9SThomas Gleixner 			       void *dev)
248*906568c9SThomas Gleixner {
249*906568c9SThomas Gleixner 	switch (reason) {
250*906568c9SThomas Gleixner 
251*906568c9SThomas Gleixner 	case CLOCK_EVT_NOTIFY_ADD:
252*906568c9SThomas Gleixner 		return tick_check_new_device(dev);
253*906568c9SThomas Gleixner 
254*906568c9SThomas Gleixner 	case CLOCK_EVT_NOTIFY_CPU_DEAD:
255*906568c9SThomas Gleixner 		tick_shutdown(dev);
256*906568c9SThomas Gleixner 		break;
257*906568c9SThomas Gleixner 
258*906568c9SThomas Gleixner 	default:
259*906568c9SThomas Gleixner 		break;
260*906568c9SThomas Gleixner 	}
261*906568c9SThomas Gleixner 
262*906568c9SThomas Gleixner 	return NOTIFY_OK;
263*906568c9SThomas Gleixner }
264*906568c9SThomas Gleixner 
/* Registered with the clock events framework by tick_init() */
static struct notifier_block tick_notifier = {
	.notifier_call = tick_notify,
};
268*906568c9SThomas Gleixner 
/**
 * tick_init - initialize the tick control
 *
 * Register the notifier with the clockevents framework so this file
 * is informed about device additions and CPU death events.
 */
void __init tick_init(void)
{
	clockevents_register_notifier(&tick_notifier);
}
278