xref: /openbmc/linux/kernel/time/tick-common.c (revision f8381cba04ba8173fd5a2b8e5cd8b3290ee13a98)
1 /*
2  * linux/kernel/time/tick-common.c
3  *
4  * This file contains the base functions to manage periodic tick
5  * related events.
6  *
7  * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
8  * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
9  * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
10  *
11  * This code is licenced under the GPL version 2. For details see
12  * kernel-base/COPYING.
13  */
14 #include <linux/cpu.h>
15 #include <linux/err.h>
16 #include <linux/hrtimer.h>
17 #include <linux/irq.h>
18 #include <linux/percpu.h>
19 #include <linux/profile.h>
20 #include <linux/sched.h>
21 #include <linux/tick.h>
22 
23 #include "tick-internal.h"
24 
/*
 * Tick devices: per-cpu tick device state (mode + current event device)
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
 * Tick next event: keeps track of the tick time
 */
ktime_t tick_next_period;
/* Length of one tick period; set to NSEC_PER_SEC / HZ at first device setup */
ktime_t tick_period;
/*
 * CPU that owns the do_timer()/jiffies update duty; -1 until the first
 * tick device is registered (see tick_setup_device()).
 */
static int tick_do_timer_cpu = -1;
/* Protects the per-cpu tick_cpu_device instances and tick_do_timer_cpu */
DEFINE_SPINLOCK(tick_device_lock);
36 
/*
 * Periodic tick: called once per tick on every CPU from the event
 * device handler.
 */
static void tick_periodic(int cpu)
{
	/*
	 * Only the CPU that owns the do_timer duty advances the global
	 * time state; all other CPUs do per-cpu accounting only.
	 */
	if (tick_do_timer_cpu == cpu) {
		write_seqlock(&xtime_lock);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		/* Advance jiffies by one tick under the xtime seqlock */
		do_timer(1);
		write_sequnlock(&xtime_lock);
	}

	/* Per-cpu work: process time accounting and the profiling hook */
	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}
55 
/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();

	tick_periodic(cpu);

	/*
	 * Devices running in hardware periodic mode fire by themselves;
	 * nothing to reprogram.
	 */
	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return;
	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode:
	 */
	for (;;) {
		ktime_t next = ktime_add(dev->next_event, tick_period);

		/*
		 * Zero return presumably means the event was programmed
		 * successfully; otherwise the target time already lies
		 * in the past, so account the missed tick and try the
		 * following period. (TODO confirm against
		 * clockevents_program_event())
		 */
		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
		tick_periodic(cpu);
	}
}
79 
/*
 * Setup the device for a periodic tick
 *
 * @broadcast: non-zero selects the broadcast variant of the handler
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup ? Non-functional devices are driven by broadcast */
	if (!tick_device_is_functional(dev))
		return;

	if (dev->features & CLOCK_EVT_FEAT_PERIODIC) {
		/* Hardware generates periodic events by itself */
		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
	} else {
		unsigned long seq;
		ktime_t next;

		/*
		 * Snapshot tick_next_period consistently against the
		 * writer side in tick_periodic() (xtime seqlock).
		 */
		do {
			seq = read_seqbegin(&xtime_lock);
			next = tick_next_period;
		} while (read_seqretry(&xtime_lock, seq));

		clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);

		/*
		 * Program the first event; if the target time already
		 * passed (non-zero return), step forward one period at
		 * a time until a future event sticks.
		 */
		for (;;) {
			if (!clockevents_program_event(dev, next, ktime_get()))
				return;
			next = ktime_add(next, tick_period);
		}
	}
}
111 
/*
 * Setup the tick device: install @newdev as the tick device of @cpu,
 * replacing any previously installed device.
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      cpumask_t cpumask)
{
	ktime_t next_event;
	void (*handler)(struct clock_event_device *) = NULL;

	/*
	 * First device setup ?
	 */
	if (!td->evtdev) {
		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */
		if (tick_do_timer_cpu == -1) {
			tick_do_timer_cpu = cpu;
			tick_next_period = ktime_get();
			tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
		}

		/*
		 * Startup in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		/*
		 * NOTE(review): handler and next_event are captured from
		 * the outgoing device but never consumed below in this
		 * revision — presumably groundwork for handing state over
		 * to a oneshot/highres setup path; confirm before removing.
		 */
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
	}

	td->evtdev = newdev;

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
	if (!cpus_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);

	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way. If claimed for broadcast, do not program it locally.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
}
166 
/*
 * Check, if the new registered device should be used.
 *
 * Returns NOTIFY_STOP when the device was consumed (as tick device or
 * broadcast device), NOTIFY_OK otherwise.
 */
static int tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu, ret = NOTIFY_OK;
	unsigned long flags;
	cpumask_t cpumask;

	spin_lock_irqsave(&tick_device_lock, flags);

	/* Device must be able to fire on this cpu at all */
	cpu = smp_processor_id();
	if (!cpu_isset(cpu, newdev->cpumask))
		goto out;

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;
	cpumask = cpumask_of_cpu(cpu);

	/* cpu local device ? */
	if (!cpus_equal(newdev->cpumask, cpumask)) {

		/*
		 * If the cpu affinity of the device interrupt can not
		 * be set, ignore it.
		 */
		if (!irq_can_set_affinity(newdev->irq))
			goto out_bc;

		/*
		 * If we have a cpu local device already, do not replace it
		 * by a non cpu local device
		 */
		if (curdev && cpus_equal(curdev->cpumask, cpumask))
			goto out_bc;
	}

	/*
	 * If we have an active device, then check the rating and the oneshot
	 * feature.
	 */
	if (curdev) {
		/*
		 * Check the rating: keep the current device unless the
		 * new one rates strictly higher.
		 */
		if (curdev->rating >= newdev->rating)
			goto out_bc;
	}

	/*
	 * Replace the eventually existing device by the new
	 * device. If the current device is the broadcast device, do
	 * not give it back to the clockevents layer !
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_set_mode(curdev, CLOCK_EVT_MODE_SHUTDOWN);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask);

	spin_unlock_irqrestore(&tick_device_lock, flags);
	return NOTIFY_STOP;

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
	if (tick_check_broadcast_device(newdev))
		ret = NOTIFY_STOP;
out:
	spin_unlock_irqrestore(&tick_device_lock, flags);

	return ret;
}
244 
/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a life CPU, when a CPU is dead. So we cannot
 * access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
static void tick_shutdown(unsigned int *cpup)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
	struct clock_event_device *dev = td->evtdev;
	unsigned long flags;

	spin_lock_irqsave(&tick_device_lock, flags);
	/* Reset to the default startup mode for a possible later hotplug */
	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent that the clock events layer tries to call
		 * the set mode function!
		 */
		dev->mode = CLOCK_EVT_MODE_UNUSED;
		/* Hand the device back; NULL means nothing replaces it */
		clockevents_exchange_device(dev, NULL);
		td->evtdev = NULL;
	}
	spin_unlock_irqrestore(&tick_device_lock, flags);
}
271 
272 /*
273  * Notification about clock event devices
274  */
275 static int tick_notify(struct notifier_block *nb, unsigned long reason,
276 			       void *dev)
277 {
278 	switch (reason) {
279 
280 	case CLOCK_EVT_NOTIFY_ADD:
281 		return tick_check_new_device(dev);
282 
283 	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
284 	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
285 		tick_broadcast_on_off(reason, dev);
286 		break;
287 
288 	case CLOCK_EVT_NOTIFY_CPU_DEAD:
289 		tick_shutdown_broadcast(dev);
290 		tick_shutdown(dev);
291 		break;
292 
293 	default:
294 		break;
295 	}
296 
297 	return NOTIFY_OK;
298 }
299 
/* Hooked into the clockevents framework by tick_init() below */
static struct notifier_block tick_notifier = {
	.notifier_call = tick_notify,
};
303 
/**
 * tick_init - initialize the tick control
 *
 * Register the notifier with the clockevents framework.
 * Runs once at boot (__init); no return value, no locking needed here.
 */
void __init tick_init(void)
{
	clockevents_register_notifier(&tick_notifier);
}
313