xref: /openbmc/linux/kernel/time/tick-broadcast.c (revision c21b37f6)
/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

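/*
 * tick_broadcast_mask keeps track of the CPUs whose per-cpu tick
 * device is dysfunctional or stops in deep power states, so that
 * their ticks are generated by the broadcast device instead. The
 * mask is protected by tick_broadcast_lock.
 */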
struct tick_device tick_broadcast_device;
static cpumask_t tick_broadcast_mask;
static DEFINE_SPINLOCK(tick_broadcast_lock);

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

cpumask_t *tick_get_broadcast_mask(void)
{
	return &tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check whether the device can be utilized as the broadcast device:
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
	if (tick_broadcast_device.evtdev ||
	    (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	clockevents_exchange_device(NULL, dev);
	tick_broadcast_device.evtdev = dev;
	if (!cpus_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	return 1;
}

/*
 * Check whether the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

/*
 * Check whether the device is dysfunctional and a placeholder which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		cpu_set(cpu, tick_broadcast_mask);
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * When the new device is not affected by the stop
		 * feature and the cpu is marked in the broadcast mask,
		 * clear the broadcast bit.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			int cpu = smp_processor_id();

			cpu_clear(cpu, tick_broadcast_mask);
			tick_broadcast_clear_oneshot(cpu);
		}
	}
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

/*
 * Broadcast the event to the cpus which are set in the mask
 */
int tick_do_broadcast(cpumask_t mask)
{
	int ret = 0, cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check whether the current cpu is in the mask
	 */
	if (cpu_isset(cpu, mask)) {
		cpu_clear(cpu, mask);
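		/*
		 * Invoke the cpu local handler directly instead of
		 * sending the broadcast to ourself.
		 */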
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
		ret = 1;
	}

	if (!cpus_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use that
		 * of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic).
		 */
		cpu = first_cpu(mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->broadcast(mask);
		ret = 1;
	}
	return ret;
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	cpumask_t mask;

	spin_lock(&tick_broadcast_lock);

	cpus_and(mask, cpu_online_map, tick_broadcast_mask);
	tick_do_broadcast(mask);

	spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	dev->next_event.tv64 = KTIME_MAX;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Setup the next period for devices which do not have periodic
	 * mode. If programming the event fails because it is already
	 * in the past, run the broadcast for the missed period and
	 * try the next one.
	 */
	for (;;) {
		ktime_t next = ktime_add(dev->next_event, tick_period);

		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
		tick_do_periodic_broadcast();
	}
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
static void tick_do_broadcast_on_off(void *why)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags, *reason = why;
	int cpu;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device in broadcast mode forever, or is it not
	 * affected by the powerstate?
	 */
	if (!dev || !tick_device_is_functional(dev) ||
	    !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_ON) {
		if (!cpu_isset(cpu, tick_broadcast_mask)) {
			cpu_set(cpu, tick_broadcast_mask);
			if (td->mode == TICKDEV_MODE_PERIODIC)
				clockevents_set_mode(dev,
						     CLOCK_EVT_MODE_SHUTDOWN);
		}
	} else {
		if (cpu_isset(cpu, tick_broadcast_mask)) {
			cpu_clear(cpu, tick_broadcast_mask);
			if (td->mode == TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
	}

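	/*
	 * Shut the broadcast device down when no CPU depends on it
	 * any more; otherwise (re)start it in the current mode.
	 */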
	if (cpus_empty(tick_broadcast_mask))
		clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
	else {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	int cpu = get_cpu();

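	/*
	 * The mode switch must run on the target CPU: invoke the
	 * handler directly when we are already there, otherwise via
	 * smp_call_function_single().
	 */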
	if (!cpu_isset(*oncpu, cpu_online_map)) {
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	} else {
		if (cpu == *oncpu)
			tick_do_broadcast_on_off(&reason);
		else
			smp_call_function_single(*oncpu,
						 tick_do_broadcast_on_off,
						 &reason, 1, 1);
	}
	put_cpu();
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpu_clear(cpu, tick_broadcast_mask);

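	/*
	 * In periodic mode, shut the broadcast device down when the
	 * last CPU using it has gone.
	 */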
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpus_empty(tick_broadcast_mask))
			clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
	}

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

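/*
 * Powerstate information: Shut the broadcast device down on suspend.
 */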
void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

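/*
 * Powerstate information: Reactivate the broadcast device on resume.
 * Returns whether the current CPU expects its events from the
 * broadcast device.
 */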
int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpus_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			broadcast = cpu_isset(smp_processor_id(),
					      tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}


#ifdef CONFIG_TICK_ONESHOT

static cpumask_t tick_broadcast_oneshot_mask;

/*
 * Debugging: see timer_list.c
 */
cpumask_t *tick_get_broadcast_oneshot_mask(void)
{
	return &tick_broadcast_oneshot_mask;
}

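/*
 * Program the broadcast device for the given expiry time. When
 * @force is set, retry with the minimum delta of the device added
 * to the current time until programming succeeds.
 */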
static int tick_broadcast_set_event(ktime_t expires, int force)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	ktime_t now = ktime_get();
	int res;

	for (;;) {
		res = clockevents_program_event(bc, expires, now);
		if (!res || !force)
			return res;
		now = ktime_get();
		expires = ktime_add(now, ktime_set(0, bc->min_delta_ns));
	}
}

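/*
 * Called from tick_resume_broadcast(): Put the broadcast device back
 * into oneshot mode and, when CPUs are waiting in the oneshot mask,
 * fire an immediate event. Returns whether the current CPU is one of
 * the waiters.
 */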
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

	if (!cpus_empty(tick_broadcast_oneshot_mask))
		tick_broadcast_set_event(ktime_get(), 1);

	return cpu_isset(smp_processor_id(), tick_broadcast_oneshot_mask);
}

/*
 * Reprogram the broadcast device:
 *
 * Called with tick_broadcast_lock held and interrupts disabled.
 */
static int tick_broadcast_reprogram(void)
{
	ktime_t expires = { .tv64 = KTIME_MAX };
	struct tick_device *td;
	int cpu;

	/*
	 * Find the event which expires next:
	 */
	for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
	     cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 < expires.tv64)
			expires = td->evtdev->next_event;
	}

	if (expires.tv64 == KTIME_MAX)
		return 0;

	return tick_broadcast_set_event(expires, 0);
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	cpumask_t mask;
	ktime_t now;
	int cpu;

	spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	mask = CPU_MASK_NONE;
	now = ktime_get();
	/* Find all expired events */
	for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
	     cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64)
			cpu_set(cpu, mask);
	}

	/*
	 * Wake up the cpus which have an expired event. The broadcast
	 * device is reprogrammed in the return from idle code.
	 */
	if (!tick_do_broadcast(mask)) {
		/*
		 * The global event did not expire any CPU local
		 * events. This happens in dyntick mode, as the
		 * maximum PIT delta is quite small.
		 */
		if (tick_broadcast_reprogram())
			goto again;
	}
	spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		goto out;

	bc = tick_broadcast_device.evtdev;
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		if (!cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
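			/*
			 * The local device stops in this power state:
			 * shut it down and let the broadcast device
			 * take over. Advance the broadcast expiry time
			 * when our event is due earlier.
			 */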
			cpu_set(cpu, tick_broadcast_oneshot_mask);
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			if (dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(dev->next_event, 1);
		}
	} else {
		if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
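			/*
			 * The CPU left the power state: switch the
			 * local device back to oneshot mode and
			 * reprogram its pending event, if any.
			 */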
			cpu_clear(cpu, tick_broadcast_oneshot_mask);
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			if (dev->next_event.tv64 != KTIME_MAX)
				tick_program_event(dev->next_event, 1);
		}
	}

out:
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Reset the oneshot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpu_clear(cpu, tick_broadcast_oneshot_mask);
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device for oneshot
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	if (bc->mode != CLOCK_EVT_MODE_ONESHOT) {
		bc->event_handler = tick_handle_oneshot_broadcast;
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
		bc->next_event.tv64 = KTIME_MAX;
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}


/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpu_clear(cpu, tick_broadcast_oneshot_mask);

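	/*
	 * In oneshot mode, shut the broadcast device down when the
	 * last CPU waiting for it has gone.
	 */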
	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT) {
		if (bc && cpus_empty(tick_broadcast_oneshot_mask))
			clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
	}

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#endif