// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_on __cpumask_var_read_mostly;
static cpumask_var_t tmpmask __cpumask_var_read_mostly;
static int tick_broadcast_forced;

static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
static void tick_broadcast_clear_oneshot(int cpu);
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
#else
static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
static inline void tick_broadcast_clear_oneshot(int cpu) { }
static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check whether the device can be utilized as a broadcast device:
 */
static bool tick_check_broadcast_device(struct clock_event_device *curdev,
					struct clock_event_device *newdev)
{
	if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
	    (newdev->features & CLOCK_EVT_FEAT_C3STOP))
		return false;

	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
	    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
		return false;

	return !curdev || newdev->rating > curdev->rating;
}
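
/*
 * Illustrative sketch only: a device that passes the checks above is
 * a global, always-on timer without the DUMMY/PERCPU/C3STOP feature
 * flags. A hypothetical driver-side registration could look roughly
 * like this (all names and values are made up for illustration):
 *
 *	static struct clock_event_device global_timer_evt = {
 *		.name		= "global-timer",
 *		.features	= CLOCK_EVT_FEAT_PERIODIC |
 *				  CLOCK_EVT_FEAT_ONESHOT,
 *		.rating		= 300,
 *		.set_next_event	= global_timer_set_next_event,
 *	};
 *
 * A rating higher than the current broadcast device's is what lets
 * tick_install_broadcast_device() below replace it.
 */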

/*
 * Conditionally install/replace broadcast device
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if (!tick_check_broadcast_device(cur, dev))
		return;

	if (!try_module_get(dev->owner))
		return;

	clockevents_exchange_device(cur, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	/*
	 * Inform all cpus about this. We might be in a situation
	 * where we did not switch to oneshot mode because the per cpu
	 * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
	 * of a oneshot capable broadcast device. Without that
	 * notification the system stays stuck in periodic mode
	 * forever.
	 */
	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_clock_notify();
}

/*
 * Check whether the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
{
	int ret = -ENODEV;

	if (tick_is_broadcast_device(dev)) {
		raw_spin_lock(&tick_broadcast_lock);
		ret = __clockevents_update_freq(dev, freq);
		raw_spin_unlock(&tick_broadcast_lock);
	}
	return ret;
}
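
/*
 * Usage note (hedged, the caller lives outside this file): drivers
 * normally go through clockevents_update_freq() in clockevents.c,
 * which tries this function first and falls back to updating the
 * per-CPU device when the -ENODEV return signals that @dev is not
 * the broadcast device.
 */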

static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

/*
 * Check whether the device is dysfunctional and a placeholder which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
		ret = 1;
	} else {
		/*
		 * Clear the broadcast bit for this cpu if the
		 * device is not power state affected.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
		else
			tick_device_setup_broadcast_func(dev);

		/*
		 * Clear the broadcast bit if the CPU is not in
		 * periodic broadcast on state.
		 */
		if (!cpumask_test_cpu(cpu, tick_broadcast_on))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_ONESHOT:
			/*
			 * If the system is in oneshot mode we can
			 * unconditionally clear the oneshot mask bit,
			 * because the CPU is running and therefore
			 * not in an idle state which causes the power
			 * state affected device to stop. Let the
			 * caller initialize the device.
			 */
			tick_broadcast_clear_oneshot(cpu);
			ret = 0;
			break;

		case TICKDEV_MODE_PERIODIC:
			/*
			 * If the system is in periodic mode, check
			 * whether the broadcast device can be
			 * switched off now.
			 */
			if (cpumask_empty(tick_broadcast_mask) && bc)
				clockevents_shutdown(bc);
			/*
			 * If we kept the cpu in the broadcast mask,
			 * tell the caller to leave the per cpu device
			 * in shutdown state. The periodic interrupt
			 * is delivered by the broadcast device, if
			 * the broadcast device exists and is not
			 * hrtimer based.
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
				ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
			break;
		default:
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}
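
/*
 * Usage sketch (illustrative, not a verbatim quote of the caller):
 * the tick core calls this when it installs a per-cpu clockevent
 * device; a nonzero return value tells it to leave the new device in
 * shutdown state because the broadcast device delivers the tick:
 *
 *	if (tick_device_uses_broadcast(newdev, cpu))
 *		return;
 */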

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif
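
/*
 * Illustrative sketch only: architectures typically deliver the
 * broadcast via an IPI and call tick_receive_broadcast() from that
 * IPI's handler, along the lines of (hypothetical arch code):
 *
 *	case IPI_TIMER:
 *		tick_receive_broadcast();
 *		break;
 */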

/*
 * Broadcast the event to the cpus which are set in the mask (the
 * mask is mangled in the process).
 */
static bool tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;
	bool local = false;

	/*
	 * Check whether the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		struct clock_event_device *bc = tick_broadcast_device.evtdev;

		cpumask_clear_cpu(cpu, mask);
		/*
		 * We only run the local handler if the broadcast
		 * device is not hrtimer based. Otherwise we run into
		 * a hrtimer recursion.
		 *
		 * local timer_interrupt()
		 *   local_handler()
		 *     expire_hrtimers()
		 *       bc_handler()
		 *         local_handler()
		 *           expire_hrtimers()
		 */
		local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic).
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
	return local;
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static bool tick_do_periodic_broadcast(void)
{
	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	return tick_do_broadcast(tmpmask);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);

	/* Handle spurious interrupts gracefully */
	if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {
		raw_spin_unlock(&tick_broadcast_lock);
		return;
	}

	bc_local = tick_do_periodic_broadcast();

	if (clockevent_state_oneshot(dev)) {
		ktime_t next = ktime_add(dev->next_event, tick_period);

		clockevents_program_event(dev, next, true);
	}
	raw_spin_unlock(&tick_broadcast_lock);

	/*
	 * We run the handler of the local cpu after dropping
	 * tick_broadcast_lock because the handler might deadlock when
	 * trying to switch to oneshot mode.
	 */
	if (bc_local)
		td->evtdev->event_handler(td->evtdev);
}

/**
 * tick_broadcast_control - Enable/disable or force broadcast mode
 * @mode:	The selected broadcast mode
 *
 * Called when the system enters a state where affected tick devices
 * might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
 */
void tick_broadcast_control(enum tick_broadcast_mode mode)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	int cpu, bc_stopped;
	unsigned long flags;

	/* Also protects the local clockevent device. */
	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	td = this_cpu_ptr(&tick_cpu_device);
	dev = td->evtdev;

	/*
	 * Is the device not affected by the powerstate?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	cpu = smp_processor_id();
	bc = tick_broadcast_device.evtdev;
	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (mode) {
	case TICK_BROADCAST_FORCE:
		tick_broadcast_forced = 1;
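		/* fall through */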
	case TICK_BROADCAST_ON:
		cpumask_set_cpu(cpu, tick_broadcast_on);
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			/*
			 * Only shut down the cpu local device if:
			 *
			 * - the broadcast device exists
			 * - the broadcast device is not a hrtimer based one
			 * - the broadcast device is in periodic mode to
			 *   avoid a hiccup during the switch to oneshot mode
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
			    tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		break;

	case TICK_BROADCAST_OFF:
		if (tick_broadcast_forced)
			break;
		cpumask_clear_cpu(cpu, tick_broadcast_on);
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (bc) {
		if (cpumask_empty(tick_broadcast_mask)) {
			if (!bc_stopped)
				clockevents_shutdown(bc);
		} else if (bc_stopped) {
			if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				tick_broadcast_start_periodic(bc);
			else
				tick_broadcast_setup_oneshot(bc);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
EXPORT_SYMBOL_GPL(tick_broadcast_control);
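
/*
 * Usage sketch: callers use the wrappers from <linux/tick.h> rather
 * than calling this directly. A CPU whose local timer stops in deep
 * C-states enables broadcast handling for itself with:
 *
 *	tick_broadcast_enable();	// TICK_BROADCAST_ON
 *
 * and, unless TICK_BROADCAST_FORCE was used, undoes it with:
 *
 *	tick_broadcast_disable();	// TICK_BROADCAST_OFF
 */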

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int cpu)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_on);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#endif

void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * This is called from tick_resume_local() on a resuming CPU. That's
 * called from the core resume function, tick_unfreeze() and the magic XEN
 * resume hackery.
 *
 * In none of these cases the broadcast device mode can change and the
 * bit of the resuming CPU in the broadcast mask is safe as well.
 */
bool tick_resume_check_broadcast(void)
{
	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT)
		return false;
	else
		return cpumask_test_cpu(smp_processor_id(), tick_broadcast_mask);
}

void tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_tick_resume(bc);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_pending_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_force_mask __cpumask_var_read_mostly;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from another core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The call site can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
	return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}

/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}

static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				     ktime_t expires)
{
	if (!clockevent_state_oneshot(bc))
		clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);

	clockevents_program_event(bc, expires, 1);
	tick_broadcast_set_affinity(bc, cpumask_of(cpu));
}

static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast_this_cpu(void)
{
	if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
		struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

		/*
		 * We might be in the middle of switching over from
		 * periodic to oneshot. If the CPU has not yet
		 * switched over, leave the device alone.
		 */
		if (td->mode == TICKDEV_MODE_ONESHOT) {
			clockevents_switch_state(td->evtdev,
					      CLOCK_EVT_STATE_ONESHOT);
		}
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);
	dev->next_event = KTIME_MAX;
	next_event = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		/*
		 * Required for !SMP because for_each_cpu() unconditionally
		 * reports CPU0 as set on UP kernels.
		 */
		if (!IS_ENABLED(CONFIG_SMP) &&
		    cpumask_empty(tick_broadcast_oneshot_mask))
			break;

		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event <= now) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event < next_event) {
			next_event = td->evtdev->next_event;
			next_cpu = cpu;
		}
	}

	/*
	 * Remove the current cpu from the pending mask. The event is
	 * delivered immediately in tick_do_broadcast()!
	 */
	cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Sanity check. Catch the case where we try to broadcast to
	 * offline cpus.
	 */
	if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
		cpumask_and(tmpmask, tmpmask, cpu_online_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	bc_local = tick_do_broadcast(tmpmask);

	/*
	 * Two reasons to reprogram:
	 *
	 * - The global event did not expire any CPU local
	 *   events. This happens in dyntick mode, as the maximum PIT
	 *   delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 *   in the event mask.
	 */
	if (next_event != KTIME_MAX)
		tick_broadcast_set_event(dev, next_cpu, next_event);

	raw_spin_unlock(&tick_broadcast_lock);

	if (bc_local) {
		td = this_cpu_ptr(&tick_cpu_device);
		td->evtdev->event_handler(td->evtdev);
	}
}

static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
{
	if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return 0;
	if (bc->next_event == KTIME_MAX)
		return 0;
	return bc->bound_on == cpu ? -EBUSY : 0;
}
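
/*
 * Background note (refers to code outside this file): the usual
 * broadcast device with CLOCK_EVT_FEAT_HRTIMER is the pseudo device
 * in tick-broadcast-hrtimer.c. It piggybacks on one CPU's local
 * timer; bound_on records that CPU, which therefore must not enter a
 * deep idle state while it carries the broadcast hrtimer.
 */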

static void broadcast_shutdown_local(struct clock_event_device *bc,
				     struct clock_event_device *dev)
{
	/*
	 * For hrtimer based broadcasting we cannot shut down the cpu
	 * local device if our own event is the first one to expire or
	 * if we own the broadcast timer.
	 */
	if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
		if (broadcast_needs_cpu(bc, smp_processor_id()))
			return;
		if (dev->next_event < bc->next_event)
			return;
	}
	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
}

int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct clock_event_device *bc, *dev;
	int cpu, ret = 0;
	ktime_t now;

	/*
	 * If there is no broadcast device, tell the caller not to go
	 * into deep idle.
	 */
	if (!tick_broadcast_device.evtdev)
		return -EBUSY;

	dev = this_cpu_ptr(&tick_cpu_device)->evtdev;

	raw_spin_lock(&tick_broadcast_lock);
	bc = tick_broadcast_device.evtdev;
	cpu = smp_processor_id();

	if (state == TICK_BROADCAST_ENTER) {
		/*
		 * If the current CPU owns the hrtimer broadcast
		 * mechanism, it cannot go deep idle and we do not add
		 * the CPU to the broadcast mask. We don't have to go
		 * through the EXIT path as the local timer is not
		 * shut down.
		 */
		ret = broadcast_needs_cpu(bc, cpu);
		if (ret)
			goto out;

		/*
		 * If the broadcast device is in periodic mode, we
		 * return.
		 */
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
			/* If it is a hrtimer based broadcast, return busy */
			if (bc->features & CLOCK_EVT_FEAT_HRTIMER)
				ret = -EBUSY;
			goto out;
		}

		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));

			/* Conditionally shut down the local timer. */
			broadcast_shutdown_local(bc, dev);

			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourselves in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away; we return
			 * busy, so the CPU does not try to go deep
			 * idle.
			 */
			if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
				ret = -EBUSY;
			} else if (dev->next_event < bc->next_event) {
				tick_broadcast_set_event(bc, cpu, dev->next_event);
				/*
				 * In case of hrtimer broadcasts the
				 * programming might have moved the
				 * timer to this cpu. If so, remove
				 * us from the broadcast mask and
				 * return busy.
				 */
				ret = broadcast_needs_cpu(bc, cpu);
				if (ret) {
					cpumask_clear_cpu(cpu,
						tick_broadcast_oneshot_mask);
				}
			}
		}
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
				       tick_broadcast_pending_mask))
				goto out;

			/*
			 * Bail out if there is no next event.
			 */
			if (dev->next_event == KTIME_MAX)
				goto out;
			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefore rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
			 *
			 * We cannot call the handler directly from
			 * here, because we might be in a NOHZ phase
			 * and we did not go through the irq_enter()
			 * nohz fixups.
			 */
			now = ktime_get();
			if (dev->next_event <= now) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock(&tick_broadcast_lock);
	return ret;
}
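
/*
 * Illustrative sketch only: idle code reaches this function through
 * the tick_broadcast_enter()/tick_broadcast_exit() wrappers in
 * <linux/tick.h>, roughly:
 *
 *	if (!tick_broadcast_enter()) {
 *		enter_deep_idle_state();	// hypothetical hook
 *		tick_broadcast_exit();
 *	}
 *	// on -EBUSY the caller picks a shallower idle state instead
 */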

/*
 * Reset the oneshot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
}

static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 * @bc:		the broadcast device to set up
 */
static void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	if (!bc)
		return;

	/* Set it up only once! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = clockevent_state_periodic(bc);

		bc->event_handler = tick_handle_oneshot_broadcast;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period);
		} else {
			bc->next_event = KTIME_MAX;
		}
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * from programming the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_HOTPLUG_CPU
void hotplug_cpu__broadcast_tick_pull(int deadcpu)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	bc = tick_broadcast_device.evtdev;

	if (bc && broadcast_needs_cpu(bc, deadcpu)) {
		/* This moves the broadcast assignment to this CPU: */
		clockevents_program_event(bc, bc->next_event, 1);
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int cpu)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast masks for the dead cpu, but do not stop
	 * the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#endif

/*
 * Check whether the broadcast device is in oneshot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#else
int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return -EBUSY;

	return 0;
}
#endif

void __init tick_broadcast_init(void)
{
	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}
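
/*
 * Note: this runs in early boot, called from tick_init() before the
 * scheduler is up, which is why the cpumask allocations above use
 * GFP_NOWAIT: sleeping is not an option at that point.
 */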
1013