// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  No idle tick implementation for low and high resolution timers
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/nmi.h>
#include <linux/profile.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
#include <linux/sched/loadavg.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/context_tracking.h>
#include <linux/mm.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

#include <trace/events/timer.h>

/*
 * Per-CPU nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

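/* Return the per-CPU nohz control structure for @cpu. */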
struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}

#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
/*
 * The time when the last jiffy update happened. Write access must hold
 * jiffies_lock and jiffies_seq. tick_nohz_next_event() needs to get a
 * consistent view of jiffies and last_jiffies_update.
 */
static ktime_t last_jiffies_update;

/*
 * Must be called with interrupts disabled!
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 1;
	ktime_t delta, nextp;

	/*
	 * 64-bit can do a quick check without holding the jiffies lock and
	 * without looking at the sequence count. The smp_load_acquire()
	 * pairs with the update done later in this function.
	 *
	 * 32-bit cannot do that because the store of tick_next_period
	 * consists of two 32-bit stores, and the first store could move it
	 * to a random point in the future.
	 */
	if (IS_ENABLED(CONFIG_64BIT)) {
		if (ktime_before(now, smp_load_acquire(&tick_next_period)))
			return;
	} else {
		unsigned int seq;

		/*
		 * Avoid contention on jiffies_lock and protect the quick
		 * check with the sequence count.
		 */
		do {
			seq = read_seqcount_begin(&jiffies_seq);
			nextp = tick_next_period;
		} while (read_seqcount_retry(&jiffies_seq, seq));

		if (ktime_before(now, nextp))
			return;
	}

	/* Quick check failed, i.e. update is required. */
	raw_spin_lock(&jiffies_lock);
	/*
	 * Reevaluate with the lock held. Another CPU might have done the
	 * update already.
	 */
	if (ktime_before(now, tick_next_period)) {
		raw_spin_unlock(&jiffies_lock);
		return;
	}

	write_seqcount_begin(&jiffies_seq);

	delta = ktime_sub(now, tick_next_period);
	if (unlikely(delta >= TICK_NSEC)) {
		/* Slow path for long idle sleep times */
		s64 incr = TICK_NSEC;

		ticks += ktime_divns(delta, incr);

		last_jiffies_update = ktime_add_ns(last_jiffies_update,
						   incr * ticks);
	} else {
		last_jiffies_update = ktime_add_ns(last_jiffies_update,
						   TICK_NSEC);
	}

	/* Advance jiffies to complete the jiffies_seq protected job */
	jiffies_64 += ticks;

	/*
	 * Keep the tick_next_period variable up to date.
	 */
	nextp = ktime_add_ns(last_jiffies_update, TICK_NSEC);

	if (IS_ENABLED(CONFIG_64BIT)) {
		/*
		 * Pairs with smp_load_acquire() in the lockless quick
		 * check above and ensures that the update to jiffies_64 is
		 * not reordered vs. the store to tick_next_period, neither
		 * by the compiler nor by the CPU.
		 */
		smp_store_release(&tick_next_period, nextp);
	} else {
		/*
		 * A plain store is good enough on 32-bit, as the quick check
		 * above is protected by the sequence count.
		 */
		tick_next_period = nextp;
	}

	/*
	 * Release the sequence count. calc_global_load() below is not
	 * protected by it, but jiffies_lock needs to be held to prevent
	 * concurrent invocations.
	 */
	write_seqcount_end(&jiffies_seq);

	calc_global_load();

	raw_spin_unlock(&jiffies_lock);
	update_wall_time();
}

/*
 * Initialize last_jiffies_update if necessary and return it.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	raw_spin_lock(&jiffies_lock);
	write_seqcount_begin(&jiffies_seq);
	/* Did we start the jiffies update yet? */
	if (last_jiffies_update == 0) {
		u32 rem;

		/*
		 * Ensure that the tick is aligned to a multiple of
		 * TICK_NSEC.
		 */
		div_u64_rem(tick_next_period, TICK_NSEC, &rem);
		if (rem)
			tick_next_period += TICK_NSEC - rem;

		last_jiffies_update = tick_next_period;
	}
	period = last_jiffies_update;
	write_seqcount_end(&jiffies_seq);
	raw_spin_unlock(&jiffies_lock);
	return period;
}

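/*
 * Number of consecutive ticks tolerated with unchanged jiffies before
 * tick_sched_do_timer() forces a jiffies update.
 */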
#define MAX_STALLED_JIFFIES 5

static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
{
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the CPU in charge went
	 * into a long sleep. If two CPUs happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * jiffies_lock.
	 *
	 * If nohz_full is enabled, this should not happen because the
	 * tick_do_timer_cpu never relinquishes the duty.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
#ifdef CONFIG_NO_HZ_FULL
		WARN_ON_ONCE(tick_nohz_full_running);
#endif
		tick_do_timer_cpu = cpu;
	}
#endif

	/* Check if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * If the jiffies update stalled for too long (timekeeper in
	 * stop_machine() or VMEXIT'ed for several msecs), force an update.
	 */
	if (ts->last_tick_jiffies != jiffies) {
		ts->stalled_jiffies = 0;
		ts->last_tick_jiffies = READ_ONCE(jiffies);
	} else {
		if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) {
			tick_do_update_jiffies64(now);
			ts->stalled_jiffies = 0;
			ts->last_tick_jiffies = READ_ONCE(jiffies);
		}
	}

	if (ts->inidle)
		ts->got_idle_tick = 1;
}

static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on complete idle SMP systems while
	 * waiting on the login prompt. We also increment the "start of
	 * idle" jiffy stamp so the idle accounting adjustment we do
	 * when we go busy again does not account for too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog_sched();
		if (is_idle_task(current))
			ts->idle_jiffies++;
		/*
		 * In case the current tick fired too early past its expected
		 * expiration, make sure we don't bypass the next clock reprogramming
		 * to the same deadline.
		 */
		ts->next_tick = 0;
	}
#endif
	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);
}
#endif

#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
bool tick_nohz_full_running;
EXPORT_SYMBOL_GPL(tick_nohz_full_running);
static atomic_t tick_dep_mask;

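/*
 * Check whether any tick dependency bit is set in @dep. If so, trace the
 * blocking dependency and report that the tick must be kept running.
 */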
static bool check_tick_dependency(atomic_t *dep)
{
	int val = atomic_read(dep);

	if (val & TICK_DEP_MASK_POSIX_TIMER) {
		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
		return true;
	}

	if (val & TICK_DEP_MASK_PERF_EVENTS) {
		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
		return true;
	}

	if (val & TICK_DEP_MASK_SCHED) {
		trace_tick_stop(0, TICK_DEP_MASK_SCHED);
		return true;
	}

	if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
		trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
		return true;
	}

	if (val & TICK_DEP_MASK_RCU) {
		trace_tick_stop(0, TICK_DEP_MASK_RCU);
		return true;
	}

	if (val & TICK_DEP_MASK_RCU_EXP) {
		trace_tick_stop(0, TICK_DEP_MASK_RCU_EXP);
		return true;
	}

	return false;
}

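/*
 * Decide whether the tick can be stopped on a full dynticks CPU by
 * checking the global, per-CPU, per-task and per-process dependency
 * masks. Must run with interrupts disabled.
 */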
static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
	lockdep_assert_irqs_disabled();

	if (unlikely(!cpu_online(cpu)))
		return false;

	if (check_tick_dependency(&tick_dep_mask))
		return false;

	if (check_tick_dependency(&ts->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->signal->tick_dep_mask))
		return false;

	return true;
}

static void nohz_full_kick_func(struct irq_work *work)
{
	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
}

static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) =
	IRQ_WORK_INIT_HARD(nohz_full_kick_func);

/*
 * Kick this CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 * is NMI safe.
 */
static void tick_nohz_full_kick(void)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
}

/*
 * Kick the CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick_cpu(int cpu)
{
	if (!tick_nohz_full_cpu(cpu))
		return;

	irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}

static void tick_nohz_kick_task(struct task_struct *tsk)
{
	int cpu;

	/*
	 * If the task is not running, run_posix_cpu_timers()
	 * has nothing to elapse, so the IPI can be spared.
	 *
	 * activate_task()                      STORE p->tick_dep_mask
	 *   STORE p->on_rq
	 * __schedule() (switch to task 'p')    smp_mb() (atomic_fetch_or())
	 *   LOCK rq->lock                      LOAD p->on_rq
	 *   smp_mb__after_spin_lock()
	 *   tick_nohz_task_switch()
	 *     LOAD p->tick_dep_mask
	 */
	if (!sched_task_on_rq(tsk))
		return;

	/*
	 * If the task concurrently migrates to another CPU,
	 * we guarantee it sees the new tick dependency upon
	 * schedule.
	 *
	 * set_task_cpu(p, cpu);
	 *   STORE p->cpu = @cpu
	 * __schedule() (switch to task 'p')
	 *   LOCK rq->lock
	 *   smp_mb__after_spin_lock()          STORE p->tick_dep_mask
	 *   tick_nohz_task_switch()            smp_mb() (atomic_fetch_or())
	 *      LOAD p->tick_dep_mask           LOAD p->cpu
	 */
	cpu = task_cpu(tsk);

	preempt_disable();
	if (cpu_online(cpu))
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}

/*
 * Kick all full dynticks CPUs in order to force these to re-evaluate
 * their dependency on the tick and restart it if necessary.
 */
static void tick_nohz_full_kick_all(void)
{
	int cpu;

	if (!tick_nohz_full_running)
		return;

	preempt_disable();
	for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}

static void tick_nohz_dep_set_all(atomic_t *dep,
				  enum tick_dep_bits bit)
{
	int prev;

	prev = atomic_fetch_or(BIT(bit), dep);
	if (!prev)
		tick_nohz_full_kick_all();
}

/*
 * Set a global tick dependency. Used by perf events that rely on freq and
 * by unstable clock.
 */
void tick_nohz_dep_set(enum tick_dep_bits bit)
{
	tick_nohz_dep_set_all(&tick_dep_mask, bit);
}

void tick_nohz_dep_clear(enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tick_dep_mask);
}

/*
 * Set per-CPU tick dependency. Used by scheduler and perf events in order to
 * manage events throttling.
 */
void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	int prev;
	struct tick_sched *ts;

	ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
	if (!prev) {
		preempt_disable();
		/* Perf needs a local kick that is NMI safe */
		if (cpu == smp_processor_id()) {
			tick_nohz_full_kick();
		} else {
			/* Remote irq work is not NMI-safe */
			if (!WARN_ON_ONCE(in_nmi()))
				tick_nohz_full_kick_cpu(cpu);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_set_cpu);

void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu);

/*
 * Set a per-task tick dependency. RCU needs this. Posix CPU timers also
 * need it in order to elapse per-task timers.
 */
void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask))
		tick_nohz_kick_task(tsk);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_set_task);

void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_task);
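
/*
 * Illustrative (hypothetical) use of the dependency API: a subsystem that
 * must keep the tick alive while it has per-task work pending would pair
 * the calls like this:
 *
 *	tick_nohz_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
 *	...
 *	tick_nohz_dep_clear_task(p, TICK_DEP_BIT_POSIX_TIMER);
 */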

/*
 * Set a per-taskgroup tick dependency. Posix CPU timers need this in order
 * to elapse per-process timers.
 */
void tick_nohz_dep_set_signal(struct task_struct *tsk,
			      enum tick_dep_bits bit)
{
	int prev;
	struct signal_struct *sig = tsk->signal;

	prev = atomic_fetch_or(BIT(bit), &sig->tick_dep_mask);
	if (!prev) {
		struct task_struct *t;

		lockdep_assert_held(&tsk->sighand->siglock);
		__for_each_thread(sig, t)
			tick_nohz_kick_task(t);
	}
}

void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &sig->tick_dep_mask);
}

/*
 * Re-evaluate the need for the tick as we switch the current task.
 * It might need the tick due to per task/process properties:
 * perf events, posix CPU timers, ...
 */
void __tick_nohz_task_switch(void)
{
	struct tick_sched *ts;

	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped) {
		if (atomic_read(&current->tick_dep_mask) ||
		    atomic_read(&current->signal->tick_dep_mask))
			tick_nohz_full_kick();
	}
}

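/*
 * Example (assuming an 8-CPU machine): booting with "nohz_full=1-7" ends
 * up passing a cpumask of CPUs 1-7 to tick_nohz_full_setup() below,
 * leaving CPU 0 to handle timekeeping and other housekeeping duties.
 */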
/* Get the boot-time nohz CPU list from the kernel parameters. */
void __init tick_nohz_full_setup(cpumask_var_t cpumask)
{
	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
	cpumask_copy(tick_nohz_full_mask, cpumask);
	tick_nohz_full_running = true;
}

bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
{
	/*
	 * The tick_do_timer_cpu CPU handles housekeeping duty (unbound
	 * timers, workqueues, timekeeping, ...) on behalf of full dynticks
	 * CPUs. It must remain online when nohz full is enabled.
	 */
	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
		return false;
	return true;
}

static int tick_nohz_cpu_down(unsigned int cpu)
{
	return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY;
}

void __init tick_nohz_init(void)
{
	int cpu, ret;

	if (!tick_nohz_full_running)
		return;

	/*
	 * Full dynticks uses irq work to drive the tick rescheduling on safe
	 * locking contexts. But then we need irq work to raise its own
	 * interrupts to avoid a circular dependency on the tick.
	 */
	if (!arch_irq_work_has_interrupt()) {
		pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
		cpumask_clear(tick_nohz_full_mask);
		tick_nohz_full_running = false;
		return;
	}

	if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
			!IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
		cpu = smp_processor_id();

		if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
			pr_warn("NO_HZ: Clearing %d from nohz_full range "
				"for timekeeping\n", cpu);
			cpumask_clear_cpu(cpu, tick_nohz_full_mask);
		}
	}

	for_each_cpu(cpu, tick_nohz_full_mask)
		ct_cpu_track_user(cpu);

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"kernel/nohz:predown", NULL,
					tick_nohz_cpu_down);
	WARN_ON(ret < 0);
	pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
		cpumask_pr_args(tick_nohz_full_mask));
}
#endif

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ_COMMON
/*
 * NO HZ enabled?
 */
bool tick_nohz_enabled __read_mostly = true;
unsigned long tick_nohz_active __read_mostly;
/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	return (kstrtobool(str, &tick_nohz_enabled) == 0);
}

__setup("nohz=", setup_tick_nohz);
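
/*
 * Example: booting with "nohz=off" keeps the periodic tick even when
 * CONFIG_NO_HZ_COMMON is enabled; "nohz=on" is the default.
 */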

bool tick_nohz_tick_stopped(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->tick_stopped;
}

bool tick_nohz_tick_stopped_cpu(int cpu)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	return ts->tick_stopped;
}

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any CPU, as we don't know whether the
 * CPU which has the update task assigned is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	unsigned long flags;

	__this_cpu_write(tick_cpu_sched.idle_waketime, now);

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog_sched();
}

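/* Account the idle (or iowait) time elapsed since tick_nohz_start_idle(). */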
static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
	ktime_t delta;

	if (WARN_ON_ONCE(!ts->idle_active))
		return;

	delta = ktime_sub(now, ts->idle_entrytime);

	write_seqcount_begin(&ts->idle_sleeptime_seq);
	if (nr_iowait_cpu(smp_processor_id()) > 0)
		ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
	else
		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);

	ts->idle_entrytime = now;
	ts->idle_active = 0;
	write_seqcount_end(&ts->idle_sleeptime_seq);

	sched_clock_idle_wakeup_event();
}

static void tick_nohz_start_idle(struct tick_sched *ts)
{
	write_seqcount_begin(&ts->idle_sleeptime_seq);
	ts->idle_entrytime = ktime_get();
	ts->idle_active = 1;
	write_seqcount_end(&ts->idle_sleeptime_seq);

	sched_clock_idle_sleep_event();
}

static u64 get_cpu_sleep_time_us(struct tick_sched *ts, ktime_t *sleeptime,
				 bool compute_delta, u64 *last_update_time)
{
	ktime_t now, idle;
	unsigned int seq;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time)
		*last_update_time = ktime_to_us(now);

	do {
		seq = read_seqcount_begin(&ts->idle_sleeptime_seq);

		if (ts->idle_active && compute_delta) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			idle = ktime_add(*sleeptime, delta);
		} else {
			idle = *sleeptime;
		}
	} while (read_seqcount_retry(&ts->idle_sleeptime_seq, seq));

	return ktime_to_us(idle);
}

/**
 * get_cpu_idle_time_us - get the total idle time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds. Note this is partially broken due to
 * the counter of iowait tasks that can be remotely updated without
 * any synchronization. Therefore it is possible to observe backward
 * values within two consecutive reads.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	return get_cpu_sleep_time_us(ts, &ts->idle_sleeptime,
				     !nr_iowait_cpu(cpu), last_update_time);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
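
/*
 * Illustrative (hypothetical) caller pattern: sample the counter twice
 * and take the difference to estimate idleness over an interval, e.g.:
 *
 *	u64 wall, idle_start, idle_end;
 *
 *	idle_start = get_cpu_idle_time_us(cpu, &wall);
 *	...
 *	idle_end = get_cpu_idle_time_us(cpu, &wall);
 *
 * idle_end - idle_start then gives the idle time spent in the interval,
 * in microseconds.
 */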

/**
 * get_cpu_iowait_time_us - get the total iowait time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds. Note this is partially broken due to
 * the counter of iowait tasks that can be remotely updated without
 * any synchronization. Therefore it is possible to observe backward
 * values within two consecutive reads.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	return get_cpu_sleep_time_us(ts, &ts->iowait_sleeptime,
				     nr_iowait_cpu(cpu), last_update_time);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);

static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

	/* Forward the time to expire in the future */
	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start_expires(&ts->sched_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	}

	/*
	 * Reset to make sure the next tick stop doesn't get fooled by a past
	 * cached clock deadline.
	 */
	ts->next_tick = 0;
}

static inline bool local_timer_softirq_pending(void)
{
	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
}

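/*
 * Compute the time of the next timer event for @cpu and cache it in
 * ts->timer_expires. A return value of 0 means the tick should be kept
 * running.
 */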
static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
{
	u64 basemono, next_tick, delta, expires;
	unsigned long basejiff;
	unsigned int seq;

	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqcount_begin(&jiffies_seq);
		basemono = last_jiffies_update;
		basejiff = jiffies;
	} while (read_seqcount_retry(&jiffies_seq, seq));
	ts->last_jiffies = basejiff;
	ts->timer_expires_base = basemono;

	/*
	 * Keep the periodic tick when RCU, the architecture or irq_work
	 * requests it.
	 * Apart from that, check whether the local timer softirq is
	 * pending. If so, it's a bad idea to call get_next_timer_interrupt()
	 * because there is an already expired timer, so it will request
	 * immediate expiry, which rearms the hardware timer with a
	 * minimal delta, which brings us back to this place
	 * immediately. Lather, rinse and repeat...
	 */
	if (rcu_needs_cpu() || arch_needs_cpu() ||
	    irq_work_needs_cpu() || local_timer_softirq_pending()) {
		next_tick = basemono + TICK_NSEC;
	} else {
		/*
		 * Get the next pending timer. If high resolution
		 * timers are enabled this only takes the timer wheel
		 * timers into account. If high resolution timers are
		 * disabled this also looks at the next expiring
		 * hrtimer.
		 */
		next_tick = get_next_timer_interrupt(basejiff, basemono);
		ts->next_timer = next_tick;
	}

	/*
	 * If the tick is due in the next period, keep it ticking or
	 * force prod the timer.
	 */
	delta = next_tick - basemono;
	if (delta <= (u64)TICK_NSEC) {
		/*
		 * Tell the timer code that the base is not idle, i.e. undo
		 * the effect of get_next_timer_interrupt():
		 */
		timer_clear_idle();
		/*
		 * We've not stopped the tick yet, and there's a timer in the
		 * next period, so no point in stopping it either, bail.
		 */
		if (!ts->tick_stopped) {
			ts->timer_expires = 0;
			goto out;
		}
	}

	/*
	 * If this CPU is the one which had the do_timer() duty last, we limit
	 * the sleep time to the timekeeping max_deferment value.
	 * Otherwise we can sleep as long as we want.
	 */
	delta = timekeeping_max_deferment();
	if (cpu != tick_do_timer_cpu &&
	    (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last))
		delta = KTIME_MAX;

	/* Calculate the next expiry time */
	if (delta < (KTIME_MAX - basemono))
		expires = basemono + delta;
	else
		expires = KTIME_MAX;

	ts->timer_expires = min_t(u64, expires, next_tick);

out:
	return ts->timer_expires;
}

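/*
 * Stop the tick: program the next event to the expiry cached by
 * tick_nohz_next_event() and mark the tick as stopped.
 */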
static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	u64 basemono = ts->timer_expires_base;
	u64 expires = ts->timer_expires;
	ktime_t tick = expires;

	/* Make sure we won't be trying to stop it twice in a row. */
	ts->timer_expires_base = 0;

	/*
	 * If this CPU is the one which updates jiffies, then give up
	 * the assignment and let it be taken by the CPU which runs
	 * the tick timer next, which might be this CPU as well. If we
	 * don't drop this here the jiffies might be stale and
	 * do_timer() never invoked. Keep track of the fact that it
	 * was the one which had the do_timer() duty last.
	 */
	if (cpu == tick_do_timer_cpu) {
		tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		ts->do_timer_last = 1;
	} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
		ts->do_timer_last = 0;
	}

	/* Skip reprogramming of the event if it's not changed */
	if (ts->tick_stopped && (expires == ts->next_tick)) {
		/* Sanity check: make sure the clockevent is actually programmed */
		if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
			return;

		WARN_ON_ONCE(1);
		printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
			    basemono, ts->next_tick, dev->next_event,
			    hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer));
	}

	/*
	 * nohz_stop_sched_tick can be called several times before
	 * nohz_restart_sched_tick is called. This happens when
	 * interrupts arrive which do not cause a reschedule. In the
	 * first call we save the current tick time, so we can restart
	 * the scheduler tick in nohz_restart_sched_tick.
	 */
	if (!ts->tick_stopped) {
		calc_load_nohz_start();
		quiet_vmstat();

		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
		ts->tick_stopped = 1;
		trace_tick_stop(1, TICK_DEP_MASK_NONE);
	}

	ts->next_tick = tick;

	/*
	 * If the expiration time == KTIME_MAX, then we simply stop
	 * the tick timer.
	 */
	if (unlikely(expires == KTIME_MAX)) {
		if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
			hrtimer_cancel(&ts->sched_timer);
		else
			tick_program_event(KTIME_MAX, 1);
		return;
	}

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start(&ts->sched_timer, tick,
			      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		hrtimer_set_expires(&ts->sched_timer, tick);
		tick_program_event(tick, 1);
	}
}

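/*
 * Abandon a tick stop computed by tick_nohz_next_event(): invalidate the
 * cached expiry base so the next stop attempt re-evaluates from scratch.
 */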
tick_nohz_retain_tick(struct tick_sched * ts)96023a8d888SRafael J. Wysocki static void tick_nohz_retain_tick(struct tick_sched *ts)
96123a8d888SRafael J. Wysocki {
96223a8d888SRafael J. Wysocki 	ts->timer_expires_base = 0;
96323a8d888SRafael J. Wysocki }
96423a8d888SRafael J. Wysocki 
96523a8d888SRafael J. Wysocki #ifdef CONFIG_NO_HZ_FULL
tick_nohz_stop_sched_tick(struct tick_sched * ts,int cpu)96623a8d888SRafael J. Wysocki static void tick_nohz_stop_sched_tick(struct tick_sched *ts, int cpu)
96723a8d888SRafael J. Wysocki {
96823a8d888SRafael J. Wysocki 	if (tick_nohz_next_event(ts, cpu))
96923a8d888SRafael J. Wysocki 		tick_nohz_stop_tick(ts, cpu);
97023a8d888SRafael J. Wysocki 	else
97123a8d888SRafael J. Wysocki 		tick_nohz_retain_tick(ts);
97223a8d888SRafael J. Wysocki }
97323a8d888SRafael J. Wysocki #endif /* CONFIG_NO_HZ_FULL */
97423a8d888SRafael J. Wysocki 
tick_nohz_restart_sched_tick(struct tick_sched * ts,ktime_t now)9751f41906aSFrederic Weisbecker static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
97659d2c7caSFrederic Weisbecker {
97759d2c7caSFrederic Weisbecker 	/* Update jiffies first */
97859d2c7caSFrederic Weisbecker 	tick_do_update_jiffies64(now);
97959d2c7caSFrederic Weisbecker 
980a683f390SThomas Gleixner 	/*
981a683f390SThomas Gleixner 	 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
982a683f390SThomas Gleixner 	 * the clock forward checks in the enqueue path:
983a683f390SThomas Gleixner 	 */
984a683f390SThomas Gleixner 	timer_clear_idle();
985a683f390SThomas Gleixner 
9863c85d6dbSFrederic Weisbecker 	calc_load_nohz_stop();
98703e0d461STejun Heo 	touch_softlockup_watchdog_sched();
98859d2c7caSFrederic Weisbecker 	/*
98959d2c7caSFrederic Weisbecker 	 * Cancel the scheduled timer and restore the tick
99059d2c7caSFrederic Weisbecker 	 */
99159d2c7caSFrederic Weisbecker 	ts->tick_stopped  = 0;
99259d2c7caSFrederic Weisbecker 	tick_nohz_restart(ts, now);
99359d2c7caSFrederic Weisbecker }
99473738a95SFrederic Weisbecker 
995a5183862SYunfeng Ye static void __tick_nohz_full_update_tick(struct tick_sched *ts,
996a5183862SYunfeng Ye 					 ktime_t now)
9975811d996SFrederic Weisbecker {
9985811d996SFrederic Weisbecker #ifdef CONFIG_NO_HZ_FULL
9995811d996SFrederic Weisbecker 	int cpu = smp_processor_id();
10005811d996SFrederic Weisbecker 
1001a5183862SYunfeng Ye 	if (can_stop_full_tick(cpu, ts))
1002a5183862SYunfeng Ye 		tick_nohz_stop_sched_tick(ts, cpu);
1003a5183862SYunfeng Ye 	else if (ts->tick_stopped)
1004a5183862SYunfeng Ye 		tick_nohz_restart_sched_tick(ts, now);
1005a5183862SYunfeng Ye #endif
1006a5183862SYunfeng Ye }
1007a5183862SYunfeng Ye 
1008a5183862SYunfeng Ye static void tick_nohz_full_update_tick(struct tick_sched *ts)
1009a5183862SYunfeng Ye {
1010a5183862SYunfeng Ye 	if (!tick_nohz_full_cpu(smp_processor_id()))
10115811d996SFrederic Weisbecker 		return;
10125811d996SFrederic Weisbecker 
10135811d996SFrederic Weisbecker 	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
10145811d996SFrederic Weisbecker 		return;
10155811d996SFrederic Weisbecker 
1016a5183862SYunfeng Ye 	__tick_nohz_full_update_tick(ts, ktime_get());
10175811d996SFrederic Weisbecker }
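
/*
 * Illustration (the CPU list is an assumed example): on a kernel booted
 * with "nohz_full=1-7", CPUs 1-7 take the path above on every interrupt
 * exit to re-evaluate whether their tick can stay stopped, while the
 * housekeeping CPU(s) keep a periodic tick and carry the timekeeping
 * duty.
 */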
10185811d996SFrederic Weisbecker 
10190345691bSFrederic Weisbecker /*
10200345691bSFrederic Weisbecker  * A pending softirq outside an IRQ (or softirq disabled section) context
10210345691bSFrederic Weisbecker  * should be waiting for ksoftirqd to handle it. Therefore we shouldn't
10220345691bSFrederic Weisbecker  * reach here due to the need_resched() early check in can_stop_idle_tick().
10230345691bSFrederic Weisbecker  *
10240345691bSFrederic Weisbecker  * However, if we are between CPUHP_AP_SMPBOOT_THREADS and CPUHP_TEARDOWN_CPU
10250345691bSFrederic Weisbecker  * in the cpu_down() process, softirqs can still be raised while ksoftirqd is
10260345691bSFrederic Weisbecker  * parked, triggering the warning below since wakeup_softirqd() is ignored.
10270345691bSFrederic Weisbecker  *
10280345691bSFrederic Weisbecker  */
10290345691bSFrederic Weisbecker static bool report_idle_softirq(void)
10300345691bSFrederic Weisbecker {
10310345691bSFrederic Weisbecker 	static int ratelimit;
10320345691bSFrederic Weisbecker 	unsigned int pending = local_softirq_pending();
10330345691bSFrederic Weisbecker 
10340345691bSFrederic Weisbecker 	if (likely(!pending))
10350345691bSFrederic Weisbecker 		return false;
10360345691bSFrederic Weisbecker 
10370345691bSFrederic Weisbecker 	/* Some softirqs claim to be safe against hotplug and ksoftirqd parking */
10380345691bSFrederic Weisbecker 	if (!cpu_active(smp_processor_id())) {
10390345691bSFrederic Weisbecker 		pending &= ~SOFTIRQ_HOTPLUG_SAFE_MASK;
10400345691bSFrederic Weisbecker 		if (!pending)
10410345691bSFrederic Weisbecker 			return false;
10420345691bSFrederic Weisbecker 	}
10430345691bSFrederic Weisbecker 
1044a7e282c7SWen Yang 	if (ratelimit >= 10)
10450345691bSFrederic Weisbecker 		return false;
10460345691bSFrederic Weisbecker 
10470345691bSFrederic Weisbecker 	/* On RT, softirq handling may be waiting on some lock */
104896c1fa04SPaul Gortmaker 	if (local_bh_blocked())
10490345691bSFrederic Weisbecker 		return false;
10500345691bSFrederic Weisbecker 
10510345691bSFrederic Weisbecker 	pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n",
10520345691bSFrederic Weisbecker 		pending);
10530345691bSFrederic Weisbecker 	ratelimit++;
10540345691bSFrederic Weisbecker 
10550345691bSFrederic Weisbecker 	return true;
10560345691bSFrederic Weisbecker }
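
/*
 * Decoding the warning above: the "handler #%02x" value is the raw
 * softirq pending bitmask. As a worked example (bit numbers as
 * enumerated in include/linux/interrupt.h), "#08" means bit 3
 * (NET_RX_SOFTIRQ) is pending, and "#282" means bits 1 (TIMER),
 * 7 (SCHED) and 9 (RCU) are pending.
 */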
10570345691bSFrederic Weisbecker 
10585b39939aSFrederic Weisbecker static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
10595b39939aSFrederic Weisbecker {
10605b39939aSFrederic Weisbecker 	/*
10610de7611aSIngo Molnar 	 * If this CPU is offline and it is the one which updates
10625b39939aSFrederic Weisbecker 	 * jiffies, then give up the assignment and let it be taken by
10630de7611aSIngo Molnar 	 * the CPU which runs the tick timer next. If we don't drop
10645b39939aSFrederic Weisbecker 	 * this here, the jiffies value might go stale and do_timer()
10655b39939aSFrederic Weisbecker 	 * might never be invoked.
10665b39939aSFrederic Weisbecker 	 */
10675b39939aSFrederic Weisbecker 	if (unlikely(!cpu_online(cpu))) {
10685b39939aSFrederic Weisbecker 		if (cpu == tick_do_timer_cpu)
10695b39939aSFrederic Weisbecker 			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
1070411fe24eSFrederic Weisbecker 		/*
1071411fe24eSFrederic Weisbecker 		 * Make sure the CPU doesn't get fooled by obsolete tick
1072411fe24eSFrederic Weisbecker 		 * deadline if it comes back online later.
1073411fe24eSFrederic Weisbecker 		 */
1074411fe24eSFrederic Weisbecker 		ts->next_tick = 0;
1075f7ea0fd6SThomas Gleixner 		return false;
10765b39939aSFrederic Weisbecker 	}
10775b39939aSFrederic Weisbecker 
107823a8d888SRafael J. Wysocki 	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
10795b39939aSFrederic Weisbecker 		return false;
10805b39939aSFrederic Weisbecker 
10815b39939aSFrederic Weisbecker 	if (need_resched())
10825b39939aSFrederic Weisbecker 		return false;
10835b39939aSFrederic Weisbecker 
10840345691bSFrederic Weisbecker 	if (unlikely(report_idle_softirq()))
10855b39939aSFrederic Weisbecker 		return false;
10865b39939aSFrederic Weisbecker 
1087460775dfSFrederic Weisbecker 	if (tick_nohz_full_enabled()) {
1088a382bf93SFrederic Weisbecker 		/*
1089a382bf93SFrederic Weisbecker 		 * Keep the tick alive to guarantee timekeeping progression
1090a382bf93SFrederic Weisbecker 		 * if there are full dynticks CPUs around
1091a382bf93SFrederic Weisbecker 		 */
1092a382bf93SFrederic Weisbecker 		if (tick_do_timer_cpu == cpu)
1093a382bf93SFrederic Weisbecker 			return false;
109408ae95f4SNicholas Piggin 
109508ae95f4SNicholas Piggin 		/* Should not happen for nohz-full */
109608ae95f4SNicholas Piggin 		if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
1097a382bf93SFrederic Weisbecker 			return false;
1098a382bf93SFrederic Weisbecker 	}
1099a382bf93SFrederic Weisbecker 
11005b39939aSFrederic Weisbecker 	return true;
11015b39939aSFrederic Weisbecker }
11025b39939aSFrederic Weisbecker 
1103289dafedSFrederic Weisbecker /**
1104289dafedSFrederic Weisbecker  * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
1105289dafedSFrederic Weisbecker  *
1106289dafedSFrederic Weisbecker  * When the next event is more than a tick into the future, stop the idle tick
1107289dafedSFrederic Weisbecker  */
1108289dafedSFrederic Weisbecker void tick_nohz_idle_stop_tick(void)
110919f5f736SFrederic Weisbecker {
1110289dafedSFrederic Weisbecker 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
11115b39939aSFrederic Weisbecker 	int cpu = smp_processor_id();
1112289dafedSFrederic Weisbecker 	ktime_t expires;
11135b39939aSFrederic Weisbecker 
1114554c8aa8SRafael J. Wysocki 	/*
1115554c8aa8SRafael J. Wysocki 	 * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the
1116554c8aa8SRafael J. Wysocki 	 * tick timer expiration time is known already.
1117554c8aa8SRafael J. Wysocki 	 */
1118554c8aa8SRafael J. Wysocki 	if (ts->timer_expires_base)
1119554c8aa8SRafael J. Wysocki 		expires = ts->timer_expires;
1120554c8aa8SRafael J. Wysocki 	else if (can_stop_idle_tick(cpu, ts))
112123a8d888SRafael J. Wysocki 		expires = tick_nohz_next_event(ts, cpu);
1122554c8aa8SRafael J. Wysocki 	else
1123554c8aa8SRafael J. Wysocki 		return;
112419f5f736SFrederic Weisbecker 
11255b39939aSFrederic Weisbecker 	ts->idle_calls++;
112684bf1bccSFrederic Weisbecker 
11272456e855SThomas Gleixner 	if (expires > 0LL) {
112823a8d888SRafael J. Wysocki 		int was_stopped = ts->tick_stopped;
112923a8d888SRafael J. Wysocki 
113023a8d888SRafael J. Wysocki 		tick_nohz_stop_tick(ts, cpu);
113123a8d888SRafael J. Wysocki 
113284bf1bccSFrederic Weisbecker 		ts->idle_sleeps++;
113384bf1bccSFrederic Weisbecker 		ts->idle_expires = expires;
11342ac0d98fSFrederic Weisbecker 
1135a0db971eSFrederic Weisbecker 		if (!was_stopped && ts->tick_stopped) {
11362ac0d98fSFrederic Weisbecker 			ts->idle_jiffies = ts->last_jiffies;
1137a0db971eSFrederic Weisbecker 			nohz_balance_enter_idle(cpu);
1138a0db971eSFrederic Weisbecker 		}
113923a8d888SRafael J. Wysocki 	} else {
114023a8d888SRafael J. Wysocki 		tick_nohz_retain_tick(ts);
114119f5f736SFrederic Weisbecker 	}
1142280f0677SFrederic Weisbecker }
1143280f0677SFrederic Weisbecker 
1144554c8aa8SRafael J. Wysocki void tick_nohz_idle_retain_tick(void)
1145554c8aa8SRafael J. Wysocki {
1146554c8aa8SRafael J. Wysocki 	tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched));
1147554c8aa8SRafael J. Wysocki 	/*
1148554c8aa8SRafael J. Wysocki 	 * Undo the effect of get_next_timer_interrupt() called from
1149554c8aa8SRafael J. Wysocki 	 * tick_nohz_next_event().
1150554c8aa8SRafael J. Wysocki 	 */
1151554c8aa8SRafael J. Wysocki 	timer_clear_idle();
1152554c8aa8SRafael J. Wysocki }
1153554c8aa8SRafael J. Wysocki 
11540e776768SRafael J. Wysocki /**
11550e776768SRafael J. Wysocki  * tick_nohz_idle_enter - prepare for entering idle on the current CPU
11560e776768SRafael J. Wysocki  *
1157280f0677SFrederic Weisbecker  * Called when we start the idle loop.
1158280f0677SFrederic Weisbecker  */
11591268fbc7SFrederic Weisbecker void tick_nohz_idle_enter(void)
1160280f0677SFrederic Weisbecker {
1161280f0677SFrederic Weisbecker 	struct tick_sched *ts;
1162280f0677SFrederic Weisbecker 
1163ebf3adbaSFrederic Weisbecker 	lockdep_assert_irqs_enabled();
11640db49b72SLinus Torvalds 
11651268fbc7SFrederic Weisbecker 	local_irq_disable();
11661268fbc7SFrederic Weisbecker 
116722127e93SChristoph Lameter 	ts = this_cpu_ptr(&tick_cpu_sched);
116823a8d888SRafael J. Wysocki 
116923a8d888SRafael J. Wysocki 	WARN_ON_ONCE(ts->timer_expires_base);
117023a8d888SRafael J. Wysocki 
1171280f0677SFrederic Weisbecker 	ts->inidle = 1;
11720e776768SRafael J. Wysocki 	tick_nohz_start_idle(ts);
11731268fbc7SFrederic Weisbecker 
11741268fbc7SFrederic Weisbecker 	local_irq_enable();
1175280f0677SFrederic Weisbecker }
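
/*
 * Simplified sketch of the expected caller (loosely modeled on
 * do_idle() in kernel/sched/idle.c; details elided):
 *
 *	tick_nohz_idle_enter();
 *	while (!need_resched()) {
 *		...
 *		tick_nohz_idle_stop_tick();
 *		...enter a low power state and wait for an interrupt...
 *	}
 *	tick_nohz_idle_exit();
 */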
1176280f0677SFrederic Weisbecker 
1177280f0677SFrederic Weisbecker /**
1178280f0677SFrederic Weisbecker  * tick_nohz_irq_exit - update next tick event from interrupt exit
1179280f0677SFrederic Weisbecker  *
1180280f0677SFrederic Weisbecker  * When an interrupt fires while we are idle and it doesn't cause
1181280f0677SFrederic Weisbecker  * a reschedule, it may still add, modify or delete a timer, enqueue
1182280f0677SFrederic Weisbecker  * an RCU callback, etc...
1183280f0677SFrederic Weisbecker  * So we need to re-calculate and reprogram the next tick event.
1184280f0677SFrederic Weisbecker  */
1185280f0677SFrederic Weisbecker void tick_nohz_irq_exit(void)
1186280f0677SFrederic Weisbecker {
118722127e93SChristoph Lameter 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1188280f0677SFrederic Weisbecker 
118914851912SRafael J. Wysocki 	if (ts->inidle)
11900e776768SRafael J. Wysocki 		tick_nohz_start_idle(ts);
119114851912SRafael J. Wysocki 	else
119273738a95SFrederic Weisbecker 		tick_nohz_full_update_tick(ts);
11935811d996SFrederic Weisbecker }
119479bf2bb3SThomas Gleixner 
119579bf2bb3SThomas Gleixner /**
119645f1ff59SRafael J. Wysocki  * tick_nohz_idle_got_tick - Check whether or not the tick handler has run
11974f86d3a8SLen Brown  */
119845f1ff59SRafael J. Wysocki bool tick_nohz_idle_got_tick(void)
11994f86d3a8SLen Brown {
120022127e93SChristoph Lameter 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
12014f86d3a8SLen Brown 
12022bc629a6SFrederic Weisbecker 	if (ts->got_idle_tick) {
12032bc629a6SFrederic Weisbecker 		ts->got_idle_tick = 0;
120445f1ff59SRafael J. Wysocki 		return true;
120545f1ff59SRafael J. Wysocki 	}
120645f1ff59SRafael J. Wysocki 	return false;
120745f1ff59SRafael J. Wysocki }
120845f1ff59SRafael J. Wysocki 
120945f1ff59SRafael J. Wysocki /**
12106f9b83acSUlf Hansson  * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
12116f9b83acSUlf Hansson  * or the tick, whichever expires first. Note that, if the tick has been
12126f9b83acSUlf Hansson  * stopped, it returns the next hrtimer expiry.
12136f9b83acSUlf Hansson  *
12146f9b83acSUlf Hansson  * Called from power state control code with interrupts disabled
12156f9b83acSUlf Hansson  */
12166f9b83acSUlf Hansson ktime_t tick_nohz_get_next_hrtimer(void)
12176f9b83acSUlf Hansson {
12186f9b83acSUlf Hansson 	return __this_cpu_read(tick_cpu_device.evtdev)->next_event;
12196f9b83acSUlf Hansson }
12206f9b83acSUlf Hansson 
12216f9b83acSUlf Hansson /**
1222554c8aa8SRafael J. Wysocki  * tick_nohz_get_sleep_length - return the expected length of the current sleep
1223296bb1e5SRafael J. Wysocki  * @delta_next: duration until the next event if the tick cannot be stopped
12244f86d3a8SLen Brown  *
12254c81cb7eSRafael J. Wysocki  * Called from power state control code with interrupts disabled.
12264c81cb7eSRafael J. Wysocki  *
12274c81cb7eSRafael J. Wysocki  * The return value of this function and/or the value returned by it through the
12284c81cb7eSRafael J. Wysocki  * @delta_next pointer can be negative, which must be taken into account by its
12294c81cb7eSRafael J. Wysocki  * callers.
12304f86d3a8SLen Brown  */
1231296bb1e5SRafael J. Wysocki ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
12324f86d3a8SLen Brown {
1233554c8aa8SRafael J. Wysocki 	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
12344f86d3a8SLen Brown 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1235554c8aa8SRafael J. Wysocki 	int cpu = smp_processor_id();
1236554c8aa8SRafael J. Wysocki 	/*
1237554c8aa8SRafael J. Wysocki 	 * The idle entry time is expected to be a sufficient approximation of
1238554c8aa8SRafael J. Wysocki 	 * the current time at this point.
1239554c8aa8SRafael J. Wysocki 	 */
1240554c8aa8SRafael J. Wysocki 	ktime_t now = ts->idle_entrytime;
1241554c8aa8SRafael J. Wysocki 	ktime_t next_event;
12424f86d3a8SLen Brown 
1243554c8aa8SRafael J. Wysocki 	WARN_ON_ONCE(!ts->inidle);
1244554c8aa8SRafael J. Wysocki 
1245296bb1e5SRafael J. Wysocki 	*delta_next = ktime_sub(dev->next_event, now);
1246296bb1e5SRafael J. Wysocki 
1247554c8aa8SRafael J. Wysocki 	if (!can_stop_idle_tick(cpu, ts))
1248296bb1e5SRafael J. Wysocki 		return *delta_next;
1249554c8aa8SRafael J. Wysocki 
1250554c8aa8SRafael J. Wysocki 	next_event = tick_nohz_next_event(ts, cpu);
1251554c8aa8SRafael J. Wysocki 	if (!next_event)
1252296bb1e5SRafael J. Wysocki 		return *delta_next;
1253554c8aa8SRafael J. Wysocki 
1254554c8aa8SRafael J. Wysocki 	/*
1255554c8aa8SRafael J. Wysocki 	 * If the next highres timer to expire is earlier than next_event, the
1256554c8aa8SRafael J. Wysocki 	 * idle governor needs to know that.
1257554c8aa8SRafael J. Wysocki 	 */
1258554c8aa8SRafael J. Wysocki 	next_event = min_t(u64, next_event,
1259554c8aa8SRafael J. Wysocki 			   hrtimer_next_event_without(&ts->sched_timer));
1260554c8aa8SRafael J. Wysocki 
1261554c8aa8SRafael J. Wysocki 	return ktime_sub(next_event, now);
12624f86d3a8SLen Brown }
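
/*
 * Usage sketch (the menu cpuidle governor in
 * drivers/cpuidle/governors/menu.c is one real caller; the variable
 * names below are illustrative):
 *
 *	ktime_t delta_next;
 *	s64 sleep_ns = ktime_to_ns(tick_nohz_get_sleep_length(&delta_next));
 *
 *	A negative result is possible and must be handled by the caller:
 *	if (sleep_ns < 0)
 *		sleep_ns = 0;
 */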
12634f86d3a8SLen Brown 
1264b7eaf1aaSRafael J. Wysocki /**
1265466a2b42SJoel Fernandes  * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
1266466a2b42SJoel Fernandes  * for a particular CPU.
1267466a2b42SJoel Fernandes  *
1268466a2b42SJoel Fernandes  * Called from the schedutil frequency scaling governor in scheduler context.
1269466a2b42SJoel Fernandes  */
1270466a2b42SJoel Fernandes unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
1271466a2b42SJoel Fernandes {
1272466a2b42SJoel Fernandes 	struct tick_sched *ts = tick_get_tick_sched(cpu);
1273466a2b42SJoel Fernandes 
1274466a2b42SJoel Fernandes 	return ts->idle_calls;
1275466a2b42SJoel Fernandes }
1276466a2b42SJoel Fernandes 
1277466a2b42SJoel Fernandes /**
1278b7eaf1aaSRafael J. Wysocki  * tick_nohz_get_idle_calls - return the current idle calls counter value
1279b7eaf1aaSRafael J. Wysocki  *
1280b7eaf1aaSRafael J. Wysocki  * Called from the schedutil frequency scaling governor in scheduler context.
1281b7eaf1aaSRafael J. Wysocki  */
1282b7eaf1aaSRafael J. Wysocki unsigned long tick_nohz_get_idle_calls(void)
1283b7eaf1aaSRafael J. Wysocki {
1284b7eaf1aaSRafael J. Wysocki 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1285b7eaf1aaSRafael J. Wysocki 
1286b7eaf1aaSRafael J. Wysocki 	return ts->idle_calls;
1287b7eaf1aaSRafael J. Wysocki }
1288b7eaf1aaSRafael J. Wysocki 
128996c9b903SYunfeng Ye static void tick_nohz_account_idle_time(struct tick_sched *ts,
129096c9b903SYunfeng Ye 					ktime_t now)
12912ac0d98fSFrederic Weisbecker {
12922ac0d98fSFrederic Weisbecker 	unsigned long ticks;
12933f4724eaSFrederic Weisbecker 
129496c9b903SYunfeng Ye 	ts->idle_exittime = now;
129596c9b903SYunfeng Ye 
1296e44fcb4bSFrederic Weisbecker 	if (vtime_accounting_enabled_this_cpu())
12973f4724eaSFrederic Weisbecker 		return;
129879bf2bb3SThomas Gleixner 	/*
129979bf2bb3SThomas Gleixner 	 * We stopped the tick in idle. update_process_times() would miss
130079bf2bb3SThomas Gleixner 	 * the time we slept, as it only does a single tick worth of
130179bf2bb3SThomas Gleixner 	 * accounting. Enforce that this is accounted to idle!
130279bf2bb3SThomas Gleixner 	 */
130379bf2bb3SThomas Gleixner 	ticks = jiffies - ts->idle_jiffies;
130479bf2bb3SThomas Gleixner 	/*
130579bf2bb3SThomas Gleixner 	 * We might be one off. Do not randomly account a huge number of ticks!
130679bf2bb3SThomas Gleixner 	 */
130779741dd3SMartin Schwidefsky 	if (ticks && ticks < LONG_MAX)
130879741dd3SMartin Schwidefsky 		account_idle_ticks(ticks);
13092aaf709aSRafael J. Wysocki }
13102aaf709aSRafael J. Wysocki 
13112aaf709aSRafael J. Wysocki void tick_nohz_idle_restart_tick(void)
13122aaf709aSRafael J. Wysocki {
13132aaf709aSRafael J. Wysocki 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
13142aaf709aSRafael J. Wysocki 
1315a5183862SYunfeng Ye 	if (ts->tick_stopped) {
131696c9b903SYunfeng Ye 		ktime_t now = ktime_get();
131796c9b903SYunfeng Ye 		tick_nohz_restart_sched_tick(ts, now);
131896c9b903SYunfeng Ye 		tick_nohz_account_idle_time(ts, now);
1319a5183862SYunfeng Ye 	}
1320a5183862SYunfeng Ye }
1321a5183862SYunfeng Ye 
1322a5183862SYunfeng Ye static void tick_nohz_idle_update_tick(struct tick_sched *ts, ktime_t now)
1323a5183862SYunfeng Ye {
1324a5183862SYunfeng Ye 	if (tick_nohz_full_cpu(smp_processor_id()))
1325a5183862SYunfeng Ye 		__tick_nohz_full_update_tick(ts, now);
1326a5183862SYunfeng Ye 	else
1327a5183862SYunfeng Ye 		tick_nohz_restart_sched_tick(ts, now);
1328a5183862SYunfeng Ye 
132996c9b903SYunfeng Ye 	tick_nohz_account_idle_time(ts, now);
13302aaf709aSRafael J. Wysocki }
13312aaf709aSRafael J. Wysocki 
133279bf2bb3SThomas Gleixner /**
133379bf2bb3SThomas Gleixner  * tick_nohz_idle_exit - restart the idle tick from the idle task
133479bf2bb3SThomas Gleixner  *
133579bf2bb3SThomas Gleixner  * Restart the idle tick when the CPU is woken up from idle.
133679bf2bb3SThomas Gleixner  * This also exits the RCU extended quiescent state. The CPU
133779bf2bb3SThomas Gleixner  * can use RCU again after this function is called.
133879bf2bb3SThomas Gleixner  */
133979bf2bb3SThomas Gleixner void tick_nohz_idle_exit(void)
134079bf2bb3SThomas Gleixner {
13414a32fea9SChristoph Lameter 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1342bbe9a70aSArnd Bergmann 	bool idle_active, tick_stopped;
134379bf2bb3SThomas Gleixner 	ktime_t now;
134479bf2bb3SThomas Gleixner 
134579bf2bb3SThomas Gleixner 	local_irq_disable();
134679bf2bb3SThomas Gleixner 
134779bf2bb3SThomas Gleixner 	WARN_ON_ONCE(!ts->inidle);
134823a8d888SRafael J. Wysocki 	WARN_ON_ONCE(ts->timer_expires_base);
134979bf2bb3SThomas Gleixner 
135079bf2bb3SThomas Gleixner 	ts->inidle = 0;
1351bbe9a70aSArnd Bergmann 	idle_active = ts->idle_active;
1352bbe9a70aSArnd Bergmann 	tick_stopped = ts->tick_stopped;
135379bf2bb3SThomas Gleixner 
1354bbe9a70aSArnd Bergmann 	if (idle_active || tick_stopped)
135579bf2bb3SThomas Gleixner 		now = ktime_get();
135679bf2bb3SThomas Gleixner 
1357bbe9a70aSArnd Bergmann 	if (idle_active)
1358e8fcaa5cSFrederic Weisbecker 		tick_nohz_stop_idle(ts, now);
135979bf2bb3SThomas Gleixner 
1360bbe9a70aSArnd Bergmann 	if (tick_stopped)
1361a5183862SYunfeng Ye 		tick_nohz_idle_update_tick(ts, now);
136279bf2bb3SThomas Gleixner 
136379bf2bb3SThomas Gleixner 	local_irq_enable();
136479bf2bb3SThomas Gleixner }
136579bf2bb3SThomas Gleixner 
136679bf2bb3SThomas Gleixner /*
136779bf2bb3SThomas Gleixner  * The nohz low res interrupt handler
136879bf2bb3SThomas Gleixner  */
136979bf2bb3SThomas Gleixner static void tick_nohz_handler(struct clock_event_device *dev)
137079bf2bb3SThomas Gleixner {
137122127e93SChristoph Lameter 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
137279bf2bb3SThomas Gleixner 	struct pt_regs *regs = get_irq_regs();
137379bf2bb3SThomas Gleixner 	ktime_t now = ktime_get();
137479bf2bb3SThomas Gleixner 
13752456e855SThomas Gleixner 	dev->next_event = KTIME_MAX;
137679bf2bb3SThomas Gleixner 
1377ff7de620SRafael J. Wysocki 	tick_sched_do_timer(ts, now);
13789e8f559bSFrederic Weisbecker 	tick_sched_handle(ts, regs);
137979bf2bb3SThomas Gleixner 
138062c1256dSNicholas Piggin 	if (unlikely(ts->tick_stopped)) {
138162c1256dSNicholas Piggin 		/*
138262c1256dSNicholas Piggin 		 * The clockevent device is not reprogrammed, so change the
138362c1256dSNicholas Piggin 		 * clock event device to ONESHOT_STOPPED to avoid spurious
138462c1256dSNicholas Piggin 		 * interrupts on devices which might not be truly one shot.
138562c1256dSNicholas Piggin 		 */
138662c1256dSNicholas Piggin 		tick_program_event(KTIME_MAX, 1);
1387b5e995e6SViresh Kumar 		return;
138862c1256dSNicholas Piggin 	}
1389b5e995e6SViresh Kumar 
1390b9965449SThomas Gleixner 	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
13910ff53d09SThomas Gleixner 	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
139279bf2bb3SThomas Gleixner }
139379bf2bb3SThomas Gleixner 
1394bc7a34b8SThomas Gleixner static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
1395bc7a34b8SThomas Gleixner {
1396bc7a34b8SThomas Gleixner 	if (!tick_nohz_enabled)
1397bc7a34b8SThomas Gleixner 		return;
1398bc7a34b8SThomas Gleixner 	ts->nohz_mode = mode;
1399bc7a34b8SThomas Gleixner 	/* One update is enough */
1400bc7a34b8SThomas Gleixner 	if (!test_and_set_bit(0, &tick_nohz_active))
1401ae67badaSThomas Gleixner 		timers_update_nohz();
1402bc7a34b8SThomas Gleixner }
1403bc7a34b8SThomas Gleixner 
140479bf2bb3SThomas Gleixner /**
140579bf2bb3SThomas Gleixner  * tick_nohz_switch_to_nohz - switch to nohz mode
140679bf2bb3SThomas Gleixner  */
140779bf2bb3SThomas Gleixner static void tick_nohz_switch_to_nohz(void)
140879bf2bb3SThomas Gleixner {
140922127e93SChristoph Lameter 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
141079bf2bb3SThomas Gleixner 	ktime_t next;
141179bf2bb3SThomas Gleixner 
141227630532SViresh Kumar 	if (!tick_nohz_enabled)
141379bf2bb3SThomas Gleixner 		return;
141479bf2bb3SThomas Gleixner 
14156b442bc8SThomas Gleixner 	if (tick_switch_to_oneshot(tick_nohz_handler))
141679bf2bb3SThomas Gleixner 		return;
14176b442bc8SThomas Gleixner 
141879bf2bb3SThomas Gleixner 	/*
141979bf2bb3SThomas Gleixner 	 * Recycle the hrtimer in 'ts', so that we can share the
142079bf2bb3SThomas Gleixner 	 * hrtimer_forward() handling with the highres code.
142179bf2bb3SThomas Gleixner 	 */
142271fed982SSebastian Andrzej Siewior 	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
142379bf2bb3SThomas Gleixner 	/* Get the next period */
142479bf2bb3SThomas Gleixner 	next = tick_init_jiffy_update();
142579bf2bb3SThomas Gleixner 
1426cc584b21SArjan van de Ven 	hrtimer_set_expires(&ts->sched_timer, next);
1427b9965449SThomas Gleixner 	hrtimer_forward_now(&ts->sched_timer, TICK_NSEC);
14281ca8ec53SWanpeng Li 	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
1429bc7a34b8SThomas Gleixner 	tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
143079bf2bb3SThomas Gleixner }
143179bf2bb3SThomas Gleixner 
14325acac1beSFrederic Weisbecker static inline void tick_nohz_irq_enter(void)
1433eed3b9cfSMartin Schwidefsky {
14344a32fea9SChristoph Lameter 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1435eed3b9cfSMartin Schwidefsky 	ktime_t now;
1436eed3b9cfSMartin Schwidefsky 
1437eed3b9cfSMartin Schwidefsky 	if (!ts->idle_active && !ts->tick_stopped)
1438eed3b9cfSMartin Schwidefsky 		return;
1439eed3b9cfSMartin Schwidefsky 	now = ktime_get();
1440eed3b9cfSMartin Schwidefsky 	if (ts->idle_active)
1441e8fcaa5cSFrederic Weisbecker 		tick_nohz_stop_idle(ts, now);
144253e87e3cSFrederic Weisbecker 	/*
144353e87e3cSFrederic Weisbecker 	 * If all CPUs are idle, we may need to update a stale jiffies value.
144453e87e3cSFrederic Weisbecker 	 * Note nohz_full is a special case: a timekeeper is guaranteed to stay
144553e87e3cSFrederic Weisbecker 	 * alive but it might be busy looping with interrupts disabled in some
144653e87e3cSFrederic Weisbecker 	 * rare case (typically stop machine). So we must make sure we have a
144753e87e3cSFrederic Weisbecker 	 * last resort.
144853e87e3cSFrederic Weisbecker 	 */
1449ff006732SThomas Gleixner 	if (ts->tick_stopped)
1450eed3b9cfSMartin Schwidefsky 		tick_nohz_update_jiffies(now);
1451eed3b9cfSMartin Schwidefsky }
1452eed3b9cfSMartin Schwidefsky 
145379bf2bb3SThomas Gleixner #else
145479bf2bb3SThomas Gleixner 
145579bf2bb3SThomas Gleixner static inline void tick_nohz_switch_to_nohz(void) { }
14565acac1beSFrederic Weisbecker static inline void tick_nohz_irq_enter(void) { }
1457bc7a34b8SThomas Gleixner static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { }
145879bf2bb3SThomas Gleixner 
14593451d024SFrederic Weisbecker #endif /* CONFIG_NO_HZ_COMMON */
146079bf2bb3SThomas Gleixner 
146179bf2bb3SThomas Gleixner /*
1462719254faSThomas Gleixner  * Called from irq_enter to notify about the possible interruption of idle()
1463719254faSThomas Gleixner  */
14645acac1beSFrederic Weisbecker void tick_irq_enter(void)
1465719254faSThomas Gleixner {
1466e8fcaa5cSFrederic Weisbecker 	tick_check_oneshot_broadcast_this_cpu();
14675acac1beSFrederic Weisbecker 	tick_nohz_irq_enter();
1468719254faSThomas Gleixner }
1469719254faSThomas Gleixner 
1470719254faSThomas Gleixner /*
147179bf2bb3SThomas Gleixner  * High resolution timer specific code
147279bf2bb3SThomas Gleixner  */
147379bf2bb3SThomas Gleixner #ifdef CONFIG_HIGH_RES_TIMERS
147479bf2bb3SThomas Gleixner /*
14754c9dc641SPavel Machek  * We rearm the timer until we get disabled by the idle code.
1476351f181fSChuansheng Liu  * Called with interrupts disabled.
147779bf2bb3SThomas Gleixner  */
147879bf2bb3SThomas Gleixner static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
147979bf2bb3SThomas Gleixner {
148079bf2bb3SThomas Gleixner 	struct tick_sched *ts =
148179bf2bb3SThomas Gleixner 		container_of(timer, struct tick_sched, sched_timer);
148279bf2bb3SThomas Gleixner 	struct pt_regs *regs = get_irq_regs();
148379bf2bb3SThomas Gleixner 	ktime_t now = ktime_get();
1484d3ed7824SThomas Gleixner 
1485ff7de620SRafael J. Wysocki 	tick_sched_do_timer(ts, now);
148679bf2bb3SThomas Gleixner 
148779bf2bb3SThomas Gleixner 	/*
148879bf2bb3SThomas Gleixner 	 * Do not call the tick handler when we are not in IRQ context
148979bf2bb3SThomas Gleixner 	 * and have no valid regs pointer.
149079bf2bb3SThomas Gleixner 	 */
14919e8f559bSFrederic Weisbecker 	if (regs)
14929e8f559bSFrederic Weisbecker 		tick_sched_handle(ts, regs);
14937c259045SFrederic Weisbecker 	else
14947c259045SFrederic Weisbecker 		ts->next_tick = 0;
149579bf2bb3SThomas Gleixner 
14962a16fc93SViresh Kumar 	/* No need to reprogram if we are in idle or full dynticks mode */
14972a16fc93SViresh Kumar 	if (unlikely(ts->tick_stopped))
14982a16fc93SViresh Kumar 		return HRTIMER_NORESTART;
14992a16fc93SViresh Kumar 
1500b9965449SThomas Gleixner 	hrtimer_forward(timer, now, TICK_NSEC);
150179bf2bb3SThomas Gleixner 
150279bf2bb3SThomas Gleixner 	return HRTIMER_RESTART;
150379bf2bb3SThomas Gleixner }
150479bf2bb3SThomas Gleixner 
15055307c955SMike Galbraith static int sched_skew_tick;
15065307c955SMike Galbraith 
150762cf20b3SThomas Gleixner static int __init skew_tick(char *str)
150862cf20b3SThomas Gleixner {
150962cf20b3SThomas Gleixner 	get_option(&str, &sched_skew_tick);
151062cf20b3SThomas Gleixner 
151162cf20b3SThomas Gleixner 	return 0;
151262cf20b3SThomas Gleixner }
151362cf20b3SThomas Gleixner early_param("skew_tick", skew_tick);
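
/*
 * Example: booting with "skew_tick=1" staggers the per-CPU tick timers
 * (see the offset calculation in tick_setup_sched_timer() below). This
 * mitigates jiffies_lock contention on larger systems, at the cost of
 * somewhat higher power consumption because the ticks no longer align.
 */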
151462cf20b3SThomas Gleixner 
151579bf2bb3SThomas Gleixner /**
151679bf2bb3SThomas Gleixner  * tick_setup_sched_timer - setup the tick emulation timer
151779bf2bb3SThomas Gleixner  */
151879bf2bb3SThomas Gleixner void tick_setup_sched_timer(void)
151979bf2bb3SThomas Gleixner {
152022127e93SChristoph Lameter 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
152179bf2bb3SThomas Gleixner 	ktime_t now = ktime_get();
152279bf2bb3SThomas Gleixner 
152379bf2bb3SThomas Gleixner 	/*
152479bf2bb3SThomas Gleixner 	 * Emulate tick processing via per-CPU hrtimers:
152579bf2bb3SThomas Gleixner 	 */
1526902a9f9cSSebastian Andrzej Siewior 	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
152779bf2bb3SThomas Gleixner 	ts->sched_timer.function = tick_sched_timer;
152879bf2bb3SThomas Gleixner 
15290de7611aSIngo Molnar 	/* Get the next period (per-CPU) */
1530cc584b21SArjan van de Ven 	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
153179bf2bb3SThomas Gleixner 
15329c3f9e28SThomas Gleixner 	/* Offset the tick to avert jiffies_lock contention. */
15335307c955SMike Galbraith 	if (sched_skew_tick) {
1534b9965449SThomas Gleixner 		u64 offset = TICK_NSEC >> 1;
15355307c955SMike Galbraith 		do_div(offset, num_possible_cpus());
15365307c955SMike Galbraith 		offset *= smp_processor_id();
15375307c955SMike Galbraith 		hrtimer_add_expires_ns(&ts->sched_timer, offset);
15385307c955SMike Galbraith 	}
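
	/*
	 * Worked example of the skew above (HZ chosen for illustration):
	 * with HZ=250, TICK_NSEC is 4,000,000 ns, so on a 4-CPU system
	 * the offsets are 0, 500,000, 1,000,000 and 1,500,000 ns for
	 * CPUs 0-3, spreading the per-CPU ticks across the first half
	 * of the tick period.
	 */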
15395307c955SMike Galbraith 
1540b9965449SThomas Gleixner 	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
1541902a9f9cSSebastian Andrzej Siewior 	hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
1542bc7a34b8SThomas Gleixner 	tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
154379bf2bb3SThomas Gleixner }
15443c4fbe5eSMiao Xie #endif /* HIGH_RES_TIMERS */
154579bf2bb3SThomas Gleixner 
15463451d024SFrederic Weisbecker #if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
154779bf2bb3SThomas Gleixner void tick_cancel_sched_timer(int cpu)
154879bf2bb3SThomas Gleixner {
154979bf2bb3SThomas Gleixner 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
15507fc3dd35SHeiko Carstens 	ktime_t idle_sleeptime, iowait_sleeptime;
1551*500ad5daSTim Chen 	unsigned long idle_calls, idle_sleeps;
155279bf2bb3SThomas Gleixner 
15533c4fbe5eSMiao Xie # ifdef CONFIG_HIGH_RES_TIMERS
155479bf2bb3SThomas Gleixner 	if (ts->sched_timer.base)
155579bf2bb3SThomas Gleixner 		hrtimer_cancel(&ts->sched_timer);
15563c4fbe5eSMiao Xie # endif
1557a7901766SKarsten Wiese 
15587fc3dd35SHeiko Carstens 	idle_sleeptime = ts->idle_sleeptime;
15597fc3dd35SHeiko Carstens 	iowait_sleeptime = ts->iowait_sleeptime;
1560*500ad5daSTim Chen 	idle_calls = ts->idle_calls;
1561*500ad5daSTim Chen 	idle_sleeps = ts->idle_sleeps;
15624b0c0f29SThomas Gleixner 	memset(ts, 0, sizeof(*ts));
15637fc3dd35SHeiko Carstens 	ts->idle_sleeptime = idle_sleeptime;
15647fc3dd35SHeiko Carstens 	ts->iowait_sleeptime = iowait_sleeptime;
1565*500ad5daSTim Chen 	ts->idle_calls = idle_calls;
1566*500ad5daSTim Chen 	ts->idle_sleeps = idle_sleeps;
156779bf2bb3SThomas Gleixner }
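
/*
 * The save/restore above exists because the memset() wipes the whole
 * per-CPU tick_sched state when a CPU goes offline: preserving the
 * idle/iowait sleep times and the idle counters keeps the exported
 * per-CPU idle statistics from going backwards if the CPU comes back
 * online later.
 */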
15683c4fbe5eSMiao Xie #endif
156979bf2bb3SThomas Gleixner 
15709c95bc25SJiapeng Chong /*
157179bf2bb3SThomas Gleixner  * Async notification about clocksource changes
157279bf2bb3SThomas Gleixner  */
157379bf2bb3SThomas Gleixner void tick_clock_notify(void)
157479bf2bb3SThomas Gleixner {
157579bf2bb3SThomas Gleixner 	int cpu;
157679bf2bb3SThomas Gleixner 
157779bf2bb3SThomas Gleixner 	for_each_possible_cpu(cpu)
157879bf2bb3SThomas Gleixner 		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
157979bf2bb3SThomas Gleixner }
158079bf2bb3SThomas Gleixner 
158179bf2bb3SThomas Gleixner /*
158279bf2bb3SThomas Gleixner  * Async notification about clock event changes
158379bf2bb3SThomas Gleixner  */
158479bf2bb3SThomas Gleixner void tick_oneshot_notify(void)
158579bf2bb3SThomas Gleixner {
158622127e93SChristoph Lameter 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
158779bf2bb3SThomas Gleixner 
158879bf2bb3SThomas Gleixner 	set_bit(0, &ts->check_clocks);
158979bf2bb3SThomas Gleixner }
159079bf2bb3SThomas Gleixner 
15919c95bc25SJiapeng Chong /*
159279bf2bb3SThomas Gleixner  * Check if a change happened which makes oneshot mode possible.
159379bf2bb3SThomas Gleixner  *
159479bf2bb3SThomas Gleixner  * Called cyclically from the hrtimer softirq (driven by the timer
159579bf2bb3SThomas Gleixner  * softirq). allow_nohz signals that we can switch into low-res nohz
159679bf2bb3SThomas Gleixner  * mode, because high resolution timers are disabled (either at
15976b442bc8SThomas Gleixner  * compile time or at runtime). Called with interrupts disabled.
159879bf2bb3SThomas Gleixner  */
159979bf2bb3SThomas Gleixner int tick_check_oneshot_change(int allow_nohz)
160079bf2bb3SThomas Gleixner {
160122127e93SChristoph Lameter 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
160279bf2bb3SThomas Gleixner 
160379bf2bb3SThomas Gleixner 	if (!test_and_clear_bit(0, &ts->check_clocks))
160479bf2bb3SThomas Gleixner 		return 0;
160579bf2bb3SThomas Gleixner 
160679bf2bb3SThomas Gleixner 	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
160779bf2bb3SThomas Gleixner 		return 0;
160879bf2bb3SThomas Gleixner 
1609cf4fc6cbSLi Zefan 	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
161079bf2bb3SThomas Gleixner 		return 0;
161179bf2bb3SThomas Gleixner 
161279bf2bb3SThomas Gleixner 	if (!allow_nohz)
161379bf2bb3SThomas Gleixner 		return 1;
161479bf2bb3SThomas Gleixner 
161579bf2bb3SThomas Gleixner 	tick_nohz_switch_to_nohz();
161679bf2bb3SThomas Gleixner 	return 0;
161779bf2bb3SThomas Gleixner }
1618