xref: /openbmc/linux/kernel/time/tick-sched.c (revision db7b464d)
// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  No idle tick implementation for low and high resolution timers
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/nmi.h>
#include <linux/profile.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
#include <linux/sched/loadavg.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/context_tracking.h>
#include <linux/mm.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

#include <trace/events/timer.h>

/*
 * Per-CPU nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}
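
/*
 * Example (illustrative): this per-CPU accessor is how other timer code,
 * e.g. the timer_list debug output, peeks at a remote CPU's tick state:
 *
 *	struct tick_sched *ts = tick_get_tick_sched(cpu);
 *
 *	if (ts->tick_stopped)
 *		...
 */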

#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
/*
 * The time when the last jiffy update happened. Write access must hold
 * jiffies_lock and jiffies_seq. tick_nohz_next_event() needs to get a
 * consistent view of jiffies and last_jiffies_update.
 */
static ktime_t last_jiffies_update;

/*
 * Must be called with interrupts disabled!
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 1;
	ktime_t delta, nextp;

	/*
	 * 64bit can do a quick check without holding jiffies lock and
	 * without looking at the sequence count. The smp_load_acquire()
	 * pairs with the update done later in this function.
	 *
	 * 32bit cannot do that because the store of tick_next_period
	 * consists of two 32bit stores and the first store could move it
	 * to a random point in the future.
	 */
	if (IS_ENABLED(CONFIG_64BIT)) {
		if (ktime_before(now, smp_load_acquire(&tick_next_period)))
			return;
	} else {
		unsigned int seq;

		/*
		 * Avoid contention on jiffies_lock and protect the quick
		 * check with the sequence count.
		 */
		do {
			seq = read_seqcount_begin(&jiffies_seq);
			nextp = tick_next_period;
		} while (read_seqcount_retry(&jiffies_seq, seq));

		if (ktime_before(now, nextp))
			return;
	}

	/* Quick check failed, i.e. update is required. */
	raw_spin_lock(&jiffies_lock);
	/*
	 * Reevaluate with the lock held. Another CPU might have done the
	 * update already.
	 */
	if (ktime_before(now, tick_next_period)) {
		raw_spin_unlock(&jiffies_lock);
		return;
	}

	write_seqcount_begin(&jiffies_seq);

	delta = ktime_sub(now, tick_next_period);
	if (unlikely(delta >= TICK_NSEC)) {
		/* Slow path for long idle sleep times */
		s64 incr = TICK_NSEC;

		ticks += ktime_divns(delta, incr);

		last_jiffies_update = ktime_add_ns(last_jiffies_update,
						   incr * ticks);
	} else {
		last_jiffies_update = ktime_add_ns(last_jiffies_update,
						   TICK_NSEC);
	}

	/* Advance jiffies to complete the jiffies_seq protected job */
	jiffies_64 += ticks;

	/*
	 * Keep the tick_next_period variable up to date.
	 */
	nextp = ktime_add_ns(last_jiffies_update, TICK_NSEC);

	if (IS_ENABLED(CONFIG_64BIT)) {
		/*
		 * Pairs with smp_load_acquire() in the lockless quick
		 * check above and ensures that the update to jiffies_64 is
		 * not reordered vs. the store to tick_next_period, neither
		 * by the compiler nor by the CPU.
		 */
		smp_store_release(&tick_next_period, nextp);
	} else {
		/*
		 * A plain store is good enough on 32bit as the quick check
		 * above is protected by the sequence count.
		 */
		tick_next_period = nextp;
	}

	/*
	 * Release the sequence count. calc_global_load() below is not
	 * protected by it, but jiffies_lock needs to be held to prevent
	 * concurrent invocations.
	 */
	write_seqcount_end(&jiffies_seq);

	calc_global_load();

	raw_spin_unlock(&jiffies_lock);
	update_wall_time();
}
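
/*
 * Worked example (illustrative): assume this CPU slept and 'now' is
 * 2.5 tick periods past tick_next_period. Then delta = 2.5 * TICK_NSEC,
 * so ticks = 1 + 2 = 3: jiffies_64 advances by 3 and last_jiffies_update
 * moves forward by 3 * TICK_NSEC, leaving tick_next_period half a period
 * ahead of 'now' for the next update to consume.
 */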

/*
 * Initialize and return the jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	raw_spin_lock(&jiffies_lock);
	write_seqcount_begin(&jiffies_seq);
	/* Did we start the jiffies update yet? */
	if (last_jiffies_update == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_seqcount_end(&jiffies_seq);
	raw_spin_unlock(&jiffies_lock);
	return period;
}

#define MAX_STALLED_JIFFIES 5

static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
{
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the CPU in charge went
	 * into a long sleep. If two CPUs happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * jiffies_lock.
	 *
	 * If nohz_full is enabled, this should not happen because the
	 * tick_do_timer_cpu never relinquishes the duty.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
#ifdef CONFIG_NO_HZ_FULL
		WARN_ON_ONCE(tick_nohz_full_running);
#endif
		tick_do_timer_cpu = cpu;
	}
#endif

	/* Check if jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * If the jiffies update stalled for too long (timekeeper in stop_machine()
	 * or VMEXIT'ed for several msecs), force an update.
	 */
	if (ts->last_tick_jiffies != jiffies) {
		ts->stalled_jiffies = 0;
		ts->last_tick_jiffies = READ_ONCE(jiffies);
	} else {
		if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) {
			tick_do_update_jiffies64(now);
			ts->stalled_jiffies = 0;
			ts->last_tick_jiffies = READ_ONCE(jiffies);
		}
	}

	if (ts->inidle)
		ts->got_idle_tick = 1;
}

static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on complete idle SMP systems while
	 * waiting on the login prompt. We also increment the "start of
	 * idle" jiffy stamp so the idle accounting adjustment we do
	 * when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog_sched();
		if (is_idle_task(current))
			ts->idle_jiffies++;
		/*
		 * In case the current tick fired too early past its expected
		 * expiration, make sure we don't bypass the next clock reprogramming
		 * to the same deadline.
		 */
		ts->next_tick = 0;
	}
#endif
	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);
}
#endif

#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
bool tick_nohz_full_running;
EXPORT_SYMBOL_GPL(tick_nohz_full_running);
static atomic_t tick_dep_mask;

static bool check_tick_dependency(atomic_t *dep)
{
	int val = atomic_read(dep);

	if (val & TICK_DEP_MASK_POSIX_TIMER) {
		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
		return true;
	}

	if (val & TICK_DEP_MASK_PERF_EVENTS) {
		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
		return true;
	}

	if (val & TICK_DEP_MASK_SCHED) {
		trace_tick_stop(0, TICK_DEP_MASK_SCHED);
		return true;
	}

	if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
		trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
		return true;
	}

	if (val & TICK_DEP_MASK_RCU) {
		trace_tick_stop(0, TICK_DEP_MASK_RCU);
		return true;
	}

	if (val & TICK_DEP_MASK_RCU_EXP) {
		trace_tick_stop(0, TICK_DEP_MASK_RCU_EXP);
		return true;
	}

	return false;
}

static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
	lockdep_assert_irqs_disabled();

	if (unlikely(!cpu_online(cpu)))
		return false;

	if (check_tick_dependency(&tick_dep_mask))
		return false;

	if (check_tick_dependency(&ts->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->signal->tick_dep_mask))
		return false;

	return true;
}

static void nohz_full_kick_func(struct irq_work *work)
{
	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
}

static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) =
	IRQ_WORK_INIT_HARD(nohz_full_kick_func);

/*
 * Kick this CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 * is NMI safe.
 */
static void tick_nohz_full_kick(void)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
}

/*
 * Kick the CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick_cpu(int cpu)
{
	if (!tick_nohz_full_cpu(cpu))
		return;

	irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}

static void tick_nohz_kick_task(struct task_struct *tsk)
{
	int cpu;

	/*
	 * If the task is not running, run_posix_cpu_timers() has no timers
	 * to elapse, so the IPI can be spared.
	 *
	 * activate_task()                      STORE p->tick_dep_mask
	 *   STORE p->on_rq
	 * __schedule() (switch to task 'p')    smp_mb() (atomic_fetch_or())
	 *   LOCK rq->lock                      LOAD p->on_rq
	 *   smp_mb__after_spin_lock()
	 *   tick_nohz_task_switch()
	 *     LOAD p->tick_dep_mask
	 */
	if (!sched_task_on_rq(tsk))
		return;

	/*
	 * If the task concurrently migrates to another CPU,
	 * we guarantee it sees the new tick dependency upon
	 * schedule.
	 *
	 * set_task_cpu(p, cpu);
	 *   STORE p->cpu = @cpu
	 * __schedule() (switch to task 'p')
	 *   LOCK rq->lock
	 *   smp_mb__after_spin_lock()          STORE p->tick_dep_mask
	 *   tick_nohz_task_switch()            smp_mb() (atomic_fetch_or())
	 *      LOAD p->tick_dep_mask           LOAD p->cpu
	 */
	cpu = task_cpu(tsk);

	preempt_disable();
	if (cpu_online(cpu))
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}

/*
 * Kick all full dynticks CPUs in order to force these to re-evaluate
 * their dependency on the tick and restart it if necessary.
 */
static void tick_nohz_full_kick_all(void)
{
	int cpu;

	if (!tick_nohz_full_running)
		return;

	preempt_disable();
	for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}

static void tick_nohz_dep_set_all(atomic_t *dep,
				  enum tick_dep_bits bit)
{
	int prev;

	prev = atomic_fetch_or(BIT(bit), dep);
	if (!prev)
		tick_nohz_full_kick_all();
}

/*
 * Set a global tick dependency. Used by perf events that rely on freq and
 * by the unstable clock.
 */
void tick_nohz_dep_set(enum tick_dep_bits bit)
{
	tick_nohz_dep_set_all(&tick_dep_mask, bit);
}

void tick_nohz_dep_clear(enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tick_dep_mask);
}

/*
 * Set a per-CPU tick dependency. Used by the scheduler and perf events
 * in order to manage event throttling.
 */
void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	int prev;
	struct tick_sched *ts;

	ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
	if (!prev) {
		preempt_disable();
		/* Perf needs local kick that is NMI safe */
		if (cpu == smp_processor_id()) {
			tick_nohz_full_kick();
		} else {
			/* Remote irq work is not NMI-safe */
			if (!WARN_ON_ONCE(in_nmi()))
				tick_nohz_full_kick_cpu(cpu);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_set_cpu);

void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu);

/*
 * Set a per-task tick dependency. RCU needs this. Posix CPU timers also
 * need it in order to elapse per-task timers.
 */
void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask))
		tick_nohz_kick_task(tsk);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_set_task);

void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_task);
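
/*
 * Example (illustrative): a subsystem that must keep the tick alive for a
 * task, e.g. RCU or the posix CPU timers, brackets the window like:
 *
 *	tick_nohz_dep_set_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
 *	...	// interval during which tsk needs the tick
 *	tick_nohz_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
 *
 * The TICK_DEP_BIT_* values are defined in <linux/tick.h>.
 */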

/*
 * Set a per-taskgroup tick dependency. Posix CPU timers need this in order
 * to elapse per-process timers.
 */
void tick_nohz_dep_set_signal(struct task_struct *tsk,
			      enum tick_dep_bits bit)
{
	int prev;
	struct signal_struct *sig = tsk->signal;

	prev = atomic_fetch_or(BIT(bit), &sig->tick_dep_mask);
	if (!prev) {
		struct task_struct *t;

		lockdep_assert_held(&tsk->sighand->siglock);
		__for_each_thread(sig, t)
			tick_nohz_kick_task(t);
	}
}

void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &sig->tick_dep_mask);
}

/*
 * Re-evaluate the need for the tick as we switch the current task.
 * It might need the tick due to per task/process properties:
 * perf events, posix CPU timers, ...
 */
void __tick_nohz_task_switch(void)
{
	struct tick_sched *ts;

	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped) {
		if (atomic_read(&current->tick_dep_mask) ||
		    atomic_read(&current->signal->tick_dep_mask))
			tick_nohz_full_kick();
	}
}

/* Get the boot-time nohz CPU list from the kernel parameters. */
void __init tick_nohz_full_setup(cpumask_var_t cpumask)
{
	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
	cpumask_copy(tick_nohz_full_mask, cpumask);
	tick_nohz_full_running = true;
}
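
/*
 * The cpumask typically comes from the "nohz_full=" boot parameter. For
 * example, booting with:
 *
 *	nohz_full=1-7
 *
 * keeps CPU 0 as the housekeeping/timekeeping CPU and runs CPUs 1-7 in
 * full dynticks mode.
 */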

bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
{
	/*
	 * The tick_do_timer_cpu CPU handles housekeeping duty (unbound
	 * timers, workqueues, timekeeping, ...) on behalf of full dynticks
	 * CPUs. It must remain online when nohz full is enabled.
	 */
	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
		return false;
	return true;
}

static int tick_nohz_cpu_down(unsigned int cpu)
{
	return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY;
}

void __init tick_nohz_init(void)
{
	int cpu, ret;

	if (!tick_nohz_full_running)
		return;

	/*
	 * Full dynticks uses irq work to drive the tick rescheduling from
	 * safe locking contexts. But then we need irq work to raise its own
	 * interrupts to avoid a circular dependency on the tick.
	 */
	if (!arch_irq_work_has_interrupt()) {
		pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
		cpumask_clear(tick_nohz_full_mask);
		tick_nohz_full_running = false;
		return;
	}

	if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
			!IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
		cpu = smp_processor_id();

		if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
			pr_warn("NO_HZ: Clearing %d from nohz_full range "
				"for timekeeping\n", cpu);
			cpumask_clear_cpu(cpu, tick_nohz_full_mask);
		}
	}

	for_each_cpu(cpu, tick_nohz_full_mask)
		ct_cpu_track_user(cpu);

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"kernel/nohz:predown", NULL,
					tick_nohz_cpu_down);
	WARN_ON(ret < 0);
	pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
		cpumask_pr_args(tick_nohz_full_mask));
}
#endif

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ_COMMON
/*
 * NO_HZ enabled?
 */
bool tick_nohz_enabled __read_mostly = true;
unsigned long tick_nohz_active __read_mostly;
/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	return (kstrtobool(str, &tick_nohz_enabled) == 0);
}

__setup("nohz=", setup_tick_nohz);
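
/*
 * Example: booting with "nohz=off" disables dyntick-idle mode entirely,
 * while "nohz=on" (the default) allows the idle tick to be stopped.
 */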

bool tick_nohz_tick_stopped(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->tick_stopped;
}

bool tick_nohz_tick_stopped_cpu(int cpu)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	return ts->tick_stopped;
}

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any CPU, as we don't know whether the
 * CPU which has the do_timer duty assigned is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	unsigned long flags;

	__this_cpu_write(tick_cpu_sched.idle_waketime, now);

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog_sched();
}

/*
 * Update the per-CPU idle time statistics counters
 */
static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
	ktime_t delta;

	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		if (nr_iowait_cpu(cpu) > 0)
			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
		else
			ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_entrytime = now;
	}

	if (last_update_time)
		*last_update_time = ktime_to_us(now);
}

static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
	update_ts_time_stats(smp_processor_id(), ts, now, NULL);
	ts->idle_active = 0;

	sched_clock_idle_wakeup_event();
}

static void tick_nohz_start_idle(struct tick_sched *ts)
{
	ts->idle_entrytime = ktime_get();
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
}

/**
 * get_cpu_idle_time_us - get the total idle time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, idle;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		idle = ts->idle_sleeptime;
	} else {
		if (ts->idle_active && !nr_iowait_cpu(cpu)) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			idle = ktime_add(ts->idle_sleeptime, delta);
		} else {
			idle = ts->idle_sleeptime;
		}
	}

	return ktime_to_us(idle);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
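
/*
 * Illustrative use: consumers such as cpufreq governors or the /proc/stat
 * idle accounting sample this counter over an interval, e.g.:
 *
 *	u64 t0 = get_cpu_idle_time_us(cpu, NULL);
 *	...	// some interval later
 *	u64 idle_us = get_cpu_idle_time_us(cpu, NULL) - t0;
 */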

/**
 * get_cpu_iowait_time_us - get the total iowait time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, iowait;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		iowait = ts->iowait_sleeptime;
	} else {
		if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			iowait = ktime_add(ts->iowait_sleeptime, delta);
		} else {
			iowait = ts->iowait_sleeptime;
		}
	}

	return ktime_to_us(iowait);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);

static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

	/* Forward the time to expire in the future */
	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start_expires(&ts->sched_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	}

	/*
	 * Reset to make sure next tick stop doesn't get fooled by past
	 * cached clock deadline.
	 */
	ts->next_tick = 0;
}

static inline bool local_timer_softirq_pending(void)
{
	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
}

static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
{
	u64 basemono, next_tick, delta, expires;
	unsigned long basejiff;
	unsigned int seq;

	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqcount_begin(&jiffies_seq);
		basemono = last_jiffies_update;
		basejiff = jiffies;
	} while (read_seqcount_retry(&jiffies_seq, seq));
	ts->last_jiffies = basejiff;
	ts->timer_expires_base = basemono;

	/*
	 * Keep the periodic tick, when RCU, architecture or irq_work
	 * requests it.
	 * Aside from that, check whether the local timer softirq is
	 * pending. If so, it's a bad idea to call get_next_timer_interrupt(),
	 * because there is an already expired timer, so it will request
	 * immediate expiry, which rearms the hardware timer with a
	 * minimal delta which brings us back to this place
	 * immediately. Lather, rinse and repeat...
	 */
	if (rcu_needs_cpu() || arch_needs_cpu() ||
	    irq_work_needs_cpu() || local_timer_softirq_pending()) {
		next_tick = basemono + TICK_NSEC;
	} else {
		/*
		 * Get the next pending timer. If high resolution
		 * timers are enabled this only takes the timer wheel
		 * timers into account. If high resolution timers are
		 * disabled this also looks at the next expiring
		 * hrtimer.
		 */
		next_tick = get_next_timer_interrupt(basejiff, basemono);
		ts->next_timer = next_tick;
	}

	/*
	 * If the tick is due in the next period, keep it ticking, or
	 * reprogram it if it was already stopped.
	 */
	delta = next_tick - basemono;
	if (delta <= (u64)TICK_NSEC) {
		/*
		 * Tell the timer code that the base is not idle, i.e. undo
		 * the effect of get_next_timer_interrupt():
		 */
		timer_clear_idle();
		/*
		 * We've not stopped the tick yet, and there's a timer in the
		 * next period, so no point in stopping it either, bail.
		 */
		if (!ts->tick_stopped) {
			ts->timer_expires = 0;
			goto out;
		}
	}

	/*
	 * If this CPU is the one which had the do_timer() duty last, we limit
	 * the sleep time to the timekeeping max_deferment value.
	 * Otherwise we can sleep as long as we want.
	 */
	delta = timekeeping_max_deferment();
	if (cpu != tick_do_timer_cpu &&
	    (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last))
		delta = KTIME_MAX;

	/* Calculate the next expiry time */
	if (delta < (KTIME_MAX - basemono))
		expires = basemono + delta;
	else
		expires = KTIME_MAX;

	ts->timer_expires = min_t(u64, expires, next_tick);

out:
	return ts->timer_expires;
}
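
/*
 * In short (illustrative): the value stored in ts->timer_expires is roughly
 *
 *	min(next timer wheel/hrtimer expiry,
 *	    basemono + timekeeping_max_deferment())
 *
 * where the deferment clamp only applies to the CPU which has, or last had,
 * the do_timer() duty, and a return value of 0 means "keep the tick".
 */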

static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	u64 basemono = ts->timer_expires_base;
	u64 expires = ts->timer_expires;
	ktime_t tick = expires;

	/* Make sure we won't be trying to stop it twice in a row. */
	ts->timer_expires_base = 0;

	/*
	 * If this CPU is the one which updates jiffies, then give up
	 * the assignment and let it be taken by the CPU which runs
	 * the tick timer next, which might be this CPU as well. If we
	 * don't drop this here the jiffies might be stale and
	 * do_timer() never invoked. Keep track of the fact that it
	 * was the one which had the do_timer() duty last.
	 */
	if (cpu == tick_do_timer_cpu) {
		tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		ts->do_timer_last = 1;
	} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
		ts->do_timer_last = 0;
	}

	/* Skip reprogramming the event if the expiry time has not changed */
	if (ts->tick_stopped && (expires == ts->next_tick)) {
		/* Sanity check: make sure clockevent is actually programmed */
		if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
			return;

		WARN_ON_ONCE(1);
		printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
			    basemono, ts->next_tick, dev->next_event,
			    hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer));
	}

	/*
	 * tick_nohz_stop_tick() can be called several times before
	 * tick_nohz_restart_sched_tick() is called. This happens when
	 * interrupts arrive which do not cause a reschedule. In the
	 * first call we save the current tick time, so we can restart
	 * the scheduler tick in tick_nohz_restart_sched_tick().
	 */
	if (!ts->tick_stopped) {
		calc_load_nohz_start();
		quiet_vmstat();

		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
		ts->tick_stopped = 1;
		trace_tick_stop(1, TICK_DEP_MASK_NONE);
	}

	ts->next_tick = tick;

	/*
	 * If the expiration time == KTIME_MAX, then we simply stop
	 * the tick timer.
	 */
	if (unlikely(expires == KTIME_MAX)) {
		if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
			hrtimer_cancel(&ts->sched_timer);
		else
			tick_program_event(KTIME_MAX, 1);
		return;
	}

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start(&ts->sched_timer, tick,
			      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		hrtimer_set_expires(&ts->sched_timer, tick);
		tick_program_event(tick, 1);
	}
}

static void tick_nohz_retain_tick(struct tick_sched *ts)
{
	ts->timer_expires_base = 0;
}

#ifdef CONFIG_NO_HZ_FULL
static void tick_nohz_stop_sched_tick(struct tick_sched *ts, int cpu)
{
	if (tick_nohz_next_event(ts, cpu))
		tick_nohz_stop_tick(ts, cpu);
	else
		tick_nohz_retain_tick(ts);
}
#endif /* CONFIG_NO_HZ_FULL */

static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
	/* Update jiffies first */
	tick_do_update_jiffies64(now);

	/*
	 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
	 * the clock forward checks in the enqueue path:
	 */
	timer_clear_idle();

	calc_load_nohz_stop();
	touch_softlockup_watchdog_sched();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	tick_nohz_restart(ts, now);
}

static void __tick_nohz_full_update_tick(struct tick_sched *ts,
					 ktime_t now)
{
#ifdef CONFIG_NO_HZ_FULL
	int cpu = smp_processor_id();

	if (can_stop_full_tick(cpu, ts))
		tick_nohz_stop_sched_tick(ts, cpu);
	else if (ts->tick_stopped)
		tick_nohz_restart_sched_tick(ts, now);
#endif
}

static void tick_nohz_full_update_tick(struct tick_sched *ts)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
		return;

	__tick_nohz_full_update_tick(ts, ktime_get());
}

/*
 * A pending softirq outside an IRQ (or softirq disabled section) context
 * should be waiting for ksoftirqd to handle it. Therefore we shouldn't
 * reach here due to the need_resched() early check in can_stop_idle_tick().
 *
 * However if we are between CPUHP_AP_SMPBOOT_THREADS and CPUHP_TEARDOWN_CPU
 * on the cpu_down() process, softirqs can still be raised while ksoftirqd
 * is parked, triggering the below since wakeup_softirqd() is ignored.
 */
static bool report_idle_softirq(void)
{
	static int ratelimit;
	unsigned int pending = local_softirq_pending();

	if (likely(!pending))
		return false;

	/* Some softirqs claim to be safe against hotplug and ksoftirqd parking */
	if (!cpu_active(smp_processor_id())) {
		pending &= ~SOFTIRQ_HOTPLUG_SAFE_MASK;
		if (!pending)
			return false;
	}

	if (ratelimit >= 10)
		return false;

	/* On RT, softirq handling may be waiting on some lock */
	if (!local_bh_blocked())
		return false;

	pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n",
		pending);
	ratelimit++;

	return true;
}

static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
	/*
	 * If this CPU is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the CPU which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		/*
		 * Make sure the CPU doesn't get fooled by obsolete tick
		 * deadline if it comes back online later.
		 */
		ts->next_tick = 0;
		return false;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		return false;

	if (need_resched())
		return false;

	if (unlikely(report_idle_softirq()))
		return false;

	if (tick_nohz_full_enabled()) {
		/*
		 * Keep the tick alive to guarantee timekeeping progression
		 * if there are full dynticks CPUs around
		 */
		if (tick_do_timer_cpu == cpu)
			return false;

		/* Should not happen for nohz-full */
		if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
			return false;
	}

	return true;
}

static void __tick_nohz_idle_stop_tick(struct tick_sched *ts)
{
	ktime_t expires;
	int cpu = smp_processor_id();

	/*
	 * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the
	 * tick timer expiration time is known already.
	 */
	if (ts->timer_expires_base)
		expires = ts->timer_expires;
	else if (can_stop_idle_tick(cpu, ts))
		expires = tick_nohz_next_event(ts, cpu);
	else
		return;

	ts->idle_calls++;

	if (expires > 0LL) {
		int was_stopped = ts->tick_stopped;

		tick_nohz_stop_tick(ts, cpu);

		ts->idle_sleeps++;
		ts->idle_expires = expires;

		if (!was_stopped && ts->tick_stopped) {
			ts->idle_jiffies = ts->last_jiffies;
			nohz_balance_enter_idle(cpu);
		}
	} else {
		tick_nohz_retain_tick(ts);
	}
}

/**
 * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick
 */
void tick_nohz_idle_stop_tick(void)
{
	__tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched));
}

void tick_nohz_idle_retain_tick(void)
{
	tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched));
	/*
	 * Undo the effect of get_next_timer_interrupt() called from
	 * tick_nohz_next_event().
	 */
	timer_clear_idle();
}

/**
 * tick_nohz_idle_enter - prepare for entering idle on the current CPU
 *
 * Called when we start the idle loop.
 */
void tick_nohz_idle_enter(void)
{
	struct tick_sched *ts;

	lockdep_assert_irqs_enabled();

	local_irq_disable();

	ts = this_cpu_ptr(&tick_cpu_sched);

	WARN_ON_ONCE(ts->timer_expires_base);

	ts->inidle = 1;
	tick_nohz_start_idle(ts);

	local_irq_enable();
}

/**
 * tick_nohz_irq_exit - update next tick event from interrupt exit
 *
 * When an interrupt fires while we are idle and it doesn't cause
 * a reschedule, it may still add, modify or delete a timer, enqueue
 * an RCU callback, etc...
 * So we need to re-calculate and reprogram the next tick event.
 */
void tick_nohz_irq_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->inidle)
		tick_nohz_start_idle(ts);
	else
		tick_nohz_full_update_tick(ts);
}

/**
 * tick_nohz_idle_got_tick - Check whether or not the tick handler has run
 */
bool tick_nohz_idle_got_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->got_idle_tick) {
		ts->got_idle_tick = 0;
		return true;
	}
	return false;
}

/**
 * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
 * or the tick, whichever expires first. Note that, if the tick has been
 * stopped, it returns the next hrtimer.
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_next_hrtimer(void)
{
	return __this_cpu_read(tick_cpu_device.evtdev)->next_event;
}

/**
 * tick_nohz_get_sleep_length - return the expected length of the current sleep
 * @delta_next: duration until the next event if the tick cannot be stopped
 *
 * Called from power state control code with interrupts disabled.
 *
 * The return value of this function and/or the value returned by it through the
 * @delta_next pointer can be negative, which must be taken into account by its
 * callers.
 */
ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	int cpu = smp_processor_id();
	/*
	 * The idle entry time is expected to be a sufficient approximation of
	 * the current time at this point.
	 */
	ktime_t now = ts->idle_entrytime;
	ktime_t next_event;

	WARN_ON_ONCE(!ts->inidle);

	*delta_next = ktime_sub(dev->next_event, now);

	if (!can_stop_idle_tick(cpu, ts))
		return *delta_next;

	next_event = tick_nohz_next_event(ts, cpu);
	if (!next_event)
		return *delta_next;

	/*
	 * If the next highres timer to expire is earlier than next_event, the
	 * idle governor needs to know that.
	 */
	next_event = min_t(u64, next_event,
			   hrtimer_next_event_without(&ts->sched_timer));

	return ktime_sub(next_event, now);
}
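
/*
 * Illustrative caller (sketch): a cpuidle governor typically does
 *
 *	ktime_t delta_next;
 *	ktime_t duration = tick_nohz_get_sleep_length(&delta_next);
 *
 * and then picks an idle state whose target residency fits within
 * 'duration', or within 'delta_next' if it decides not to stop the tick.
 */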

/**
 * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
 * for a particular CPU.
 *
 * Called from the schedutil frequency scaling governor in scheduler context.
 */
unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
{
	struct tick_sched *ts = tick_get_tick_sched(cpu);

	return ts->idle_calls;
}

/**
 * tick_nohz_get_idle_calls - return the current idle calls counter value
 *
 * Called from the schedutil frequency scaling governor in scheduler context.
 */
unsigned long tick_nohz_get_idle_calls(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->idle_calls;
}

static void tick_nohz_account_idle_time(struct tick_sched *ts,
					ktime_t now)
{
	unsigned long ticks;

	ts->idle_exittime = now;

	if (vtime_accounting_enabled_this_cpu())
		return;
	/*
	 * We stopped the tick in idle. update_process_times() would miss the
	 * time we slept, as it only does a single tick worth of accounting.
	 * Enforce that this is accounted to idle!
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
}

void tick_nohz_idle_restart_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped) {
		ktime_t now = ktime_get();
		tick_nohz_restart_sched_tick(ts, now);
		tick_nohz_account_idle_time(ts, now);
	}
}

static void tick_nohz_idle_update_tick(struct tick_sched *ts, ktime_t now)
{
	if (tick_nohz_full_cpu(smp_processor_id()))
		__tick_nohz_full_update_tick(ts, now);
	else
		tick_nohz_restart_sched_tick(ts, now);

	tick_nohz_account_idle_time(ts, now);
}

/**
 * tick_nohz_idle_exit - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle. This also
 * exits the RCU extended quiescent state. The CPU can use RCU again
 * after this function is called.
 */
void tick_nohz_idle_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	bool idle_active, tick_stopped;
	ktime_t now;

	local_irq_disable();

	WARN_ON_ONCE(!ts->inidle);
	WARN_ON_ONCE(ts->timer_expires_base);

	ts->inidle = 0;
	idle_active = ts->idle_active;
	tick_stopped = ts->tick_stopped;

	if (idle_active || tick_stopped)
		now = ktime_get();

	if (idle_active)
		tick_nohz_stop_idle(ts, now);

	if (tick_stopped)
		tick_nohz_idle_update_tick(ts, now);

	local_irq_enable();
}

/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	dev->next_event = KTIME_MAX;

	tick_sched_do_timer(ts, now);
	tick_sched_handle(ts, regs);

	if (unlikely(ts->tick_stopped)) {
		/*
		 * The clockevent device is not reprogrammed, so change the
		 * clock event device to ONESHOT_STOPPED to avoid spurious
		 * interrupts on devices which might not be truly one shot.
		 */
		tick_program_event(KTIME_MAX, 1);
		return;
	}

	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}

static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
{
	if (!tick_nohz_enabled)
		return;
	ts->nohz_mode = mode;
	/* One update is enough */
	if (!test_and_set_bit(0, &tick_nohz_active))
		timers_update_nohz();
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	if (tick_switch_to_oneshot(tick_nohz_handler))
		return;

	/*
	 * Recycle the hrtimer in 'ts', so we can share the
	 * hrtimer_forward() code with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	/* Get the next period */
	next = tick_init_jiffy_update();

	hrtimer_set_expires(&ts->sched_timer, next);
	hrtimer_forward_now(&ts->sched_timer, TICK_NSEC);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}

static inline void tick_nohz_irq_enter(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now;

	if (!ts->idle_active && !ts->tick_stopped)
		return;
	now = ktime_get();
	if (ts->idle_active)
		tick_nohz_stop_idle(ts, now);
	/*
	 * If all CPUs are idle, we may need to update a stale jiffies value.
	 * Note nohz_full is a special case: a timekeeper is guaranteed to stay
	 * alive but it might be busy looping with interrupts disabled in some
	 * rare case (typically stop machine). So we must make sure we have a
	 * last resort.
	 */
	if (ts->tick_stopped)
		tick_nohz_update_jiffies(now);
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_nohz_irq_enter(void) { }
static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { }

#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
void tick_irq_enter(void)
{
	tick_check_oneshot_broadcast_this_cpu();
	tick_nohz_irq_enter();
}

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	tick_sched_do_timer(ts, now);

	/*
	 * Do not call tick_sched_handle() when we are not in IRQ context
	 * and have no valid 'regs' pointer.
	 */
	if (regs)
		tick_sched_handle(ts, regs);
	else
		ts->next_tick = 0;

	/* No need to reprogram if we are in idle or full dynticks mode */
	if (unlikely(ts->tick_stopped))
		return HRTIMER_NORESTART;

	hrtimer_forward(timer, now, TICK_NSEC);

	return HRTIMER_RESTART;
}

static int sched_skew_tick;

static int __init skew_tick(char *str)
{
	get_option(&str, &sched_skew_tick);

	return 0;
}
early_param("skew_tick", skew_tick);
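
/*
 * Example: booting a large SMP machine with "skew_tick=1" staggers the
 * per-CPU tick timers (see the offset computation in
 * tick_setup_sched_timer() below), so that the CPUs don't all contend on
 * jiffies_lock at the same instant.
 */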

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now = ktime_get();

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per-CPU) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());

	/* Offset the tick to avert jiffies_lock contention. */
	if (sched_skew_tick) {
		u64 offset = TICK_NSEC >> 1;
		do_div(offset, num_possible_cpus());
		offset *= smp_processor_id();
		hrtimer_add_expires_ns(&ts->sched_timer, offset);
	}

	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
	hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
	tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
}
#endif /* HIGH_RES_TIMERS */

#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	memset(ts, 0, sizeof(*ts));
}
#endif

/*
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}

/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}

/*
 * Check if a change happened which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at compile
 * time or at runtime). Called with interrupts disabled.
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}