// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic entry points for the idle threads and
 * implementation of the idle task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE batch scheduled
 *        tasks which are handled in sched/fair.c )
 */

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}

static int __read_mostly cpu_idle_force_poll;

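/*
 * cpu_idle_force_poll is a reference count: each cpu_idle_poll_ctrl(true)
 * forces the idle loop into cpu_idle_poll() until it is balanced by a
 * matching cpu_idle_poll_ctrl(false). An unbalanced disable trips the
 * WARN_ON_ONCE() below.
 */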
void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;

	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;

	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

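/*
 * Polling idle loop: spin with interrupts enabled until a reschedule is
 * needed, instead of entering a low-power state. Used when polling has been
 * forced (e.g. via "nohlt" or cpu_idle_poll_ctrl()) or when the tick
 * broadcast device has already expired for this CPU.
 */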
static noinline int __cpuidle cpu_idle_poll(void)
{
	instrumentation_begin();
	trace_cpu_idle(0, smp_processor_id());
	stop_critical_timings();
	ct_cpuidle_enter();

	raw_local_irq_enable();
	while (!tif_need_resched() &&
	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
	raw_local_irq_disable();

	ct_cpuidle_exit();
	start_critical_timings();
	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
	local_irq_enable();
	instrumentation_end();

	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak __noreturn arch_cpu_idle_dead(void) { while (1); }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
}

/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
	instrumentation_begin();
	if (!current_clr_polling_and_test()) {
		trace_cpu_idle(1, smp_processor_id());
		stop_critical_timings();

		ct_cpuidle_enter();
		arch_cpu_idle();
		ct_cpuidle_exit();

		start_critical_timings();
		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
	}
	local_irq_enable();
	instrumentation_end();
}

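/*
 * Suspend-to-idle helper: enter the s2idle state via cpuidle unless a
 * reschedule is already pending, in which case bail out with -EBUSY.
 */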
static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	if (current_clr_polling_and_test())
		return -EBUSY;

	return cpuidle_enter_s2idle(drv, dev);
}

static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/*
	 * The idle task needs to be rescheduled, so it is pointless to enter
	 * idle; record a zero idle residency and return.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency_ns = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts.
	 */
	return cpuidle_enter(drv, dev, next_state);
}

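/*
 * Idle state selection, in order of preference:
 *
 *  1. If cpuidle is not available, stop the tick and use default_idle_call().
 *  2. If suspend-to-idle is in progress, or an idle latency limit was forced
 *     (see play_idle_precise() below), bypass the governor and pick the
 *     deepest state within that limit.
 *  3. Otherwise let the cpuidle governor select a state, decide whether to
 *     stop the tick, and reflect on the outcome afterwards.
 */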
/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On architectures that support TIF_POLLING_NRFLAG, this function is called
 * with polling set, and it returns with polling set. If it ever stops polling,
 * it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = cpuidle_get_device();
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * The RCU framework needs to be told that we are entering an idle
	 * section, so no more rcu read side critical sections and one more
	 * step to the grace period.
	 */

	if (cpuidle_not_available(drv, dev)) {
		tick_nohz_idle_stop_tick();

		default_idle_call();
		goto exit_idle;
	}

	/*
	 * Suspend-to-idle ("s2idle") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available. Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */

	if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
		u64 max_latency_ns;

		if (idle_should_enter_s2idle()) {

			entered_state = call_cpuidle_s2idle(drv, dev);
			if (entered_state > 0)
				goto exit_idle;

			max_latency_ns = U64_MAX;
		} else {
			max_latency_ns = dev->forced_idle_latency_limit_ns;
		}

		tick_nohz_idle_stop_tick();

		next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
		call_cpuidle(drv, dev, next_state);
	} else {
		bool stop_tick = true;

		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev, &stop_tick);

		if (stop_tick || tick_nohz_tick_stopped())
			tick_nohz_idle_stop_tick();
		else
			tick_nohz_idle_retain_tick();

		entered_state = call_cpuidle(drv, dev, next_state);
		/*
		 * Give the governor an opportunity to reflect on the outcome.
		 */
		cpuidle_reflect(dev, entered_state);
	}

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to re-enable local interrupts.
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
	int cpu = smp_processor_id();

	/*
	 * Check if we need to update blocked load
	 */
	nohz_run_idle_balance(cpu);

	/*
	 * If the arch has a polling bit, we maintain an invariant:
	 *
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle). This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */

	__current_set_polling();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		rmb();

		local_irq_disable();

		if (cpu_is_offline(cpu)) {
			tick_nohz_idle_stop_tick();
			cpuhp_report_idle_dead();
			arch_cpu_idle_dead();
		}

		arch_cpu_idle_enter();
		rcu_nocb_flush_deferred_wakeup();

		/*
		 * In poll mode we re-enable interrupts and spin. Also if we
		 * detected in the wakeup from idle path that the tick
		 * broadcast device expired for us, we don't want to go deep
		 * idle as we know that the IPI is going to arrive right away.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
			tick_nohz_idle_restart_tick();
			cpu_idle_poll();
		} else {
			cpuidle_idle_call();
		}
		arch_cpu_idle_exit();
	}

	/*
	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
	 * be set, propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set. That means that clearing
	 * polling needs to be visible before doing these things.
	 */
	smp_mb__after_atomic();

	/*
	 * RCU relies on this call to be done outside of an RCU read-side
	 * critical section.
	 */
	flush_smp_call_function_queue();
	schedule_idle();

	if (unlikely(klp_patch_pending(current)))
		klp_update_patch_state(current);
}

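/*
 * cpu_in_idle - report whether @pc lies within the __cpuidle text section
 * delimited by __cpuidle_text_start/__cpuidle_text_end (used, for example,
 * to recognize CPUs that are merely idling when dumping backtraces).
 */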
bool cpu_in_idle(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
		pc < (unsigned long)__cpuidle_text_end;
}

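/*
 * Idle injection: play_idle_precise() below forces the current CPU into the
 * idle loop for a fixed duration. The hrtimer in struct idle_timer fires at
 * the end of that duration, sets need_resched and thereby kicks the CPU back
 * out of do_idle().
 */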
struct idle_timer {
	struct hrtimer timer;
	int done;
};

static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	struct idle_timer *it = container_of(timer, struct idle_timer, timer);

	WRITE_ONCE(it->done, 1);
	set_tsk_need_resched(current);

	return HRTIMER_NORESTART;
}

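/*
 * Typical usage (sketch): callers such as the idle injection framework run a
 * pinned SCHED_FIFO kthread per CPU and, from that kthread, call e.g.
 *
 *	play_idle_precise(duration_ns, U64_MAX);
 *
 * to idle the CPU for duration_ns; U64_MAX means no extra cpuidle latency
 * limit (the play_idle() wrapper in <linux/cpu.h>, for instance, passes
 * U64_MAX). The WARN_ON_ONCE() checks below spell out those caller
 * requirements.
 */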
void play_idle_precise(u64 duration_ns, u64 latency_ns)
{
	struct idle_timer it;

	/*
	 * Only FIFO tasks can disable the tick since they don't need the forced
	 * preemption.
	 */
	WARN_ON_ONCE(current->policy != SCHED_FIFO);
	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
	WARN_ON_ONCE(!duration_ns);
	WARN_ON_ONCE(current->mm);

	rcu_sleep_check();
	preempt_disable();
	current->flags |= PF_IDLE;
	cpuidle_use_deepest_state(latency_ns);

	it.done = 0;
	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	it.timer.function = idle_inject_timer_fn;
	hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
		      HRTIMER_MODE_REL_PINNED_HARD);

	while (!READ_ONCE(it.done))
		do_idle();

	cpuidle_use_deepest_state(0);
	current->flags &= ~PF_IDLE;

	preempt_fold_need_resched();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle_precise);

void cpu_startup_entry(enum cpuhp_state state)
{
	current->flags |= PF_IDLE;
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	while (1)
		do_idle();
}

/*
 * idle-task scheduling class.
 */

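/*
 * The idle task is never enqueued on a runqueue: there is no enqueue_task()
 * method, dequeue_task_idle() only complains, and several other methods
 * below are empty or BUG() because they should never be reached.
 */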
#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int cpu, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}

static int
balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	return WARN_ON_ONCE(1);
}
#endif

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_curr(rq);
}

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
	update_idle_core(rq);
	schedstat_inc(rq->sched_goidle);
}

#ifdef CONFIG_SMP
static struct task_struct *pick_task_idle(struct rq *rq)
{
	return rq->idle;
}
#endif

struct task_struct *pick_next_task_idle(struct rq *rq)
{
	struct task_struct *next = rq->idle;

	set_next_task_idle(rq, next, true);

	return next;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_rq_unlock_irq(rq);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_rq_lock_irq(rq);
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}

static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}

static void update_curr_idle(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
DEFINE_SCHED_CLASS(idle) = {

	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.wakeup_preempt		= wakeup_preempt_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,
	.set_next_task		= set_next_task_idle,

#ifdef CONFIG_SMP
	.balance		= balance_idle,
	.pick_task		= pick_task_idle,
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.task_tick		= task_tick_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
};