xref: /openbmc/linux/kernel/sched/idle.c (revision 92a2c6b2)
1 /*
2  * Generic entry point for the idle threads
3  */
4 #include <linux/sched.h>
5 #include <linux/cpu.h>
6 #include <linux/cpuidle.h>
7 #include <linux/tick.h>
8 #include <linux/mm.h>
9 #include <linux/stackprotector.h>
10 #include <linux/suspend.h>
11 
12 #include <asm/tlb.h>
13 
14 #include <trace/events/power.h>
15 
16 #include "sched.h"
17 
18 static int __read_mostly cpu_idle_force_poll;
19 
20 void cpu_idle_poll_ctrl(bool enable)
21 {
22 	if (enable) {
23 		cpu_idle_force_poll++;
24 	} else {
25 		cpu_idle_force_poll--;
26 		WARN_ON_ONCE(cpu_idle_force_poll < 0);
27 	}
28 }
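
Editor's note: a minimal usage sketch of the reference-counted poll control above, not part of the original file. A caller that cannot tolerate deep-idle wakeup latency brackets its critical window with the enable/disable pair; only cpu_idle_poll_ctrl() comes from this file, do_low_latency_work() and the wrapper are hypothetical.

	#include <linux/cpu.h>

	static void do_low_latency_work(void)
	{
		/* hypothetical latency-sensitive workload */
	}

	static void example_low_latency_window(void)
	{
		cpu_idle_poll_ctrl(true);	/* idle CPUs now spin in cpu_idle_poll() */
		do_low_latency_work();
		cpu_idle_poll_ctrl(false);	/* drop the reference; deep idle is allowed again */
	}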
29 
30 #ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
31 static int __init cpu_idle_poll_setup(char *__unused)
32 {
33 	cpu_idle_force_poll = 1;
34 	return 1;
35 }
36 __setup("nohlt", cpu_idle_poll_setup);
37 
38 static int __init cpu_idle_nopoll_setup(char *__unused)
39 {
40 	cpu_idle_force_poll = 0;
41 	return 1;
42 }
43 __setup("hlt", cpu_idle_nopoll_setup);
44 #endif
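
Editor's note: the "nohlt"/"hlt" handlers above use the standard __setup() boot-parameter pattern. A sketch of the same shape for a hypothetical "idledebug" flag (illustration only, not part of this file); returning 1 tells the boot code the option was consumed.

	#include <linux/init.h>

	static bool idledebug_enabled;

	static int __init idledebug_setup(char *__unused)
	{
		idledebug_enabled = true;	/* "idledebug" was found on the kernel command line */
		return 1;			/* option handled, don't pass it on to init */
	}
	__setup("idledebug", idledebug_setup);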
45 
46 static inline int cpu_idle_poll(void)
47 {
48 	rcu_idle_enter();
49 	trace_cpu_idle_rcuidle(0, smp_processor_id());
50 	local_irq_enable();
51 	while (!tif_need_resched() &&
52 		(cpu_idle_force_poll || tick_check_broadcast_expired()))
53 		cpu_relax();
54 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
55 	rcu_idle_exit();
56 	return 1;
57 }
58 
59 /* Weak implementations for optional arch-specific functions */
60 void __weak arch_cpu_idle_prepare(void) { }
61 void __weak arch_cpu_idle_enter(void) { }
62 void __weak arch_cpu_idle_exit(void) { }
63 void __weak arch_cpu_idle_dead(void) { }
64 void __weak arch_cpu_idle(void)
65 {
66 	cpu_idle_force_poll = 1;
67 	local_irq_enable();
68 }
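
Editor's note: the __weak definitions above are only defaults; an architecture overrides them by supplying strong definitions. A sketch of what an x86-flavoured override can look like (illustration only; safe_halt() is the x86 helper, other archs use their own low-power wait). The one contract visible in this file is that arch_cpu_idle() must return with interrupts enabled, since cpuidle_idle_call() warns if they are still disabled afterwards.

	void arch_cpu_idle(void)
	{
		/*
		 * Enable interrupts and halt until the next one arrives;
		 * the generic idle code expects irqs back on when we return.
		 */
		safe_halt();	/* x86: sti; hlt */
	}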
69 
70 /**
71  * cpuidle_idle_call - the main idle function
72  *
73  * NOTE: no locks or semaphores should be used here
74  *
75  * On archs that support TIF_POLLING_NRFLAG, this function is called
76  * with polling set and returns with polling set.  If it ever stops
77  * polling, it must clear the polling bit.
78  */
79 static void cpuidle_idle_call(void)
80 {
81 	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
82 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
83 	int next_state, entered_state;
84 	unsigned int broadcast;
85 
86 	/*
87 	 * Check if the idle task must be rescheduled. If so, exit the
88 	 * function after re-enabling the local irq.
89 	 */
90 	if (need_resched()) {
91 		local_irq_enable();
92 		return;
93 	}
94 
95 	/*
96 	 * During the idle period, stop measuring the latencies of
97 	 * irqs-disabled critical sections.
98 	 */
99 	stop_critical_timings();
100 
101 	/*
102 	 * Tell the RCU framework we are entering an idle section,
103 	 * so no more RCU read-side critical sections may occur and
104 	 * the grace period can advance by one more step.
105 	 */
106 	rcu_idle_enter();
107 
108 	/*
109 	 * Suspend-to-idle ("freeze") is a system state in which all user space
110 	 * has been frozen, all I/O devices have been suspended and the only
111 	 * activity happens here and in interrupts (if any).  In that case bypass
112 	 * the cpuidle governor and go straight for the deepest idle state
113 	 * available.  Possibly also suspend the local tick and the entire
114 	 * timekeeping to prevent timer interrupts from kicking us out of idle
115 	 * until a proper wakeup interrupt happens.
116 	 */
117 	if (idle_should_freeze()) {
118 		cpuidle_enter_freeze();
119 		local_irq_enable();
120 		goto exit_idle;
121 	}
122 
123 	/*
124 	 * Ask the cpuidle framework to choose a suitable idle state.
125 	 * Fall back to the default arch idle method on errors.
126 	 */
127 	next_state = cpuidle_select(drv, dev);
128 	if (next_state < 0) {
129 use_default:
130 		/*
131 		 * We can't use the cpuidle framework, let's use the default
132 		 * idle routine.
133 		 */
134 		if (current_clr_polling_and_test())
135 			local_irq_enable();
136 		else
137 			arch_cpu_idle();
138 
139 		goto exit_idle;
140 	}
141 
142 
143 	/*
144 	 * The idle task must be rescheduled, so it is pointless to
145 	 * go idle: record that no idle time was spent and get
146 	 * out of this function.
147 	 */
148 	if (current_clr_polling_and_test()) {
149 		dev->last_residency = 0;
150 		entered_state = next_state;
151 		local_irq_enable();
152 		goto exit_idle;
153 	}
154 
155 	broadcast = drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP;
156 
157 	/*
158 	 * Tell the time framework to switch to a broadcast timer
159 	 * because our local timer will be shut down. If a local timer
160 	 * is used from another cpu as a broadcast timer, this call may
161 	 * fail if it is not available.
162 	 */
163 	if (broadcast &&
164 	    clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu))
165 		goto use_default;
166 
167 	/* Take note of the planned idle state. */
168 	idle_set_state(this_rq(), &drv->states[next_state]);
169 
170 	/*
171 	 * Enter the idle state previously chosen by the governor.
172 	 * This function will block until an interrupt occurs and will take
173 	 * care of re-enabling the local interrupts.
174 	 */
175 	entered_state = cpuidle_enter(drv, dev, next_state);
176 
177 	/* The cpu is no longer idle or about to enter idle. */
178 	idle_set_state(this_rq(), NULL);
179 
180 	if (broadcast)
181 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
182 
183 	/*
184 	 * Give the governor an opportunity to reflect on the outcome
185 	 */
186 	cpuidle_reflect(dev, entered_state);
187 
188 exit_idle:
189 	__current_set_polling();
190 
191 	/*
192 	 * It is up to the idle functions to reenable local interrupts
193 	 * It is up to the idle functions to re-enable local interrupts.
194 	if (WARN_ON_ONCE(irqs_disabled()))
195 		local_irq_enable();
196 
197 	rcu_idle_exit();
198 	start_critical_timings();
199 }
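
Editor's note: a rough sketch of the polling contract cpuidle_idle_call() leans on (illustration only; the real current_clr_polling_and_test() lives in include/linux/sched.h and also handles the !TIF_POLLING_NRFLAG case). The point is the ordering: clear the polling bit first, then re-check need_resched(), so a wakeup that raced with the clear is seen before the CPU commits to a non-polling idle state.

	static inline bool example_clr_polling_and_test(void)
	{
		/* stop advertising that we poll TIF_NEED_RESCHED */
		clear_thread_flag(TIF_POLLING_NRFLAG);
		/* order the clear against the waker's need_resched write */
		smp_mb__after_atomic();
		/* catch a wakeup that raced with the clear */
		return tif_need_resched();
	}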
200 
201 /*
202  * Generic idle loop implementation
203  *
204  * Called with polling cleared.
205  */
206 static void cpu_idle_loop(void)
207 {
208 	while (1) {
209 		/*
210 		 * If the arch has a polling bit, we maintain an invariant:
211 		 *
212 		 * Our polling bit is clear if we're not scheduled (i.e. if
213 		 * rq->curr != rq->idle).  This means that, if rq->idle has
214 		 * the polling bit set, then setting need_resched is
215 		 * guaranteed to cause the cpu to reschedule.
216 		 */
217 
218 		__current_set_polling();
219 		tick_nohz_idle_enter();
220 
221 		while (!need_resched()) {
222 			check_pgt_cache();
223 			rmb();
224 
225 			if (cpu_is_offline(smp_processor_id()))
226 				arch_cpu_idle_dead();
227 
228 			local_irq_disable();
229 			arch_cpu_idle_enter();
230 
231 			/*
232 			 * In poll mode we re-enable interrupts and spin.
233 			 *
234 			 * Also, if we detected in the wakeup-from-idle
235 			 * path that the tick broadcast device expired
236 			 * for us, we don't want to enter deep idle as we
237 			 * know that the IPI is going to arrive right
238 			 * away.
239 			 */
240 			if (cpu_idle_force_poll || tick_check_broadcast_expired())
241 				cpu_idle_poll();
242 			else
243 				cpuidle_idle_call();
244 
245 			arch_cpu_idle_exit();
246 		}
247 
248 		/*
249 		 * Since we fell out of the loop above, we know
250 		 * TIF_NEED_RESCHED must be set, propagate it into
251 		 * PREEMPT_NEED_RESCHED.
252 		 *
253 		 * This is required because for polling idle loops we will
254 		 * not have had an IPI to fold the state for us.
255 		 */
256 		preempt_set_need_resched();
257 		tick_nohz_idle_exit();
258 		__current_clr_polling();
259 
260 		/*
261 		 * We promise to call sched_ttwu_pending and reschedule
262 		 * if need_resched is set while polling is set.  That
263 		 * means that clearing polling needs to be visible
264 		 * before doing these things.
265 		 */
266 		smp_mb__after_atomic();
267 
268 		sched_ttwu_pending();
269 		schedule_preempt_disabled();
270 	}
271 }
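
Editor's note: the invariant documented at the top of the loop is what lets a remote waker avoid the IPI. If the idle task still has TIF_POLLING_NRFLAG set, setting TIF_NEED_RESCHED is enough, because the inner while loop re-checks need_resched() on every pass. A simplified, non-atomic sketch of that waker-side decision (illustration only; the real helper, set_nr_if_polling() in kernel/sched/core.c, does this with a cmpxchg on the thread_info flags):

	static bool example_try_wake_polling_idle(struct task_struct *idle)
	{
		struct thread_info *ti = task_thread_info(idle);

		if (!(READ_ONCE(ti->flags) & _TIF_POLLING_NRFLAG))
			return false;		/* not polling: the caller must send an IPI */

		set_tsk_need_resched(idle);	/* the spinning loop above will notice */
		return true;
	}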
272 
273 void cpu_startup_entry(enum cpuhp_state state)
274 {
275 	/*
276 	 * This #ifdef needs to die, but it's too late in the cycle to
277 	 * make this generic (arm and sh have never invoked the canary
278 	 * init for the non-boot CPUs!). Will be fixed in 3.11
279 	 */
280 #ifdef CONFIG_X86
281 	/*
282 	 * If we're the non-boot CPU, nothing set the stack canary up
283 	 * for us. The boot CPU already has it initialized but no harm
284 	 * in doing it again. This is a good place for updating it, as
285 	 * we won't ever return from this function (so the invalid
286 	 * canaries already on the stack won't ever trigger).
287 	 */
288 	boot_init_stack_canary();
289 #endif
290 	arch_cpu_idle_prepare();
291 	cpu_idle_loop();
292 }
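
Editor's note: cpu_startup_entry() never returns; it is the terminal call of both the boot CPU's init path (rest_init() in init/main.c) and each architecture's secondary-CPU bring-up, and the loop above expects preemption to already be disabled (see schedule_preempt_disabled()). A hypothetical bring-up tail might look like this (illustration only; the function name is made up, CPUHP_ONLINE is the state callers pass in this kernel version):

	static void example_secondary_start(void)
	{
		/* ... arch-specific bring-up: MMU, per-cpu state, local timer ... */
		cpu_startup_entry(CPUHP_ONLINE);	/* enters cpu_idle_loop(); never returns */
	}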
293