idle.c: comparison of bb8313b603eb8fd52de48a079bfcd72dcab2ef1e (old) and c1de45ca831acee9b72c9320dde447edafadb43f (new)
Unchanged in both versions (top of file):

/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpuhotplug.h>
#include <linux/tick.h>

--- 191 unchanged lines hidden ---

	rcu_idle_exit();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
Old version (bb8313b603eb), cpu_idle_loop():

static void cpu_idle_loop(void)
{
	int cpu = smp_processor_id();

	while (1) {
		/*
		 * If the arch has a polling bit, we maintain an invariant:
		 *
		 * Our polling bit is clear if we're not scheduled (i.e. if
		 * rq->curr != rq->idle). This means that, if rq->idle has
		 * the polling bit set, then setting need_resched is
		 * guaranteed to cause the cpu to reschedule.
		 */

		__current_set_polling();
		quiet_vmstat();
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(cpu)) {
				cpuhp_report_idle_dead();
				arch_cpu_idle_dead();
			}

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we reenable interrupts and spin.
			 *
			 * Also if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired())
				cpu_idle_poll();
			else
				cpuidle_idle_call();

			arch_cpu_idle_exit();
		}

		/*
		 * Since we fell out of the loop above, we know
		 * TIF_NEED_RESCHED must be set, propagate it into
		 * PREEMPT_NEED_RESCHED.
		 *
		 * This is required because for polling idle loops we will
		 * not have had an IPI to fold the state for us.
		 */
		preempt_set_need_resched();
		tick_nohz_idle_exit();
		__current_clr_polling();

		/*
		 * We promise to call sched_ttwu_pending and reschedule
		 * if need_resched is set while polling is set. That
		 * means that clearing polling needs to be visible
		 * before doing these things.
		 */
		smp_mb__after_atomic();

		sched_ttwu_pending();
		schedule_preempt_disabled();
	}
}

New version (c1de45ca831a), do_idle(): the body of the old outer while (1) loop becomes a function covering a single pass through idle; the outer loop moves to cpu_startup_entry() (end of file), and quiet_vmstat() is no longer called here.

static void do_idle(void)
{
	/*
	 * If the arch has a polling bit, we maintain an invariant:
	 *
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle). This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */

	__current_set_polling();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		check_pgt_cache();
		rmb();

		if (cpu_is_offline(smp_processor_id())) {
			cpuhp_report_idle_dead();
			arch_cpu_idle_dead();
		}

		local_irq_disable();
		arch_cpu_idle_enter();

		/*
		 * In poll mode we reenable interrupts and spin. Also if we
		 * detected in the wakeup from idle path that the tick
		 * broadcast device expired for us, we don't want to go deep
		 * idle as we know that the IPI is going to arrive right away.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired())
			cpu_idle_poll();
		else
			cpuidle_idle_call();
		arch_cpu_idle_exit();
	}

	/*
	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
	 * be set, propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set. That means that clearing
	 * polling needs to be visible before doing these things.
	 */
	smp_mb__after_atomic();

	sched_ttwu_pending();
	schedule_preempt_disabled();
}

Unchanged in both versions:

bool cpu_in_idle(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
		pc < (unsigned long)__cpuidle_text_end;
}
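For context on the polling-bit invariant documented at the top of do_idle(): its payoff is on the waker side, where setting TIF_NEED_RESCHED on a polling idle task is enough to break it out of the while (!need_resched()) loop, so the IPI can be skipped. A simplified sketch of that path, loosely following wake_up_idle_cpu() in kernel/sched/core.c of the same era (illustration only, not part of this diff; treat the exact shape as an approximation):

/* Illustrative sketch only -- not part of this diff. */
static void wake_idle_cpu_sketch(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	/*
	 * Atomically set TIF_NEED_RESCHED on the remote idle task and
	 * check whether its polling bit (TIF_POLLING_NRFLAG) was clear.
	 */
	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);	/* not polling: kick it with an IPI */
	else
		trace_sched_wake_idle_without_ipi(cpu);	/* polling loop will see need_resched() */
}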
Added in the new version (c1de45ca831a):

struct idle_timer {
	struct hrtimer timer;
	int done;
};

static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	struct idle_timer *it = container_of(timer, struct idle_timer, timer);

	WRITE_ONCE(it->done, 1);
	set_tsk_need_resched(current);

	return HRTIMER_NORESTART;
}

void play_idle(unsigned long duration_ms)
{
	struct idle_timer it;

	/*
	 * Only FIFO tasks can disable the tick since they don't need the forced
	 * preemption.
	 */
	WARN_ON_ONCE(current->policy != SCHED_FIFO);
	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
	WARN_ON_ONCE(!duration_ms);

	rcu_sleep_check();
	preempt_disable();
	current->flags |= PF_IDLE;
	cpuidle_use_deepest_state(true);

	it.done = 0;
	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	it.timer.function = idle_inject_timer_fn;
	hrtimer_start(&it.timer, ms_to_ktime(duration_ms), HRTIMER_MODE_REL_PINNED);

	while (!READ_ONCE(it.done))
		do_idle();

	cpuidle_use_deepest_state(false);
	current->flags &= ~PF_IDLE;

	preempt_fold_need_resched();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle);
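A hypothetical caller, to illustrate the constraints encoded by the WARN_ON_ONCE() checks above (this sketch is not part of the patch; the function name and the 10 ms/40 ms duty cycle are made up, and it assumes the thread was created with kthread_create_on_cpu(), which pins it to one CPU and sets PF_NO_SETAFFINITY):

#include <linux/kthread.h>	/* kthread_should_stop() */
#include <linux/delay.h>	/* msleep() */
#include <linux/sched.h>	/* SCHED_FIFO, struct sched_param, MAX_USER_RT_PRIO */

/* play_idle() prototype assumed visible; it is declared outside idle.c. */

/*
 * Hypothetical per-CPU idle-injection thread: forces its CPU idle for
 * 10 ms out of every 50 ms until asked to stop.
 */
static int idle_inject_thread_fn(void *unused)
{
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };

	/* play_idle() expects a pinned SCHED_FIFO kthread. */
	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);

	while (!kthread_should_stop()) {
		play_idle(10);	/* force this CPU through do_idle() for 10 ms */
		msleep(40);	/* then let normal work run for the rest of the period */
	}

	return 0;
}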
Finally, cpu_startup_entry() is identical in both versions apart from its final statement: the outer idle loop, formerly hidden inside cpu_idle_loop(), now lives here.

void cpu_startup_entry(enum cpuhp_state state)
{
	/*
	 * This #ifdef needs to die, but it's too late in the cycle to
	 * make this generic (arm and sh have never invoked the canary
	 * init for the non boot cpus!). Will be fixed in 3.11
	 */
#ifdef CONFIG_X86
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. The boot CPU already has it initialized but no harm
	 * in doing it again. This is a good place for updating it, as
	 * we wont ever return from this function (so the invalid
	 * canaries already on the stack wont ever trigger).
	 */
	boot_init_stack_canary();
#endif
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);

Old version (bb8313b603eb) then ends with:

	cpu_idle_loop();
}

New version (c1de45ca831a) ends with:

	while (1)
		do_idle();
}