/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>
#include <linux/suspend.h>

#include <asm/tlb.h>

#include <trace/events/power.h>

#include "sched.h"

static int __read_mostly cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;
	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;
	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

static inline int cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!tif_need_resched() &&
	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();
	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}
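
/*
 * For illustration only (not part of this file): an architecture with a
 * halt-type instruction would typically override the weak default above
 * with something along these lines, where safe_halt() stands in for an
 * arch primitive that enables interrupts and halts atomically (as the
 * x86 one does):
 *
 *	void arch_cpu_idle(void)
 *	{
 *		safe_halt();
 *	}
 *
 * The weak default cannot halt, so it falls back to forcing the polling
 * loop instead.
 */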

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On archs that support TIF_POLLING_NRFLAG, this function is called
 * with polling set, and it returns with polling set. If it ever stops
 * polling, it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;
	unsigned int broadcast;
	bool reflect;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * During the idle period, stop measuring the disabled irqs
	 * critical sections latencies.
	 */
	stop_critical_timings();

	/*
	 * Tell the RCU framework we are entering an idle section,
	 * so no more rcu read side critical sections and one more
	 * step to the grace period.
	 */
	rcu_idle_enter();

	if (cpuidle_not_available(drv, dev))
		goto use_default;

	/*
	 * Suspend-to-idle ("freeze") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available. Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */
	if (idle_should_freeze()) {
		entered_state = cpuidle_enter_freeze(drv, dev);
		if (entered_state >= 0) {
			local_irq_enable();
			goto exit_idle;
		}

		reflect = false;
		next_state = cpuidle_find_deepest_state(drv, dev);
	} else {
		reflect = true;
		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev);
	}
	/* Fall back to the default arch idle method on errors. */
	if (next_state < 0)
		goto use_default;

	/*
	 * The idle task must be scheduled, so it is pointless to go
	 * idle; just record a zero idle residency and get out of this
	 * function.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency = 0;
		entered_state = next_state;
		local_irq_enable();
		goto exit_idle;
	}

	broadcast = drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP;

	/*
	 * Tell the time framework to switch to a broadcast timer
	 * because our local timer will be shut down. If a local timer
	 * is used from another cpu as a broadcast timer, this call may
	 * fail if it is not available.
	 */
	if (broadcast && tick_broadcast_enter())
		goto use_default;

	/* Take note of the planned idle state. */
	idle_set_state(this_rq(), &drv->states[next_state]);

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts.
	 */
	entered_state = cpuidle_enter(drv, dev, next_state);

	/* The cpu is no longer idle or about to enter idle. */
	idle_set_state(this_rq(), NULL);

	if (broadcast)
		tick_broadcast_exit();

	/*
	 * Give the governor an opportunity to reflect on the outcome.
	 */
	if (reflect)
		cpuidle_reflect(dev, entered_state);

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to re-enable local interrupts.
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	rcu_idle_exit();
	start_critical_timings();
	return;

use_default:
	/*
	 * We can't use the cpuidle framework, let's use the default
	 * idle routine.
	 */
	if (current_clr_polling_and_test())
		local_irq_enable();
	else
		arch_cpu_idle();

	goto exit_idle;
}

DEFINE_PER_CPU(bool, cpu_dead_idle);

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void cpu_idle_loop(void)
{
	while (1) {
		/*
		 * If the arch has a polling bit, we maintain an invariant:
		 *
		 * Our polling bit is clear if we're not scheduled (i.e. if
		 * rq->curr != rq->idle). This means that, if rq->idle has
		 * the polling bit set, then setting need_resched is
		 * guaranteed to cause the cpu to reschedule.
		 */

		__current_set_polling();
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(smp_processor_id())) {
				rcu_cpu_notify(NULL, CPU_DYING_IDLE,
					       (void *)(long)smp_processor_id());
				smp_mb(); /* all activity before dead. */
				this_cpu_write(cpu_dead_idle, true);
				arch_cpu_idle_dead();
			}

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we reenable interrupts and spin.
			 *
			 * Also if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away.
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired())
				cpu_idle_poll();
			else
				cpuidle_idle_call();

			arch_cpu_idle_exit();
		}

		/*
		 * Since we fell out of the loop above, we know
		 * TIF_NEED_RESCHED must be set, propagate it into
		 * PREEMPT_NEED_RESCHED.
		 *
		 * This is required because for polling idle loops we will
		 * not have had an IPI to fold the state for us.
		 */
		preempt_set_need_resched();
		tick_nohz_idle_exit();
		__current_clr_polling();

		/*
		 * We promise to call sched_ttwu_pending() and reschedule
		 * if need_resched is set while polling is set. That
		 * means that clearing polling needs to be visible
		 * before doing these things.
		 */
		smp_mb__after_atomic();

		sched_ttwu_pending();
		schedule_preempt_disabled();
	}
}
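
/*
 * For context (a simplified sketch, not part of this file): the promise
 * above pairs with the remote wakeup path in kernel/sched/core.c, which
 * may skip the reschedule IPI when it observes the polling bit:
 *
 *	if (set_nr_if_polling(rq->idle))
 *		trace_sched_wake_idle_without_ipi(cpu);
 *	else
 *		smp_send_reschedule(cpu);
 *
 * A polling idle CPU notices TIF_NEED_RESCHED on its own, which is why
 * sched_ttwu_pending() must be called once polling has been cleared.
 */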

void cpu_startup_entry(enum cpuhp_state state)
{
	/*
	 * This #ifdef needs to die, but it's too late in the cycle to
	 * make this generic (arm and sh have never invoked the canary
	 * init for the non boot cpus!). Will be fixed in 3.11
	 */
#ifdef CONFIG_X86
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. The boot CPU already has it initialized but no harm
	 * in doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
#endif
	arch_cpu_idle_prepare();
	cpu_idle_loop();
}
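
/*
 * Example call site (illustrative; the real callers live elsewhere in
 * the tree): the boot CPU enters the idle loop at the end of boot, e.g.
 * from rest_init() in init/main.c:
 *
 *	cpu_startup_entry(CPUHP_ONLINE);
 *
 * This call never returns; secondary CPUs do the same from their arch
 * bringup code once they are online.
 */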