// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic entry points for the idle threads and
 * implementation of the idle task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE batch scheduled
 *        tasks which are handled in sched/fair.c)
 */
#include "sched.h"

#include <trace/events/power.h>

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}

static int __read_mostly cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;

	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;

	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

static noinline int __cpuidle cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	stop_critical_timings();

	while (!tif_need_resched() &&
	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
	start_critical_timings();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();

	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}

/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
	if (current_clr_polling_and_test()) {
		local_irq_enable();
	} else {
		stop_critical_timings();
		arch_cpu_idle();
		start_critical_timings();
	}
}

static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	if (current_clr_polling_and_test())
		return -EBUSY;

	return cpuidle_enter_s2idle(drv, dev);
}

static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/*
	 * The idle task must be rescheduled - it is pointless to go idle.
	 * Just record a zero idle residency and return.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency_ns = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts.
	 */
	return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On archs that support TIF_POLLING_NRFLAG, this function is called with
 * polling set, and it returns with polling set.  If it ever stops polling,
 * it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = cpuidle_get_device();
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * The RCU framework needs to be told that we are entering an idle
	 * section, so no more rcu read side critical sections and one more
	 * step to the grace period.
	 */

	if (cpuidle_not_available(drv, dev)) {
		tick_nohz_idle_stop_tick();
		rcu_idle_enter();

		default_idle_call();
		goto exit_idle;
	}

	/*
	 * Suspend-to-idle ("s2idle") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available. Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */

	if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
		u64 max_latency_ns;

		if (idle_should_enter_s2idle()) {
			rcu_idle_enter();

			entered_state = call_cpuidle_s2idle(drv, dev);
			if (entered_state > 0)
				goto exit_idle;

			rcu_idle_exit();

			max_latency_ns = U64_MAX;
		} else {
			max_latency_ns = dev->forced_idle_latency_limit_ns;
		}

		tick_nohz_idle_stop_tick();
		rcu_idle_enter();

		next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
		call_cpuidle(drv, dev, next_state);
	} else {
		bool stop_tick = true;

		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev, &stop_tick);

		if (stop_tick || tick_nohz_tick_stopped())
			tick_nohz_idle_stop_tick();
		else
			tick_nohz_idle_retain_tick();

		rcu_idle_enter();

		entered_state = call_cpuidle(drv, dev, next_state);
		/*
		 * Give the governor an opportunity to reflect on the outcome.
		 */
		cpuidle_reflect(dev, entered_state);
	}

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to reenable local interrupts.
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	rcu_idle_exit();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
	int cpu = smp_processor_id();

	/*
	 * If the arch has a polling bit, we maintain an invariant:
	 *
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle). This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */

	__current_set_polling();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		rmb();

		local_irq_disable();

		if (cpu_is_offline(cpu)) {
			tick_nohz_idle_stop_tick();
			cpuhp_report_idle_dead();
			arch_cpu_idle_dead();
		}

		arch_cpu_idle_enter();

		/*
		 * In poll mode we reenable interrupts and spin.
		 * Also if we detected in the wakeup from idle path that the
		 * tick broadcast device expired for us, we don't want to go
		 * deep idle as we know that the IPI is going to arrive right
		 * away.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
			tick_nohz_idle_restart_tick();
			cpu_idle_poll();
		} else {
			cpuidle_idle_call();
		}
		arch_cpu_idle_exit();
	}

	/*
	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
	 * be set, propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set. That means that clearing
	 * polling needs to be visible before doing these things.
	 */
	smp_mb__after_atomic();

	/*
	 * RCU relies on this call to be done outside of an RCU read-side
	 * critical section.
	 */
	flush_smp_call_function_from_idle();
	schedule_idle();

	if (unlikely(klp_patch_pending(current)))
		klp_update_patch_state(current);
}

bool cpu_in_idle(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
		pc < (unsigned long)__cpuidle_text_end;
}

struct idle_timer {
	struct hrtimer timer;
	int done;
};

static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	struct idle_timer *it = container_of(timer, struct idle_timer, timer);

	WRITE_ONCE(it->done, 1);
	set_tsk_need_resched(current);

	return HRTIMER_NORESTART;
}

void play_idle_precise(u64 duration_ns, u64 latency_ns)
{
	struct idle_timer it;

	/*
	 * Only FIFO tasks can disable the tick since they don't need the forced
	 * preemption.
	 */
	WARN_ON_ONCE(current->policy != SCHED_FIFO);
	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
	WARN_ON_ONCE(!duration_ns);

	rcu_sleep_check();
	preempt_disable();
	current->flags |= PF_IDLE;
	cpuidle_use_deepest_state(latency_ns);

	it.done = 0;
	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	it.timer.function = idle_inject_timer_fn;
	hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
		      HRTIMER_MODE_REL_PINNED);

	while (!READ_ONCE(it.done))
		do_idle();

	cpuidle_use_deepest_state(0);
	current->flags &= ~PF_IDLE;

	preempt_fold_need_resched();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle_precise);

void cpu_startup_entry(enum cpuhp_state state)
{
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	while (1)
		do_idle();
}
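
/*
 * Illustrative sketch (not an in-tree caller): play_idle_precise() above is
 * meant to be called from a kernel thread that has been bound to a single
 * CPU and switched to SCHED_FIFO, matching its WARN_ON_ONCE() checks.  A
 * hypothetical idle-injection thread function could look roughly like the
 * following, where idle_inject_thread_fn() and the chosen timings are made
 * up for the example:
 *
 *	static int idle_inject_thread_fn(void *unused)
 *	{
 *		while (!kthread_should_stop()) {
 *			play_idle_precise(NSEC_PER_MSEC, U64_MAX);
 *			schedule_timeout_interruptible(HZ / 100);
 *		}
 *		return 0;
 *	}
 *
 * The actual in-tree consumer of this interface is the idle injection
 * framework in drivers/powercap/idle_inject.c.
 */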

/*
 * idle-task scheduling class.
 */

#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}

static int
balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	return WARN_ON_ONCE(1);
}
#endif

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_curr(rq);
}

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
	update_idle_core(rq);
	schedstat_inc(rq->sched_goidle);
}

struct task_struct *pick_next_task_idle(struct rq *rq)
{
	struct task_struct *next = rq->idle;

	set_next_task_idle(rq, next, true);

	return next;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_lock_irq(&rq->lock);
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}

static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}

static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
	return 0;
}

static void update_curr_idle(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,
	.set_next_task		= set_next_task_idle,

#ifdef CONFIG_SMP
	.balance		= balance_idle,
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.task_tick		= task_tick_idle,

	.get_rr_interval	= get_rr_interval_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
};