/*
 * The idle loop for all SuperH platforms.
 *
 * Copyright (C) 2002 - 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/preempt.h>
#include <linux/thread_info.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/cpuidle.h>
#include <linux/atomic.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/bl_bit.h>

void (*pm_idle)(void);

static int hlt_counter;

static int __init nohlt_setup(char *__unused)
{
	hlt_counter = 1;
	return 1;
}
__setup("nohlt", nohlt_setup);

static int __init hlt_setup(char *__unused)
{
	hlt_counter = 0;
	return 1;
}
__setup("hlt", hlt_setup);

static inline int hlt_works(void)
{
	return !hlt_counter;
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
}

void default_idle(void)
{
	if (hlt_works()) {
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();

		set_bl_bit();
		if (!need_resched()) {
			local_irq_enable();
			cpu_sleep();
		} else
			local_irq_enable();

		set_thread_flag(TIF_POLLING_NRFLAG);
		clear_bl_bit();
	} else
		poll_idle();
}

/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (ie sit in a loop waiting for somebody to
 * say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	unsigned int cpu = smp_processor_id();

	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(cpu))
				play_dead();

			local_irq_disable();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			if (cpuidle_idle_call())
				pm_idle();
			/*
			 * Sanity check to ensure that pm_idle() returns
			 * with IRQs enabled
			 */
			WARN_ON(irqs_disabled());
			start_critical_timings();
		}

		rcu_idle_exit();
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}

void __init select_idle_routine(void)
{
	/*
	 * If a platform has set its own idle routine, leave it alone.
	 */
	if (pm_idle)
		return;

	if (hlt_works())
		pm_idle = default_idle;
	else
		pm_idle = poll_idle;
}

void stop_this_cpu(void *unused)
{
	local_irq_disable();
	set_cpu_online(smp_processor_id(), false);

	for (;;)
		cpu_sleep();
}