/*
 * The idle loop for all SuperH platforms.
 *
 * Copyright (C) 2002 - 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/preempt.h>
#include <linux/thread_info.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/atomic.h>

/*
 * The active idle handler invoked from cpu_idle().  NULL until
 * select_idle_routine() picks one, unless a platform installs its own
 * handler earlier (in which case select_idle_routine() leaves it alone).
 */
void (*pm_idle)(void) = NULL;

/* Non-zero when cpu_sleep()-based idling is disabled ("nohlt" below). */
static int hlt_counter;

/* "nohlt" boot parameter: disable sleeping in idle, poll instead. */
static int __init nohlt_setup(char *__unused)
{
	hlt_counter = 1;
	return 1;
}
__setup("nohlt", nohlt_setup);

/* "hlt" boot parameter: (re-)enable sleeping in idle. */
static int __init hlt_setup(char *__unused)
{
	hlt_counter = 0;
	return 1;
}
__setup("hlt", hlt_setup);

/* True when sleeping in idle is permitted. */
static inline int hlt_works(void)
{
	return !hlt_counter;
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
}

/*
 * Default idle handler: put the CPU to sleep until woken, or fall back
 * to polling when "nohlt" was given.  Entered with IRQs disabled (see
 * cpu_idle()) and must return with IRQs enabled — cpu_idle() checks
 * this with WARN_ON(irqs_disabled()).
 */
void default_idle(void)
{
	if (hlt_works()) {
		/*
		 * Stop advertising ourselves as polling; from here on a
		 * cross-CPU IPI is needed to wake us (see the poll_idle()
		 * comment).  The barrier orders the flag clear against
		 * the need_resched() re-check below.
		 */
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();

		/*
		 * NOTE(review): set_bl_bit() presumably raises SR.BL so
		 * the need_resched() check and cpu_sleep() below aren't
		 * raced by a wakeup interrupt — confirm against the SH
		 * entry code / CPU manual.
		 */
		set_bl_bit();
		if (!need_resched()) {
			local_irq_enable();
			cpu_sleep();
		} else
			local_irq_enable();

		set_thread_flag(TIF_POLLING_NRFLAG);
		clear_bl_bit();
	} else
		poll_idle();
}

/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (ie sit in a loop waiting for somebody to
 * say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	unsigned int cpu = smp_processor_id();

	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
		/* Go tickless while idle. */
		tick_nohz_stop_sched_tick(1);

		while (!need_resched() && cpu_online(cpu)) {
			check_pgt_cache();
			/* Re-read the flags checked in the loop condition. */
			rmb();

			local_irq_disable();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			/*
			 * Sanity check to ensure that pm_idle() returns
			 * with IRQs enabled
			 */
			WARN_ON(irqs_disabled());
			start_critical_timings();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/*
 * Pick the idle implementation at boot: keep a platform-provided pm_idle
 * if one is already installed, otherwise choose between sleeping and
 * polling based on the "hlt"/"nohlt" boot parameters.
 */
void __init select_idle_routine(void)
{
	/*
	 * If a platform has set its own idle routine, leave it alone.
	 */
	if (pm_idle)
		return;

	if (hlt_works())
		pm_idle = default_idle;
	else
		pm_idle = poll_idle;
}

/* IPI target used by cpu_idle_wait() purely to kick CPUs out of pm_idle. */
static void do_nothing(void *unused)
{
}

/*
 * Mark this CPU offline and spin in cpu_sleep() forever with IRQs off.
 * (Presumably the halt/shutdown cross-call target — confirm at call sites.)
 */
void stop_this_cpu(void *unused)
{
	local_irq_disable();
	cpu_clear(smp_processor_id(), cpu_online_map);

	for (;;)
		cpu_sleep();
}

/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
 * pm_idle and update to new pm_idle value. Required while changing pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. Old
 * pm_idle value will not be used by any CPU after the return of this function.
 */
void cpu_idle_wait(void)
{
	/* Order the caller's pm_idle store against the cross-call below. */
	smp_mb();
	/* kick all the CPUs so that they exit out of pm_idle */
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);