/*
 * cpuidle-pseries - idle state cpuidle driver.
 * Adapted from drivers/idle/intel_idle.c and
 * drivers/acpi/processor_idle.c
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

#include <asm/paca.h>
#include <asm/reg.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/runlatch.h>
#include <asm/plpar_wrappers.h>

struct cpuidle_driver pseries_idle_driver = {
	.name	= "pseries_idle",
	.owner	= THIS_MODULE,
};

static int max_idle_state __read_mostly;
static struct cpuidle_state *cpuidle_state_table __read_mostly;
static u64 snooze_timeout __read_mostly;
static bool snooze_timeout_en __read_mostly;

static inline void idle_loop_prolog(unsigned long *in_purr)
{
	ppc64_runlatch_off();
	*in_purr = mfspr(SPRN_PURR);
	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;
}

static inline void idle_loop_epilog(unsigned long in_purr)
{
	u64 wait_cycles;

	wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
	wait_cycles += mfspr(SPRN_PURR) - in_purr;
	get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
	get_lppaca()->idle = 0;

	if (irqs_disabled())
		local_irq_enable();
	ppc64_runlatch_on();
}

/*
 * Busy-poll at low HMT priority until the scheduler needs this CPU
 * or the snooze timeout expires, whichever comes first.
 */
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;
	u64 snooze_exit_time;

	set_thread_flag(TIF_POLLING_NRFLAG);

	idle_loop_prolog(&in_purr);
	local_irq_enable();
	snooze_exit_time = get_tb() + snooze_timeout;

	while (!need_resched()) {
		HMT_low();
		HMT_very_low();
		if (likely(snooze_timeout_en) && get_tb() > snooze_exit_time) {
			/*
			 * Task has not woken up but we are exiting the polling
			 * loop anyway. Require a barrier after polling is
			 * cleared to order subsequent test of need_resched().
			 */
			clear_thread_flag(TIF_POLLING_NRFLAG);
			smp_mb();
			break;
		}
	}

	HMT_medium();
	clear_thread_flag(TIF_POLLING_NRFLAG);

	idle_loop_epilog(in_purr);

	return index;
}

static void check_and_cede_processor(void)
{
	/*
	 * Ensure our interrupt state is properly tracked; this also
	 * checks that no interrupt has occurred while we were
	 * soft-disabled.
	 */
	if (prep_irq_for_idle()) {
		cede_processor();
#ifdef CONFIG_TRACE_IRQFLAGS
		/* Ensure that H_CEDE returns with IRQs on */
		if (WARN_ON(!(mfmsr() & MSR_EE)))
			__hard_irq_enable();
#endif
	}
}

/*
 * Cede the vCPU to the hypervisor on a dedicated-processor partition,
 * flagging the idle cycles as available for donation to other partitions.
 */
static int dedicated_cede_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long in_purr;

	idle_loop_prolog(&in_purr);
	get_lppaca()->donate_dedicated_cpu = 1;

	HMT_medium();
	check_and_cede_processor();

	get_lppaca()->donate_dedicated_cpu = 0;

	idle_loop_epilog(in_purr);

	return index;
}

static int shared_cede_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;

	idle_loop_prolog(&in_purr);

	/*
	 * Yield the processor to the hypervisor. We return if an
	 * external interrupt occurs (external interrupts are driven
	 * before we return here) or if another processor prods us.
	 * When we return here, external interrupts are enabled.
	 */
	check_and_cede_processor();

	idle_loop_epilog(in_purr);

	return index;
}

/*
 * States for dedicated partition case.
 */
static struct cpuidle_state dedicated_states[] = {
	{ /* Snooze */
		.name = "snooze",
		.desc = "snooze",
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &snooze_loop },
	{ /* CEDE */
		.name = "CEDE",
		.desc = "CEDE",
		.exit_latency = 10,
		.target_residency = 100,
		.enter = &dedicated_cede_loop },
};

/*
 * States for shared partition case.
 */
static struct cpuidle_state shared_states[] = {
	{ /* Shared Cede */
		.name = "Shared Cede",
		.desc = "Shared Cede",
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &shared_cede_loop },
};

/* CPU hotplug callback: enable the cpuidle device on the incoming CPU. */
static int pseries_cpuidle_cpu_online(unsigned int cpu)
{
	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

	if (dev && cpuidle_get_driver()) {
		cpuidle_pause_and_lock();
		cpuidle_enable_device(dev);
		cpuidle_resume_and_unlock();
	}
	return 0;
}

/* CPU hotplug callback: disable the cpuidle device once the CPU is dead. */
static int pseries_cpuidle_cpu_dead(unsigned int cpu)
{
	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

	if (dev && cpuidle_get_driver()) {
		cpuidle_pause_and_lock();
		cpuidle_disable_device(dev);
		cpuidle_resume_and_unlock();
	}
	return 0;
}

/*
 * pseries_cpuidle_driver_init()
 * Copy the enabled states from the chosen table into the driver.
 */
static int pseries_cpuidle_driver_init(void)
{
	int idle_state;
	struct cpuidle_driver *drv = &pseries_idle_driver;

	drv->state_count = 0;

	for (idle_state = 0; idle_state < max_idle_state; ++idle_state) {
		/* Is the state not enabled? */
		if (cpuidle_state_table[idle_state].enter == NULL)
			continue;

		drv->states[drv->state_count] =	/* structure copy */
			cpuidle_state_table[idle_state];

		drv->state_count += 1;
	}

	return 0;
}

/*
 * pseries_idle_probe()
 * Choose state table for shared versus dedicated partition
 */
static int pseries_idle_probe(void)
{

	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		return -ENODEV;

	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		if (lppaca_shared_proc(get_lppaca())) {
			cpuidle_state_table = shared_states;
			max_idle_state = ARRAY_SIZE(shared_states);
		} else {
			cpuidle_state_table = dedicated_states;
			max_idle_state = ARRAY_SIZE(dedicated_states);
		}
	} else
		return -ENODEV;

	if (max_idle_state > 1) {
		snooze_timeout_en = true;
		snooze_timeout = cpuidle_state_table[1].target_residency *
				 tb_ticks_per_usec;
	}
	return 0;
}

static int __init pseries_processor_idle_init(void)
{
	int retval;

	retval = pseries_idle_probe();
	if (retval)
		return retval;

	pseries_cpuidle_driver_init();
	retval = cpuidle_register(&pseries_idle_driver, NULL);
	if (retval) {
		printk(KERN_DEBUG "Registration of pseries driver failed.\n");
		return retval;
	}

	retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					   "cpuidle/pseries:online",
					   pseries_cpuidle_cpu_online, NULL);
	WARN_ON(retval < 0);
	retval = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_DEAD,
					   "cpuidle/pseries:DEAD", NULL,
					   pseries_cpuidle_cpu_dead);
	WARN_ON(retval < 0);
	printk(KERN_DEBUG "pseries_idle_driver registered\n");
	return 0;
}

device_initcall(pseries_processor_idle_init);