// SPDX-License-Identifier: GPL-2.0
/*
 *  cpuidle-pseries - idle state cpuidle driver.
 *  Adapted from drivers/idle/intel_idle.c and
 *  drivers/acpi/processor_idle.c
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

#include <asm/paca.h>
#include <asm/reg.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/runlatch.h>
#include <asm/plpar_wrappers.h>

struct cpuidle_driver pseries_idle_driver = {
	.name             = "pseries_idle",
	.owner            = THIS_MODULE,
};

static int max_idle_state __read_mostly;
static struct cpuidle_state *cpuidle_state_table __read_mostly;
static u64 snooze_timeout __read_mostly;
static bool snooze_timeout_en __read_mostly;

static inline void idle_loop_prolog(unsigned long *in_purr)
{
	ppc64_runlatch_off();
	*in_purr = mfspr(SPRN_PURR);
	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;
}

static inline void idle_loop_epilog(unsigned long in_purr)
{
	u64 wait_cycles;

	wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
	wait_cycles += mfspr(SPRN_PURR) - in_purr;
	get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
	get_lppaca()->idle = 0;

	if (irqs_disabled())
		local_irq_enable();
	ppc64_runlatch_on();
}

static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;
	u64 snooze_exit_time;

	set_thread_flag(TIF_POLLING_NRFLAG);

	idle_loop_prolog(&in_purr);
	local_irq_enable();
	snooze_exit_time = get_tb() + snooze_timeout;

	while (!need_resched()) {
		HMT_low();
		HMT_very_low();
		if (likely(snooze_timeout_en) && get_tb() > snooze_exit_time) {
			/*
			 * Task has not woken up but we are exiting the polling
			 * loop anyway. Require a barrier after polling is
			 * cleared to order subsequent test of need_resched().
			 */
			clear_thread_flag(TIF_POLLING_NRFLAG);
			smp_mb();
			break;
		}
	}

	HMT_medium();
	clear_thread_flag(TIF_POLLING_NRFLAG);

	idle_loop_epilog(in_purr);

	return index;
}

static void check_and_cede_processor(void)
{
	/*
	 * Ensure our interrupt state is properly tracked,
	 * also checks if no interrupt has occurred while we
	 * were soft-disabled
	 */
	if (prep_irq_for_idle()) {
		cede_processor();
#ifdef CONFIG_TRACE_IRQFLAGS
		/* Ensure that H_CEDE returns with IRQs on */
		if (WARN_ON(!(mfmsr() & MSR_EE)))
			__hard_irq_enable();
#endif
	}
}

static int dedicated_cede_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long in_purr;

	idle_loop_prolog(&in_purr);
	get_lppaca()->donate_dedicated_cpu = 1;

	HMT_medium();
	check_and_cede_processor();

	get_lppaca()->donate_dedicated_cpu = 0;

	idle_loop_epilog(in_purr);

	return index;
}

static int shared_cede_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;

	idle_loop_prolog(&in_purr);

	/*
	 * Yield the processor to the hypervisor.  We return if
	 * an external interrupt occurs (which are driven prior
	 * to returning here) or if a prod occurs from another
	 * processor. When returning here, external interrupts
	 * are enabled.
	 */
	check_and_cede_processor();

	idle_loop_epilog(in_purr);

	return index;
}

/*
 * States for dedicated partition case.
 */
static struct cpuidle_state dedicated_states[] = {
	{ /* Snooze */
		.name = "snooze",
		.desc = "snooze",
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &snooze_loop },
	{ /* CEDE */
		.name = "CEDE",
		.desc = "CEDE",
		.exit_latency = 10,
		.target_residency = 100,
		.enter = &dedicated_cede_loop },
};

/*
 * States for shared partition case.
 */
static struct cpuidle_state shared_states[] = {
	{ /* Shared Cede */
		.name = "Shared Cede",
		.desc = "Shared Cede",
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &shared_cede_loop },
};

static int pseries_cpuidle_cpu_online(unsigned int cpu)
{
	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

	if (dev && cpuidle_get_driver()) {
		cpuidle_pause_and_lock();
		cpuidle_enable_device(dev);
		cpuidle_resume_and_unlock();
	}
	return 0;
}

static int pseries_cpuidle_cpu_dead(unsigned int cpu)
{
	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

	if (dev && cpuidle_get_driver()) {
		cpuidle_pause_and_lock();
		cpuidle_disable_device(dev);
		cpuidle_resume_and_unlock();
	}
	return 0;
}

/*
 * pseries_cpuidle_driver_init()
 */
static int pseries_cpuidle_driver_init(void)
{
	int idle_state;
	struct cpuidle_driver *drv = &pseries_idle_driver;

	drv->state_count = 0;

	for (idle_state = 0; idle_state < max_idle_state; ++idle_state) {
		/* Is the state not enabled? */
		if (cpuidle_state_table[idle_state].enter == NULL)
			continue;

		drv->states[drv->state_count] =	/* structure copy */
			cpuidle_state_table[idle_state];

		drv->state_count += 1;
	}

	return 0;
}

/*
 * pseries_idle_probe()
 * Choose state table for shared versus dedicated partition
 */
static int pseries_idle_probe(void)
{

	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		return -ENODEV;

	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		if (lppaca_shared_proc(get_lppaca())) {
			cpuidle_state_table = shared_states;
			max_idle_state = ARRAY_SIZE(shared_states);
		} else {
			cpuidle_state_table = dedicated_states;
			max_idle_state = ARRAY_SIZE(dedicated_states);
		}
	} else
		return -ENODEV;

	if (max_idle_state > 1) {
		snooze_timeout_en = true;
		snooze_timeout = cpuidle_state_table[1].target_residency *
				 tb_ticks_per_usec;
	}
	return 0;
}

static int __init pseries_processor_idle_init(void)
{
	int retval;

	retval = pseries_idle_probe();
	if (retval)
		return retval;

	pseries_cpuidle_driver_init();
	retval = cpuidle_register(&pseries_idle_driver, NULL);
	if (retval) {
		printk(KERN_DEBUG "Registration of pseries driver failed.\n");
		return retval;
	}

	retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					   "cpuidle/pseries:online",
					   pseries_cpuidle_cpu_online, NULL);
	WARN_ON(retval < 0);
	retval = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_DEAD,
					   "cpuidle/pseries:DEAD", NULL,
					   pseries_cpuidle_cpu_dead);
	WARN_ON(retval < 0);
	printk(KERN_DEBUG "pseries_idle_driver registered\n");
	return 0;
}

device_initcall(pseries_processor_idle_init);