/*
 * intel_idle.c - native hardware idle loop for modern Intel processors
 *
 * Copyright (c) 2010, Intel Corporation.
 * Len Brown <len.brown@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

/*
 * intel_idle is a cpuidle driver that loads on specific Intel processors
 * in lieu of the legacy ACPI processor_idle driver.  The intent is to
 * make Linux more efficient on these processors, as intel_idle knows
 * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs.
 */

/*
 * Design Assumptions
 *
 * All CPUs have same idle states as boot CPU
 *
 * Chipset BM_STS (bus master status) bit is a NOP
 *	for preventing entry into deep C-states
 */

/*
 * Known limitations
 *
 * The driver currently initializes for_each_online_cpu() upon modprobe.
 * It is unaware of subsequent processors hot-added to the system.
 * This means that if you boot with maxcpus=n and later online
 * processors above n, those processors will use C1 only.
 *
 * ACPI has a .suspend hack to turn off deep C-states during suspend
 * to avoid complications with the lapic timer workaround.
 * Have not seen issues with suspend, but may need same workaround here.
 *
 * There is currently no kernel-based automatic probing/loading mechanism
 * if the driver is built as a module.
 */

/* un-comment DEBUG to enable pr_debug() statements */
#define DEBUG

#include <linux/kernel.h>
#include <linux/module.h>	/* module_init(), module_param(), MODULE_*() */
#include <linux/cpuidle.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>	/* ktime_get_real() */
#include <trace/events/power.h>
#include <linux/sched.h>
#include <asm/mwait.h>

#define INTEL_IDLE_VERSION "0.4"
#define PREFIX "intel_idle: "

static struct cpuidle_driver intel_idle_driver = {
	.name = "intel_idle",
	.owner = THIS_MODULE,
};
/* intel_idle.max_cstate=0 disables driver */
static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;

static unsigned int mwait_substates;

/* Reliable LAPIC Timer States, bit 1 for C1 etc. */
static unsigned int lapic_timer_reliable_states = (1 << 1);	/* Default to only C1 */

static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);

static struct cpuidle_state *cpuidle_state_table;

/*
 * States are indexed by the cstate number,
 * which is also the index into the MWAIT hint array.
 * Thus C0 is a dummy.
 */
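/*
 * Each populated table entry's .driver_data is the raw MWAIT hint that is
 * handed to __mwait() in EAX; intel_idle() below recovers the C-state
 * number from the hint's upper nibble (MWAIT_SUBSTATE_SIZE bits per field).
 * .exit_latency and .target_residency are in microseconds, the units the
 * cpuidle governors compare against the predicted idle duration.
 */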
static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
	{ /* MWAIT C0 */ },
	{ /* MWAIT C1 */
		.name = "NHM-C1",
		.desc = "MWAIT 0x00",
		.driver_data = (void *) 0x00,
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 3,
		.target_residency = 6,
		.enter = &intel_idle },
	{ /* MWAIT C2 */
		.name = "NHM-C3",
		.desc = "MWAIT 0x10",
		.driver_data = (void *) 0x10,
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 20,
		.target_residency = 80,
		.enter = &intel_idle },
	{ /* MWAIT C3 */
		.name = "NHM-C6",
		.desc = "MWAIT 0x20",
		.driver_data = (void *) 0x20,
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.target_residency = 800,
		.enter = &intel_idle },
};

static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
	{ /* MWAIT C0 */ },
	{ /* MWAIT C1 */
		.name = "SNB-C1",
		.desc = "MWAIT 0x00",
		.driver_data = (void *) 0x00,
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 1,
		.target_residency = 4,
		.enter = &intel_idle },
	{ /* MWAIT C2 */
		.name = "SNB-C3",
		.desc = "MWAIT 0x10",
		.driver_data = (void *) 0x10,
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 160,
		.enter = &intel_idle },
	{ /* MWAIT C3 */
		.name = "SNB-C6",
		.desc = "MWAIT 0x20",
		.driver_data = (void *) 0x20,
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 104,
		.target_residency = 208,
		.enter = &intel_idle },
	{ /* MWAIT C4 */
		.name = "SNB-C7",
		.desc = "MWAIT 0x30",
		.driver_data = (void *) 0x30,
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 109,
		.target_residency = 300,
		.enter = &intel_idle },
};

static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
	{ /* MWAIT C0 */ },
	{ /* MWAIT C1 */
		.name = "ATM-C1",
		.desc = "MWAIT 0x00",
		.driver_data = (void *) 0x00,
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 1,
		.target_residency = 4,
		.enter = &intel_idle },
	{ /* MWAIT C2 */
		.name = "ATM-C2",
		.desc = "MWAIT 0x10",
		.driver_data = (void *) 0x10,
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 20,
		.target_residency = 80,
		.enter = &intel_idle },
	{ /* MWAIT C3 */ },
	{ /* MWAIT C4 */
		.name = "ATM-C4",
		.desc = "MWAIT 0x30",
		.driver_data = (void *) 0x30,
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 100,
		.target_residency = 400,
		.enter = &intel_idle },
	{ /* MWAIT C5 */ },
	{ /* MWAIT C6 */
		.name = "ATM-C6",
		.desc = "MWAIT 0x52",
		.driver_data = (void *) 0x52,
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 140,
		.target_residency = 560,
		.enter = &intel_idle },
};
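/*
 * Idle entry sequence implemented by intel_idle():
 *  1. disable interrupts,
 *  2. leave_mm() for states flagged CPUIDLE_FLAG_TLB_FLUSHED, so the CPU
 *     need not be woken later merely to flush a lazy-mm TLB,
 *  3. switch to the broadcast clockevent if the LAPIC timer may stop in
 *     this C-state,
 *  4. MONITOR the current thread's flags word and MWAIT with the state's
 *     hint, re-checking need_resched() so a wakeup arriving in between is
 *     not missed,
 *  5. measure residency with ktime_get_real() and report it to cpuidle
 *     in microseconds.
 */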
/**
 * intel_idle
 * @dev: cpuidle_device
 * @state: cpuidle state
 */
static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
{
	unsigned long ecx = 1; /* break on interrupt flag */
	unsigned long eax = (unsigned long)cpuidle_get_statedata(state);
	unsigned int cstate;
	ktime_t kt_before, kt_after;
	s64 usec_delta;
	int cpu = smp_processor_id();

	cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;

	local_irq_disable();

	/*
	 * leave_mm() to avoid costly and often unnecessary wakeups
	 * for flushing the user TLBs associated with the active mm.
	 */
	if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
		leave_mm(cpu);

	if (!(lapic_timer_reliable_states & (1 << (cstate))))
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

	kt_before = ktime_get_real();

	stop_critical_timings();
	trace_power_start(POWER_CSTATE, (eax >> 4) + 1, cpu);
	trace_cpu_idle((eax >> 4) + 1, cpu);
	if (!need_resched()) {

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(eax, ecx);
	}

	start_critical_timings();

	kt_after = ktime_get_real();
	usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before));

	local_irq_enable();

	if (!(lapic_timer_reliable_states & (1 << (cstate))))
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);

	return usec_delta;
}

/*
 * intel_idle_probe()
 */
static int intel_idle_probe(void)
{
	unsigned int eax, ebx, ecx;

	if (max_cstate == 0) {
		pr_debug(PREFIX "disabled\n");
		return -EPERM;
	}

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_MWAIT))
		return -ENODEV;

	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return -ENODEV;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);

	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
		return -ENODEV;

	pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);

	if (boot_cpu_data.x86 != 6)	/* family 6 */
		return -ENODEV;

	switch (boot_cpu_data.x86_model) {

	case 0x1A:	/* Core i7, Xeon 5500 series */
	case 0x1E:	/* Core i7 and i5 Processor - Lynnfield Jasper Forest */
	case 0x1F:	/* Core i7 and i5 Processor - Nehalem */
	case 0x2E:	/* Nehalem-EX Xeon */
	case 0x2F:	/* Westmere-EX Xeon */
	case 0x25:	/* Westmere */
	case 0x2C:	/* Westmere */
		cpuidle_state_table = nehalem_cstates;
		break;

	case 0x1C:	/* 28 - Atom Processor */
	case 0x26:	/* 38 - Lincroft Atom Processor */
		cpuidle_state_table = atom_cstates;
		break;

	case 0x2A:	/* SNB */
	case 0x2D:	/* SNB Xeon */
		cpuidle_state_table = snb_cstates;
		break;

	default:
		pr_debug(PREFIX "does not run on family %d model %d\n",
			 boot_cpu_data.x86, boot_cpu_data.x86_model);
		return -ENODEV;
	}

	if (boot_cpu_has(X86_FEATURE_ARAT))	/* Always Reliable APIC Timer */
		lapic_timer_reliable_states = 0xFFFFFFFF;

	pr_debug(PREFIX "v" INTEL_IDLE_VERSION
		" model 0x%X\n", boot_cpu_data.x86_model);

	pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
		lapic_timer_reliable_states);

	return 0;
}

/*
 * intel_idle_cpuidle_devices_uninit()
 * unregister, free cpuidle_devices
 */
static void intel_idle_cpuidle_devices_uninit(void)
{
	int i;
	struct cpuidle_device *dev;

	for_each_online_cpu(i) {
		dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
		cpuidle_unregister_device(dev);
	}

	free_percpu(intel_idle_cpuidle_devices);
	return;
}
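/*
 * CPUID.MWAIT (leaf 0x5) EDX, saved into mwait_substates by
 * intel_idle_probe(), reports the number of MWAIT sub-states in a 4-bit
 * field per C-state.  intel_idle_cpuidle_devices_init() below skips any
 * table entry whose field is zero, i.e. any C-state the hardware does
 * not enumerate.
 */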
/*
 * intel_idle_cpuidle_devices_init()
 * allocate, initialize, register cpuidle_devices
 */
static int intel_idle_cpuidle_devices_init(void)
{
	int i, cstate;
	struct cpuidle_device *dev;

	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
	if (intel_idle_cpuidle_devices == NULL)
		return -ENOMEM;

	for_each_online_cpu(i) {
		dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);

		dev->state_count = 1;

		for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
			int num_substates;

			if (cstate > max_cstate) {
				printk(PREFIX "max_cstate %d reached\n",
					max_cstate);
				break;
			}

			/* does the state exist in CPUID.MWAIT? */
			num_substates = (mwait_substates >> ((cstate) * 4))
						& MWAIT_SUBSTATE_MASK;
			if (num_substates == 0)
				continue;
			/* is the state not enabled? */
			if (cpuidle_state_table[cstate].enter == NULL) {
				/* does the driver not know about the state? */
				if (*cpuidle_state_table[cstate].name == '\0')
					pr_debug(PREFIX "unaware of model 0x%x"
						" MWAIT %d please"
						" contact lenb@kernel.org",
					boot_cpu_data.x86_model, cstate);
				continue;
			}

			if ((cstate > 2) &&
				!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halts in idle"
					" states deeper than C2");

			dev->states[dev->state_count] =	/* structure copy */
				cpuidle_state_table[cstate];

			dev->state_count += 1;
		}

		dev->cpu = i;
		if (cpuidle_register_device(dev)) {
			pr_debug(PREFIX "cpuidle_register_device %d failed!\n",
				 i);
			intel_idle_cpuidle_devices_uninit();
			return -EIO;
		}
	}

	return 0;
}


static int __init intel_idle_init(void)
{
	int retval;

	retval = intel_idle_probe();
	if (retval)
		return retval;

	retval = cpuidle_register_driver(&intel_idle_driver);
	if (retval) {
		printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
			cpuidle_get_driver()->name);
		return retval;
	}

	retval = intel_idle_cpuidle_devices_init();
	if (retval) {
		cpuidle_unregister_driver(&intel_idle_driver);
		return retval;
	}

	return 0;
}

static void __exit intel_idle_exit(void)
{
	intel_idle_cpuidle_devices_uninit();
	cpuidle_unregister_driver(&intel_idle_driver);

	return;
}

module_init(intel_idle_init);
module_exit(intel_idle_exit);

module_param(max_cstate, int, 0444);

MODULE_AUTHOR("Len Brown <len.brown@intel.com>");
MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION);
MODULE_LICENSE("GPL");
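/*
 * Usage note: max_cstate is read-only at runtime (mode 0444), so set it
 * when the driver is loaded, e.g. "intel_idle.max_cstate=0" on the kernel
 * command line to disable the driver entirely, or a small value to cap
 * the deepest MWAIT C-state index that gets registered.
 */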