11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * processor_idle - idle state submodule to the ACPI processor driver 31da177e4SLinus Torvalds * 41da177e4SLinus Torvalds * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 51da177e4SLinus Torvalds * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 6c5ab81caSDominik Brodowski * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de> 71da177e4SLinus Torvalds * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 81da177e4SLinus Torvalds * - Added processor hotplug support 902df8b93SVenkatesh Pallipadi * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> 1002df8b93SVenkatesh Pallipadi * - Added support for C3 on SMP 111da177e4SLinus Torvalds * 121da177e4SLinus Torvalds * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 131da177e4SLinus Torvalds * 141da177e4SLinus Torvalds * This program is free software; you can redistribute it and/or modify 151da177e4SLinus Torvalds * it under the terms of the GNU General Public License as published by 161da177e4SLinus Torvalds * the Free Software Foundation; either version 2 of the License, or (at 171da177e4SLinus Torvalds * your option) any later version. 181da177e4SLinus Torvalds * 191da177e4SLinus Torvalds * This program is distributed in the hope that it will be useful, but 201da177e4SLinus Torvalds * WITHOUT ANY WARRANTY; without even the implied warranty of 211da177e4SLinus Torvalds * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 221da177e4SLinus Torvalds * General Public License for more details. 231da177e4SLinus Torvalds * 241da177e4SLinus Torvalds * You should have received a copy of the GNU General Public License along 251da177e4SLinus Torvalds * with this program; if not, write to the Free Software Foundation, Inc., 261da177e4SLinus Torvalds * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
271da177e4SLinus Torvalds * 281da177e4SLinus Torvalds * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 291da177e4SLinus Torvalds */ 301da177e4SLinus Torvalds 311da177e4SLinus Torvalds #include <linux/kernel.h> 321da177e4SLinus Torvalds #include <linux/module.h> 331da177e4SLinus Torvalds #include <linux/init.h> 341da177e4SLinus Torvalds #include <linux/cpufreq.h> 351da177e4SLinus Torvalds #include <linux/proc_fs.h> 361da177e4SLinus Torvalds #include <linux/seq_file.h> 371da177e4SLinus Torvalds #include <linux/acpi.h> 381da177e4SLinus Torvalds #include <linux/dmi.h> 391da177e4SLinus Torvalds #include <linux/moduleparam.h> 404e57b681STim Schmielau #include <linux/sched.h> /* need_resched() */ 41f011e2e2SMark Gross #include <linux/pm_qos_params.h> 42e9e2cdb4SThomas Gleixner #include <linux/clockchips.h> 434f86d3a8SLen Brown #include <linux/cpuidle.h> 441da177e4SLinus Torvalds 453434933bSThomas Gleixner /* 463434933bSThomas Gleixner * Include the apic definitions for x86 to have the APIC timer related defines 473434933bSThomas Gleixner * available also for UP (on SMP it gets magically included via linux/smp.h). 483434933bSThomas Gleixner * asm/acpi.h is not an option, as it would require more include magic. Also 493434933bSThomas Gleixner * creating an empty asm-ia64/apic.h would just trade pest vs. cholera. 
503434933bSThomas Gleixner */ 513434933bSThomas Gleixner #ifdef CONFIG_X86 523434933bSThomas Gleixner #include <asm/apic.h> 533434933bSThomas Gleixner #endif 543434933bSThomas Gleixner 551da177e4SLinus Torvalds #include <asm/io.h> 561da177e4SLinus Torvalds #include <asm/uaccess.h> 571da177e4SLinus Torvalds 581da177e4SLinus Torvalds #include <acpi/acpi_bus.h> 591da177e4SLinus Torvalds #include <acpi/processor.h> 601da177e4SLinus Torvalds 611da177e4SLinus Torvalds #define ACPI_PROCESSOR_COMPONENT 0x01000000 621da177e4SLinus Torvalds #define ACPI_PROCESSOR_CLASS "processor" 631da177e4SLinus Torvalds #define _COMPONENT ACPI_PROCESSOR_COMPONENT 64f52fd66dSLen Brown ACPI_MODULE_NAME("processor_idle"); 651da177e4SLinus Torvalds #define ACPI_PROCESSOR_FILE_POWER "power" 661da177e4SLinus Torvalds #define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000) 672aa44d05SIngo Molnar #define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY) 684f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE 691da177e4SLinus Torvalds #define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */ 701da177e4SLinus Torvalds #define C3_OVERHEAD 4 /* 1us (3.579 ticks per us) */ 71b6835052SAndreas Mohr static void (*pm_idle_save) (void) __read_mostly; 724f86d3a8SLen Brown #else 734f86d3a8SLen Brown #define C2_OVERHEAD 1 /* 1us */ 744f86d3a8SLen Brown #define C3_OVERHEAD 1 /* 1us */ 754f86d3a8SLen Brown #endif 764f86d3a8SLen Brown #define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000)) 771da177e4SLinus Torvalds 784f86d3a8SLen Brown static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER; 795b3f0e6cSVenki Pallipadi #ifdef CONFIG_CPU_IDLE 804f86d3a8SLen Brown module_param(max_cstate, uint, 0000); 815b3f0e6cSVenki Pallipadi #else 825b3f0e6cSVenki Pallipadi module_param(max_cstate, uint, 0644); 835b3f0e6cSVenki Pallipadi #endif 84b6835052SAndreas Mohr static unsigned int nocst __read_mostly; 851da177e4SLinus Torvalds module_param(nocst, uint, 0000); 861da177e4SLinus Torvalds 
874f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE 881da177e4SLinus Torvalds /* 891da177e4SLinus Torvalds * bm_history -- bit-mask with a bit per jiffy of bus-master activity 901da177e4SLinus Torvalds * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms 911da177e4SLinus Torvalds * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms 921da177e4SLinus Torvalds * 100 HZ: 0x0000000F: 4 jiffies = 40ms 931da177e4SLinus Torvalds * reduce history for more aggressive entry into C3 941da177e4SLinus Torvalds */ 95b6835052SAndreas Mohr static unsigned int bm_history __read_mostly = 964be44fcdSLen Brown (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1)); 971da177e4SLinus Torvalds module_param(bm_history, uint, 0644); 984f86d3a8SLen Brown 994f86d3a8SLen Brown static int acpi_processor_set_power_policy(struct acpi_processor *pr); 1004f86d3a8SLen Brown 1014f86d3a8SLen Brown #endif 1021da177e4SLinus Torvalds 1031da177e4SLinus Torvalds /* 1041da177e4SLinus Torvalds * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3. 1051da177e4SLinus Torvalds * For now disable this. Probably a bug somewhere else. 1061da177e4SLinus Torvalds * 1071da177e4SLinus Torvalds * To skip this limit, boot/load with a large max_cstate limit. 1081da177e4SLinus Torvalds */ 1091855256cSJeff Garzik static int set_max_cstate(const struct dmi_system_id *id) 1101da177e4SLinus Torvalds { 1111da177e4SLinus Torvalds if (max_cstate > ACPI_PROCESSOR_MAX_POWER) 1121da177e4SLinus Torvalds return 0; 1131da177e4SLinus Torvalds 1143d35600aSLen Brown printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate." 
1151da177e4SLinus Torvalds " Override with \"processor.max_cstate=%d\"\n", id->ident, 1163d35600aSLen Brown (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1); 1171da177e4SLinus Torvalds 1183d35600aSLen Brown max_cstate = (long)id->driver_data; 1191da177e4SLinus Torvalds 1201da177e4SLinus Torvalds return 0; 1211da177e4SLinus Torvalds } 1221da177e4SLinus Torvalds 1237ded5689SAshok Raj /* Actually this shouldn't be __cpuinitdata, would be better to fix the 1247ded5689SAshok Raj callers to only run once -AK */ 1257ded5689SAshok Raj static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = { 126335f16beSDavid Shaohua Li { set_max_cstate, "IBM ThinkPad R40e", { 127876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 128f831335dSBartlomiej Swiercz DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1}, 129f831335dSBartlomiej Swiercz { set_max_cstate, "IBM ThinkPad R40e", { 130f831335dSBartlomiej Swiercz DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 131876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1}, 132876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 133876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 134876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void*)1}, 135876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 136876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 137876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void*)1}, 138876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 139876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 140876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void*)1}, 141876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 142876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 143876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void*)1}, 144876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 145876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 
146876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void*)1}, 147876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 148876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 149876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void*)1}, 150876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 151876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 152876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void*)1}, 153876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 154876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 155876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void*)1}, 156876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 157876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 158876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void*)1}, 159876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 160876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 161876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void*)1}, 162876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 163876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 164876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void*)1}, 165876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 166876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 167876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void*)1}, 168876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 169876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 170876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void*)1}, 171876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 172876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 173876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void*)1}, 174335f16beSDavid Shaohua Li { set_max_cstate, "Medion 41700", { 
175876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 176876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1}, 177335f16beSDavid Shaohua Li { set_max_cstate, "Clevo 5600D", { 178876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 179876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, 180335f16beSDavid Shaohua Li (void *)2}, 1811da177e4SLinus Torvalds {}, 1821da177e4SLinus Torvalds }; 1831da177e4SLinus Torvalds 1844be44fcdSLen Brown static inline u32 ticks_elapsed(u32 t1, u32 t2) 1851da177e4SLinus Torvalds { 1861da177e4SLinus Torvalds if (t2 >= t1) 1871da177e4SLinus Torvalds return (t2 - t1); 188cee324b1SAlexey Starikovskiy else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER)) 1891da177e4SLinus Torvalds return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF); 1901da177e4SLinus Torvalds else 1911da177e4SLinus Torvalds return ((0xFFFFFFFF - t1) + t2); 1921da177e4SLinus Torvalds } 1931da177e4SLinus Torvalds 1944f86d3a8SLen Brown static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2) 1954f86d3a8SLen Brown { 1964f86d3a8SLen Brown if (t2 >= t1) 1974f86d3a8SLen Brown return PM_TIMER_TICKS_TO_US(t2 - t1); 1984f86d3a8SLen Brown else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER)) 1994f86d3a8SLen Brown return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF); 2004f86d3a8SLen Brown else 2014f86d3a8SLen Brown return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2); 2024f86d3a8SLen Brown } 2034f86d3a8SLen Brown 2042e906655Svenkatesh.pallipadi@intel.com /* 2052e906655Svenkatesh.pallipadi@intel.com * Callers should disable interrupts before the call and enable 2062e906655Svenkatesh.pallipadi@intel.com * interrupts after return. 
2072e906655Svenkatesh.pallipadi@intel.com */ 208ddc081a1SVenkatesh Pallipadi static void acpi_safe_halt(void) 209ddc081a1SVenkatesh Pallipadi { 210ddc081a1SVenkatesh Pallipadi current_thread_info()->status &= ~TS_POLLING; 211ddc081a1SVenkatesh Pallipadi /* 212ddc081a1SVenkatesh Pallipadi * TS_POLLING-cleared state must be visible before we 213ddc081a1SVenkatesh Pallipadi * test NEED_RESCHED: 214ddc081a1SVenkatesh Pallipadi */ 215ddc081a1SVenkatesh Pallipadi smp_mb(); 216ddc081a1SVenkatesh Pallipadi if (!need_resched()) 217ddc081a1SVenkatesh Pallipadi safe_halt(); 218ddc081a1SVenkatesh Pallipadi current_thread_info()->status |= TS_POLLING; 219ddc081a1SVenkatesh Pallipadi } 220ddc081a1SVenkatesh Pallipadi 2214f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE 2224f86d3a8SLen Brown 2231da177e4SLinus Torvalds static void 2244be44fcdSLen Brown acpi_processor_power_activate(struct acpi_processor *pr, 2251da177e4SLinus Torvalds struct acpi_processor_cx *new) 2261da177e4SLinus Torvalds { 2271da177e4SLinus Torvalds struct acpi_processor_cx *old; 2281da177e4SLinus Torvalds 2291da177e4SLinus Torvalds if (!pr || !new) 2301da177e4SLinus Torvalds return; 2311da177e4SLinus Torvalds 2321da177e4SLinus Torvalds old = pr->power.state; 2331da177e4SLinus Torvalds 2341da177e4SLinus Torvalds if (old) 2351da177e4SLinus Torvalds old->promotion.count = 0; 2361da177e4SLinus Torvalds new->demotion.count = 0; 2371da177e4SLinus Torvalds 2381da177e4SLinus Torvalds /* Cleanup from old state. 
*/ 2391da177e4SLinus Torvalds if (old) { 2401da177e4SLinus Torvalds switch (old->type) { 2411da177e4SLinus Torvalds case ACPI_STATE_C3: 2421da177e4SLinus Torvalds /* Disable bus master reload */ 24302df8b93SVenkatesh Pallipadi if (new->type != ACPI_STATE_C3 && pr->flags.bm_check) 244d8c71b6dSBob Moore acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); 2451da177e4SLinus Torvalds break; 2461da177e4SLinus Torvalds } 2471da177e4SLinus Torvalds } 2481da177e4SLinus Torvalds 2491da177e4SLinus Torvalds /* Prepare to use new state. */ 2501da177e4SLinus Torvalds switch (new->type) { 2511da177e4SLinus Torvalds case ACPI_STATE_C3: 2521da177e4SLinus Torvalds /* Enable bus master reload */ 25302df8b93SVenkatesh Pallipadi if (old->type != ACPI_STATE_C3 && pr->flags.bm_check) 254d8c71b6dSBob Moore acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1); 2551da177e4SLinus Torvalds break; 2561da177e4SLinus Torvalds } 2571da177e4SLinus Torvalds 2581da177e4SLinus Torvalds pr->power.state = new; 2591da177e4SLinus Torvalds 2601da177e4SLinus Torvalds return; 2611da177e4SLinus Torvalds } 2621da177e4SLinus Torvalds 26302df8b93SVenkatesh Pallipadi static atomic_t c3_cpu_count; 26402df8b93SVenkatesh Pallipadi 265991528d7SVenkatesh Pallipadi /* Common C-state entry for C2, C3, .. 
*/ 266991528d7SVenkatesh Pallipadi static void acpi_cstate_enter(struct acpi_processor_cx *cstate) 267991528d7SVenkatesh Pallipadi { 268bc71bec9Svenkatesh.pallipadi@intel.com if (cstate->entry_method == ACPI_CSTATE_FFH) { 269991528d7SVenkatesh Pallipadi /* Call into architectural FFH based C-state */ 270991528d7SVenkatesh Pallipadi acpi_processor_ffh_cstate_enter(cstate); 271991528d7SVenkatesh Pallipadi } else { 272991528d7SVenkatesh Pallipadi int unused; 273991528d7SVenkatesh Pallipadi /* IO port based C-state */ 274991528d7SVenkatesh Pallipadi inb(cstate->address); 275991528d7SVenkatesh Pallipadi /* Dummy wait op - must do something useless after P_LVL2 read 276991528d7SVenkatesh Pallipadi because chipsets cannot guarantee that STPCLK# signal 277991528d7SVenkatesh Pallipadi gets asserted in time to freeze execution properly. */ 278cee324b1SAlexey Starikovskiy unused = inl(acpi_gbl_FADT.xpm_timer_block.address); 279991528d7SVenkatesh Pallipadi } 280991528d7SVenkatesh Pallipadi } 2814f86d3a8SLen Brown #endif /* !CONFIG_CPU_IDLE */ 282991528d7SVenkatesh Pallipadi 283169a0abbSThomas Gleixner #ifdef ARCH_APICTIMER_STOPS_ON_C3 284169a0abbSThomas Gleixner 285169a0abbSThomas Gleixner /* 286169a0abbSThomas Gleixner * Some BIOS implementations switch to C3 in the published C2 state. 287296d93cdSLinus Torvalds * This seems to be a common problem on AMD boxen, but other vendors 288296d93cdSLinus Torvalds * are affected too. We pick the most conservative approach: we assume 289296d93cdSLinus Torvalds * that the local APIC stops in both C2 and C3. 290169a0abbSThomas Gleixner */ 291169a0abbSThomas Gleixner static void acpi_timer_check_state(int state, struct acpi_processor *pr, 292169a0abbSThomas Gleixner struct acpi_processor_cx *cx) 293169a0abbSThomas Gleixner { 294169a0abbSThomas Gleixner struct acpi_processor_power *pwr = &pr->power; 295e585bef8SThomas Gleixner u8 type = local_apic_timer_c2_ok ? 
ACPI_STATE_C3 : ACPI_STATE_C2; 296169a0abbSThomas Gleixner 297169a0abbSThomas Gleixner /* 298169a0abbSThomas Gleixner * Check, if one of the previous states already marked the lapic 299169a0abbSThomas Gleixner * unstable 300169a0abbSThomas Gleixner */ 301169a0abbSThomas Gleixner if (pwr->timer_broadcast_on_state < state) 302169a0abbSThomas Gleixner return; 303169a0abbSThomas Gleixner 304e585bef8SThomas Gleixner if (cx->type >= type) 305169a0abbSThomas Gleixner pr->power.timer_broadcast_on_state = state; 306169a0abbSThomas Gleixner } 307169a0abbSThomas Gleixner 308169a0abbSThomas Gleixner static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) 309169a0abbSThomas Gleixner { 310e9e2cdb4SThomas Gleixner unsigned long reason; 311e9e2cdb4SThomas Gleixner 312e9e2cdb4SThomas Gleixner reason = pr->power.timer_broadcast_on_state < INT_MAX ? 313e9e2cdb4SThomas Gleixner CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF; 314e9e2cdb4SThomas Gleixner 315e9e2cdb4SThomas Gleixner clockevents_notify(reason, &pr->id); 316e9e2cdb4SThomas Gleixner } 317e9e2cdb4SThomas Gleixner 318e9e2cdb4SThomas Gleixner /* Power(C) State timer broadcast control */ 319e9e2cdb4SThomas Gleixner static void acpi_state_timer_broadcast(struct acpi_processor *pr, 320e9e2cdb4SThomas Gleixner struct acpi_processor_cx *cx, 321e9e2cdb4SThomas Gleixner int broadcast) 322e9e2cdb4SThomas Gleixner { 323e9e2cdb4SThomas Gleixner int state = cx - pr->power.states; 324e9e2cdb4SThomas Gleixner 325e9e2cdb4SThomas Gleixner if (state >= pr->power.timer_broadcast_on_state) { 326e9e2cdb4SThomas Gleixner unsigned long reason; 327e9e2cdb4SThomas Gleixner 328e9e2cdb4SThomas Gleixner reason = broadcast ? 
CLOCK_EVT_NOTIFY_BROADCAST_ENTER : 329e9e2cdb4SThomas Gleixner CLOCK_EVT_NOTIFY_BROADCAST_EXIT; 330e9e2cdb4SThomas Gleixner clockevents_notify(reason, &pr->id); 331e9e2cdb4SThomas Gleixner } 332169a0abbSThomas Gleixner } 333169a0abbSThomas Gleixner 334169a0abbSThomas Gleixner #else 335169a0abbSThomas Gleixner 336169a0abbSThomas Gleixner static void acpi_timer_check_state(int state, struct acpi_processor *pr, 337169a0abbSThomas Gleixner struct acpi_processor_cx *cstate) { } 338169a0abbSThomas Gleixner static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { } 339e9e2cdb4SThomas Gleixner static void acpi_state_timer_broadcast(struct acpi_processor *pr, 340e9e2cdb4SThomas Gleixner struct acpi_processor_cx *cx, 341e9e2cdb4SThomas Gleixner int broadcast) 342e9e2cdb4SThomas Gleixner { 343e9e2cdb4SThomas Gleixner } 344169a0abbSThomas Gleixner 345169a0abbSThomas Gleixner #endif 346169a0abbSThomas Gleixner 347b04e7bdbSThomas Gleixner /* 348b04e7bdbSThomas Gleixner * Suspend / resume control 349b04e7bdbSThomas Gleixner */ 350b04e7bdbSThomas Gleixner static int acpi_idle_suspend; 351b04e7bdbSThomas Gleixner 352b04e7bdbSThomas Gleixner int acpi_processor_suspend(struct acpi_device * device, pm_message_t state) 353b04e7bdbSThomas Gleixner { 354b04e7bdbSThomas Gleixner acpi_idle_suspend = 1; 355b04e7bdbSThomas Gleixner return 0; 356b04e7bdbSThomas Gleixner } 357b04e7bdbSThomas Gleixner 358b04e7bdbSThomas Gleixner int acpi_processor_resume(struct acpi_device * device) 359b04e7bdbSThomas Gleixner { 360b04e7bdbSThomas Gleixner acpi_idle_suspend = 0; 361b04e7bdbSThomas Gleixner return 0; 362b04e7bdbSThomas Gleixner } 363b04e7bdbSThomas Gleixner 364ddb25f9aSAndi Kleen #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) 365ddb25f9aSAndi Kleen static int tsc_halts_in_c(int state) 366ddb25f9aSAndi Kleen { 367ddb25f9aSAndi Kleen switch (boot_cpu_data.x86_vendor) { 368ddb25f9aSAndi Kleen case X86_VENDOR_AMD: 369ddb25f9aSAndi Kleen /* 370ddb25f9aSAndi Kleen * AMD 
Fam10h TSC will tick in all 371ddb25f9aSAndi Kleen * C/P/S0/S1 states when this bit is set. 372ddb25f9aSAndi Kleen */ 373ddb25f9aSAndi Kleen if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 374ddb25f9aSAndi Kleen return 0; 375ddb25f9aSAndi Kleen /*FALL THROUGH*/ 376ddb25f9aSAndi Kleen case X86_VENDOR_INTEL: 377ddb25f9aSAndi Kleen /* Several cases known where TSC halts in C2 too */ 378ddb25f9aSAndi Kleen default: 379ddb25f9aSAndi Kleen return state > ACPI_STATE_C1; 380ddb25f9aSAndi Kleen } 381ddb25f9aSAndi Kleen } 382ddb25f9aSAndi Kleen #endif 383ddb25f9aSAndi Kleen 3844f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE 3851da177e4SLinus Torvalds static void acpi_processor_idle(void) 3861da177e4SLinus Torvalds { 3871da177e4SLinus Torvalds struct acpi_processor *pr = NULL; 3881da177e4SLinus Torvalds struct acpi_processor_cx *cx = NULL; 3891da177e4SLinus Torvalds struct acpi_processor_cx *next_state = NULL; 3901da177e4SLinus Torvalds int sleep_ticks = 0; 3911da177e4SLinus Torvalds u32 t1, t2 = 0; 3921da177e4SLinus Torvalds 3931da177e4SLinus Torvalds /* 3941da177e4SLinus Torvalds * Interrupts must be disabled during bus mastering calculations and 3951da177e4SLinus Torvalds * for C2/C3 transitions. 
3961da177e4SLinus Torvalds */ 3971da177e4SLinus Torvalds local_irq_disable(); 3981da177e4SLinus Torvalds 399d5a3d32aSVenkatesh Pallipadi pr = processors[smp_processor_id()]; 400d5a3d32aSVenkatesh Pallipadi if (!pr) { 401d5a3d32aSVenkatesh Pallipadi local_irq_enable(); 402d5a3d32aSVenkatesh Pallipadi return; 403d5a3d32aSVenkatesh Pallipadi } 404d5a3d32aSVenkatesh Pallipadi 4051da177e4SLinus Torvalds /* 4061da177e4SLinus Torvalds * Check whether we truly need to go idle, or should 4071da177e4SLinus Torvalds * reschedule: 4081da177e4SLinus Torvalds */ 4091da177e4SLinus Torvalds if (unlikely(need_resched())) { 4101da177e4SLinus Torvalds local_irq_enable(); 4111da177e4SLinus Torvalds return; 4121da177e4SLinus Torvalds } 4131da177e4SLinus Torvalds 4141da177e4SLinus Torvalds cx = pr->power.state; 415b04e7bdbSThomas Gleixner if (!cx || acpi_idle_suspend) { 41664c7c8f8SNick Piggin if (pm_idle_save) 41764c7c8f8SNick Piggin pm_idle_save(); 41864c7c8f8SNick Piggin else 41964c7c8f8SNick Piggin acpi_safe_halt(); 4202e906655Svenkatesh.pallipadi@intel.com 4212e906655Svenkatesh.pallipadi@intel.com local_irq_enable(); 42264c7c8f8SNick Piggin return; 42364c7c8f8SNick Piggin } 4241da177e4SLinus Torvalds 4251da177e4SLinus Torvalds /* 4261da177e4SLinus Torvalds * Check BM Activity 4271da177e4SLinus Torvalds * ----------------- 4281da177e4SLinus Torvalds * Check for bus mastering activity (if required), record, and check 4291da177e4SLinus Torvalds * for demotion. 
4301da177e4SLinus Torvalds */ 4311da177e4SLinus Torvalds if (pr->flags.bm_check) { 4321da177e4SLinus Torvalds u32 bm_status = 0; 4331da177e4SLinus Torvalds unsigned long diff = jiffies - pr->power.bm_check_timestamp; 4341da177e4SLinus Torvalds 435c5ab81caSDominik Brodowski if (diff > 31) 436c5ab81caSDominik Brodowski diff = 31; 4371da177e4SLinus Torvalds 438c5ab81caSDominik Brodowski pr->power.bm_activity <<= diff; 4391da177e4SLinus Torvalds 440d8c71b6dSBob Moore acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); 4411da177e4SLinus Torvalds if (bm_status) { 442c5ab81caSDominik Brodowski pr->power.bm_activity |= 0x1; 443d8c71b6dSBob Moore acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1); 4441da177e4SLinus Torvalds } 4451da177e4SLinus Torvalds /* 4461da177e4SLinus Torvalds * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect 4471da177e4SLinus Torvalds * the true state of bus mastering activity; forcing us to 4481da177e4SLinus Torvalds * manually check the BMIDEA bit of each IDE channel. 4491da177e4SLinus Torvalds */ 4501da177e4SLinus Torvalds else if (errata.piix4.bmisx) { 4511da177e4SLinus Torvalds if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) 4521da177e4SLinus Torvalds || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01)) 453c5ab81caSDominik Brodowski pr->power.bm_activity |= 0x1; 4541da177e4SLinus Torvalds } 4551da177e4SLinus Torvalds 4561da177e4SLinus Torvalds pr->power.bm_check_timestamp = jiffies; 4571da177e4SLinus Torvalds 4581da177e4SLinus Torvalds /* 459c4a001b1SDominik Brodowski * If bus mastering is or was active this jiffy, demote 4601da177e4SLinus Torvalds * to avoid a faulty transition. Note that the processor 4611da177e4SLinus Torvalds * won't enter a low-power state during this call (to this 462c4a001b1SDominik Brodowski * function) but should upon the next. 
4631da177e4SLinus Torvalds * 4641da177e4SLinus Torvalds * TBD: A better policy might be to fallback to the demotion 4651da177e4SLinus Torvalds * state (use it for this quantum only) istead of 4661da177e4SLinus Torvalds * demoting -- and rely on duration as our sole demotion 4671da177e4SLinus Torvalds * qualification. This may, however, introduce DMA 4681da177e4SLinus Torvalds * issues (e.g. floppy DMA transfer overrun/underrun). 4691da177e4SLinus Torvalds */ 470c4a001b1SDominik Brodowski if ((pr->power.bm_activity & 0x1) && 471c4a001b1SDominik Brodowski cx->demotion.threshold.bm) { 4721da177e4SLinus Torvalds local_irq_enable(); 4731da177e4SLinus Torvalds next_state = cx->demotion.state; 4741da177e4SLinus Torvalds goto end; 4751da177e4SLinus Torvalds } 4761da177e4SLinus Torvalds } 4771da177e4SLinus Torvalds 4784c033552SVenkatesh Pallipadi #ifdef CONFIG_HOTPLUG_CPU 4794c033552SVenkatesh Pallipadi /* 4804c033552SVenkatesh Pallipadi * Check for P_LVL2_UP flag before entering C2 and above on 4814c033552SVenkatesh Pallipadi * an SMP system. We do it here instead of doing it at _CST/P_LVL 4824c033552SVenkatesh Pallipadi * detection phase, to work cleanly with logical CPU hotplug. 4834c033552SVenkatesh Pallipadi */ 4844c033552SVenkatesh Pallipadi if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && 485cee324b1SAlexey Starikovskiy !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) 4861e483969SDavid Shaohua Li cx = &pr->power.states[ACPI_STATE_C1]; 4874c033552SVenkatesh Pallipadi #endif 4881e483969SDavid Shaohua Li 4891da177e4SLinus Torvalds /* 4901da177e4SLinus Torvalds * Sleep: 4911da177e4SLinus Torvalds * ------ 4921da177e4SLinus Torvalds * Invoke the current Cx state to put the processor to sleep. 
4931da177e4SLinus Torvalds */ 4942a298a35SNick Piggin if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) { 495495ab9c0SAndi Kleen current_thread_info()->status &= ~TS_POLLING; 4960888f06aSIngo Molnar /* 4970888f06aSIngo Molnar * TS_POLLING-cleared state must be visible before we 4980888f06aSIngo Molnar * test NEED_RESCHED: 4990888f06aSIngo Molnar */ 5000888f06aSIngo Molnar smp_mb(); 5012a298a35SNick Piggin if (need_resched()) { 502495ab9c0SAndi Kleen current_thread_info()->status |= TS_POLLING; 503af2eb17bSLinus Torvalds local_irq_enable(); 5042a298a35SNick Piggin return; 5052a298a35SNick Piggin } 5062a298a35SNick Piggin } 5072a298a35SNick Piggin 5081da177e4SLinus Torvalds switch (cx->type) { 5091da177e4SLinus Torvalds 5101da177e4SLinus Torvalds case ACPI_STATE_C1: 5111da177e4SLinus Torvalds /* 5121da177e4SLinus Torvalds * Invoke C1. 5131da177e4SLinus Torvalds * Use the appropriate idle routine, the one that would 5141da177e4SLinus Torvalds * be used without acpi C-states. 5151da177e4SLinus Torvalds */ 5161da177e4SLinus Torvalds if (pm_idle_save) 5171da177e4SLinus Torvalds pm_idle_save(); 5181da177e4SLinus Torvalds else 51964c7c8f8SNick Piggin acpi_safe_halt(); 52064c7c8f8SNick Piggin 5211da177e4SLinus Torvalds /* 5221da177e4SLinus Torvalds * TBD: Can't get time duration while in C1, as resumes 5231da177e4SLinus Torvalds * go to an ISR rather than here. Need to instrument 5241da177e4SLinus Torvalds * base interrupt handler. 5252aa44d05SIngo Molnar * 5262aa44d05SIngo Molnar * Note: the TSC better not stop in C1, sched_clock() will 5272aa44d05SIngo Molnar * skew otherwise. 
5281da177e4SLinus Torvalds */ 5291da177e4SLinus Torvalds sleep_ticks = 0xFFFFFFFF; 5302e906655Svenkatesh.pallipadi@intel.com local_irq_enable(); 5311da177e4SLinus Torvalds break; 5321da177e4SLinus Torvalds 5331da177e4SLinus Torvalds case ACPI_STATE_C2: 5341da177e4SLinus Torvalds /* Get start time (ticks) */ 535cee324b1SAlexey Starikovskiy t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); 5362aa44d05SIngo Molnar /* Tell the scheduler that we are going deep-idle: */ 5372aa44d05SIngo Molnar sched_clock_idle_sleep_event(); 5381da177e4SLinus Torvalds /* Invoke C2 */ 539e9e2cdb4SThomas Gleixner acpi_state_timer_broadcast(pr, cx, 1); 540991528d7SVenkatesh Pallipadi acpi_cstate_enter(cx); 5411da177e4SLinus Torvalds /* Get end time (ticks) */ 542cee324b1SAlexey Starikovskiy t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); 543539eb11eSjohn stultz 5440aa366f3STony Luck #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) 545539eb11eSjohn stultz /* TSC halts in C2, so notify users */ 546ddb25f9aSAndi Kleen if (tsc_halts_in_c(ACPI_STATE_C2)) 5475a90cf20Sjohn stultz mark_tsc_unstable("possible TSC halt in C2"); 548539eb11eSjohn stultz #endif 5492aa44d05SIngo Molnar /* Compute time (ticks) that we were actually asleep */ 5502aa44d05SIngo Molnar sleep_ticks = ticks_elapsed(t1, t2); 5512aa44d05SIngo Molnar 5522aa44d05SIngo Molnar /* Tell the scheduler how much we idled: */ 5532aa44d05SIngo Molnar sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); 5542aa44d05SIngo Molnar 5551da177e4SLinus Torvalds /* Re-enable interrupts */ 5561da177e4SLinus Torvalds local_irq_enable(); 5572aa44d05SIngo Molnar /* Do not account our idle-switching overhead: */ 5582aa44d05SIngo Molnar sleep_ticks -= cx->latency_ticks + C2_OVERHEAD; 5592aa44d05SIngo Molnar 560495ab9c0SAndi Kleen current_thread_info()->status |= TS_POLLING; 561e9e2cdb4SThomas Gleixner acpi_state_timer_broadcast(pr, cx, 0); 5621da177e4SLinus Torvalds break; 5631da177e4SLinus Torvalds 5641da177e4SLinus Torvalds case 
ACPI_STATE_C3: 565bde6f5f5SVenki Pallipadi acpi_unlazy_tlb(smp_processor_id()); 56618eab855SVenkatesh Pallipadi /* 567e17bcb43SThomas Gleixner * Must be done before busmaster disable as we might 568e17bcb43SThomas Gleixner * need to access HPET ! 569e17bcb43SThomas Gleixner */ 570e17bcb43SThomas Gleixner acpi_state_timer_broadcast(pr, cx, 1); 571e17bcb43SThomas Gleixner /* 57218eab855SVenkatesh Pallipadi * disable bus master 57318eab855SVenkatesh Pallipadi * bm_check implies we need ARB_DIS 57418eab855SVenkatesh Pallipadi * !bm_check implies we need cache flush 57518eab855SVenkatesh Pallipadi * bm_control implies whether we can do ARB_DIS 57618eab855SVenkatesh Pallipadi * 57718eab855SVenkatesh Pallipadi * That leaves a case where bm_check is set and bm_control is 57818eab855SVenkatesh Pallipadi * not set. In that case we cannot do much, we enter C3 57918eab855SVenkatesh Pallipadi * without doing anything. 58018eab855SVenkatesh Pallipadi */ 58118eab855SVenkatesh Pallipadi if (pr->flags.bm_check && pr->flags.bm_control) { 58202df8b93SVenkatesh Pallipadi if (atomic_inc_return(&c3_cpu_count) == 58302df8b93SVenkatesh Pallipadi num_online_cpus()) { 58402df8b93SVenkatesh Pallipadi /* 58502df8b93SVenkatesh Pallipadi * All CPUs are trying to go to C3 58602df8b93SVenkatesh Pallipadi * Disable bus master arbitration 58702df8b93SVenkatesh Pallipadi */ 588d8c71b6dSBob Moore acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); 58902df8b93SVenkatesh Pallipadi } 59018eab855SVenkatesh Pallipadi } else if (!pr->flags.bm_check) { 59102df8b93SVenkatesh Pallipadi /* SMP with no shared cache... 
Invalidate cache */ 59202df8b93SVenkatesh Pallipadi ACPI_FLUSH_CPU_CACHE(); 59302df8b93SVenkatesh Pallipadi } 59402df8b93SVenkatesh Pallipadi 5951da177e4SLinus Torvalds /* Get start time (ticks) */ 596cee324b1SAlexey Starikovskiy t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); 5971da177e4SLinus Torvalds /* Invoke C3 */ 5982aa44d05SIngo Molnar /* Tell the scheduler that we are going deep-idle: */ 5992aa44d05SIngo Molnar sched_clock_idle_sleep_event(); 600991528d7SVenkatesh Pallipadi acpi_cstate_enter(cx); 6011da177e4SLinus Torvalds /* Get end time (ticks) */ 602cee324b1SAlexey Starikovskiy t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); 60318eab855SVenkatesh Pallipadi if (pr->flags.bm_check && pr->flags.bm_control) { 6041da177e4SLinus Torvalds /* Enable bus master arbitration */ 60502df8b93SVenkatesh Pallipadi atomic_dec(&c3_cpu_count); 606d8c71b6dSBob Moore acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); 60702df8b93SVenkatesh Pallipadi } 60802df8b93SVenkatesh Pallipadi 6090aa366f3STony Luck #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) 610539eb11eSjohn stultz /* TSC halts in C3, so notify users */ 611ddb25f9aSAndi Kleen if (tsc_halts_in_c(ACPI_STATE_C3)) 6125a90cf20Sjohn stultz mark_tsc_unstable("TSC halts in C3"); 613539eb11eSjohn stultz #endif 6142aa44d05SIngo Molnar /* Compute time (ticks) that we were actually asleep */ 6152aa44d05SIngo Molnar sleep_ticks = ticks_elapsed(t1, t2); 6162aa44d05SIngo Molnar /* Tell the scheduler how much we idled: */ 6172aa44d05SIngo Molnar sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); 6182aa44d05SIngo Molnar 6191da177e4SLinus Torvalds /* Re-enable interrupts */ 6201da177e4SLinus Torvalds local_irq_enable(); 6212aa44d05SIngo Molnar /* Do not account our idle-switching overhead: */ 6222aa44d05SIngo Molnar sleep_ticks -= cx->latency_ticks + C3_OVERHEAD; 6232aa44d05SIngo Molnar 624495ab9c0SAndi Kleen current_thread_info()->status |= TS_POLLING; 625e9e2cdb4SThomas Gleixner acpi_state_timer_broadcast(pr, 
cx, 0); 6261da177e4SLinus Torvalds break; 6271da177e4SLinus Torvalds 6281da177e4SLinus Torvalds default: 6291da177e4SLinus Torvalds local_irq_enable(); 6301da177e4SLinus Torvalds return; 6311da177e4SLinus Torvalds } 632a3c6598fSDominik Brodowski cx->usage++; 633a3c6598fSDominik Brodowski if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0)) 634a3c6598fSDominik Brodowski cx->time += sleep_ticks; 6351da177e4SLinus Torvalds 6361da177e4SLinus Torvalds next_state = pr->power.state; 6371da177e4SLinus Torvalds 6381e483969SDavid Shaohua Li #ifdef CONFIG_HOTPLUG_CPU 6391e483969SDavid Shaohua Li /* Don't do promotion/demotion */ 6401e483969SDavid Shaohua Li if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) && 641cee324b1SAlexey Starikovskiy !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) { 6421e483969SDavid Shaohua Li next_state = cx; 6431e483969SDavid Shaohua Li goto end; 6441e483969SDavid Shaohua Li } 6451e483969SDavid Shaohua Li #endif 6461e483969SDavid Shaohua Li 6471da177e4SLinus Torvalds /* 6481da177e4SLinus Torvalds * Promotion? 6491da177e4SLinus Torvalds * ---------- 6501da177e4SLinus Torvalds * Track the number of longs (time asleep is greater than threshold) 6511da177e4SLinus Torvalds * and promote when the count threshold is reached. Note that bus 6521da177e4SLinus Torvalds * mastering activity may prevent promotions. 6531da177e4SLinus Torvalds * Do not promote above max_cstate. 
6541da177e4SLinus Torvalds */ 6551da177e4SLinus Torvalds if (cx->promotion.state && 6561da177e4SLinus Torvalds ((cx->promotion.state - pr->power.states) <= max_cstate)) { 6575c87579eSArjan van de Ven if (sleep_ticks > cx->promotion.threshold.ticks && 658f011e2e2SMark Gross cx->promotion.state->latency <= 659f011e2e2SMark Gross pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) { 6601da177e4SLinus Torvalds cx->promotion.count++; 6611da177e4SLinus Torvalds cx->demotion.count = 0; 6624be44fcdSLen Brown if (cx->promotion.count >= 6634be44fcdSLen Brown cx->promotion.threshold.count) { 6641da177e4SLinus Torvalds if (pr->flags.bm_check) { 6654be44fcdSLen Brown if (! 6664be44fcdSLen Brown (pr->power.bm_activity & cx-> 6674be44fcdSLen Brown promotion.threshold.bm)) { 6684be44fcdSLen Brown next_state = 6694be44fcdSLen Brown cx->promotion.state; 6701da177e4SLinus Torvalds goto end; 6711da177e4SLinus Torvalds } 6724be44fcdSLen Brown } else { 6731da177e4SLinus Torvalds next_state = cx->promotion.state; 6741da177e4SLinus Torvalds goto end; 6751da177e4SLinus Torvalds } 6761da177e4SLinus Torvalds } 6771da177e4SLinus Torvalds } 6781da177e4SLinus Torvalds } 6791da177e4SLinus Torvalds 6801da177e4SLinus Torvalds /* 6811da177e4SLinus Torvalds * Demotion? 6821da177e4SLinus Torvalds * --------- 6831da177e4SLinus Torvalds * Track the number of shorts (time asleep is less than time threshold) 6841da177e4SLinus Torvalds * and demote when the usage threshold is reached. 
6851da177e4SLinus Torvalds */ 6861da177e4SLinus Torvalds if (cx->demotion.state) { 6871da177e4SLinus Torvalds if (sleep_ticks < cx->demotion.threshold.ticks) { 6881da177e4SLinus Torvalds cx->demotion.count++; 6891da177e4SLinus Torvalds cx->promotion.count = 0; 6901da177e4SLinus Torvalds if (cx->demotion.count >= cx->demotion.threshold.count) { 6911da177e4SLinus Torvalds next_state = cx->demotion.state; 6921da177e4SLinus Torvalds goto end; 6931da177e4SLinus Torvalds } 6941da177e4SLinus Torvalds } 6951da177e4SLinus Torvalds } 6961da177e4SLinus Torvalds 6971da177e4SLinus Torvalds end: 6981da177e4SLinus Torvalds /* 6991da177e4SLinus Torvalds * Demote if current state exceeds max_cstate 7005c87579eSArjan van de Ven * or if the latency of the current state is unacceptable 7011da177e4SLinus Torvalds */ 7025c87579eSArjan van de Ven if ((pr->power.state - pr->power.states) > max_cstate || 703f011e2e2SMark Gross pr->power.state->latency > 704f011e2e2SMark Gross pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) { 7051da177e4SLinus Torvalds if (cx->demotion.state) 7061da177e4SLinus Torvalds next_state = cx->demotion.state; 7071da177e4SLinus Torvalds } 7081da177e4SLinus Torvalds 7091da177e4SLinus Torvalds /* 7101da177e4SLinus Torvalds * New Cx State? 7111da177e4SLinus Torvalds * ------------- 7121da177e4SLinus Torvalds * If we're going to start using a new Cx state we must clean up 7131da177e4SLinus Torvalds * from the previous and prepare to use the new. 
7141da177e4SLinus Torvalds */ 7151da177e4SLinus Torvalds if (next_state != pr->power.state) 7161da177e4SLinus Torvalds acpi_processor_power_activate(pr, next_state); 7171da177e4SLinus Torvalds } 7181da177e4SLinus Torvalds 7194be44fcdSLen Brown static int acpi_processor_set_power_policy(struct acpi_processor *pr) 7201da177e4SLinus Torvalds { 7211da177e4SLinus Torvalds unsigned int i; 7221da177e4SLinus Torvalds unsigned int state_is_set = 0; 7231da177e4SLinus Torvalds struct acpi_processor_cx *lower = NULL; 7241da177e4SLinus Torvalds struct acpi_processor_cx *higher = NULL; 7251da177e4SLinus Torvalds struct acpi_processor_cx *cx; 7261da177e4SLinus Torvalds 7271da177e4SLinus Torvalds 7281da177e4SLinus Torvalds if (!pr) 729d550d98dSPatrick Mochel return -EINVAL; 7301da177e4SLinus Torvalds 7311da177e4SLinus Torvalds /* 7321da177e4SLinus Torvalds * This function sets the default Cx state policy (OS idle handler). 7331da177e4SLinus Torvalds * Our scheme is to promote quickly to C2 but more conservatively 7341da177e4SLinus Torvalds * to C3. We're favoring C2 for its characteristics of low latency 7351da177e4SLinus Torvalds * (quick response), good power savings, and ability to allow bus 7361da177e4SLinus Torvalds * mastering activity. Note that the Cx state policy is completely 7371da177e4SLinus Torvalds * customizable and can be altered dynamically. 
7381da177e4SLinus Torvalds */ 7391da177e4SLinus Torvalds 7401da177e4SLinus Torvalds /* startup state */ 7411da177e4SLinus Torvalds for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { 7421da177e4SLinus Torvalds cx = &pr->power.states[i]; 7431da177e4SLinus Torvalds if (!cx->valid) 7441da177e4SLinus Torvalds continue; 7451da177e4SLinus Torvalds 7461da177e4SLinus Torvalds if (!state_is_set) 7471da177e4SLinus Torvalds pr->power.state = cx; 7481da177e4SLinus Torvalds state_is_set++; 7491da177e4SLinus Torvalds break; 7501da177e4SLinus Torvalds } 7511da177e4SLinus Torvalds 7521da177e4SLinus Torvalds if (!state_is_set) 753d550d98dSPatrick Mochel return -ENODEV; 7541da177e4SLinus Torvalds 7551da177e4SLinus Torvalds /* demotion */ 7561da177e4SLinus Torvalds for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { 7571da177e4SLinus Torvalds cx = &pr->power.states[i]; 7581da177e4SLinus Torvalds if (!cx->valid) 7591da177e4SLinus Torvalds continue; 7601da177e4SLinus Torvalds 7611da177e4SLinus Torvalds if (lower) { 7621da177e4SLinus Torvalds cx->demotion.state = lower; 7631da177e4SLinus Torvalds cx->demotion.threshold.ticks = cx->latency_ticks; 7641da177e4SLinus Torvalds cx->demotion.threshold.count = 1; 7651da177e4SLinus Torvalds if (cx->type == ACPI_STATE_C3) 7661da177e4SLinus Torvalds cx->demotion.threshold.bm = bm_history; 7671da177e4SLinus Torvalds } 7681da177e4SLinus Torvalds 7691da177e4SLinus Torvalds lower = cx; 7701da177e4SLinus Torvalds } 7711da177e4SLinus Torvalds 7721da177e4SLinus Torvalds /* promotion */ 7731da177e4SLinus Torvalds for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) { 7741da177e4SLinus Torvalds cx = &pr->power.states[i]; 7751da177e4SLinus Torvalds if (!cx->valid) 7761da177e4SLinus Torvalds continue; 7771da177e4SLinus Torvalds 7781da177e4SLinus Torvalds if (higher) { 7791da177e4SLinus Torvalds cx->promotion.state = higher; 7801da177e4SLinus Torvalds cx->promotion.threshold.ticks = cx->latency_ticks; 7811da177e4SLinus Torvalds if (cx->type >= ACPI_STATE_C2) 
7821da177e4SLinus Torvalds cx->promotion.threshold.count = 4; 7831da177e4SLinus Torvalds else 7841da177e4SLinus Torvalds cx->promotion.threshold.count = 10; 7851da177e4SLinus Torvalds if (higher->type == ACPI_STATE_C3) 7861da177e4SLinus Torvalds cx->promotion.threshold.bm = bm_history; 7871da177e4SLinus Torvalds } 7881da177e4SLinus Torvalds 7891da177e4SLinus Torvalds higher = cx; 7901da177e4SLinus Torvalds } 7911da177e4SLinus Torvalds 792d550d98dSPatrick Mochel return 0; 7931da177e4SLinus Torvalds } 7944f86d3a8SLen Brown #endif /* !CONFIG_CPU_IDLE */ 7951da177e4SLinus Torvalds 7961da177e4SLinus Torvalds static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) 7971da177e4SLinus Torvalds { 7981da177e4SLinus Torvalds 7991da177e4SLinus Torvalds if (!pr) 800d550d98dSPatrick Mochel return -EINVAL; 8011da177e4SLinus Torvalds 8021da177e4SLinus Torvalds if (!pr->pblk) 803d550d98dSPatrick Mochel return -ENODEV; 8041da177e4SLinus Torvalds 8051da177e4SLinus Torvalds /* if info is obtained from pblk/fadt, type equals state */ 8061da177e4SLinus Torvalds pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2; 8071da177e4SLinus Torvalds pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3; 8081da177e4SLinus Torvalds 8094c033552SVenkatesh Pallipadi #ifndef CONFIG_HOTPLUG_CPU 8104c033552SVenkatesh Pallipadi /* 8114c033552SVenkatesh Pallipadi * Check for P_LVL2_UP flag before entering C2 and above on 8124c033552SVenkatesh Pallipadi * an SMP system. 
8134c033552SVenkatesh Pallipadi */ 814ad71860aSAlexey Starikovskiy if ((num_online_cpus() > 1) && 815cee324b1SAlexey Starikovskiy !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) 816d550d98dSPatrick Mochel return -ENODEV; 8174c033552SVenkatesh Pallipadi #endif 8184c033552SVenkatesh Pallipadi 8191da177e4SLinus Torvalds /* determine C2 and C3 address from pblk */ 8201da177e4SLinus Torvalds pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4; 8211da177e4SLinus Torvalds pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5; 8221da177e4SLinus Torvalds 8231da177e4SLinus Torvalds /* determine latencies from FADT */ 824cee324b1SAlexey Starikovskiy pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency; 825cee324b1SAlexey Starikovskiy pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency; 8261da177e4SLinus Torvalds 8271da177e4SLinus Torvalds ACPI_DEBUG_PRINT((ACPI_DB_INFO, 8281da177e4SLinus Torvalds "lvl2[0x%08x] lvl3[0x%08x]\n", 8291da177e4SLinus Torvalds pr->power.states[ACPI_STATE_C2].address, 8301da177e4SLinus Torvalds pr->power.states[ACPI_STATE_C3].address)); 8311da177e4SLinus Torvalds 832d550d98dSPatrick Mochel return 0; 8331da177e4SLinus Torvalds } 8341da177e4SLinus Torvalds 835991528d7SVenkatesh Pallipadi static int acpi_processor_get_power_info_default(struct acpi_processor *pr) 836acf05f4bSVenkatesh Pallipadi { 837991528d7SVenkatesh Pallipadi if (!pr->power.states[ACPI_STATE_C1].valid) { 838cf824788SJanosch Machowinski /* set the first C-State to C1 */ 839991528d7SVenkatesh Pallipadi /* all processors need to support C1 */ 840acf05f4bSVenkatesh Pallipadi pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; 841acf05f4bSVenkatesh Pallipadi pr->power.states[ACPI_STATE_C1].valid = 1; 842991528d7SVenkatesh Pallipadi } 843991528d7SVenkatesh Pallipadi /* the C0 state only exists as a filler in our array */ 844991528d7SVenkatesh Pallipadi pr->power.states[ACPI_STATE_C0].valid = 1; 845d550d98dSPatrick Mochel return 0; 846acf05f4bSVenkatesh 
/*
 * Parse the _CST (C-States) package for this processor and copy each
 * usable entry into pr->power.states[1..].
 *
 * _CST layout: element[0] is the state count, each following element
 * is a 4-element package { register buffer, type, latency, power }.
 * Malformed or unsupported entries are skipped silently.
 *
 * Returns 0 on success, -ENODEV when _CST is absent/disabled (caller
 * falls back to FADT data), -EFAULT on a malformed package or when
 * fewer than two states were found.
 */
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;


	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		/* Element 0: the generic-address register buffer. */
		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		/* Only system-I/O and fixed-hardware entry methods are usable. */
		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		/*
		 * Bumping current_count here leaves states[1] free so
		 * the default path can drop the synthesized C1 there.
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
			    (pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
			} else {
				continue;
			}
		}

		/* Element 2: worst-case exit latency in microseconds. */
		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		/* Element 3: average power consumption in milliwatts. */
		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	/* buffer.pointer was allocated by ACPICA; we own and free it. */
	kfree(buffer.pointer);

	return status;
}
Validate number of power states discovered */ 983cf824788SJanosch Machowinski if (current_count < 2) 9846d93c648SVenkatesh Pallipadi status = -EFAULT; 9851da177e4SLinus Torvalds 9861da177e4SLinus Torvalds end: 98702438d87SLen Brown kfree(buffer.pointer); 9881da177e4SLinus Torvalds 989d550d98dSPatrick Mochel return status; 9901da177e4SLinus Torvalds } 9911da177e4SLinus Torvalds 9921da177e4SLinus Torvalds static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx) 9931da177e4SLinus Torvalds { 9941da177e4SLinus Torvalds 9951da177e4SLinus Torvalds if (!cx->address) 996d550d98dSPatrick Mochel return; 9971da177e4SLinus Torvalds 9981da177e4SLinus Torvalds /* 9991da177e4SLinus Torvalds * C2 latency must be less than or equal to 100 10001da177e4SLinus Torvalds * microseconds. 10011da177e4SLinus Torvalds */ 10021da177e4SLinus Torvalds else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) { 10031da177e4SLinus Torvalds ACPI_DEBUG_PRINT((ACPI_DB_INFO, 10044be44fcdSLen Brown "latency too large [%d]\n", cx->latency)); 1005d550d98dSPatrick Mochel return; 10061da177e4SLinus Torvalds } 10071da177e4SLinus Torvalds 10081da177e4SLinus Torvalds /* 10091da177e4SLinus Torvalds * Otherwise we've met all of our C2 requirements. 
10101da177e4SLinus Torvalds * Normalize the C2 latency to expidite policy 10111da177e4SLinus Torvalds */ 10121da177e4SLinus Torvalds cx->valid = 1; 10134f86d3a8SLen Brown 10144f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE 10151da177e4SLinus Torvalds cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); 10164f86d3a8SLen Brown #else 10174f86d3a8SLen Brown cx->latency_ticks = cx->latency; 10184f86d3a8SLen Brown #endif 10191da177e4SLinus Torvalds 1020d550d98dSPatrick Mochel return; 10211da177e4SLinus Torvalds } 10221da177e4SLinus Torvalds 10234be44fcdSLen Brown static void acpi_processor_power_verify_c3(struct acpi_processor *pr, 10241da177e4SLinus Torvalds struct acpi_processor_cx *cx) 10251da177e4SLinus Torvalds { 102602df8b93SVenkatesh Pallipadi static int bm_check_flag; 102702df8b93SVenkatesh Pallipadi 10281da177e4SLinus Torvalds 10291da177e4SLinus Torvalds if (!cx->address) 1030d550d98dSPatrick Mochel return; 10311da177e4SLinus Torvalds 10321da177e4SLinus Torvalds /* 10331da177e4SLinus Torvalds * C3 latency must be less than or equal to 1000 10341da177e4SLinus Torvalds * microseconds. 10351da177e4SLinus Torvalds */ 10361da177e4SLinus Torvalds else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) { 10371da177e4SLinus Torvalds ACPI_DEBUG_PRINT((ACPI_DB_INFO, 10384be44fcdSLen Brown "latency too large [%d]\n", cx->latency)); 1039d550d98dSPatrick Mochel return; 10401da177e4SLinus Torvalds } 10411da177e4SLinus Torvalds 10421da177e4SLinus Torvalds /* 10431da177e4SLinus Torvalds * PIIX4 Erratum #18: We don't support C3 when Type-F (fast) 10441da177e4SLinus Torvalds * DMA transfers are used by any ISA device to avoid livelock. 10451da177e4SLinus Torvalds * Note that we could disable Type-F DMA (as recommended by 10461da177e4SLinus Torvalds * the erratum), but this is known to disrupt certain ISA 10471da177e4SLinus Torvalds * devices thus we take the conservative approach. 
10481da177e4SLinus Torvalds */ 10491da177e4SLinus Torvalds else if (errata.piix4.fdma) { 10501da177e4SLinus Torvalds ACPI_DEBUG_PRINT((ACPI_DB_INFO, 10511da177e4SLinus Torvalds "C3 not supported on PIIX4 with Type-F DMA\n")); 1052d550d98dSPatrick Mochel return; 10531da177e4SLinus Torvalds } 10541da177e4SLinus Torvalds 105502df8b93SVenkatesh Pallipadi /* All the logic here assumes flags.bm_check is same across all CPUs */ 105602df8b93SVenkatesh Pallipadi if (!bm_check_flag) { 105702df8b93SVenkatesh Pallipadi /* Determine whether bm_check is needed based on CPU */ 105802df8b93SVenkatesh Pallipadi acpi_processor_power_init_bm_check(&(pr->flags), pr->id); 105902df8b93SVenkatesh Pallipadi bm_check_flag = pr->flags.bm_check; 106002df8b93SVenkatesh Pallipadi } else { 106102df8b93SVenkatesh Pallipadi pr->flags.bm_check = bm_check_flag; 106202df8b93SVenkatesh Pallipadi } 106302df8b93SVenkatesh Pallipadi 106402df8b93SVenkatesh Pallipadi if (pr->flags.bm_check) { 106502df8b93SVenkatesh Pallipadi if (!pr->flags.bm_control) { 1066ed3110efSVenki Pallipadi if (pr->flags.has_cst != 1) { 1067ed3110efSVenki Pallipadi /* bus mastering control is necessary */ 106802df8b93SVenkatesh Pallipadi ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1069ed3110efSVenki Pallipadi "C3 support requires BM control\n")); 1070ed3110efSVenki Pallipadi return; 1071ed3110efSVenki Pallipadi } else { 1072ed3110efSVenki Pallipadi /* Here we enter C3 without bus mastering */ 1073ed3110efSVenki Pallipadi ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1074ed3110efSVenki Pallipadi "C3 support without BM control\n")); 1075ed3110efSVenki Pallipadi } 107602df8b93SVenkatesh Pallipadi } 107702df8b93SVenkatesh Pallipadi } else { 107802df8b93SVenkatesh Pallipadi /* 107902df8b93SVenkatesh Pallipadi * WBINVD should be set in fadt, for C3 state to be 108002df8b93SVenkatesh Pallipadi * supported on when bm_check is not required. 
108102df8b93SVenkatesh Pallipadi */ 1082cee324b1SAlexey Starikovskiy if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) { 108302df8b93SVenkatesh Pallipadi ACPI_DEBUG_PRINT((ACPI_DB_INFO, 108402df8b93SVenkatesh Pallipadi "Cache invalidation should work properly" 108502df8b93SVenkatesh Pallipadi " for C3 to be enabled on SMP systems\n")); 1086d550d98dSPatrick Mochel return; 108702df8b93SVenkatesh Pallipadi } 1088d8c71b6dSBob Moore acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); 108902df8b93SVenkatesh Pallipadi } 109002df8b93SVenkatesh Pallipadi 10911da177e4SLinus Torvalds /* 10921da177e4SLinus Torvalds * Otherwise we've met all of our C3 requirements. 10931da177e4SLinus Torvalds * Normalize the C3 latency to expidite policy. Enable 10941da177e4SLinus Torvalds * checking of bus mastering status (bm_check) so we can 10951da177e4SLinus Torvalds * use this in our C3 policy 10961da177e4SLinus Torvalds */ 10971da177e4SLinus Torvalds cx->valid = 1; 10984f86d3a8SLen Brown 10994f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE 11001da177e4SLinus Torvalds cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); 11014f86d3a8SLen Brown #else 11024f86d3a8SLen Brown cx->latency_ticks = cx->latency; 11034f86d3a8SLen Brown #endif 11041da177e4SLinus Torvalds 1105d550d98dSPatrick Mochel return; 11061da177e4SLinus Torvalds } 11071da177e4SLinus Torvalds 11081da177e4SLinus Torvalds static int acpi_processor_power_verify(struct acpi_processor *pr) 11091da177e4SLinus Torvalds { 11101da177e4SLinus Torvalds unsigned int i; 11111da177e4SLinus Torvalds unsigned int working = 0; 11126eb0a0fdSVenkatesh Pallipadi 1113169a0abbSThomas Gleixner pr->power.timer_broadcast_on_state = INT_MAX; 11146eb0a0fdSVenkatesh Pallipadi 11151da177e4SLinus Torvalds for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { 11161da177e4SLinus Torvalds struct acpi_processor_cx *cx = &pr->power.states[i]; 11171da177e4SLinus Torvalds 11181da177e4SLinus Torvalds switch (cx->type) { 11191da177e4SLinus Torvalds case ACPI_STATE_C1: 
11201da177e4SLinus Torvalds cx->valid = 1; 11211da177e4SLinus Torvalds break; 11221da177e4SLinus Torvalds 11231da177e4SLinus Torvalds case ACPI_STATE_C2: 11241da177e4SLinus Torvalds acpi_processor_power_verify_c2(cx); 1125296d93cdSLinus Torvalds if (cx->valid) 1126169a0abbSThomas Gleixner acpi_timer_check_state(i, pr, cx); 11271da177e4SLinus Torvalds break; 11281da177e4SLinus Torvalds 11291da177e4SLinus Torvalds case ACPI_STATE_C3: 11301da177e4SLinus Torvalds acpi_processor_power_verify_c3(pr, cx); 1131296d93cdSLinus Torvalds if (cx->valid) 1132169a0abbSThomas Gleixner acpi_timer_check_state(i, pr, cx); 11331da177e4SLinus Torvalds break; 11341da177e4SLinus Torvalds } 11351da177e4SLinus Torvalds 11361da177e4SLinus Torvalds if (cx->valid) 11371da177e4SLinus Torvalds working++; 11381da177e4SLinus Torvalds } 11391da177e4SLinus Torvalds 1140169a0abbSThomas Gleixner acpi_propagate_timer_broadcast(pr); 1141bd663347SAndi Kleen 11421da177e4SLinus Torvalds return (working); 11431da177e4SLinus Torvalds } 11441da177e4SLinus Torvalds 11454be44fcdSLen Brown static int acpi_processor_get_power_info(struct acpi_processor *pr) 11461da177e4SLinus Torvalds { 11471da177e4SLinus Torvalds unsigned int i; 11481da177e4SLinus Torvalds int result; 11491da177e4SLinus Torvalds 11501da177e4SLinus Torvalds 11511da177e4SLinus Torvalds /* NOTE: the idle thread may not be running while calling 11521da177e4SLinus Torvalds * this function */ 11531da177e4SLinus Torvalds 1154991528d7SVenkatesh Pallipadi /* Zero initialize all the C-states info. */ 1155991528d7SVenkatesh Pallipadi memset(pr->power.states, 0, sizeof(pr->power.states)); 1156991528d7SVenkatesh Pallipadi 11571da177e4SLinus Torvalds result = acpi_processor_get_power_info_cst(pr); 11586d93c648SVenkatesh Pallipadi if (result == -ENODEV) 1159c5a114f1SDarrick J. 
/*
 * seq_file show callback for /proc/acpi/processor/.../power: dumps
 * the active state, global limits, and a one-line summary per
 * C-state (type, promotion/demotion targets, latency, usage counts).
 * The exact output format is consumed by userspace tools, so it must
 * not change.  Always returns 0.
 */
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;


	if (!pr)
		goto end;

	/* pointer difference gives the active state's index (0 if none) */
	seq_printf(seq, "active state: C%zd\n"
		   "max_cstate: C%d\n"
		   "bus master activity: %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		/* '*' marks the currently active state */
		seq_printf(seq, " %cC%d: ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

      end:
	return 0;
}
12601da177e4SLinus Torvalds 12611da177e4SLinus Torvalds end: 1262d550d98dSPatrick Mochel return 0; 12631da177e4SLinus Torvalds } 12641da177e4SLinus Torvalds 12651da177e4SLinus Torvalds static int acpi_processor_power_open_fs(struct inode *inode, struct file *file) 12661da177e4SLinus Torvalds { 12671da177e4SLinus Torvalds return single_open(file, acpi_processor_power_seq_show, 12681da177e4SLinus Torvalds PDE(inode)->data); 12691da177e4SLinus Torvalds } 12701da177e4SLinus Torvalds 1271d7508032SArjan van de Ven static const struct file_operations acpi_processor_power_fops = { 12721da177e4SLinus Torvalds .open = acpi_processor_power_open_fs, 12731da177e4SLinus Torvalds .read = seq_read, 12741da177e4SLinus Torvalds .llseek = seq_lseek, 12751da177e4SLinus Torvalds .release = single_release, 12761da177e4SLinus Torvalds }; 12771da177e4SLinus Torvalds 12784f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE 12794f86d3a8SLen Brown 12804f86d3a8SLen Brown int acpi_processor_cst_has_changed(struct acpi_processor *pr) 12814f86d3a8SLen Brown { 12824f86d3a8SLen Brown int result = 0; 12834f86d3a8SLen Brown 12844f86d3a8SLen Brown 12854f86d3a8SLen Brown if (!pr) 12864f86d3a8SLen Brown return -EINVAL; 12874f86d3a8SLen Brown 12884f86d3a8SLen Brown if (nocst) { 12894f86d3a8SLen Brown return -ENODEV; 12904f86d3a8SLen Brown } 12914f86d3a8SLen Brown 12924f86d3a8SLen Brown if (!pr->flags.power_setup_done) 12934f86d3a8SLen Brown return -ENODEV; 12944f86d3a8SLen Brown 12954f86d3a8SLen Brown /* Fall back to the default idle loop */ 12964f86d3a8SLen Brown pm_idle = pm_idle_save; 12974f86d3a8SLen Brown synchronize_sched(); /* Relies on interrupts forcing exit from idle. 
 */

	/* Re-probe power info; only re-install the ACPI idle handler if
	 * the re-probe found usable states and setup is still done. */
	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return result;
}

#ifdef CONFIG_SMP
/* IPI handler: the interrupt itself kicks the CPU out of its C-state */
static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 0, 1);
	return NOTIFY_OK;
}

/* registered against PM_QOS_CPU_DMA_LATENCY in acpi_processor_power_init() */
static struct notifier_block acpi_processor_latency_notifier = {
	.notifier_call = acpi_processor_latency_notify,
};

#endif

#else /* CONFIG_CPU_IDLE */

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 *
 * Reads (and, if set, clears) the sticky BM_STS status bit; returns
 * non-zero when bus-master activity was seen since the last check.
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		/* poll the BMIDEA bit (bit 0) of both IDE channels' status */
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

/**
 * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
 * @pr: the processor
 * @target: the new target state
 *
 * BM_RLD is set only while in C3 so that bus-master requests reload us
 * out of it; pr->flags.bm_rld_set caches the bit to avoid redundant
 * register writes.
 */
static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
					   struct acpi_processor_cx *target)
{
	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
		pr->flags.bm_rld_set = 0;
	}

	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		pr->flags.bm_rld_set = 1;
	}
}

/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		int unused;
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}

/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
14064f86d3a8SLen Brown */ 14074f86d3a8SLen Brown static int acpi_idle_enter_c1(struct cpuidle_device *dev, 14084f86d3a8SLen Brown struct cpuidle_state *state) 14094f86d3a8SLen Brown { 14109b12e18cSvenkatesh.pallipadi@intel.com u32 t1, t2; 14114f86d3a8SLen Brown struct acpi_processor *pr; 14124f86d3a8SLen Brown struct acpi_processor_cx *cx = cpuidle_get_statedata(state); 14139b12e18cSvenkatesh.pallipadi@intel.com 14144f86d3a8SLen Brown pr = processors[smp_processor_id()]; 14154f86d3a8SLen Brown 14164f86d3a8SLen Brown if (unlikely(!pr)) 14174f86d3a8SLen Brown return 0; 14184f86d3a8SLen Brown 14192e906655Svenkatesh.pallipadi@intel.com local_irq_disable(); 14204f86d3a8SLen Brown if (pr->flags.bm_check) 14214f86d3a8SLen Brown acpi_idle_update_bm_rld(pr, cx); 14224f86d3a8SLen Brown 14239b12e18cSvenkatesh.pallipadi@intel.com t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); 1424bc71bec9Svenkatesh.pallipadi@intel.com acpi_idle_do_entry(cx); 14259b12e18cSvenkatesh.pallipadi@intel.com t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); 14264f86d3a8SLen Brown 14272e906655Svenkatesh.pallipadi@intel.com local_irq_enable(); 14284f86d3a8SLen Brown cx->usage++; 14294f86d3a8SLen Brown 14309b12e18cSvenkatesh.pallipadi@intel.com return ticks_elapsed_in_us(t1, t2); 14314f86d3a8SLen Brown } 14324f86d3a8SLen Brown 14334f86d3a8SLen Brown /** 14344f86d3a8SLen Brown * acpi_idle_enter_simple - enters an ACPI state without BM handling 14354f86d3a8SLen Brown * @dev: the target CPU 14364f86d3a8SLen Brown * @state: the state data 14374f86d3a8SLen Brown */ 14384f86d3a8SLen Brown static int acpi_idle_enter_simple(struct cpuidle_device *dev, 14394f86d3a8SLen Brown struct cpuidle_state *state) 14404f86d3a8SLen Brown { 14414f86d3a8SLen Brown struct acpi_processor *pr; 14424f86d3a8SLen Brown struct acpi_processor_cx *cx = cpuidle_get_statedata(state); 14434f86d3a8SLen Brown u32 t1, t2; 144450629118SVenkatesh Pallipadi int sleep_ticks = 0; 144550629118SVenkatesh Pallipadi 14464f86d3a8SLen Brown pr = 
processors[smp_processor_id()]; 14474f86d3a8SLen Brown 14484f86d3a8SLen Brown if (unlikely(!pr)) 14494f86d3a8SLen Brown return 0; 14504f86d3a8SLen Brown 1451e196441bSLen Brown if (acpi_idle_suspend) 1452e196441bSLen Brown return(acpi_idle_enter_c1(dev, state)); 1453e196441bSLen Brown 14544f86d3a8SLen Brown local_irq_disable(); 14554f86d3a8SLen Brown current_thread_info()->status &= ~TS_POLLING; 14564f86d3a8SLen Brown /* 14574f86d3a8SLen Brown * TS_POLLING-cleared state must be visible before we test 14584f86d3a8SLen Brown * NEED_RESCHED: 14594f86d3a8SLen Brown */ 14604f86d3a8SLen Brown smp_mb(); 14614f86d3a8SLen Brown 14624f86d3a8SLen Brown if (unlikely(need_resched())) { 14634f86d3a8SLen Brown current_thread_info()->status |= TS_POLLING; 14644f86d3a8SLen Brown local_irq_enable(); 14654f86d3a8SLen Brown return 0; 14664f86d3a8SLen Brown } 14674f86d3a8SLen Brown 1468bde6f5f5SVenki Pallipadi acpi_unlazy_tlb(smp_processor_id()); 1469e17bcb43SThomas Gleixner /* 1470e17bcb43SThomas Gleixner * Must be done before busmaster disable as we might need to 1471e17bcb43SThomas Gleixner * access HPET ! 
1472e17bcb43SThomas Gleixner */ 1473e17bcb43SThomas Gleixner acpi_state_timer_broadcast(pr, cx, 1); 1474e17bcb43SThomas Gleixner 1475e17bcb43SThomas Gleixner if (pr->flags.bm_check) 1476e17bcb43SThomas Gleixner acpi_idle_update_bm_rld(pr, cx); 1477e17bcb43SThomas Gleixner 14784f86d3a8SLen Brown if (cx->type == ACPI_STATE_C3) 14794f86d3a8SLen Brown ACPI_FLUSH_CPU_CACHE(); 14804f86d3a8SLen Brown 14814f86d3a8SLen Brown t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); 148250629118SVenkatesh Pallipadi /* Tell the scheduler that we are going deep-idle: */ 148350629118SVenkatesh Pallipadi sched_clock_idle_sleep_event(); 14844f86d3a8SLen Brown acpi_idle_do_entry(cx); 14854f86d3a8SLen Brown t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); 14864f86d3a8SLen Brown 14874f86d3a8SLen Brown #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) 14884f86d3a8SLen Brown /* TSC could halt in idle, so notify users */ 1489ddb25f9aSAndi Kleen if (tsc_halts_in_c(cx->type)) 14904f86d3a8SLen Brown mark_tsc_unstable("TSC halts in idle");; 14914f86d3a8SLen Brown #endif 149250629118SVenkatesh Pallipadi sleep_ticks = ticks_elapsed(t1, t2); 149350629118SVenkatesh Pallipadi 149450629118SVenkatesh Pallipadi /* Tell the scheduler how much we idled: */ 149550629118SVenkatesh Pallipadi sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); 14964f86d3a8SLen Brown 14974f86d3a8SLen Brown local_irq_enable(); 14984f86d3a8SLen Brown current_thread_info()->status |= TS_POLLING; 14994f86d3a8SLen Brown 15004f86d3a8SLen Brown cx->usage++; 15014f86d3a8SLen Brown 15024f86d3a8SLen Brown acpi_state_timer_broadcast(pr, cx, 0); 150350629118SVenkatesh Pallipadi cx->time += sleep_ticks; 15044f86d3a8SLen Brown return ticks_elapsed_in_us(t1, t2); 15054f86d3a8SLen Brown } 15064f86d3a8SLen Brown 15074f86d3a8SLen Brown static int c3_cpu_count; 15084f86d3a8SLen Brown static DEFINE_SPINLOCK(c3_lock); 15094f86d3a8SLen Brown 15104f86d3a8SLen Brown /** 15114f86d3a8SLen Brown * acpi_idle_enter_bm - enters C3 with 
 proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	/* during suspend/resume only C1 entry is safe */
	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	/* bus-master activity seen: back off to the shallower safe state */
	if (acpi_idle_bm_check()) {
		if (dev->safe_state) {
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	acpi_idle_update_bm_rld(pr, cx);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	/* bracket the entry with PM-timer reads to measure residency */
	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(ACPI_STATE_C3))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);
	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 *
 * Translates the verified ACPI C-states into cpuidle_state entries on
 * pr->power.dev, picking the appropriate enter callback per state type.
 * Returns 0 on success, -EINVAL if setup was not done or no state is
 * usable.
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
	int i, count = 0;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		/* skip >C1 states on SMP without _CST unless FADT says C2
		 * is MP-safe */
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		cpuidle_set_statedata(state, cx);

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		state->exit_latency = cx->latency;
		/* heuristic: require ~6x the exit latency of residency */
		state->target_residency = cx->latency * 6;
		state->power_usage = cx->power;

		state->flags = 0;
		switch (cx->type) {
			case ACPI_STATE_C1:
			state->flags |= CPUIDLE_FLAG_SHALLOW;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_c1;
			/* deepest state safe to fall back to on BM activity */
			dev->safe_state = state;
			break;

			case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_BALANCED;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			dev->safe_state = state;
			break;

			case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_DEEP;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->flags |= CPUIDLE_FLAG_CHECK_BM;
			state->enter = pr->flags.bm_check ?
				acpi_idle_enter_bm :
				acpi_idle_enter_simple;
			break;
		}

		count++;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

/*
 * cpuidle variant of the _CST-change handler: pause cpuidle, re-probe
 * the power info, rebuild the state table and re-enable the device.
 */
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int ret;

	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	cpuidle_pause_and_lock();
	cpuidle_disable_device(&pr->power.dev);
	acpi_processor_get_power_info(pr);
	acpi_processor_setup_cpuidle(pr);
	ret = cpuidle_enable_device(&pr->power.dev);
	cpuidle_resume_and_unlock();

	return ret;
}

#endif /* CONFIG_CPU_IDLE */

/*
 * Per-CPU power-management init: one-time DMI/max_cstate setup, _CST
 * handshake with the BIOS, power-info probe, idle-handler or cpuidle
 * registration, and creation of the /proc 'power' entry.
 */
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;


	if
 (!first_run) {
		/* one-time, system-wide setup on the first CPU */
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
		pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
				&acpi_processor_latency_notifier);
#endif
	}

	if (!pr)
		return -EINVAL;

	/* tell the BIOS (via the FADT SMI command port) that the OS
	 * supports _CST, unless disabled with the "nocst" option */
	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if ((pr->flags.power) && (!boot_option_idle_override)) {
#ifdef CONFIG_CPU_IDLE
		acpi_processor_setup_cpuidle(pr);
		pr->power.dev.cpu = pr->id;
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;
#endif

		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");

#ifndef CONFIG_CPU_IDLE
		/* legacy path: install the global idle handler once,
		 * from CPU0 */
		if (pr->id == 0) {
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
#endif
	}

	/* 'power' [R] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  S_IRUGO, acpi_device_dir(device));
	if (!entry)
		return -EIO;
	else {
		entry->proc_fops = &acpi_processor_power_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	return 0;
}

/*
 * Per-CPU power-management teardown: unregister from cpuidle (or
 * restore the saved pm_idle handler on the legacy path) and remove
 * the /proc entry.  Always returns 0.
 */
int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
#ifdef CONFIG_CPU_IDLE
	if ((pr->flags.power) && (!boot_option_idle_override))
		cpuidle_unregister_device(&pr->power.dev);
#endif
	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));

#ifndef CONFIG_CPU_IDLE

	/* Unregister the idle handler when processor #0 is removed. */
	if (pr->id == 0) {
		pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm callback
		 * (pm_idle), Wait for all processors to update cached/local
		 * copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
#ifdef CONFIG_SMP
		pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
				&acpi_processor_latency_notifier);
#endif
	}
#endif

	return 0;
}