11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * processor_idle - idle state submodule to the ACPI processor driver 31da177e4SLinus Torvalds * 41da177e4SLinus Torvalds * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 51da177e4SLinus Torvalds * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 6c5ab81caSDominik Brodowski * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de> 71da177e4SLinus Torvalds * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 81da177e4SLinus Torvalds * - Added processor hotplug support 902df8b93SVenkatesh Pallipadi * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> 1002df8b93SVenkatesh Pallipadi * - Added support for C3 on SMP 111da177e4SLinus Torvalds * 121da177e4SLinus Torvalds * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 131da177e4SLinus Torvalds * 141da177e4SLinus Torvalds * This program is free software; you can redistribute it and/or modify 151da177e4SLinus Torvalds * it under the terms of the GNU General Public License as published by 161da177e4SLinus Torvalds * the Free Software Foundation; either version 2 of the License, or (at 171da177e4SLinus Torvalds * your option) any later version. 181da177e4SLinus Torvalds * 191da177e4SLinus Torvalds * This program is distributed in the hope that it will be useful, but 201da177e4SLinus Torvalds * WITHOUT ANY WARRANTY; without even the implied warranty of 211da177e4SLinus Torvalds * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 221da177e4SLinus Torvalds * General Public License for more details. 231da177e4SLinus Torvalds * 241da177e4SLinus Torvalds * You should have received a copy of the GNU General Public License along 251da177e4SLinus Torvalds * with this program; if not, write to the Free Software Foundation, Inc., 261da177e4SLinus Torvalds * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
271da177e4SLinus Torvalds * 281da177e4SLinus Torvalds * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 291da177e4SLinus Torvalds */ 301da177e4SLinus Torvalds 311da177e4SLinus Torvalds #include <linux/kernel.h> 321da177e4SLinus Torvalds #include <linux/module.h> 331da177e4SLinus Torvalds #include <linux/init.h> 341da177e4SLinus Torvalds #include <linux/cpufreq.h> 351da177e4SLinus Torvalds #include <linux/proc_fs.h> 361da177e4SLinus Torvalds #include <linux/seq_file.h> 371da177e4SLinus Torvalds #include <linux/acpi.h> 381da177e4SLinus Torvalds #include <linux/dmi.h> 391da177e4SLinus Torvalds #include <linux/moduleparam.h> 404e57b681STim Schmielau #include <linux/sched.h> /* need_resched() */ 415c87579eSArjan van de Ven #include <linux/latency.h> 42e9e2cdb4SThomas Gleixner #include <linux/clockchips.h> 434f86d3a8SLen Brown #include <linux/cpuidle.h> 441da177e4SLinus Torvalds 453434933bSThomas Gleixner /* 463434933bSThomas Gleixner * Include the apic definitions for x86 to have the APIC timer related defines 473434933bSThomas Gleixner * available also for UP (on SMP it gets magically included via linux/smp.h). 483434933bSThomas Gleixner * asm/acpi.h is not an option, as it would require more include magic. Also 493434933bSThomas Gleixner * creating an empty asm-ia64/apic.h would just trade pest vs. cholera. 
503434933bSThomas Gleixner */ 513434933bSThomas Gleixner #ifdef CONFIG_X86 523434933bSThomas Gleixner #include <asm/apic.h> 533434933bSThomas Gleixner #endif 543434933bSThomas Gleixner 551da177e4SLinus Torvalds #include <asm/io.h> 561da177e4SLinus Torvalds #include <asm/uaccess.h> 571da177e4SLinus Torvalds 581da177e4SLinus Torvalds #include <acpi/acpi_bus.h> 591da177e4SLinus Torvalds #include <acpi/processor.h> 601da177e4SLinus Torvalds 611da177e4SLinus Torvalds #define ACPI_PROCESSOR_COMPONENT 0x01000000 621da177e4SLinus Torvalds #define ACPI_PROCESSOR_CLASS "processor" 631da177e4SLinus Torvalds #define _COMPONENT ACPI_PROCESSOR_COMPONENT 64f52fd66dSLen Brown ACPI_MODULE_NAME("processor_idle"); 651da177e4SLinus Torvalds #define ACPI_PROCESSOR_FILE_POWER "power" 661da177e4SLinus Torvalds #define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000) 672aa44d05SIngo Molnar #define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY) 684f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE 691da177e4SLinus Torvalds #define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */ 701da177e4SLinus Torvalds #define C3_OVERHEAD 4 /* 1us (3.579 ticks per us) */ 71b6835052SAndreas Mohr static void (*pm_idle_save) (void) __read_mostly; 724f86d3a8SLen Brown #else 734f86d3a8SLen Brown #define C2_OVERHEAD 1 /* 1us */ 744f86d3a8SLen Brown #define C3_OVERHEAD 1 /* 1us */ 754f86d3a8SLen Brown #endif 764f86d3a8SLen Brown #define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000)) 771da177e4SLinus Torvalds 784f86d3a8SLen Brown static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER; 795b3f0e6cSVenki Pallipadi #ifdef CONFIG_CPU_IDLE 804f86d3a8SLen Brown module_param(max_cstate, uint, 0000); 815b3f0e6cSVenki Pallipadi #else 825b3f0e6cSVenki Pallipadi module_param(max_cstate, uint, 0644); 835b3f0e6cSVenki Pallipadi #endif 84b6835052SAndreas Mohr static unsigned int nocst __read_mostly; 851da177e4SLinus Torvalds module_param(nocst, uint, 0000); 861da177e4SLinus Torvalds 
874f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE 881da177e4SLinus Torvalds /* 891da177e4SLinus Torvalds * bm_history -- bit-mask with a bit per jiffy of bus-master activity 901da177e4SLinus Torvalds * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms 911da177e4SLinus Torvalds * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms 921da177e4SLinus Torvalds * 100 HZ: 0x0000000F: 4 jiffies = 40ms 931da177e4SLinus Torvalds * reduce history for more aggressive entry into C3 941da177e4SLinus Torvalds */ 95b6835052SAndreas Mohr static unsigned int bm_history __read_mostly = 964be44fcdSLen Brown (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1)); 971da177e4SLinus Torvalds module_param(bm_history, uint, 0644); 984f86d3a8SLen Brown 994f86d3a8SLen Brown static int acpi_processor_set_power_policy(struct acpi_processor *pr); 1004f86d3a8SLen Brown 1014f86d3a8SLen Brown #endif 1021da177e4SLinus Torvalds 1031da177e4SLinus Torvalds /* 1041da177e4SLinus Torvalds * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3. 1051da177e4SLinus Torvalds * For now disable this. Probably a bug somewhere else. 1061da177e4SLinus Torvalds * 1071da177e4SLinus Torvalds * To skip this limit, boot/load with a large max_cstate limit. 1081da177e4SLinus Torvalds */ 1091855256cSJeff Garzik static int set_max_cstate(const struct dmi_system_id *id) 1101da177e4SLinus Torvalds { 1111da177e4SLinus Torvalds if (max_cstate > ACPI_PROCESSOR_MAX_POWER) 1121da177e4SLinus Torvalds return 0; 1131da177e4SLinus Torvalds 1143d35600aSLen Brown printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate." 
1151da177e4SLinus Torvalds " Override with \"processor.max_cstate=%d\"\n", id->ident, 1163d35600aSLen Brown (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1); 1171da177e4SLinus Torvalds 1183d35600aSLen Brown max_cstate = (long)id->driver_data; 1191da177e4SLinus Torvalds 1201da177e4SLinus Torvalds return 0; 1211da177e4SLinus Torvalds } 1221da177e4SLinus Torvalds 1237ded5689SAshok Raj /* Actually this shouldn't be __cpuinitdata, would be better to fix the 1247ded5689SAshok Raj callers to only run once -AK */ 1257ded5689SAshok Raj static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = { 126335f16beSDavid Shaohua Li { set_max_cstate, "IBM ThinkPad R40e", { 127876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 128f831335dSBartlomiej Swiercz DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1}, 129f831335dSBartlomiej Swiercz { set_max_cstate, "IBM ThinkPad R40e", { 130f831335dSBartlomiej Swiercz DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 131876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1}, 132876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 133876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 134876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void*)1}, 135876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 136876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 137876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void*)1}, 138876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 139876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 140876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void*)1}, 141876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 142876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 143876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void*)1}, 144876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 145876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 
146876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void*)1}, 147876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 148876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 149876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void*)1}, 150876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 151876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 152876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void*)1}, 153876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 154876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 155876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void*)1}, 156876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 157876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 158876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void*)1}, 159876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 160876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 161876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void*)1}, 162876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 163876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 164876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void*)1}, 165876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 166876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 167876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void*)1}, 168876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 169876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 170876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void*)1}, 171876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 172876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 173876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void*)1}, 174335f16beSDavid Shaohua Li { set_max_cstate, "Medion 41700", { 
175876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 176876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1}, 177335f16beSDavid Shaohua Li { set_max_cstate, "Clevo 5600D", { 178876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 179876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, 180335f16beSDavid Shaohua Li (void *)2}, 1811da177e4SLinus Torvalds {}, 1821da177e4SLinus Torvalds }; 1831da177e4SLinus Torvalds 1844be44fcdSLen Brown static inline u32 ticks_elapsed(u32 t1, u32 t2) 1851da177e4SLinus Torvalds { 1861da177e4SLinus Torvalds if (t2 >= t1) 1871da177e4SLinus Torvalds return (t2 - t1); 188cee324b1SAlexey Starikovskiy else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER)) 1891da177e4SLinus Torvalds return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF); 1901da177e4SLinus Torvalds else 1911da177e4SLinus Torvalds return ((0xFFFFFFFF - t1) + t2); 1921da177e4SLinus Torvalds } 1931da177e4SLinus Torvalds 1944f86d3a8SLen Brown static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2) 1954f86d3a8SLen Brown { 1964f86d3a8SLen Brown if (t2 >= t1) 1974f86d3a8SLen Brown return PM_TIMER_TICKS_TO_US(t2 - t1); 1984f86d3a8SLen Brown else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER)) 1994f86d3a8SLen Brown return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF); 2004f86d3a8SLen Brown else 2014f86d3a8SLen Brown return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2); 2024f86d3a8SLen Brown } 2034f86d3a8SLen Brown 204ddc081a1SVenkatesh Pallipadi static void acpi_safe_halt(void) 205ddc081a1SVenkatesh Pallipadi { 206ddc081a1SVenkatesh Pallipadi current_thread_info()->status &= ~TS_POLLING; 207ddc081a1SVenkatesh Pallipadi /* 208ddc081a1SVenkatesh Pallipadi * TS_POLLING-cleared state must be visible before we 209ddc081a1SVenkatesh Pallipadi * test NEED_RESCHED: 210ddc081a1SVenkatesh Pallipadi */ 211ddc081a1SVenkatesh Pallipadi smp_mb(); 212ddc081a1SVenkatesh Pallipadi if 
(!need_resched()) 213ddc081a1SVenkatesh Pallipadi safe_halt(); 214ddc081a1SVenkatesh Pallipadi current_thread_info()->status |= TS_POLLING; 215ddc081a1SVenkatesh Pallipadi } 216ddc081a1SVenkatesh Pallipadi 2174f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE 2184f86d3a8SLen Brown 2191da177e4SLinus Torvalds static void 2204be44fcdSLen Brown acpi_processor_power_activate(struct acpi_processor *pr, 2211da177e4SLinus Torvalds struct acpi_processor_cx *new) 2221da177e4SLinus Torvalds { 2231da177e4SLinus Torvalds struct acpi_processor_cx *old; 2241da177e4SLinus Torvalds 2251da177e4SLinus Torvalds if (!pr || !new) 2261da177e4SLinus Torvalds return; 2271da177e4SLinus Torvalds 2281da177e4SLinus Torvalds old = pr->power.state; 2291da177e4SLinus Torvalds 2301da177e4SLinus Torvalds if (old) 2311da177e4SLinus Torvalds old->promotion.count = 0; 2321da177e4SLinus Torvalds new->demotion.count = 0; 2331da177e4SLinus Torvalds 2341da177e4SLinus Torvalds /* Cleanup from old state. */ 2351da177e4SLinus Torvalds if (old) { 2361da177e4SLinus Torvalds switch (old->type) { 2371da177e4SLinus Torvalds case ACPI_STATE_C3: 2381da177e4SLinus Torvalds /* Disable bus master reload */ 23902df8b93SVenkatesh Pallipadi if (new->type != ACPI_STATE_C3 && pr->flags.bm_check) 240d8c71b6dSBob Moore acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); 2411da177e4SLinus Torvalds break; 2421da177e4SLinus Torvalds } 2431da177e4SLinus Torvalds } 2441da177e4SLinus Torvalds 2451da177e4SLinus Torvalds /* Prepare to use new state. 
*/ 2461da177e4SLinus Torvalds switch (new->type) { 2471da177e4SLinus Torvalds case ACPI_STATE_C3: 2481da177e4SLinus Torvalds /* Enable bus master reload */ 24902df8b93SVenkatesh Pallipadi if (old->type != ACPI_STATE_C3 && pr->flags.bm_check) 250d8c71b6dSBob Moore acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1); 2511da177e4SLinus Torvalds break; 2521da177e4SLinus Torvalds } 2531da177e4SLinus Torvalds 2541da177e4SLinus Torvalds pr->power.state = new; 2551da177e4SLinus Torvalds 2561da177e4SLinus Torvalds return; 2571da177e4SLinus Torvalds } 2581da177e4SLinus Torvalds 25902df8b93SVenkatesh Pallipadi static atomic_t c3_cpu_count; 26002df8b93SVenkatesh Pallipadi 261991528d7SVenkatesh Pallipadi /* Common C-state entry for C2, C3, .. */ 262991528d7SVenkatesh Pallipadi static void acpi_cstate_enter(struct acpi_processor_cx *cstate) 263991528d7SVenkatesh Pallipadi { 264991528d7SVenkatesh Pallipadi if (cstate->space_id == ACPI_CSTATE_FFH) { 265991528d7SVenkatesh Pallipadi /* Call into architectural FFH based C-state */ 266991528d7SVenkatesh Pallipadi acpi_processor_ffh_cstate_enter(cstate); 267991528d7SVenkatesh Pallipadi } else { 268991528d7SVenkatesh Pallipadi int unused; 269991528d7SVenkatesh Pallipadi /* IO port based C-state */ 270991528d7SVenkatesh Pallipadi inb(cstate->address); 271991528d7SVenkatesh Pallipadi /* Dummy wait op - must do something useless after P_LVL2 read 272991528d7SVenkatesh Pallipadi because chipsets cannot guarantee that STPCLK# signal 273991528d7SVenkatesh Pallipadi gets asserted in time to freeze execution properly. 
*/ 274cee324b1SAlexey Starikovskiy unused = inl(acpi_gbl_FADT.xpm_timer_block.address); 275991528d7SVenkatesh Pallipadi } 276991528d7SVenkatesh Pallipadi } 2774f86d3a8SLen Brown #endif /* !CONFIG_CPU_IDLE */ 278991528d7SVenkatesh Pallipadi 279169a0abbSThomas Gleixner #ifdef ARCH_APICTIMER_STOPS_ON_C3 280169a0abbSThomas Gleixner 281169a0abbSThomas Gleixner /* 282169a0abbSThomas Gleixner * Some BIOS implementations switch to C3 in the published C2 state. 283296d93cdSLinus Torvalds * This seems to be a common problem on AMD boxen, but other vendors 284296d93cdSLinus Torvalds * are affected too. We pick the most conservative approach: we assume 285296d93cdSLinus Torvalds * that the local APIC stops in both C2 and C3. 286169a0abbSThomas Gleixner */ 287169a0abbSThomas Gleixner static void acpi_timer_check_state(int state, struct acpi_processor *pr, 288169a0abbSThomas Gleixner struct acpi_processor_cx *cx) 289169a0abbSThomas Gleixner { 290169a0abbSThomas Gleixner struct acpi_processor_power *pwr = &pr->power; 291e585bef8SThomas Gleixner u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2; 292169a0abbSThomas Gleixner 293169a0abbSThomas Gleixner /* 294169a0abbSThomas Gleixner * Check, if one of the previous states already marked the lapic 295169a0abbSThomas Gleixner * unstable 296169a0abbSThomas Gleixner */ 297169a0abbSThomas Gleixner if (pwr->timer_broadcast_on_state < state) 298169a0abbSThomas Gleixner return; 299169a0abbSThomas Gleixner 300e585bef8SThomas Gleixner if (cx->type >= type) 301169a0abbSThomas Gleixner pr->power.timer_broadcast_on_state = state; 302169a0abbSThomas Gleixner } 303169a0abbSThomas Gleixner 304169a0abbSThomas Gleixner static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) 305169a0abbSThomas Gleixner { 306e9e2cdb4SThomas Gleixner unsigned long reason; 307e9e2cdb4SThomas Gleixner 308e9e2cdb4SThomas Gleixner reason = pr->power.timer_broadcast_on_state < INT_MAX ? 
309e9e2cdb4SThomas Gleixner CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF; 310e9e2cdb4SThomas Gleixner 311e9e2cdb4SThomas Gleixner clockevents_notify(reason, &pr->id); 312e9e2cdb4SThomas Gleixner } 313e9e2cdb4SThomas Gleixner 314e9e2cdb4SThomas Gleixner /* Power(C) State timer broadcast control */ 315e9e2cdb4SThomas Gleixner static void acpi_state_timer_broadcast(struct acpi_processor *pr, 316e9e2cdb4SThomas Gleixner struct acpi_processor_cx *cx, 317e9e2cdb4SThomas Gleixner int broadcast) 318e9e2cdb4SThomas Gleixner { 319e9e2cdb4SThomas Gleixner int state = cx - pr->power.states; 320e9e2cdb4SThomas Gleixner 321e9e2cdb4SThomas Gleixner if (state >= pr->power.timer_broadcast_on_state) { 322e9e2cdb4SThomas Gleixner unsigned long reason; 323e9e2cdb4SThomas Gleixner 324e9e2cdb4SThomas Gleixner reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER : 325e9e2cdb4SThomas Gleixner CLOCK_EVT_NOTIFY_BROADCAST_EXIT; 326e9e2cdb4SThomas Gleixner clockevents_notify(reason, &pr->id); 327e9e2cdb4SThomas Gleixner } 328169a0abbSThomas Gleixner } 329169a0abbSThomas Gleixner 330169a0abbSThomas Gleixner #else 331169a0abbSThomas Gleixner 332169a0abbSThomas Gleixner static void acpi_timer_check_state(int state, struct acpi_processor *pr, 333169a0abbSThomas Gleixner struct acpi_processor_cx *cstate) { } 334169a0abbSThomas Gleixner static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { } 335e9e2cdb4SThomas Gleixner static void acpi_state_timer_broadcast(struct acpi_processor *pr, 336e9e2cdb4SThomas Gleixner struct acpi_processor_cx *cx, 337e9e2cdb4SThomas Gleixner int broadcast) 338e9e2cdb4SThomas Gleixner { 339e9e2cdb4SThomas Gleixner } 340169a0abbSThomas Gleixner 341169a0abbSThomas Gleixner #endif 342169a0abbSThomas Gleixner 343b04e7bdbSThomas Gleixner /* 344b04e7bdbSThomas Gleixner * Suspend / resume control 345b04e7bdbSThomas Gleixner */ 346b04e7bdbSThomas Gleixner static int acpi_idle_suspend; 347b04e7bdbSThomas Gleixner 348b04e7bdbSThomas Gleixner int 
acpi_processor_suspend(struct acpi_device * device, pm_message_t state) 349b04e7bdbSThomas Gleixner { 350b04e7bdbSThomas Gleixner acpi_idle_suspend = 1; 351b04e7bdbSThomas Gleixner return 0; 352b04e7bdbSThomas Gleixner } 353b04e7bdbSThomas Gleixner 354b04e7bdbSThomas Gleixner int acpi_processor_resume(struct acpi_device * device) 355b04e7bdbSThomas Gleixner { 356b04e7bdbSThomas Gleixner acpi_idle_suspend = 0; 357b04e7bdbSThomas Gleixner return 0; 358b04e7bdbSThomas Gleixner } 359b04e7bdbSThomas Gleixner 3604f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE 3611da177e4SLinus Torvalds static void acpi_processor_idle(void) 3621da177e4SLinus Torvalds { 3631da177e4SLinus Torvalds struct acpi_processor *pr = NULL; 3641da177e4SLinus Torvalds struct acpi_processor_cx *cx = NULL; 3651da177e4SLinus Torvalds struct acpi_processor_cx *next_state = NULL; 3661da177e4SLinus Torvalds int sleep_ticks = 0; 3671da177e4SLinus Torvalds u32 t1, t2 = 0; 3681da177e4SLinus Torvalds 3691da177e4SLinus Torvalds /* 3701da177e4SLinus Torvalds * Interrupts must be disabled during bus mastering calculations and 3711da177e4SLinus Torvalds * for C2/C3 transitions. 
3721da177e4SLinus Torvalds */ 3731da177e4SLinus Torvalds local_irq_disable(); 3741da177e4SLinus Torvalds 375d5a3d32aSVenkatesh Pallipadi pr = processors[smp_processor_id()]; 376d5a3d32aSVenkatesh Pallipadi if (!pr) { 377d5a3d32aSVenkatesh Pallipadi local_irq_enable(); 378d5a3d32aSVenkatesh Pallipadi return; 379d5a3d32aSVenkatesh Pallipadi } 380d5a3d32aSVenkatesh Pallipadi 3811da177e4SLinus Torvalds /* 3821da177e4SLinus Torvalds * Check whether we truly need to go idle, or should 3831da177e4SLinus Torvalds * reschedule: 3841da177e4SLinus Torvalds */ 3851da177e4SLinus Torvalds if (unlikely(need_resched())) { 3861da177e4SLinus Torvalds local_irq_enable(); 3871da177e4SLinus Torvalds return; 3881da177e4SLinus Torvalds } 3891da177e4SLinus Torvalds 3901da177e4SLinus Torvalds cx = pr->power.state; 391b04e7bdbSThomas Gleixner if (!cx || acpi_idle_suspend) { 39264c7c8f8SNick Piggin if (pm_idle_save) 39364c7c8f8SNick Piggin pm_idle_save(); 39464c7c8f8SNick Piggin else 39564c7c8f8SNick Piggin acpi_safe_halt(); 39664c7c8f8SNick Piggin return; 39764c7c8f8SNick Piggin } 3981da177e4SLinus Torvalds 3991da177e4SLinus Torvalds /* 4001da177e4SLinus Torvalds * Check BM Activity 4011da177e4SLinus Torvalds * ----------------- 4021da177e4SLinus Torvalds * Check for bus mastering activity (if required), record, and check 4031da177e4SLinus Torvalds * for demotion. 
4041da177e4SLinus Torvalds */ 4051da177e4SLinus Torvalds if (pr->flags.bm_check) { 4061da177e4SLinus Torvalds u32 bm_status = 0; 4071da177e4SLinus Torvalds unsigned long diff = jiffies - pr->power.bm_check_timestamp; 4081da177e4SLinus Torvalds 409c5ab81caSDominik Brodowski if (diff > 31) 410c5ab81caSDominik Brodowski diff = 31; 4111da177e4SLinus Torvalds 412c5ab81caSDominik Brodowski pr->power.bm_activity <<= diff; 4131da177e4SLinus Torvalds 414d8c71b6dSBob Moore acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); 4151da177e4SLinus Torvalds if (bm_status) { 416c5ab81caSDominik Brodowski pr->power.bm_activity |= 0x1; 417d8c71b6dSBob Moore acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1); 4181da177e4SLinus Torvalds } 4191da177e4SLinus Torvalds /* 4201da177e4SLinus Torvalds * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect 4211da177e4SLinus Torvalds * the true state of bus mastering activity; forcing us to 4221da177e4SLinus Torvalds * manually check the BMIDEA bit of each IDE channel. 4231da177e4SLinus Torvalds */ 4241da177e4SLinus Torvalds else if (errata.piix4.bmisx) { 4251da177e4SLinus Torvalds if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) 4261da177e4SLinus Torvalds || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01)) 427c5ab81caSDominik Brodowski pr->power.bm_activity |= 0x1; 4281da177e4SLinus Torvalds } 4291da177e4SLinus Torvalds 4301da177e4SLinus Torvalds pr->power.bm_check_timestamp = jiffies; 4311da177e4SLinus Torvalds 4321da177e4SLinus Torvalds /* 433c4a001b1SDominik Brodowski * If bus mastering is or was active this jiffy, demote 4341da177e4SLinus Torvalds * to avoid a faulty transition. Note that the processor 4351da177e4SLinus Torvalds * won't enter a low-power state during this call (to this 436c4a001b1SDominik Brodowski * function) but should upon the next. 
4371da177e4SLinus Torvalds * 4381da177e4SLinus Torvalds * TBD: A better policy might be to fallback to the demotion 4391da177e4SLinus Torvalds * state (use it for this quantum only) istead of 4401da177e4SLinus Torvalds * demoting -- and rely on duration as our sole demotion 4411da177e4SLinus Torvalds * qualification. This may, however, introduce DMA 4421da177e4SLinus Torvalds * issues (e.g. floppy DMA transfer overrun/underrun). 4431da177e4SLinus Torvalds */ 444c4a001b1SDominik Brodowski if ((pr->power.bm_activity & 0x1) && 445c4a001b1SDominik Brodowski cx->demotion.threshold.bm) { 4461da177e4SLinus Torvalds local_irq_enable(); 4471da177e4SLinus Torvalds next_state = cx->demotion.state; 4481da177e4SLinus Torvalds goto end; 4491da177e4SLinus Torvalds } 4501da177e4SLinus Torvalds } 4511da177e4SLinus Torvalds 4524c033552SVenkatesh Pallipadi #ifdef CONFIG_HOTPLUG_CPU 4534c033552SVenkatesh Pallipadi /* 4544c033552SVenkatesh Pallipadi * Check for P_LVL2_UP flag before entering C2 and above on 4554c033552SVenkatesh Pallipadi * an SMP system. We do it here instead of doing it at _CST/P_LVL 4564c033552SVenkatesh Pallipadi * detection phase, to work cleanly with logical CPU hotplug. 4574c033552SVenkatesh Pallipadi */ 4584c033552SVenkatesh Pallipadi if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && 459cee324b1SAlexey Starikovskiy !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) 4601e483969SDavid Shaohua Li cx = &pr->power.states[ACPI_STATE_C1]; 4614c033552SVenkatesh Pallipadi #endif 4621e483969SDavid Shaohua Li 4631da177e4SLinus Torvalds /* 4641da177e4SLinus Torvalds * Sleep: 4651da177e4SLinus Torvalds * ------ 4661da177e4SLinus Torvalds * Invoke the current Cx state to put the processor to sleep. 
4671da177e4SLinus Torvalds */ 4682a298a35SNick Piggin if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) { 469495ab9c0SAndi Kleen current_thread_info()->status &= ~TS_POLLING; 4700888f06aSIngo Molnar /* 4710888f06aSIngo Molnar * TS_POLLING-cleared state must be visible before we 4720888f06aSIngo Molnar * test NEED_RESCHED: 4730888f06aSIngo Molnar */ 4740888f06aSIngo Molnar smp_mb(); 4752a298a35SNick Piggin if (need_resched()) { 476495ab9c0SAndi Kleen current_thread_info()->status |= TS_POLLING; 477af2eb17bSLinus Torvalds local_irq_enable(); 4782a298a35SNick Piggin return; 4792a298a35SNick Piggin } 4802a298a35SNick Piggin } 4812a298a35SNick Piggin 4821da177e4SLinus Torvalds switch (cx->type) { 4831da177e4SLinus Torvalds 4841da177e4SLinus Torvalds case ACPI_STATE_C1: 4851da177e4SLinus Torvalds /* 4861da177e4SLinus Torvalds * Invoke C1. 4871da177e4SLinus Torvalds * Use the appropriate idle routine, the one that would 4881da177e4SLinus Torvalds * be used without acpi C-states. 4891da177e4SLinus Torvalds */ 4901da177e4SLinus Torvalds if (pm_idle_save) 4911da177e4SLinus Torvalds pm_idle_save(); 4921da177e4SLinus Torvalds else 49364c7c8f8SNick Piggin acpi_safe_halt(); 49464c7c8f8SNick Piggin 4951da177e4SLinus Torvalds /* 4961da177e4SLinus Torvalds * TBD: Can't get time duration while in C1, as resumes 4971da177e4SLinus Torvalds * go to an ISR rather than here. Need to instrument 4981da177e4SLinus Torvalds * base interrupt handler. 4992aa44d05SIngo Molnar * 5002aa44d05SIngo Molnar * Note: the TSC better not stop in C1, sched_clock() will 5012aa44d05SIngo Molnar * skew otherwise. 
5021da177e4SLinus Torvalds */ 5031da177e4SLinus Torvalds sleep_ticks = 0xFFFFFFFF; 5041da177e4SLinus Torvalds break; 5051da177e4SLinus Torvalds 5061da177e4SLinus Torvalds case ACPI_STATE_C2: 5071da177e4SLinus Torvalds /* Get start time (ticks) */ 508cee324b1SAlexey Starikovskiy t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); 5092aa44d05SIngo Molnar /* Tell the scheduler that we are going deep-idle: */ 5102aa44d05SIngo Molnar sched_clock_idle_sleep_event(); 5111da177e4SLinus Torvalds /* Invoke C2 */ 512e9e2cdb4SThomas Gleixner acpi_state_timer_broadcast(pr, cx, 1); 513991528d7SVenkatesh Pallipadi acpi_cstate_enter(cx); 5141da177e4SLinus Torvalds /* Get end time (ticks) */ 515cee324b1SAlexey Starikovskiy t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); 516539eb11eSjohn stultz 5170aa366f3STony Luck #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) 518539eb11eSjohn stultz /* TSC halts in C2, so notify users */ 5195a90cf20Sjohn stultz mark_tsc_unstable("possible TSC halt in C2"); 520539eb11eSjohn stultz #endif 5212aa44d05SIngo Molnar /* Compute time (ticks) that we were actually asleep */ 5222aa44d05SIngo Molnar sleep_ticks = ticks_elapsed(t1, t2); 5232aa44d05SIngo Molnar 5242aa44d05SIngo Molnar /* Tell the scheduler how much we idled: */ 5252aa44d05SIngo Molnar sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); 5262aa44d05SIngo Molnar 5271da177e4SLinus Torvalds /* Re-enable interrupts */ 5281da177e4SLinus Torvalds local_irq_enable(); 5292aa44d05SIngo Molnar /* Do not account our idle-switching overhead: */ 5302aa44d05SIngo Molnar sleep_ticks -= cx->latency_ticks + C2_OVERHEAD; 5312aa44d05SIngo Molnar 532495ab9c0SAndi Kleen current_thread_info()->status |= TS_POLLING; 533e9e2cdb4SThomas Gleixner acpi_state_timer_broadcast(pr, cx, 0); 5341da177e4SLinus Torvalds break; 5351da177e4SLinus Torvalds 5361da177e4SLinus Torvalds case ACPI_STATE_C3: 53718eab855SVenkatesh Pallipadi /* 538e17bcb43SThomas Gleixner * Must be done before busmaster disable as we 
might 539e17bcb43SThomas Gleixner * need to access HPET ! 540e17bcb43SThomas Gleixner */ 541e17bcb43SThomas Gleixner acpi_state_timer_broadcast(pr, cx, 1); 542e17bcb43SThomas Gleixner /* 54318eab855SVenkatesh Pallipadi * disable bus master 54418eab855SVenkatesh Pallipadi * bm_check implies we need ARB_DIS 54518eab855SVenkatesh Pallipadi * !bm_check implies we need cache flush 54618eab855SVenkatesh Pallipadi * bm_control implies whether we can do ARB_DIS 54718eab855SVenkatesh Pallipadi * 54818eab855SVenkatesh Pallipadi * That leaves a case where bm_check is set and bm_control is 54918eab855SVenkatesh Pallipadi * not set. In that case we cannot do much, we enter C3 55018eab855SVenkatesh Pallipadi * without doing anything. 55118eab855SVenkatesh Pallipadi */ 55218eab855SVenkatesh Pallipadi if (pr->flags.bm_check && pr->flags.bm_control) { 55302df8b93SVenkatesh Pallipadi if (atomic_inc_return(&c3_cpu_count) == 55402df8b93SVenkatesh Pallipadi num_online_cpus()) { 55502df8b93SVenkatesh Pallipadi /* 55602df8b93SVenkatesh Pallipadi * All CPUs are trying to go to C3 55702df8b93SVenkatesh Pallipadi * Disable bus master arbitration 55802df8b93SVenkatesh Pallipadi */ 559d8c71b6dSBob Moore acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); 56002df8b93SVenkatesh Pallipadi } 56118eab855SVenkatesh Pallipadi } else if (!pr->flags.bm_check) { 56202df8b93SVenkatesh Pallipadi /* SMP with no shared cache... 
Invalidate cache */ 56302df8b93SVenkatesh Pallipadi ACPI_FLUSH_CPU_CACHE(); 56402df8b93SVenkatesh Pallipadi } 56502df8b93SVenkatesh Pallipadi 5661da177e4SLinus Torvalds /* Get start time (ticks) */ 567cee324b1SAlexey Starikovskiy t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); 5681da177e4SLinus Torvalds /* Invoke C3 */ 5692aa44d05SIngo Molnar /* Tell the scheduler that we are going deep-idle: */ 5702aa44d05SIngo Molnar sched_clock_idle_sleep_event(); 571991528d7SVenkatesh Pallipadi acpi_cstate_enter(cx); 5721da177e4SLinus Torvalds /* Get end time (ticks) */ 573cee324b1SAlexey Starikovskiy t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); 57418eab855SVenkatesh Pallipadi if (pr->flags.bm_check && pr->flags.bm_control) { 5751da177e4SLinus Torvalds /* Enable bus master arbitration */ 57602df8b93SVenkatesh Pallipadi atomic_dec(&c3_cpu_count); 577d8c71b6dSBob Moore acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); 57802df8b93SVenkatesh Pallipadi } 57902df8b93SVenkatesh Pallipadi 5800aa366f3STony Luck #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) 581539eb11eSjohn stultz /* TSC halts in C3, so notify users */ 5825a90cf20Sjohn stultz mark_tsc_unstable("TSC halts in C3"); 583539eb11eSjohn stultz #endif 5842aa44d05SIngo Molnar /* Compute time (ticks) that we were actually asleep */ 5852aa44d05SIngo Molnar sleep_ticks = ticks_elapsed(t1, t2); 5862aa44d05SIngo Molnar /* Tell the scheduler how much we idled: */ 5872aa44d05SIngo Molnar sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); 5882aa44d05SIngo Molnar 5891da177e4SLinus Torvalds /* Re-enable interrupts */ 5901da177e4SLinus Torvalds local_irq_enable(); 5912aa44d05SIngo Molnar /* Do not account our idle-switching overhead: */ 5922aa44d05SIngo Molnar sleep_ticks -= cx->latency_ticks + C3_OVERHEAD; 5932aa44d05SIngo Molnar 594495ab9c0SAndi Kleen current_thread_info()->status |= TS_POLLING; 595e9e2cdb4SThomas Gleixner acpi_state_timer_broadcast(pr, cx, 0); 5961da177e4SLinus Torvalds break; 
5971da177e4SLinus Torvalds 5981da177e4SLinus Torvalds default: 5991da177e4SLinus Torvalds local_irq_enable(); 6001da177e4SLinus Torvalds return; 6011da177e4SLinus Torvalds } 602a3c6598fSDominik Brodowski cx->usage++; 603a3c6598fSDominik Brodowski if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0)) 604a3c6598fSDominik Brodowski cx->time += sleep_ticks; 6051da177e4SLinus Torvalds 6061da177e4SLinus Torvalds next_state = pr->power.state; 6071da177e4SLinus Torvalds 6081e483969SDavid Shaohua Li #ifdef CONFIG_HOTPLUG_CPU 6091e483969SDavid Shaohua Li /* Don't do promotion/demotion */ 6101e483969SDavid Shaohua Li if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) && 611cee324b1SAlexey Starikovskiy !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) { 6121e483969SDavid Shaohua Li next_state = cx; 6131e483969SDavid Shaohua Li goto end; 6141e483969SDavid Shaohua Li } 6151e483969SDavid Shaohua Li #endif 6161e483969SDavid Shaohua Li 6171da177e4SLinus Torvalds /* 6181da177e4SLinus Torvalds * Promotion? 6191da177e4SLinus Torvalds * ---------- 6201da177e4SLinus Torvalds * Track the number of longs (time asleep is greater than threshold) 6211da177e4SLinus Torvalds * and promote when the count threshold is reached. Note that bus 6221da177e4SLinus Torvalds * mastering activity may prevent promotions. 6231da177e4SLinus Torvalds * Do not promote above max_cstate. 
6241da177e4SLinus Torvalds */ 6251da177e4SLinus Torvalds if (cx->promotion.state && 6261da177e4SLinus Torvalds ((cx->promotion.state - pr->power.states) <= max_cstate)) { 6275c87579eSArjan van de Ven if (sleep_ticks > cx->promotion.threshold.ticks && 6285c87579eSArjan van de Ven cx->promotion.state->latency <= system_latency_constraint()) { 6291da177e4SLinus Torvalds cx->promotion.count++; 6301da177e4SLinus Torvalds cx->demotion.count = 0; 6314be44fcdSLen Brown if (cx->promotion.count >= 6324be44fcdSLen Brown cx->promotion.threshold.count) { 6331da177e4SLinus Torvalds if (pr->flags.bm_check) { 6344be44fcdSLen Brown if (! 6354be44fcdSLen Brown (pr->power.bm_activity & cx-> 6364be44fcdSLen Brown promotion.threshold.bm)) { 6374be44fcdSLen Brown next_state = 6384be44fcdSLen Brown cx->promotion.state; 6391da177e4SLinus Torvalds goto end; 6401da177e4SLinus Torvalds } 6414be44fcdSLen Brown } else { 6421da177e4SLinus Torvalds next_state = cx->promotion.state; 6431da177e4SLinus Torvalds goto end; 6441da177e4SLinus Torvalds } 6451da177e4SLinus Torvalds } 6461da177e4SLinus Torvalds } 6471da177e4SLinus Torvalds } 6481da177e4SLinus Torvalds 6491da177e4SLinus Torvalds /* 6501da177e4SLinus Torvalds * Demotion? 6511da177e4SLinus Torvalds * --------- 6521da177e4SLinus Torvalds * Track the number of shorts (time asleep is less than time threshold) 6531da177e4SLinus Torvalds * and demote when the usage threshold is reached. 
6541da177e4SLinus Torvalds */ 6551da177e4SLinus Torvalds if (cx->demotion.state) { 6561da177e4SLinus Torvalds if (sleep_ticks < cx->demotion.threshold.ticks) { 6571da177e4SLinus Torvalds cx->demotion.count++; 6581da177e4SLinus Torvalds cx->promotion.count = 0; 6591da177e4SLinus Torvalds if (cx->demotion.count >= cx->demotion.threshold.count) { 6601da177e4SLinus Torvalds next_state = cx->demotion.state; 6611da177e4SLinus Torvalds goto end; 6621da177e4SLinus Torvalds } 6631da177e4SLinus Torvalds } 6641da177e4SLinus Torvalds } 6651da177e4SLinus Torvalds 6661da177e4SLinus Torvalds end: 6671da177e4SLinus Torvalds /* 6681da177e4SLinus Torvalds * Demote if current state exceeds max_cstate 6695c87579eSArjan van de Ven * or if the latency of the current state is unacceptable 6701da177e4SLinus Torvalds */ 6715c87579eSArjan van de Ven if ((pr->power.state - pr->power.states) > max_cstate || 6725c87579eSArjan van de Ven pr->power.state->latency > system_latency_constraint()) { 6731da177e4SLinus Torvalds if (cx->demotion.state) 6741da177e4SLinus Torvalds next_state = cx->demotion.state; 6751da177e4SLinus Torvalds } 6761da177e4SLinus Torvalds 6771da177e4SLinus Torvalds /* 6781da177e4SLinus Torvalds * New Cx State? 6791da177e4SLinus Torvalds * ------------- 6801da177e4SLinus Torvalds * If we're going to start using a new Cx state we must clean up 6811da177e4SLinus Torvalds * from the previous and prepare to use the new. 
6821da177e4SLinus Torvalds */ 6831da177e4SLinus Torvalds if (next_state != pr->power.state) 6841da177e4SLinus Torvalds acpi_processor_power_activate(pr, next_state); 6851da177e4SLinus Torvalds } 6861da177e4SLinus Torvalds 6874be44fcdSLen Brown static int acpi_processor_set_power_policy(struct acpi_processor *pr) 6881da177e4SLinus Torvalds { 6891da177e4SLinus Torvalds unsigned int i; 6901da177e4SLinus Torvalds unsigned int state_is_set = 0; 6911da177e4SLinus Torvalds struct acpi_processor_cx *lower = NULL; 6921da177e4SLinus Torvalds struct acpi_processor_cx *higher = NULL; 6931da177e4SLinus Torvalds struct acpi_processor_cx *cx; 6941da177e4SLinus Torvalds 6951da177e4SLinus Torvalds 6961da177e4SLinus Torvalds if (!pr) 697d550d98dSPatrick Mochel return -EINVAL; 6981da177e4SLinus Torvalds 6991da177e4SLinus Torvalds /* 7001da177e4SLinus Torvalds * This function sets the default Cx state policy (OS idle handler). 7011da177e4SLinus Torvalds * Our scheme is to promote quickly to C2 but more conservatively 7021da177e4SLinus Torvalds * to C3. We're favoring C2 for its characteristics of low latency 7031da177e4SLinus Torvalds * (quick response), good power savings, and ability to allow bus 7041da177e4SLinus Torvalds * mastering activity. Note that the Cx state policy is completely 7051da177e4SLinus Torvalds * customizable and can be altered dynamically. 
7061da177e4SLinus Torvalds */ 7071da177e4SLinus Torvalds 7081da177e4SLinus Torvalds /* startup state */ 7091da177e4SLinus Torvalds for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { 7101da177e4SLinus Torvalds cx = &pr->power.states[i]; 7111da177e4SLinus Torvalds if (!cx->valid) 7121da177e4SLinus Torvalds continue; 7131da177e4SLinus Torvalds 7141da177e4SLinus Torvalds if (!state_is_set) 7151da177e4SLinus Torvalds pr->power.state = cx; 7161da177e4SLinus Torvalds state_is_set++; 7171da177e4SLinus Torvalds break; 7181da177e4SLinus Torvalds } 7191da177e4SLinus Torvalds 7201da177e4SLinus Torvalds if (!state_is_set) 721d550d98dSPatrick Mochel return -ENODEV; 7221da177e4SLinus Torvalds 7231da177e4SLinus Torvalds /* demotion */ 7241da177e4SLinus Torvalds for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { 7251da177e4SLinus Torvalds cx = &pr->power.states[i]; 7261da177e4SLinus Torvalds if (!cx->valid) 7271da177e4SLinus Torvalds continue; 7281da177e4SLinus Torvalds 7291da177e4SLinus Torvalds if (lower) { 7301da177e4SLinus Torvalds cx->demotion.state = lower; 7311da177e4SLinus Torvalds cx->demotion.threshold.ticks = cx->latency_ticks; 7321da177e4SLinus Torvalds cx->demotion.threshold.count = 1; 7331da177e4SLinus Torvalds if (cx->type == ACPI_STATE_C3) 7341da177e4SLinus Torvalds cx->demotion.threshold.bm = bm_history; 7351da177e4SLinus Torvalds } 7361da177e4SLinus Torvalds 7371da177e4SLinus Torvalds lower = cx; 7381da177e4SLinus Torvalds } 7391da177e4SLinus Torvalds 7401da177e4SLinus Torvalds /* promotion */ 7411da177e4SLinus Torvalds for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) { 7421da177e4SLinus Torvalds cx = &pr->power.states[i]; 7431da177e4SLinus Torvalds if (!cx->valid) 7441da177e4SLinus Torvalds continue; 7451da177e4SLinus Torvalds 7461da177e4SLinus Torvalds if (higher) { 7471da177e4SLinus Torvalds cx->promotion.state = higher; 7481da177e4SLinus Torvalds cx->promotion.threshold.ticks = cx->latency_ticks; 7491da177e4SLinus Torvalds if (cx->type >= ACPI_STATE_C2) 
7501da177e4SLinus Torvalds cx->promotion.threshold.count = 4; 7511da177e4SLinus Torvalds else 7521da177e4SLinus Torvalds cx->promotion.threshold.count = 10; 7531da177e4SLinus Torvalds if (higher->type == ACPI_STATE_C3) 7541da177e4SLinus Torvalds cx->promotion.threshold.bm = bm_history; 7551da177e4SLinus Torvalds } 7561da177e4SLinus Torvalds 7571da177e4SLinus Torvalds higher = cx; 7581da177e4SLinus Torvalds } 7591da177e4SLinus Torvalds 760d550d98dSPatrick Mochel return 0; 7611da177e4SLinus Torvalds } 7624f86d3a8SLen Brown #endif /* !CONFIG_CPU_IDLE */ 7631da177e4SLinus Torvalds 7641da177e4SLinus Torvalds static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) 7651da177e4SLinus Torvalds { 7661da177e4SLinus Torvalds 7671da177e4SLinus Torvalds if (!pr) 768d550d98dSPatrick Mochel return -EINVAL; 7691da177e4SLinus Torvalds 7701da177e4SLinus Torvalds if (!pr->pblk) 771d550d98dSPatrick Mochel return -ENODEV; 7721da177e4SLinus Torvalds 7731da177e4SLinus Torvalds /* if info is obtained from pblk/fadt, type equals state */ 7741da177e4SLinus Torvalds pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2; 7751da177e4SLinus Torvalds pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3; 7761da177e4SLinus Torvalds 7774c033552SVenkatesh Pallipadi #ifndef CONFIG_HOTPLUG_CPU 7784c033552SVenkatesh Pallipadi /* 7794c033552SVenkatesh Pallipadi * Check for P_LVL2_UP flag before entering C2 and above on 7804c033552SVenkatesh Pallipadi * an SMP system. 
7814c033552SVenkatesh Pallipadi */ 782ad71860aSAlexey Starikovskiy if ((num_online_cpus() > 1) && 783cee324b1SAlexey Starikovskiy !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) 784d550d98dSPatrick Mochel return -ENODEV; 7854c033552SVenkatesh Pallipadi #endif 7864c033552SVenkatesh Pallipadi 7871da177e4SLinus Torvalds /* determine C2 and C3 address from pblk */ 7881da177e4SLinus Torvalds pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4; 7891da177e4SLinus Torvalds pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5; 7901da177e4SLinus Torvalds 7911da177e4SLinus Torvalds /* determine latencies from FADT */ 792cee324b1SAlexey Starikovskiy pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency; 793cee324b1SAlexey Starikovskiy pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency; 7941da177e4SLinus Torvalds 7951da177e4SLinus Torvalds ACPI_DEBUG_PRINT((ACPI_DB_INFO, 7961da177e4SLinus Torvalds "lvl2[0x%08x] lvl3[0x%08x]\n", 7971da177e4SLinus Torvalds pr->power.states[ACPI_STATE_C2].address, 7981da177e4SLinus Torvalds pr->power.states[ACPI_STATE_C3].address)); 7991da177e4SLinus Torvalds 800d550d98dSPatrick Mochel return 0; 8011da177e4SLinus Torvalds } 8021da177e4SLinus Torvalds 803991528d7SVenkatesh Pallipadi static int acpi_processor_get_power_info_default(struct acpi_processor *pr) 804acf05f4bSVenkatesh Pallipadi { 805991528d7SVenkatesh Pallipadi if (!pr->power.states[ACPI_STATE_C1].valid) { 806cf824788SJanosch Machowinski /* set the first C-State to C1 */ 807991528d7SVenkatesh Pallipadi /* all processors need to support C1 */ 808acf05f4bSVenkatesh Pallipadi pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; 809acf05f4bSVenkatesh Pallipadi pr->power.states[ACPI_STATE_C1].valid = 1; 810991528d7SVenkatesh Pallipadi } 811991528d7SVenkatesh Pallipadi /* the C0 state only exists as a filler in our array */ 812991528d7SVenkatesh Pallipadi pr->power.states[ACPI_STATE_C0].valid = 1; 813d550d98dSPatrick Mochel return 0; 814acf05f4bSVenkatesh 
Pallipadi } 815acf05f4bSVenkatesh Pallipadi 8161da177e4SLinus Torvalds static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) 8171da177e4SLinus Torvalds { 8181da177e4SLinus Torvalds acpi_status status = 0; 8191da177e4SLinus Torvalds acpi_integer count; 820cf824788SJanosch Machowinski int current_count; 8211da177e4SLinus Torvalds int i; 8221da177e4SLinus Torvalds struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 8231da177e4SLinus Torvalds union acpi_object *cst; 8241da177e4SLinus Torvalds 8251da177e4SLinus Torvalds 8261da177e4SLinus Torvalds if (nocst) 827d550d98dSPatrick Mochel return -ENODEV; 8281da177e4SLinus Torvalds 829991528d7SVenkatesh Pallipadi current_count = 0; 8301da177e4SLinus Torvalds 8311da177e4SLinus Torvalds status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer); 8321da177e4SLinus Torvalds if (ACPI_FAILURE(status)) { 8331da177e4SLinus Torvalds ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n")); 834d550d98dSPatrick Mochel return -ENODEV; 8351da177e4SLinus Torvalds } 8361da177e4SLinus Torvalds 83750dd0969SJan Engelhardt cst = buffer.pointer; 8381da177e4SLinus Torvalds 8391da177e4SLinus Torvalds /* There must be at least 2 elements */ 8401da177e4SLinus Torvalds if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) { 8416468463aSLen Brown printk(KERN_ERR PREFIX "not enough elements in _CST\n"); 8421da177e4SLinus Torvalds status = -EFAULT; 8431da177e4SLinus Torvalds goto end; 8441da177e4SLinus Torvalds } 8451da177e4SLinus Torvalds 8461da177e4SLinus Torvalds count = cst->package.elements[0].integer.value; 8471da177e4SLinus Torvalds 8481da177e4SLinus Torvalds /* Validate number of power states. 
*/ 8491da177e4SLinus Torvalds if (count < 1 || count != cst->package.count - 1) { 8506468463aSLen Brown printk(KERN_ERR PREFIX "count given by _CST is not valid\n"); 8511da177e4SLinus Torvalds status = -EFAULT; 8521da177e4SLinus Torvalds goto end; 8531da177e4SLinus Torvalds } 8541da177e4SLinus Torvalds 8551da177e4SLinus Torvalds /* Tell driver that at least _CST is supported. */ 8561da177e4SLinus Torvalds pr->flags.has_cst = 1; 8571da177e4SLinus Torvalds 8581da177e4SLinus Torvalds for (i = 1; i <= count; i++) { 8591da177e4SLinus Torvalds union acpi_object *element; 8601da177e4SLinus Torvalds union acpi_object *obj; 8611da177e4SLinus Torvalds struct acpi_power_register *reg; 8621da177e4SLinus Torvalds struct acpi_processor_cx cx; 8631da177e4SLinus Torvalds 8641da177e4SLinus Torvalds memset(&cx, 0, sizeof(cx)); 8651da177e4SLinus Torvalds 86650dd0969SJan Engelhardt element = &(cst->package.elements[i]); 8671da177e4SLinus Torvalds if (element->type != ACPI_TYPE_PACKAGE) 8681da177e4SLinus Torvalds continue; 8691da177e4SLinus Torvalds 8701da177e4SLinus Torvalds if (element->package.count != 4) 8711da177e4SLinus Torvalds continue; 8721da177e4SLinus Torvalds 87350dd0969SJan Engelhardt obj = &(element->package.elements[0]); 8741da177e4SLinus Torvalds 8751da177e4SLinus Torvalds if (obj->type != ACPI_TYPE_BUFFER) 8761da177e4SLinus Torvalds continue; 8771da177e4SLinus Torvalds 8781da177e4SLinus Torvalds reg = (struct acpi_power_register *)obj->buffer.pointer; 8791da177e4SLinus Torvalds 8801da177e4SLinus Torvalds if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO && 8811da177e4SLinus Torvalds (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) 8821da177e4SLinus Torvalds continue; 8831da177e4SLinus Torvalds 8841da177e4SLinus Torvalds /* There should be an easy way to extract an integer... 
*/ 88550dd0969SJan Engelhardt obj = &(element->package.elements[1]); 8861da177e4SLinus Torvalds if (obj->type != ACPI_TYPE_INTEGER) 8871da177e4SLinus Torvalds continue; 8881da177e4SLinus Torvalds 8891da177e4SLinus Torvalds cx.type = obj->integer.value; 890991528d7SVenkatesh Pallipadi /* 891991528d7SVenkatesh Pallipadi * Some buggy BIOSes won't list C1 in _CST - 892991528d7SVenkatesh Pallipadi * Let acpi_processor_get_power_info_default() handle them later 893991528d7SVenkatesh Pallipadi */ 894991528d7SVenkatesh Pallipadi if (i == 1 && cx.type != ACPI_STATE_C1) 895991528d7SVenkatesh Pallipadi current_count++; 8961da177e4SLinus Torvalds 897991528d7SVenkatesh Pallipadi cx.address = reg->address; 898991528d7SVenkatesh Pallipadi cx.index = current_count + 1; 8991da177e4SLinus Torvalds 900991528d7SVenkatesh Pallipadi cx.space_id = ACPI_CSTATE_SYSTEMIO; 901991528d7SVenkatesh Pallipadi if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { 902991528d7SVenkatesh Pallipadi if (acpi_processor_ffh_cstate_probe 903991528d7SVenkatesh Pallipadi (pr->id, &cx, reg) == 0) { 904991528d7SVenkatesh Pallipadi cx.space_id = ACPI_CSTATE_FFH; 905991528d7SVenkatesh Pallipadi } else if (cx.type != ACPI_STATE_C1) { 906991528d7SVenkatesh Pallipadi /* 907991528d7SVenkatesh Pallipadi * C1 is a special case where FIXED_HARDWARE 908991528d7SVenkatesh Pallipadi * can be handled in non-MWAIT way as well. 909991528d7SVenkatesh Pallipadi * In that case, save this _CST entry info. 910991528d7SVenkatesh Pallipadi * That is, we retain space_id of SYSTEM_IO for 911991528d7SVenkatesh Pallipadi * halt based C1. 912991528d7SVenkatesh Pallipadi * Otherwise, ignore this info and continue. 
913991528d7SVenkatesh Pallipadi */ 9141da177e4SLinus Torvalds continue; 915991528d7SVenkatesh Pallipadi } 916991528d7SVenkatesh Pallipadi } 9171da177e4SLinus Torvalds 91850dd0969SJan Engelhardt obj = &(element->package.elements[2]); 9191da177e4SLinus Torvalds if (obj->type != ACPI_TYPE_INTEGER) 9201da177e4SLinus Torvalds continue; 9211da177e4SLinus Torvalds 9221da177e4SLinus Torvalds cx.latency = obj->integer.value; 9231da177e4SLinus Torvalds 92450dd0969SJan Engelhardt obj = &(element->package.elements[3]); 9251da177e4SLinus Torvalds if (obj->type != ACPI_TYPE_INTEGER) 9261da177e4SLinus Torvalds continue; 9271da177e4SLinus Torvalds 9281da177e4SLinus Torvalds cx.power = obj->integer.value; 9291da177e4SLinus Torvalds 930cf824788SJanosch Machowinski current_count++; 931cf824788SJanosch Machowinski memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx)); 932cf824788SJanosch Machowinski 933cf824788SJanosch Machowinski /* 934cf824788SJanosch Machowinski * We support total ACPI_PROCESSOR_MAX_POWER - 1 935cf824788SJanosch Machowinski * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1) 936cf824788SJanosch Machowinski */ 937cf824788SJanosch Machowinski if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) { 938cf824788SJanosch Machowinski printk(KERN_WARNING 939cf824788SJanosch Machowinski "Limiting number of power states to max (%d)\n", 940cf824788SJanosch Machowinski ACPI_PROCESSOR_MAX_POWER); 941cf824788SJanosch Machowinski printk(KERN_WARNING 942cf824788SJanosch Machowinski "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); 943cf824788SJanosch Machowinski break; 944cf824788SJanosch Machowinski } 9451da177e4SLinus Torvalds } 9461da177e4SLinus Torvalds 9474be44fcdSLen Brown ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n", 948cf824788SJanosch Machowinski current_count)); 9491da177e4SLinus Torvalds 9501da177e4SLinus Torvalds /* Validate number of power states discovered */ 951cf824788SJanosch Machowinski if (current_count < 2) 9526d93c648SVenkatesh Pallipadi 
status = -EFAULT; 9531da177e4SLinus Torvalds 9541da177e4SLinus Torvalds end: 95502438d87SLen Brown kfree(buffer.pointer); 9561da177e4SLinus Torvalds 957d550d98dSPatrick Mochel return status; 9581da177e4SLinus Torvalds } 9591da177e4SLinus Torvalds 9601da177e4SLinus Torvalds static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx) 9611da177e4SLinus Torvalds { 9621da177e4SLinus Torvalds 9631da177e4SLinus Torvalds if (!cx->address) 964d550d98dSPatrick Mochel return; 9651da177e4SLinus Torvalds 9661da177e4SLinus Torvalds /* 9671da177e4SLinus Torvalds * C2 latency must be less than or equal to 100 9681da177e4SLinus Torvalds * microseconds. 9691da177e4SLinus Torvalds */ 9701da177e4SLinus Torvalds else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) { 9711da177e4SLinus Torvalds ACPI_DEBUG_PRINT((ACPI_DB_INFO, 9724be44fcdSLen Brown "latency too large [%d]\n", cx->latency)); 973d550d98dSPatrick Mochel return; 9741da177e4SLinus Torvalds } 9751da177e4SLinus Torvalds 9761da177e4SLinus Torvalds /* 9771da177e4SLinus Torvalds * Otherwise we've met all of our C2 requirements. 
9781da177e4SLinus Torvalds * Normalize the C2 latency to expidite policy 9791da177e4SLinus Torvalds */ 9801da177e4SLinus Torvalds cx->valid = 1; 9814f86d3a8SLen Brown 9824f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE 9831da177e4SLinus Torvalds cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); 9844f86d3a8SLen Brown #else 9854f86d3a8SLen Brown cx->latency_ticks = cx->latency; 9864f86d3a8SLen Brown #endif 9871da177e4SLinus Torvalds 988d550d98dSPatrick Mochel return; 9891da177e4SLinus Torvalds } 9901da177e4SLinus Torvalds 9914be44fcdSLen Brown static void acpi_processor_power_verify_c3(struct acpi_processor *pr, 9921da177e4SLinus Torvalds struct acpi_processor_cx *cx) 9931da177e4SLinus Torvalds { 99402df8b93SVenkatesh Pallipadi static int bm_check_flag; 99502df8b93SVenkatesh Pallipadi 9961da177e4SLinus Torvalds 9971da177e4SLinus Torvalds if (!cx->address) 998d550d98dSPatrick Mochel return; 9991da177e4SLinus Torvalds 10001da177e4SLinus Torvalds /* 10011da177e4SLinus Torvalds * C3 latency must be less than or equal to 1000 10021da177e4SLinus Torvalds * microseconds. 10031da177e4SLinus Torvalds */ 10041da177e4SLinus Torvalds else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) { 10051da177e4SLinus Torvalds ACPI_DEBUG_PRINT((ACPI_DB_INFO, 10064be44fcdSLen Brown "latency too large [%d]\n", cx->latency)); 1007d550d98dSPatrick Mochel return; 10081da177e4SLinus Torvalds } 10091da177e4SLinus Torvalds 10101da177e4SLinus Torvalds /* 10111da177e4SLinus Torvalds * PIIX4 Erratum #18: We don't support C3 when Type-F (fast) 10121da177e4SLinus Torvalds * DMA transfers are used by any ISA device to avoid livelock. 10131da177e4SLinus Torvalds * Note that we could disable Type-F DMA (as recommended by 10141da177e4SLinus Torvalds * the erratum), but this is known to disrupt certain ISA 10151da177e4SLinus Torvalds * devices thus we take the conservative approach. 
10161da177e4SLinus Torvalds */ 10171da177e4SLinus Torvalds else if (errata.piix4.fdma) { 10181da177e4SLinus Torvalds ACPI_DEBUG_PRINT((ACPI_DB_INFO, 10191da177e4SLinus Torvalds "C3 not supported on PIIX4 with Type-F DMA\n")); 1020d550d98dSPatrick Mochel return; 10211da177e4SLinus Torvalds } 10221da177e4SLinus Torvalds 102302df8b93SVenkatesh Pallipadi /* All the logic here assumes flags.bm_check is same across all CPUs */ 102402df8b93SVenkatesh Pallipadi if (!bm_check_flag) { 102502df8b93SVenkatesh Pallipadi /* Determine whether bm_check is needed based on CPU */ 102602df8b93SVenkatesh Pallipadi acpi_processor_power_init_bm_check(&(pr->flags), pr->id); 102702df8b93SVenkatesh Pallipadi bm_check_flag = pr->flags.bm_check; 102802df8b93SVenkatesh Pallipadi } else { 102902df8b93SVenkatesh Pallipadi pr->flags.bm_check = bm_check_flag; 103002df8b93SVenkatesh Pallipadi } 103102df8b93SVenkatesh Pallipadi 103202df8b93SVenkatesh Pallipadi if (pr->flags.bm_check) { 103302df8b93SVenkatesh Pallipadi if (!pr->flags.bm_control) { 1034ed3110efSVenki Pallipadi if (pr->flags.has_cst != 1) { 1035ed3110efSVenki Pallipadi /* bus mastering control is necessary */ 103602df8b93SVenkatesh Pallipadi ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1037ed3110efSVenki Pallipadi "C3 support requires BM control\n")); 1038ed3110efSVenki Pallipadi return; 1039ed3110efSVenki Pallipadi } else { 1040ed3110efSVenki Pallipadi /* Here we enter C3 without bus mastering */ 1041ed3110efSVenki Pallipadi ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1042ed3110efSVenki Pallipadi "C3 support without BM control\n")); 1043ed3110efSVenki Pallipadi } 104402df8b93SVenkatesh Pallipadi } 104502df8b93SVenkatesh Pallipadi } else { 104602df8b93SVenkatesh Pallipadi /* 104702df8b93SVenkatesh Pallipadi * WBINVD should be set in fadt, for C3 state to be 104802df8b93SVenkatesh Pallipadi * supported on when bm_check is not required. 
104902df8b93SVenkatesh Pallipadi */ 1050cee324b1SAlexey Starikovskiy if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) { 105102df8b93SVenkatesh Pallipadi ACPI_DEBUG_PRINT((ACPI_DB_INFO, 105202df8b93SVenkatesh Pallipadi "Cache invalidation should work properly" 105302df8b93SVenkatesh Pallipadi " for C3 to be enabled on SMP systems\n")); 1054d550d98dSPatrick Mochel return; 105502df8b93SVenkatesh Pallipadi } 1056d8c71b6dSBob Moore acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); 105702df8b93SVenkatesh Pallipadi } 105802df8b93SVenkatesh Pallipadi 10591da177e4SLinus Torvalds /* 10601da177e4SLinus Torvalds * Otherwise we've met all of our C3 requirements. 10611da177e4SLinus Torvalds * Normalize the C3 latency to expidite policy. Enable 10621da177e4SLinus Torvalds * checking of bus mastering status (bm_check) so we can 10631da177e4SLinus Torvalds * use this in our C3 policy 10641da177e4SLinus Torvalds */ 10651da177e4SLinus Torvalds cx->valid = 1; 10664f86d3a8SLen Brown 10674f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE 10681da177e4SLinus Torvalds cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); 10694f86d3a8SLen Brown #else 10704f86d3a8SLen Brown cx->latency_ticks = cx->latency; 10714f86d3a8SLen Brown #endif 10721da177e4SLinus Torvalds 1073d550d98dSPatrick Mochel return; 10741da177e4SLinus Torvalds } 10751da177e4SLinus Torvalds 10761da177e4SLinus Torvalds static int acpi_processor_power_verify(struct acpi_processor *pr) 10771da177e4SLinus Torvalds { 10781da177e4SLinus Torvalds unsigned int i; 10791da177e4SLinus Torvalds unsigned int working = 0; 10806eb0a0fdSVenkatesh Pallipadi 1081169a0abbSThomas Gleixner pr->power.timer_broadcast_on_state = INT_MAX; 10826eb0a0fdSVenkatesh Pallipadi 10831da177e4SLinus Torvalds for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { 10841da177e4SLinus Torvalds struct acpi_processor_cx *cx = &pr->power.states[i]; 10851da177e4SLinus Torvalds 10861da177e4SLinus Torvalds switch (cx->type) { 10871da177e4SLinus Torvalds case ACPI_STATE_C1: 
10881da177e4SLinus Torvalds cx->valid = 1; 10891da177e4SLinus Torvalds break; 10901da177e4SLinus Torvalds 10911da177e4SLinus Torvalds case ACPI_STATE_C2: 10921da177e4SLinus Torvalds acpi_processor_power_verify_c2(cx); 1093296d93cdSLinus Torvalds if (cx->valid) 1094169a0abbSThomas Gleixner acpi_timer_check_state(i, pr, cx); 10951da177e4SLinus Torvalds break; 10961da177e4SLinus Torvalds 10971da177e4SLinus Torvalds case ACPI_STATE_C3: 10981da177e4SLinus Torvalds acpi_processor_power_verify_c3(pr, cx); 1099296d93cdSLinus Torvalds if (cx->valid) 1100169a0abbSThomas Gleixner acpi_timer_check_state(i, pr, cx); 11011da177e4SLinus Torvalds break; 11021da177e4SLinus Torvalds } 11031da177e4SLinus Torvalds 11041da177e4SLinus Torvalds if (cx->valid) 11051da177e4SLinus Torvalds working++; 11061da177e4SLinus Torvalds } 11071da177e4SLinus Torvalds 1108169a0abbSThomas Gleixner acpi_propagate_timer_broadcast(pr); 1109bd663347SAndi Kleen 11101da177e4SLinus Torvalds return (working); 11111da177e4SLinus Torvalds } 11121da177e4SLinus Torvalds 11134be44fcdSLen Brown static int acpi_processor_get_power_info(struct acpi_processor *pr) 11141da177e4SLinus Torvalds { 11151da177e4SLinus Torvalds unsigned int i; 11161da177e4SLinus Torvalds int result; 11171da177e4SLinus Torvalds 11181da177e4SLinus Torvalds 11191da177e4SLinus Torvalds /* NOTE: the idle thread may not be running while calling 11201da177e4SLinus Torvalds * this function */ 11211da177e4SLinus Torvalds 1122991528d7SVenkatesh Pallipadi /* Zero initialize all the C-states info. */ 1123991528d7SVenkatesh Pallipadi memset(pr->power.states, 0, sizeof(pr->power.states)); 1124991528d7SVenkatesh Pallipadi 11251da177e4SLinus Torvalds result = acpi_processor_get_power_info_cst(pr); 11266d93c648SVenkatesh Pallipadi if (result == -ENODEV) 1127c5a114f1SDarrick J. 
Wong result = acpi_processor_get_power_info_fadt(pr); 11286d93c648SVenkatesh Pallipadi 1129991528d7SVenkatesh Pallipadi if (result) 1130991528d7SVenkatesh Pallipadi return result; 1131991528d7SVenkatesh Pallipadi 1132991528d7SVenkatesh Pallipadi acpi_processor_get_power_info_default(pr); 1133991528d7SVenkatesh Pallipadi 1134cf824788SJanosch Machowinski pr->power.count = acpi_processor_power_verify(pr); 11351da177e4SLinus Torvalds 11364f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE 11371da177e4SLinus Torvalds /* 11381da177e4SLinus Torvalds * Set Default Policy 11391da177e4SLinus Torvalds * ------------------ 11401da177e4SLinus Torvalds * Now that we know which states are supported, set the default 11411da177e4SLinus Torvalds * policy. Note that this policy can be changed dynamically 11421da177e4SLinus Torvalds * (e.g. encourage deeper sleeps to conserve battery life when 11431da177e4SLinus Torvalds * not on AC). 11441da177e4SLinus Torvalds */ 11451da177e4SLinus Torvalds result = acpi_processor_set_power_policy(pr); 11461da177e4SLinus Torvalds if (result) 1147d550d98dSPatrick Mochel return result; 11484f86d3a8SLen Brown #endif 11491da177e4SLinus Torvalds 11501da177e4SLinus Torvalds /* 11511da177e4SLinus Torvalds * if one state of type C2 or C3 is available, mark this 11521da177e4SLinus Torvalds * CPU as being "idle manageable" 11531da177e4SLinus Torvalds */ 11541da177e4SLinus Torvalds for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { 1155acf05f4bSVenkatesh Pallipadi if (pr->power.states[i].valid) { 11561da177e4SLinus Torvalds pr->power.count = i; 11572203d6edSLinus Torvalds if (pr->power.states[i].type >= ACPI_STATE_C2) 11581da177e4SLinus Torvalds pr->flags.power = 1; 11591da177e4SLinus Torvalds } 1160acf05f4bSVenkatesh Pallipadi } 11611da177e4SLinus Torvalds 1162d550d98dSPatrick Mochel return 0; 11631da177e4SLinus Torvalds } 11641da177e4SLinus Torvalds 11651da177e4SLinus Torvalds static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset) 
11661da177e4SLinus Torvalds { 116750dd0969SJan Engelhardt struct acpi_processor *pr = seq->private; 11681da177e4SLinus Torvalds unsigned int i; 11691da177e4SLinus Torvalds 11701da177e4SLinus Torvalds 11711da177e4SLinus Torvalds if (!pr) 11721da177e4SLinus Torvalds goto end; 11731da177e4SLinus Torvalds 11741da177e4SLinus Torvalds seq_printf(seq, "active state: C%zd\n" 11751da177e4SLinus Torvalds "max_cstate: C%d\n" 11765c87579eSArjan van de Ven "bus master activity: %08x\n" 11775c87579eSArjan van de Ven "maximum allowed latency: %d usec\n", 11781da177e4SLinus Torvalds pr->power.state ? pr->power.state - pr->power.states : 0, 11795c87579eSArjan van de Ven max_cstate, (unsigned)pr->power.bm_activity, 11805c87579eSArjan van de Ven system_latency_constraint()); 11811da177e4SLinus Torvalds 11821da177e4SLinus Torvalds seq_puts(seq, "states:\n"); 11831da177e4SLinus Torvalds 11841da177e4SLinus Torvalds for (i = 1; i <= pr->power.count; i++) { 11851da177e4SLinus Torvalds seq_printf(seq, " %cC%d: ", 11864be44fcdSLen Brown (&pr->power.states[i] == 11874be44fcdSLen Brown pr->power.state ? 
'*' : ' '), i); 11881da177e4SLinus Torvalds 11891da177e4SLinus Torvalds if (!pr->power.states[i].valid) { 11901da177e4SLinus Torvalds seq_puts(seq, "<not supported>\n"); 11911da177e4SLinus Torvalds continue; 11921da177e4SLinus Torvalds } 11931da177e4SLinus Torvalds 11941da177e4SLinus Torvalds switch (pr->power.states[i].type) { 11951da177e4SLinus Torvalds case ACPI_STATE_C1: 11961da177e4SLinus Torvalds seq_printf(seq, "type[C1] "); 11971da177e4SLinus Torvalds break; 11981da177e4SLinus Torvalds case ACPI_STATE_C2: 11991da177e4SLinus Torvalds seq_printf(seq, "type[C2] "); 12001da177e4SLinus Torvalds break; 12011da177e4SLinus Torvalds case ACPI_STATE_C3: 12021da177e4SLinus Torvalds seq_printf(seq, "type[C3] "); 12031da177e4SLinus Torvalds break; 12041da177e4SLinus Torvalds default: 12051da177e4SLinus Torvalds seq_printf(seq, "type[--] "); 12061da177e4SLinus Torvalds break; 12071da177e4SLinus Torvalds } 12081da177e4SLinus Torvalds 12091da177e4SLinus Torvalds if (pr->power.states[i].promotion.state) 12101da177e4SLinus Torvalds seq_printf(seq, "promotion[C%zd] ", 12111da177e4SLinus Torvalds (pr->power.states[i].promotion.state - 12121da177e4SLinus Torvalds pr->power.states)); 12131da177e4SLinus Torvalds else 12141da177e4SLinus Torvalds seq_puts(seq, "promotion[--] "); 12151da177e4SLinus Torvalds 12161da177e4SLinus Torvalds if (pr->power.states[i].demotion.state) 12171da177e4SLinus Torvalds seq_printf(seq, "demotion[C%zd] ", 12181da177e4SLinus Torvalds (pr->power.states[i].demotion.state - 12191da177e4SLinus Torvalds pr->power.states)); 12201da177e4SLinus Torvalds else 12211da177e4SLinus Torvalds seq_puts(seq, "demotion[--] "); 12221da177e4SLinus Torvalds 1223a3c6598fSDominik Brodowski seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n", 12241da177e4SLinus Torvalds pr->power.states[i].latency, 1225a3c6598fSDominik Brodowski pr->power.states[i].usage, 1226b0b7eaafSAlexey Starikovskiy (unsigned long long)pr->power.states[i].time); 12271da177e4SLinus Torvalds } 

      end:
	return 0;
}

/*
 * seq_file open hook for the per-processor /proc "power" entry.
 * PDE(inode)->data carries the driver data installed at proc-entry
 * creation time (entry->data = acpi_driver_data(device) in
 * acpi_processor_power_init), which the show routine receives.
 */
static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

/* file_operations backing the read-only /proc "power" entry. */
static const struct file_operations acpi_processor_power_fops = {
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifndef CONFIG_CPU_IDLE

/*
 * Legacy (non-cpuidle) _CST change handler: drop back to the default idle
 * loop, re-probe the C-state tables, and re-install the ACPI idle handler
 * only if usable power states were found.
 */
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int result = 0;


	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/* Fall back to the default idle loop */
	pm_idle = pm_idle_save;
	/*
	 * pm_idle must be restored *before* re-probing: synchronize_sched()
	 * below guarantees no CPU is still executing the old handler.
	 */
	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */

	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	/* Only reinstall the ACPI idle handler if setup is still valid. */
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return result;
}

#ifdef CONFIG_SMP
/* Cross-CPU IPI target: the IPI itself is the point (it exits idle). */
static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
					 unsigned long l, void *v)
{
	/* Wake every other CPU; each re-evaluates its C-state on idle entry. */
	smp_call_function(smp_callback, NULL, 0, 1);
	return NOTIFY_OK;
}

static struct notifier_block acpi_processor_latency_notifier = {
	.notifier_call = acpi_processor_latency_notify,
};

#endif

#else /* CONFIG_CPU_IDLE */

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 *
 * Reads (and write-1-clears) BM_STS; on affected PIIX4 parts, falls back
 * to polling the per-channel BMIDEA bits instead.  Returns non-zero when
 * bus-master activity was seen.
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

/**
 * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
 * @pr: the processor
 * @target: the new target state
 *
 * BM_RLD is only wanted while in C3; the cached pr->flags.bm_rld_set
 * avoids redundant register writes on every idle entry.
 */
static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
					   struct acpi_processor_cx *target)
{
	/* Leaving C3 territory: clear BM_RLD. */
	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
		pr->flags.bm_rld_set = 0;
	}

	/* Entering C3: set BM_RLD so bus-master requests cause exit. */
	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		pr->flags.bm_rld_set = 1;
	}
}

/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Enters the state either via the architectural FFH mechanism (e.g.
 * MWAIT) or by reading the state's I/O port.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->space_id == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else {
		int unused;
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}

/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 * Always returns 0: C1 residency is not measured here.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	/* per-CPU processor object for the CPU we are idling on */
	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	acpi_safe_halt();

	cx->usage++;

	return 0;
}

/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * Returns the idle residency in microseconds (derived from the PM timer).
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	/* During suspend, only plain C1 (halt) is safe. */
	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	/* Bail out (restoring polling state) if work became pending. */
	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
1429e17bcb43SThomas Gleixner */ 1430e17bcb43SThomas Gleixner acpi_state_timer_broadcast(pr, cx, 1); 1431e17bcb43SThomas Gleixner 1432e17bcb43SThomas Gleixner if (pr->flags.bm_check) 1433e17bcb43SThomas Gleixner acpi_idle_update_bm_rld(pr, cx); 1434e17bcb43SThomas Gleixner 14354f86d3a8SLen Brown if (cx->type == ACPI_STATE_C3) 14364f86d3a8SLen Brown ACPI_FLUSH_CPU_CACHE(); 14374f86d3a8SLen Brown 14384f86d3a8SLen Brown t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); 143950629118SVenkatesh Pallipadi /* Tell the scheduler that we are going deep-idle: */ 144050629118SVenkatesh Pallipadi sched_clock_idle_sleep_event(); 14414f86d3a8SLen Brown acpi_idle_do_entry(cx); 14424f86d3a8SLen Brown t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); 14434f86d3a8SLen Brown 14444f86d3a8SLen Brown #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) 14454f86d3a8SLen Brown /* TSC could halt in idle, so notify users */ 14464f86d3a8SLen Brown mark_tsc_unstable("TSC halts in idle");; 14474f86d3a8SLen Brown #endif 144850629118SVenkatesh Pallipadi sleep_ticks = ticks_elapsed(t1, t2); 144950629118SVenkatesh Pallipadi 145050629118SVenkatesh Pallipadi /* Tell the scheduler how much we idled: */ 145150629118SVenkatesh Pallipadi sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); 14524f86d3a8SLen Brown 14534f86d3a8SLen Brown local_irq_enable(); 14544f86d3a8SLen Brown current_thread_info()->status |= TS_POLLING; 14554f86d3a8SLen Brown 14564f86d3a8SLen Brown cx->usage++; 14574f86d3a8SLen Brown 14584f86d3a8SLen Brown acpi_state_timer_broadcast(pr, cx, 0); 145950629118SVenkatesh Pallipadi cx->time += sleep_ticks; 14604f86d3a8SLen Brown return ticks_elapsed_in_us(t1, t2); 14614f86d3a8SLen Brown } 14624f86d3a8SLen Brown 14634f86d3a8SLen Brown static int c3_cpu_count; 14644f86d3a8SLen Brown static DEFINE_SPINLOCK(c3_lock); 14654f86d3a8SLen Brown 14664f86d3a8SLen Brown /** 14674f86d3a8SLen Brown * acpi_idle_enter_bm - enters C3 with proper BM handling 14684f86d3a8SLen Brown * @dev: the 
target CPU 14694f86d3a8SLen Brown * @state: the state data 14704f86d3a8SLen Brown * 14714f86d3a8SLen Brown * If BM is detected, the deepest non-C3 idle state is entered instead. 14724f86d3a8SLen Brown */ 14734f86d3a8SLen Brown static int acpi_idle_enter_bm(struct cpuidle_device *dev, 14744f86d3a8SLen Brown struct cpuidle_state *state) 14754f86d3a8SLen Brown { 14764f86d3a8SLen Brown struct acpi_processor *pr; 14774f86d3a8SLen Brown struct acpi_processor_cx *cx = cpuidle_get_statedata(state); 14784f86d3a8SLen Brown u32 t1, t2; 147950629118SVenkatesh Pallipadi int sleep_ticks = 0; 148050629118SVenkatesh Pallipadi 14814f86d3a8SLen Brown pr = processors[smp_processor_id()]; 14824f86d3a8SLen Brown 14834f86d3a8SLen Brown if (unlikely(!pr)) 14844f86d3a8SLen Brown return 0; 14854f86d3a8SLen Brown 1486e196441bSLen Brown if (acpi_idle_suspend) 1487e196441bSLen Brown return(acpi_idle_enter_c1(dev, state)); 1488e196441bSLen Brown 1489ddc081a1SVenkatesh Pallipadi if (acpi_idle_bm_check()) { 1490ddc081a1SVenkatesh Pallipadi if (dev->safe_state) { 1491ddc081a1SVenkatesh Pallipadi return dev->safe_state->enter(dev, dev->safe_state); 1492ddc081a1SVenkatesh Pallipadi } else { 1493ddc081a1SVenkatesh Pallipadi acpi_safe_halt(); 1494ddc081a1SVenkatesh Pallipadi return 0; 1495ddc081a1SVenkatesh Pallipadi } 1496ddc081a1SVenkatesh Pallipadi } 1497ddc081a1SVenkatesh Pallipadi 14984f86d3a8SLen Brown local_irq_disable(); 14994f86d3a8SLen Brown current_thread_info()->status &= ~TS_POLLING; 15004f86d3a8SLen Brown /* 15014f86d3a8SLen Brown * TS_POLLING-cleared state must be visible before we test 15024f86d3a8SLen Brown * NEED_RESCHED: 15034f86d3a8SLen Brown */ 15044f86d3a8SLen Brown smp_mb(); 15054f86d3a8SLen Brown 15064f86d3a8SLen Brown if (unlikely(need_resched())) { 15074f86d3a8SLen Brown current_thread_info()->status |= TS_POLLING; 15084f86d3a8SLen Brown local_irq_enable(); 15094f86d3a8SLen Brown return 0; 15104f86d3a8SLen Brown } 15114f86d3a8SLen Brown 151250629118SVenkatesh Pallipadi /* 
Tell the scheduler that we are going deep-idle: */ 151350629118SVenkatesh Pallipadi sched_clock_idle_sleep_event(); 15144f86d3a8SLen Brown /* 15154f86d3a8SLen Brown * Must be done before busmaster disable as we might need to 15164f86d3a8SLen Brown * access HPET ! 15174f86d3a8SLen Brown */ 15184f86d3a8SLen Brown acpi_state_timer_broadcast(pr, cx, 1); 15194f86d3a8SLen Brown 15204f86d3a8SLen Brown acpi_idle_update_bm_rld(pr, cx); 15214f86d3a8SLen Brown 1522c9c860e5SVenkatesh Pallipadi /* 1523c9c860e5SVenkatesh Pallipadi * disable bus master 1524c9c860e5SVenkatesh Pallipadi * bm_check implies we need ARB_DIS 1525c9c860e5SVenkatesh Pallipadi * !bm_check implies we need cache flush 1526c9c860e5SVenkatesh Pallipadi * bm_control implies whether we can do ARB_DIS 1527c9c860e5SVenkatesh Pallipadi * 1528c9c860e5SVenkatesh Pallipadi * That leaves a case where bm_check is set and bm_control is 1529c9c860e5SVenkatesh Pallipadi * not set. In that case we cannot do much, we enter C3 1530c9c860e5SVenkatesh Pallipadi * without doing anything. 
1531c9c860e5SVenkatesh Pallipadi */ 1532c9c860e5SVenkatesh Pallipadi if (pr->flags.bm_check && pr->flags.bm_control) { 15334f86d3a8SLen Brown spin_lock(&c3_lock); 15344f86d3a8SLen Brown c3_cpu_count++; 15354f86d3a8SLen Brown /* Disable bus master arbitration when all CPUs are in C3 */ 15364f86d3a8SLen Brown if (c3_cpu_count == num_online_cpus()) 15374f86d3a8SLen Brown acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); 15384f86d3a8SLen Brown spin_unlock(&c3_lock); 1539c9c860e5SVenkatesh Pallipadi } else if (!pr->flags.bm_check) { 1540c9c860e5SVenkatesh Pallipadi ACPI_FLUSH_CPU_CACHE(); 1541c9c860e5SVenkatesh Pallipadi } 15424f86d3a8SLen Brown 15434f86d3a8SLen Brown t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); 15444f86d3a8SLen Brown acpi_idle_do_entry(cx); 15454f86d3a8SLen Brown t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); 15464f86d3a8SLen Brown 15474f86d3a8SLen Brown /* Re-enable bus master arbitration */ 1548c9c860e5SVenkatesh Pallipadi if (pr->flags.bm_check && pr->flags.bm_control) { 1549c9c860e5SVenkatesh Pallipadi spin_lock(&c3_lock); 15504f86d3a8SLen Brown acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); 15514f86d3a8SLen Brown c3_cpu_count--; 15524f86d3a8SLen Brown spin_unlock(&c3_lock); 15534f86d3a8SLen Brown } 15544f86d3a8SLen Brown 15554f86d3a8SLen Brown #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) 15564f86d3a8SLen Brown /* TSC could halt in idle, so notify users */ 15574f86d3a8SLen Brown mark_tsc_unstable("TSC halts in idle"); 15584f86d3a8SLen Brown #endif 155950629118SVenkatesh Pallipadi sleep_ticks = ticks_elapsed(t1, t2); 156050629118SVenkatesh Pallipadi /* Tell the scheduler how much we idled: */ 156150629118SVenkatesh Pallipadi sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); 15624f86d3a8SLen Brown 15634f86d3a8SLen Brown local_irq_enable(); 15644f86d3a8SLen Brown current_thread_info()->status |= TS_POLLING; 15654f86d3a8SLen Brown 15664f86d3a8SLen Brown cx->usage++; 15674f86d3a8SLen Brown 15684f86d3a8SLen Brown 
	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 *
 * Translates the validated ACPI C-state table into cpuidle states on
 * pr->power.dev.  Returns 0 on success, -EINVAL if power setup is not
 * done or no usable state was registered.
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
	/* count tracks how many cpuidle states are actually registered. */
	int i, count = 0;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

	/* ACPI C-state indices start at 1; honor the max_cstate limit. */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		/*
		 * On MP systems without _CST, deeper-than-C1 states are only
		 * usable if the FADT advertises C2-on-MP support.
		 */
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		cpuidle_set_statedata(state, cx);

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		state->exit_latency = cx->latency;
		/* Heuristic: worth entering if we stay ~6x the exit latency. */
		state->target_residency = cx->latency * 6;
		state->power_usage = cx->power;

		state->flags = 0;
		switch (cx->type) {
		case ACPI_STATE_C1:
			state->flags |= CPUIDLE_FLAG_SHALLOW;
			state->enter = acpi_idle_enter_c1;
			/* safe_state = deepest state safe under BM activity */
			dev->safe_state = state;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_BALANCED;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_DEEP;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->flags |= CPUIDLE_FLAG_CHECK_BM;
			state->enter = pr->flags.bm_check ?
				acpi_idle_enter_bm :
				acpi_idle_enter_simple;
			break;
		}

		count++;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

/*
 * cpuidle-flavoured _CST change handler: pause cpuidle, disable this
 * CPU's device, re-probe the C-state tables, rebuild the cpuidle state
 * array, then re-enable.  Returns the cpuidle_enable_device() result.
 */
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int ret;

	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	cpuidle_pause_and_lock();
	cpuidle_disable_device(&pr->power.dev);
	acpi_processor_get_power_info(pr);
	acpi_processor_setup_cpuidle(pr);
	ret = cpuidle_enable_device(&pr->power.dev);
	cpuidle_resume_and_unlock();

	return ret;
}

#endif /* CONFIG_CPU_IDLE */

int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	acpi_status status = 0;
	/* One-time (first CPU) global initialization guard. */
	static int first_run;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;


	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
#if !defined (CONFIG_CPU_IDLE) && defined (CONFIG_SMP)
		register_latency_notifier(&acpi_processor_latency_notifier);
#endif
	}

	if (!pr)
		return -EINVAL;

	/* Hand C-state control to the OS via the FADT-specified SMI port. */
	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if ((pr->flags.power) && (!boot_option_idle_override)) {
#ifdef CONFIG_CPU_IDLE
		acpi_processor_setup_cpuidle(pr);
		pr->power.dev.cpu = pr->id;
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;
#endif

		/* Log the discovered states, e.g. "CPU0 (power states: C1[C1] C2[C2])" */
		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");

#ifndef CONFIG_CPU_IDLE
		/* Legacy path: only CPU0 swaps in the global pm_idle handler. */
		if (pr->id == 0) {
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
#endif
	}

	/* 'power' [R] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  S_IRUGO, acpi_device_dir(device));
	if (!entry)
		return -EIO;
	else {
		entry->proc_fops = &acpi_processor_power_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	return 0;
}

/*
 * Teardown counterpart of acpi_processor_power_init: unregister the
 * cpuidle device (or restore pm_idle on the legacy path) and remove the
 * /proc entry.  Always returns 0.
 */
int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
#ifdef CONFIG_CPU_IDLE
	if ((pr->flags.power) && (!boot_option_idle_override))
		cpuidle_unregister_device(&pr->power.dev);
#endif
	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));

#ifndef CONFIG_CPU_IDLE

	/* Unregister the idle handler when processor #0 is removed. */
	if (pr->id == 0) {
		pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm callback
		 * (pm_idle), Wait for all processors to update cached/local
		 * copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
#ifdef CONFIG_SMP
		unregister_latency_notifier(&acpi_processor_latency_notifier);
#endif
	}
#endif

	return 0;
}