11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * processor_idle - idle state submodule to the ACPI processor driver 31da177e4SLinus Torvalds * 41da177e4SLinus Torvalds * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 51da177e4SLinus Torvalds * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 6c5ab81caSDominik Brodowski * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de> 71da177e4SLinus Torvalds * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 81da177e4SLinus Torvalds * - Added processor hotplug support 902df8b93SVenkatesh Pallipadi * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> 1002df8b93SVenkatesh Pallipadi * - Added support for C3 on SMP 111da177e4SLinus Torvalds * 121da177e4SLinus Torvalds * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 131da177e4SLinus Torvalds * 141da177e4SLinus Torvalds * This program is free software; you can redistribute it and/or modify 151da177e4SLinus Torvalds * it under the terms of the GNU General Public License as published by 161da177e4SLinus Torvalds * the Free Software Foundation; either version 2 of the License, or (at 171da177e4SLinus Torvalds * your option) any later version. 181da177e4SLinus Torvalds * 191da177e4SLinus Torvalds * This program is distributed in the hope that it will be useful, but 201da177e4SLinus Torvalds * WITHOUT ANY WARRANTY; without even the implied warranty of 211da177e4SLinus Torvalds * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 221da177e4SLinus Torvalds * General Public License for more details. 231da177e4SLinus Torvalds * 241da177e4SLinus Torvalds * You should have received a copy of the GNU General Public License along 251da177e4SLinus Torvalds * with this program; if not, write to the Free Software Foundation, Inc., 261da177e4SLinus Torvalds * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
271da177e4SLinus Torvalds * 281da177e4SLinus Torvalds * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 291da177e4SLinus Torvalds */ 301da177e4SLinus Torvalds 311da177e4SLinus Torvalds #include <linux/kernel.h> 321da177e4SLinus Torvalds #include <linux/module.h> 331da177e4SLinus Torvalds #include <linux/init.h> 341da177e4SLinus Torvalds #include <linux/cpufreq.h> 351da177e4SLinus Torvalds #include <linux/proc_fs.h> 361da177e4SLinus Torvalds #include <linux/seq_file.h> 371da177e4SLinus Torvalds #include <linux/acpi.h> 381da177e4SLinus Torvalds #include <linux/dmi.h> 391da177e4SLinus Torvalds #include <linux/moduleparam.h> 404e57b681STim Schmielau #include <linux/sched.h> /* need_resched() */ 41f011e2e2SMark Gross #include <linux/pm_qos_params.h> 42e9e2cdb4SThomas Gleixner #include <linux/clockchips.h> 434f86d3a8SLen Brown #include <linux/cpuidle.h> 441da177e4SLinus Torvalds 453434933bSThomas Gleixner /* 463434933bSThomas Gleixner * Include the apic definitions for x86 to have the APIC timer related defines 473434933bSThomas Gleixner * available also for UP (on SMP it gets magically included via linux/smp.h). 483434933bSThomas Gleixner * asm/acpi.h is not an option, as it would require more include magic. Also 493434933bSThomas Gleixner * creating an empty asm-ia64/apic.h would just trade pest vs. cholera. 
503434933bSThomas Gleixner */ 513434933bSThomas Gleixner #ifdef CONFIG_X86 523434933bSThomas Gleixner #include <asm/apic.h> 533434933bSThomas Gleixner #endif 543434933bSThomas Gleixner 551da177e4SLinus Torvalds #include <asm/io.h> 561da177e4SLinus Torvalds #include <asm/uaccess.h> 571da177e4SLinus Torvalds 581da177e4SLinus Torvalds #include <acpi/acpi_bus.h> 591da177e4SLinus Torvalds #include <acpi/processor.h> 601da177e4SLinus Torvalds 611da177e4SLinus Torvalds #define ACPI_PROCESSOR_COMPONENT 0x01000000 621da177e4SLinus Torvalds #define ACPI_PROCESSOR_CLASS "processor" 631da177e4SLinus Torvalds #define _COMPONENT ACPI_PROCESSOR_COMPONENT 64f52fd66dSLen Brown ACPI_MODULE_NAME("processor_idle"); 651da177e4SLinus Torvalds #define ACPI_PROCESSOR_FILE_POWER "power" 661da177e4SLinus Torvalds #define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000) 672aa44d05SIngo Molnar #define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY) 684f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE 691da177e4SLinus Torvalds #define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */ 701da177e4SLinus Torvalds #define C3_OVERHEAD 4 /* 1us (3.579 ticks per us) */ 71b6835052SAndreas Mohr static void (*pm_idle_save) (void) __read_mostly; 724f86d3a8SLen Brown #else 734f86d3a8SLen Brown #define C2_OVERHEAD 1 /* 1us */ 744f86d3a8SLen Brown #define C3_OVERHEAD 1 /* 1us */ 754f86d3a8SLen Brown #endif 764f86d3a8SLen Brown #define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000)) 771da177e4SLinus Torvalds 784f86d3a8SLen Brown static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER; 795b3f0e6cSVenki Pallipadi #ifdef CONFIG_CPU_IDLE 804f86d3a8SLen Brown module_param(max_cstate, uint, 0000); 815b3f0e6cSVenki Pallipadi #else 825b3f0e6cSVenki Pallipadi module_param(max_cstate, uint, 0644); 835b3f0e6cSVenki Pallipadi #endif 84b6835052SAndreas Mohr static unsigned int nocst __read_mostly; 851da177e4SLinus Torvalds module_param(nocst, uint, 0000); 861da177e4SLinus Torvalds 
874f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE 881da177e4SLinus Torvalds /* 891da177e4SLinus Torvalds * bm_history -- bit-mask with a bit per jiffy of bus-master activity 901da177e4SLinus Torvalds * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms 911da177e4SLinus Torvalds * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms 921da177e4SLinus Torvalds * 100 HZ: 0x0000000F: 4 jiffies = 40ms 931da177e4SLinus Torvalds * reduce history for more aggressive entry into C3 941da177e4SLinus Torvalds */ 95b6835052SAndreas Mohr static unsigned int bm_history __read_mostly = 964be44fcdSLen Brown (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1)); 971da177e4SLinus Torvalds module_param(bm_history, uint, 0644); 984f86d3a8SLen Brown 994f86d3a8SLen Brown static int acpi_processor_set_power_policy(struct acpi_processor *pr); 1004f86d3a8SLen Brown 1014963f620SLen Brown #else /* CONFIG_CPU_IDLE */ 10225de5718SLen Brown static unsigned int latency_factor __read_mostly = 2; 1034963f620SLen Brown module_param(latency_factor, uint, 0644); 1044f86d3a8SLen Brown #endif 1051da177e4SLinus Torvalds 1061da177e4SLinus Torvalds /* 1071da177e4SLinus Torvalds * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3. 1081da177e4SLinus Torvalds * For now disable this. Probably a bug somewhere else. 1091da177e4SLinus Torvalds * 1101da177e4SLinus Torvalds * To skip this limit, boot/load with a large max_cstate limit. 1111da177e4SLinus Torvalds */ 1121855256cSJeff Garzik static int set_max_cstate(const struct dmi_system_id *id) 1131da177e4SLinus Torvalds { 1141da177e4SLinus Torvalds if (max_cstate > ACPI_PROCESSOR_MAX_POWER) 1151da177e4SLinus Torvalds return 0; 1161da177e4SLinus Torvalds 1173d35600aSLen Brown printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate." 
1181da177e4SLinus Torvalds " Override with \"processor.max_cstate=%d\"\n", id->ident, 1193d35600aSLen Brown (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1); 1201da177e4SLinus Torvalds 1213d35600aSLen Brown max_cstate = (long)id->driver_data; 1221da177e4SLinus Torvalds 1231da177e4SLinus Torvalds return 0; 1241da177e4SLinus Torvalds } 1251da177e4SLinus Torvalds 1267ded5689SAshok Raj /* Actually this shouldn't be __cpuinitdata, would be better to fix the 1277ded5689SAshok Raj callers to only run once -AK */ 1287ded5689SAshok Raj static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = { 129335f16beSDavid Shaohua Li { set_max_cstate, "IBM ThinkPad R40e", { 130876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 131f831335dSBartlomiej Swiercz DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1}, 132f831335dSBartlomiej Swiercz { set_max_cstate, "IBM ThinkPad R40e", { 133f831335dSBartlomiej Swiercz DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 134876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1}, 135876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 136876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 137876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void*)1}, 138876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 139876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 140876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void*)1}, 141876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 142876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 143876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void*)1}, 144876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 145876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 146876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void*)1}, 147876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 148876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 
149876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void*)1}, 150876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 151876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 152876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void*)1}, 153876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 154876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 155876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void*)1}, 156876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 157876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 158876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void*)1}, 159876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 160876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 161876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void*)1}, 162876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 163876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 164876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void*)1}, 165876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 166876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 167876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void*)1}, 168876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 169876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 170876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void*)1}, 171876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 172876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 173876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void*)1}, 174876c184bSThomas Rosner { set_max_cstate, "IBM ThinkPad R40e", { 175876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 176876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void*)1}, 177335f16beSDavid Shaohua Li { set_max_cstate, "Medion 41700", { 
178876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 179876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1}, 180335f16beSDavid Shaohua Li { set_max_cstate, "Clevo 5600D", { 181876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 182876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, 183335f16beSDavid Shaohua Li (void *)2}, 1841da177e4SLinus Torvalds {}, 1851da177e4SLinus Torvalds }; 1861da177e4SLinus Torvalds 1874be44fcdSLen Brown static inline u32 ticks_elapsed(u32 t1, u32 t2) 1881da177e4SLinus Torvalds { 1891da177e4SLinus Torvalds if (t2 >= t1) 1901da177e4SLinus Torvalds return (t2 - t1); 191cee324b1SAlexey Starikovskiy else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER)) 1921da177e4SLinus Torvalds return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF); 1931da177e4SLinus Torvalds else 1941da177e4SLinus Torvalds return ((0xFFFFFFFF - t1) + t2); 1951da177e4SLinus Torvalds } 1961da177e4SLinus Torvalds 1974f86d3a8SLen Brown static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2) 1984f86d3a8SLen Brown { 1994f86d3a8SLen Brown if (t2 >= t1) 2004f86d3a8SLen Brown return PM_TIMER_TICKS_TO_US(t2 - t1); 2014f86d3a8SLen Brown else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER)) 2024f86d3a8SLen Brown return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF); 2034f86d3a8SLen Brown else 2044f86d3a8SLen Brown return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2); 2054f86d3a8SLen Brown } 2064f86d3a8SLen Brown 2072e906655Svenkatesh.pallipadi@intel.com /* 2082e906655Svenkatesh.pallipadi@intel.com * Callers should disable interrupts before the call and enable 2092e906655Svenkatesh.pallipadi@intel.com * interrupts after return. 
2102e906655Svenkatesh.pallipadi@intel.com */ 211ddc081a1SVenkatesh Pallipadi static void acpi_safe_halt(void) 212ddc081a1SVenkatesh Pallipadi { 213ddc081a1SVenkatesh Pallipadi current_thread_info()->status &= ~TS_POLLING; 214ddc081a1SVenkatesh Pallipadi /* 215ddc081a1SVenkatesh Pallipadi * TS_POLLING-cleared state must be visible before we 216ddc081a1SVenkatesh Pallipadi * test NEED_RESCHED: 217ddc081a1SVenkatesh Pallipadi */ 218ddc081a1SVenkatesh Pallipadi smp_mb(); 21971e93d15SVenki Pallipadi if (!need_resched()) { 220ddc081a1SVenkatesh Pallipadi safe_halt(); 22171e93d15SVenki Pallipadi local_irq_disable(); 22271e93d15SVenki Pallipadi } 223ddc081a1SVenkatesh Pallipadi current_thread_info()->status |= TS_POLLING; 224ddc081a1SVenkatesh Pallipadi } 225ddc081a1SVenkatesh Pallipadi 2264f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE 2274f86d3a8SLen Brown 2281da177e4SLinus Torvalds static void 2294be44fcdSLen Brown acpi_processor_power_activate(struct acpi_processor *pr, 2301da177e4SLinus Torvalds struct acpi_processor_cx *new) 2311da177e4SLinus Torvalds { 2321da177e4SLinus Torvalds struct acpi_processor_cx *old; 2331da177e4SLinus Torvalds 2341da177e4SLinus Torvalds if (!pr || !new) 2351da177e4SLinus Torvalds return; 2361da177e4SLinus Torvalds 2371da177e4SLinus Torvalds old = pr->power.state; 2381da177e4SLinus Torvalds 2391da177e4SLinus Torvalds if (old) 2401da177e4SLinus Torvalds old->promotion.count = 0; 2411da177e4SLinus Torvalds new->demotion.count = 0; 2421da177e4SLinus Torvalds 2431da177e4SLinus Torvalds /* Cleanup from old state. 
*/ 2441da177e4SLinus Torvalds if (old) { 2451da177e4SLinus Torvalds switch (old->type) { 2461da177e4SLinus Torvalds case ACPI_STATE_C3: 2471da177e4SLinus Torvalds /* Disable bus master reload */ 24802df8b93SVenkatesh Pallipadi if (new->type != ACPI_STATE_C3 && pr->flags.bm_check) 249d8c71b6dSBob Moore acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); 2501da177e4SLinus Torvalds break; 2511da177e4SLinus Torvalds } 2521da177e4SLinus Torvalds } 2531da177e4SLinus Torvalds 2541da177e4SLinus Torvalds /* Prepare to use new state. */ 2551da177e4SLinus Torvalds switch (new->type) { 2561da177e4SLinus Torvalds case ACPI_STATE_C3: 2571da177e4SLinus Torvalds /* Enable bus master reload */ 25802df8b93SVenkatesh Pallipadi if (old->type != ACPI_STATE_C3 && pr->flags.bm_check) 259d8c71b6dSBob Moore acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1); 2601da177e4SLinus Torvalds break; 2611da177e4SLinus Torvalds } 2621da177e4SLinus Torvalds 2631da177e4SLinus Torvalds pr->power.state = new; 2641da177e4SLinus Torvalds 2651da177e4SLinus Torvalds return; 2661da177e4SLinus Torvalds } 2671da177e4SLinus Torvalds 26802df8b93SVenkatesh Pallipadi static atomic_t c3_cpu_count; 26902df8b93SVenkatesh Pallipadi 270991528d7SVenkatesh Pallipadi /* Common C-state entry for C2, C3, .. 
*/ 271991528d7SVenkatesh Pallipadi static void acpi_cstate_enter(struct acpi_processor_cx *cstate) 272991528d7SVenkatesh Pallipadi { 273bc71bec9Svenkatesh.pallipadi@intel.com if (cstate->entry_method == ACPI_CSTATE_FFH) { 274991528d7SVenkatesh Pallipadi /* Call into architectural FFH based C-state */ 275991528d7SVenkatesh Pallipadi acpi_processor_ffh_cstate_enter(cstate); 276991528d7SVenkatesh Pallipadi } else { 277991528d7SVenkatesh Pallipadi int unused; 278991528d7SVenkatesh Pallipadi /* IO port based C-state */ 279991528d7SVenkatesh Pallipadi inb(cstate->address); 280991528d7SVenkatesh Pallipadi /* Dummy wait op - must do something useless after P_LVL2 read 281991528d7SVenkatesh Pallipadi because chipsets cannot guarantee that STPCLK# signal 282991528d7SVenkatesh Pallipadi gets asserted in time to freeze execution properly. */ 283cee324b1SAlexey Starikovskiy unused = inl(acpi_gbl_FADT.xpm_timer_block.address); 284991528d7SVenkatesh Pallipadi } 285991528d7SVenkatesh Pallipadi } 2864f86d3a8SLen Brown #endif /* !CONFIG_CPU_IDLE */ 287991528d7SVenkatesh Pallipadi 288169a0abbSThomas Gleixner #ifdef ARCH_APICTIMER_STOPS_ON_C3 289169a0abbSThomas Gleixner 290169a0abbSThomas Gleixner /* 291169a0abbSThomas Gleixner * Some BIOS implementations switch to C3 in the published C2 state. 292296d93cdSLinus Torvalds * This seems to be a common problem on AMD boxen, but other vendors 293296d93cdSLinus Torvalds * are affected too. We pick the most conservative approach: we assume 294296d93cdSLinus Torvalds * that the local APIC stops in both C2 and C3. 295169a0abbSThomas Gleixner */ 296169a0abbSThomas Gleixner static void acpi_timer_check_state(int state, struct acpi_processor *pr, 297169a0abbSThomas Gleixner struct acpi_processor_cx *cx) 298169a0abbSThomas Gleixner { 299169a0abbSThomas Gleixner struct acpi_processor_power *pwr = &pr->power; 300e585bef8SThomas Gleixner u8 type = local_apic_timer_c2_ok ? 
ACPI_STATE_C3 : ACPI_STATE_C2; 301169a0abbSThomas Gleixner 302169a0abbSThomas Gleixner /* 303169a0abbSThomas Gleixner * Check, if one of the previous states already marked the lapic 304169a0abbSThomas Gleixner * unstable 305169a0abbSThomas Gleixner */ 306169a0abbSThomas Gleixner if (pwr->timer_broadcast_on_state < state) 307169a0abbSThomas Gleixner return; 308169a0abbSThomas Gleixner 309e585bef8SThomas Gleixner if (cx->type >= type) 310169a0abbSThomas Gleixner pr->power.timer_broadcast_on_state = state; 311169a0abbSThomas Gleixner } 312169a0abbSThomas Gleixner 313169a0abbSThomas Gleixner static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) 314169a0abbSThomas Gleixner { 315e9e2cdb4SThomas Gleixner unsigned long reason; 316e9e2cdb4SThomas Gleixner 317e9e2cdb4SThomas Gleixner reason = pr->power.timer_broadcast_on_state < INT_MAX ? 318e9e2cdb4SThomas Gleixner CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF; 319e9e2cdb4SThomas Gleixner 320e9e2cdb4SThomas Gleixner clockevents_notify(reason, &pr->id); 321e9e2cdb4SThomas Gleixner } 322e9e2cdb4SThomas Gleixner 323e9e2cdb4SThomas Gleixner /* Power(C) State timer broadcast control */ 324e9e2cdb4SThomas Gleixner static void acpi_state_timer_broadcast(struct acpi_processor *pr, 325e9e2cdb4SThomas Gleixner struct acpi_processor_cx *cx, 326e9e2cdb4SThomas Gleixner int broadcast) 327e9e2cdb4SThomas Gleixner { 328e9e2cdb4SThomas Gleixner int state = cx - pr->power.states; 329e9e2cdb4SThomas Gleixner 330e9e2cdb4SThomas Gleixner if (state >= pr->power.timer_broadcast_on_state) { 331e9e2cdb4SThomas Gleixner unsigned long reason; 332e9e2cdb4SThomas Gleixner 333e9e2cdb4SThomas Gleixner reason = broadcast ? 
CLOCK_EVT_NOTIFY_BROADCAST_ENTER : 334e9e2cdb4SThomas Gleixner CLOCK_EVT_NOTIFY_BROADCAST_EXIT; 335e9e2cdb4SThomas Gleixner clockevents_notify(reason, &pr->id); 336e9e2cdb4SThomas Gleixner } 337169a0abbSThomas Gleixner } 338169a0abbSThomas Gleixner 339169a0abbSThomas Gleixner #else 340169a0abbSThomas Gleixner 341169a0abbSThomas Gleixner static void acpi_timer_check_state(int state, struct acpi_processor *pr, 342169a0abbSThomas Gleixner struct acpi_processor_cx *cstate) { } 343169a0abbSThomas Gleixner static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { } 344e9e2cdb4SThomas Gleixner static void acpi_state_timer_broadcast(struct acpi_processor *pr, 345e9e2cdb4SThomas Gleixner struct acpi_processor_cx *cx, 346e9e2cdb4SThomas Gleixner int broadcast) 347e9e2cdb4SThomas Gleixner { 348e9e2cdb4SThomas Gleixner } 349169a0abbSThomas Gleixner 350169a0abbSThomas Gleixner #endif 351169a0abbSThomas Gleixner 352b04e7bdbSThomas Gleixner /* 353b04e7bdbSThomas Gleixner * Suspend / resume control 354b04e7bdbSThomas Gleixner */ 355b04e7bdbSThomas Gleixner static int acpi_idle_suspend; 356b04e7bdbSThomas Gleixner 357b04e7bdbSThomas Gleixner int acpi_processor_suspend(struct acpi_device * device, pm_message_t state) 358b04e7bdbSThomas Gleixner { 359b04e7bdbSThomas Gleixner acpi_idle_suspend = 1; 360b04e7bdbSThomas Gleixner return 0; 361b04e7bdbSThomas Gleixner } 362b04e7bdbSThomas Gleixner 363b04e7bdbSThomas Gleixner int acpi_processor_resume(struct acpi_device * device) 364b04e7bdbSThomas Gleixner { 365b04e7bdbSThomas Gleixner acpi_idle_suspend = 0; 366b04e7bdbSThomas Gleixner return 0; 367b04e7bdbSThomas Gleixner } 368b04e7bdbSThomas Gleixner 36961331168SPavel Machek #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86) 370ddb25f9aSAndi Kleen static int tsc_halts_in_c(int state) 371ddb25f9aSAndi Kleen { 372ddb25f9aSAndi Kleen switch (boot_cpu_data.x86_vendor) { 373ddb25f9aSAndi Kleen case X86_VENDOR_AMD: 374ddb25f9aSAndi Kleen /* 375ddb25f9aSAndi Kleen * AMD 
Fam10h TSC will tick in all 376ddb25f9aSAndi Kleen * C/P/S0/S1 states when this bit is set. 377ddb25f9aSAndi Kleen */ 378ddb25f9aSAndi Kleen if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 379ddb25f9aSAndi Kleen return 0; 380ddb25f9aSAndi Kleen /*FALL THROUGH*/ 381ddb25f9aSAndi Kleen case X86_VENDOR_INTEL: 382ddb25f9aSAndi Kleen /* Several cases known where TSC halts in C2 too */ 383ddb25f9aSAndi Kleen default: 384ddb25f9aSAndi Kleen return state > ACPI_STATE_C1; 385ddb25f9aSAndi Kleen } 386ddb25f9aSAndi Kleen } 387ddb25f9aSAndi Kleen #endif 388ddb25f9aSAndi Kleen 3894f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE 3901da177e4SLinus Torvalds static void acpi_processor_idle(void) 3911da177e4SLinus Torvalds { 3921da177e4SLinus Torvalds struct acpi_processor *pr = NULL; 3931da177e4SLinus Torvalds struct acpi_processor_cx *cx = NULL; 3941da177e4SLinus Torvalds struct acpi_processor_cx *next_state = NULL; 3951da177e4SLinus Torvalds int sleep_ticks = 0; 3961da177e4SLinus Torvalds u32 t1, t2 = 0; 3971da177e4SLinus Torvalds 3981da177e4SLinus Torvalds /* 3991da177e4SLinus Torvalds * Interrupts must be disabled during bus mastering calculations and 4001da177e4SLinus Torvalds * for C2/C3 transitions. 
4011da177e4SLinus Torvalds */ 4021da177e4SLinus Torvalds local_irq_disable(); 4031da177e4SLinus Torvalds 404d5a3d32aSVenkatesh Pallipadi pr = processors[smp_processor_id()]; 405d5a3d32aSVenkatesh Pallipadi if (!pr) { 406d5a3d32aSVenkatesh Pallipadi local_irq_enable(); 407d5a3d32aSVenkatesh Pallipadi return; 408d5a3d32aSVenkatesh Pallipadi } 409d5a3d32aSVenkatesh Pallipadi 4101da177e4SLinus Torvalds /* 4111da177e4SLinus Torvalds * Check whether we truly need to go idle, or should 4121da177e4SLinus Torvalds * reschedule: 4131da177e4SLinus Torvalds */ 4141da177e4SLinus Torvalds if (unlikely(need_resched())) { 4151da177e4SLinus Torvalds local_irq_enable(); 4161da177e4SLinus Torvalds return; 4171da177e4SLinus Torvalds } 4181da177e4SLinus Torvalds 4191da177e4SLinus Torvalds cx = pr->power.state; 420b04e7bdbSThomas Gleixner if (!cx || acpi_idle_suspend) { 4217f424a8bSPeter Zijlstra if (pm_idle_save) { 4227f424a8bSPeter Zijlstra pm_idle_save(); /* enables IRQs */ 4237f424a8bSPeter Zijlstra } else { 42464c7c8f8SNick Piggin acpi_safe_halt(); 4252e906655Svenkatesh.pallipadi@intel.com local_irq_enable(); 4267f424a8bSPeter Zijlstra } 42771e93d15SVenki Pallipadi 42864c7c8f8SNick Piggin return; 42964c7c8f8SNick Piggin } 4301da177e4SLinus Torvalds 4311da177e4SLinus Torvalds /* 4321da177e4SLinus Torvalds * Check BM Activity 4331da177e4SLinus Torvalds * ----------------- 4341da177e4SLinus Torvalds * Check for bus mastering activity (if required), record, and check 4351da177e4SLinus Torvalds * for demotion. 
4361da177e4SLinus Torvalds */ 4371da177e4SLinus Torvalds if (pr->flags.bm_check) { 4381da177e4SLinus Torvalds u32 bm_status = 0; 4391da177e4SLinus Torvalds unsigned long diff = jiffies - pr->power.bm_check_timestamp; 4401da177e4SLinus Torvalds 441c5ab81caSDominik Brodowski if (diff > 31) 442c5ab81caSDominik Brodowski diff = 31; 4431da177e4SLinus Torvalds 444c5ab81caSDominik Brodowski pr->power.bm_activity <<= diff; 4451da177e4SLinus Torvalds 446d8c71b6dSBob Moore acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); 4471da177e4SLinus Torvalds if (bm_status) { 448c5ab81caSDominik Brodowski pr->power.bm_activity |= 0x1; 449d8c71b6dSBob Moore acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1); 4501da177e4SLinus Torvalds } 4511da177e4SLinus Torvalds /* 4521da177e4SLinus Torvalds * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect 4531da177e4SLinus Torvalds * the true state of bus mastering activity; forcing us to 4541da177e4SLinus Torvalds * manually check the BMIDEA bit of each IDE channel. 4551da177e4SLinus Torvalds */ 4561da177e4SLinus Torvalds else if (errata.piix4.bmisx) { 4571da177e4SLinus Torvalds if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) 4581da177e4SLinus Torvalds || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01)) 459c5ab81caSDominik Brodowski pr->power.bm_activity |= 0x1; 4601da177e4SLinus Torvalds } 4611da177e4SLinus Torvalds 4621da177e4SLinus Torvalds pr->power.bm_check_timestamp = jiffies; 4631da177e4SLinus Torvalds 4641da177e4SLinus Torvalds /* 465c4a001b1SDominik Brodowski * If bus mastering is or was active this jiffy, demote 4661da177e4SLinus Torvalds * to avoid a faulty transition. Note that the processor 4671da177e4SLinus Torvalds * won't enter a low-power state during this call (to this 468c4a001b1SDominik Brodowski * function) but should upon the next. 
4691da177e4SLinus Torvalds * 4701da177e4SLinus Torvalds * TBD: A better policy might be to fallback to the demotion 4711da177e4SLinus Torvalds * state (use it for this quantum only) istead of 4721da177e4SLinus Torvalds * demoting -- and rely on duration as our sole demotion 4731da177e4SLinus Torvalds * qualification. This may, however, introduce DMA 4741da177e4SLinus Torvalds * issues (e.g. floppy DMA transfer overrun/underrun). 4751da177e4SLinus Torvalds */ 476c4a001b1SDominik Brodowski if ((pr->power.bm_activity & 0x1) && 477c4a001b1SDominik Brodowski cx->demotion.threshold.bm) { 4781da177e4SLinus Torvalds local_irq_enable(); 4791da177e4SLinus Torvalds next_state = cx->demotion.state; 4801da177e4SLinus Torvalds goto end; 4811da177e4SLinus Torvalds } 4821da177e4SLinus Torvalds } 4831da177e4SLinus Torvalds 4844c033552SVenkatesh Pallipadi #ifdef CONFIG_HOTPLUG_CPU 4854c033552SVenkatesh Pallipadi /* 4864c033552SVenkatesh Pallipadi * Check for P_LVL2_UP flag before entering C2 and above on 4874c033552SVenkatesh Pallipadi * an SMP system. We do it here instead of doing it at _CST/P_LVL 4884c033552SVenkatesh Pallipadi * detection phase, to work cleanly with logical CPU hotplug. 4894c033552SVenkatesh Pallipadi */ 4904c033552SVenkatesh Pallipadi if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && 491cee324b1SAlexey Starikovskiy !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) 4921e483969SDavid Shaohua Li cx = &pr->power.states[ACPI_STATE_C1]; 4934c033552SVenkatesh Pallipadi #endif 4941e483969SDavid Shaohua Li 4951da177e4SLinus Torvalds /* 4961da177e4SLinus Torvalds * Sleep: 4971da177e4SLinus Torvalds * ------ 4981da177e4SLinus Torvalds * Invoke the current Cx state to put the processor to sleep. 
4991da177e4SLinus Torvalds */ 5002a298a35SNick Piggin if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) { 501495ab9c0SAndi Kleen current_thread_info()->status &= ~TS_POLLING; 5020888f06aSIngo Molnar /* 5030888f06aSIngo Molnar * TS_POLLING-cleared state must be visible before we 5040888f06aSIngo Molnar * test NEED_RESCHED: 5050888f06aSIngo Molnar */ 5060888f06aSIngo Molnar smp_mb(); 5072a298a35SNick Piggin if (need_resched()) { 508495ab9c0SAndi Kleen current_thread_info()->status |= TS_POLLING; 509af2eb17bSLinus Torvalds local_irq_enable(); 5102a298a35SNick Piggin return; 5112a298a35SNick Piggin } 5122a298a35SNick Piggin } 5132a298a35SNick Piggin 5141da177e4SLinus Torvalds switch (cx->type) { 5151da177e4SLinus Torvalds 5161da177e4SLinus Torvalds case ACPI_STATE_C1: 5171da177e4SLinus Torvalds /* 5181da177e4SLinus Torvalds * Invoke C1. 5191da177e4SLinus Torvalds * Use the appropriate idle routine, the one that would 5201da177e4SLinus Torvalds * be used without acpi C-states. 5211da177e4SLinus Torvalds */ 5227f424a8bSPeter Zijlstra if (pm_idle_save) { 5237f424a8bSPeter Zijlstra pm_idle_save(); /* enables IRQs */ 5247f424a8bSPeter Zijlstra } else { 52564c7c8f8SNick Piggin acpi_safe_halt(); 5267f424a8bSPeter Zijlstra local_irq_enable(); 5277f424a8bSPeter Zijlstra } 52864c7c8f8SNick Piggin 5291da177e4SLinus Torvalds /* 5301da177e4SLinus Torvalds * TBD: Can't get time duration while in C1, as resumes 5311da177e4SLinus Torvalds * go to an ISR rather than here. Need to instrument 5321da177e4SLinus Torvalds * base interrupt handler. 5332aa44d05SIngo Molnar * 5342aa44d05SIngo Molnar * Note: the TSC better not stop in C1, sched_clock() will 5352aa44d05SIngo Molnar * skew otherwise. 
	 */
		/*
		 * C1 residency cannot be read from the PM timer here; a
		 * sentinel is stored instead.  NOTE(review): the accounting
		 * after the switch explicitly skips C1, so this value is
		 * never added to cx->time - confirm that is the intent.
		 */
		sleep_ticks = 0xFFFFFFFF;

		break;

	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		/* Invoke C2 */
		acpi_state_timer_broadcast(pr, cx, 1);
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
		/* TSC halts in C2, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C2))
			mark_tsc_unstable("possible TSC halt in C2");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);

		/*
		 * Tell the scheduler how much we idled: sleep_ticks is in
		 * PM-timer ticks; PM_TIMER_TICK_NS converts to nanoseconds.
		 */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;

		/* Re-arm the idle-poll flag cleared before entering idle */
		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	case ACPI_STATE_C3:
		acpi_unlazy_tlb(smp_processor_id());
		/*
		 * Must be done before busmaster disable as we might
		 * need to access HPET !
		 */
		acpi_state_timer_broadcast(pr, cx, 1);
		/*
		 * disable bus master
		 * bm_check implies we need ARB_DIS
		 * !bm_check implies we need cache flush
		 * bm_control implies whether we can do ARB_DIS
		 *
		 * That leaves a case where bm_check is set and bm_control is
		 * not set. In that case we cannot do much, we enter C3
		 * without doing anything.
		 */
		if (pr->flags.bm_check && pr->flags.bm_control) {
			if (atomic_inc_return(&c3_cpu_count) ==
			    num_online_cpus()) {
				/*
				 * All CPUs are trying to go to C3
				 * Disable bus master arbitration
				 */
				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
			}
		} else if (!pr->flags.bm_check) {
			/* SMP with no shared cache... Invalidate cache */
			ACPI_FLUSH_CPU_CACHE();
		}

		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Invoke C3 */
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		if (pr->flags.bm_check && pr->flags.bm_control) {
			/* Enable bus master arbitration */
			atomic_dec(&c3_cpu_count);
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
		/* TSC halts in C3, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C3))
			mark_tsc_unstable("TSC halts in C3");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);
		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	default:
		local_irq_enable();
		return;
	}
	/* Bookkeeping: C1 is excluded because its residency is a sentinel */
	cx->usage++;
	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
		cx->time += sleep_ticks;

	next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
	/* Don't do promotion/demotion */
	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
		next_state = cx;
		goto end;
	}
#endif

	/*
	 * Promotion?
	 * ----------
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that bus
	 * mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks &&
		    cx->promotion.state->latency <=
		    pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >=
			    cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					/*
					 * Only promote if no bus-master
					 * activity above the threshold mask
					 * was observed.
					 */
					if (!
					    (pr->power.bm_activity & cx->
					     promotion.threshold.bm)) {
						next_state =
						    cx->promotion.state;
						goto end;
					}
				} else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}

	/*
	 * Demotion?
	 * ---------
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}

end:
	/*
	 * Demote if current state exceeds max_cstate
	 * or if the latency of the current state is unacceptable
	 */
	if ((pr->power.state - pr->power.states) > max_cstate ||
	    pr->power.state->latency >
	    pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}

	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);
}

/*
 * acpi_processor_set_power_policy - install the default promotion/demotion
 * links between the valid C-states of @pr.
 *
 * Returns 0 on success, -EINVAL if @pr is NULL, or -ENODEV if no valid
 * C-state was discovered.
 */
static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;


	if (!pr)
		return -EINVAL;

	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3.  We're favoring C2 for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */

	/* startup state: the first valid entry becomes the initial state */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		/*
		 * NOTE(review): the loop breaks on the first valid state,
		 * so this !state_is_set test is always true here.
		 */
		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return -ENODEV;

	/* demotion: each valid state demotes to the previous valid one */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}

	/* promotion: each valid state promotes to the next valid one */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			/* promote into C2+ more conservatively than into C1/C2 */
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return 0;
}
#endif /* !CONFIG_CPU_IDLE */

/*
 * acpi_processor_get_power_info_fadt - derive C2/C3 addresses and latencies
 * from the processor's P_BLK and the FADT (legacy, pre-_CST path).
 *
 * Returns 0 on success, -EINVAL if @pr is NULL, -ENODEV if there is no
 * P_BLK (or C2+ is unusable on this SMP configuration).
 */
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}

/*
 * acpi_processor_get_power_info_default - guarantee a usable C1 entry.
 *
 * Some buggy BIOSes omit C1 from _CST; every processor must support C1,
 * so force a HALT-based C1 here if none was discovered.  C0 is marked
 * valid only because it is a filler slot in the states[] array.
 * Always returns 0.
 */
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}

/*
 * acpi_processor_get_power_info_cst - populate pr->power.states[] from the
 * ACPI _CST object.
 *
 * Returns 0 on success, -ENODEV if _CST is disabled ("nocst") or absent,
 * -EFAULT if the returned package is malformed or yields fewer than two
 * usable states.
 */
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;


	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	/* element 0 is the advertised number of C-state packages */
	count = cst->package.elements[0].integer.value;

	/* Validate number of power states.
	 */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	/*
	 * Walk each _CST entry; malformed or unsupported entries are
	 * silently skipped via "continue" rather than failing the whole
	 * evaluation.
	 */
	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		/* a _CST entry is { register, type, latency, power } */
		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		/* only SystemIO and FixedHardware entry methods are handled */
		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer...
		 */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
			    (pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		/* C1 needs no further validation */
		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}

/*
 * acpi_processor_power_verify_c2 - mark a C2 state valid if it has an
 * entry address and its latency is within ACPI_PROCESSOR_MAX_C2_LATENCY.
 */
static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{

	if (!cx->address)
		return;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}

/*
 * acpi_processor_power_verify_c3 - validate a C3 state against latency,
 * chipset errata and bus-master-control constraints, marking it valid
 * if usable.
 */
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	/* cached across calls; see the same-across-CPUs assumption below */
	static int bm_check_flag;


	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU  */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported on when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}

/*
 * acpi_processor_power_verify - validate each discovered C-state and
 * return how many are usable; also records per-state local-APIC-timer
 * broadcast requirements.
 */
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			/* C1 is always usable */
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	acpi_propagate_timer_broadcast(pr);

	return (working);
}

/*
 * acpi_processor_get_power_info - discover, validate and (without
 * CONFIG_CPU_IDLE) set policy for this processor's C-states.
 *
 * Prefers _CST and falls back to the P_BLK/FADT method only when _CST
 * is absent.  Returns 0 on success or a negative errno from the
 * discovery/policy helpers.
 */
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;


	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

#ifndef CONFIG_CPU_IDLE
	/*
	 * Set Default Policy
	 * ------------------
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return result;
#endif

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}

/* /proc seq_file show handler: dumps C-state status for one processor */
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;


	if (!pr)
		goto end;

	seq_printf(seq, "active state: C%zd\n"
		   "max_cstate: C%d\n"
		   "bus master activity: %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));

	seq_puts(seq, "states:\n");

	/* one line per discovered state; '*' marks the active one */
	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, " %cC%d: ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

      end:
	return 0;
}

/* /proc open handler: bind the seq_file to this processor's proc entry */
static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

/* file_operations for the read-only /proc power-state file */
static const struct file_operations acpi_processor_power_fops = {
	.owner = THIS_MODULE,
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifndef CONFIG_CPU_IDLE

/*
 * acpi_processor_cst_has_changed - re-evaluate _CST after a change
 * notification (legacy, non-CPU_IDLE path).  Returns 0 or -EINVAL/-ENODEV
 * when re-evaluation is not possible.
 */
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int result = 0;

	if (boot_option_idle_override)
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/* Fall back to the default idle loop */
	pm_idle = pm_idle_save;
	synchronize_sched();	/* Relies on interrupts forcing exit from idle.
*/ 13184f86d3a8SLen Brown 13194f86d3a8SLen Brown pr->flags.power = 0; 13204f86d3a8SLen Brown result = acpi_processor_get_power_info(pr); 13214f86d3a8SLen Brown if ((pr->flags.power == 1) && (pr->flags.power_setup_done)) 13224f86d3a8SLen Brown pm_idle = acpi_processor_idle; 13234f86d3a8SLen Brown 13244f86d3a8SLen Brown return result; 13254f86d3a8SLen Brown } 13264f86d3a8SLen Brown 13271fec74a9SAndrew Morton #ifdef CONFIG_SMP 13285c87579eSArjan van de Ven static void smp_callback(void *v) 13295c87579eSArjan van de Ven { 13305c87579eSArjan van de Ven /* we already woke the CPU up, nothing more to do */ 13315c87579eSArjan van de Ven } 13325c87579eSArjan van de Ven 13335c87579eSArjan van de Ven /* 13345c87579eSArjan van de Ven * This function gets called when a part of the kernel has a new latency 13355c87579eSArjan van de Ven * requirement. This means we need to get all processors out of their C-state, 13365c87579eSArjan van de Ven * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that 13375c87579eSArjan van de Ven * wakes them all right up. 
13385c87579eSArjan van de Ven */ 13395c87579eSArjan van de Ven static int acpi_processor_latency_notify(struct notifier_block *b, 13405c87579eSArjan van de Ven unsigned long l, void *v) 13415c87579eSArjan van de Ven { 1342*8691e5a8SJens Axboe smp_call_function(smp_callback, NULL, 1); 13435c87579eSArjan van de Ven return NOTIFY_OK; 13445c87579eSArjan van de Ven } 13455c87579eSArjan van de Ven 13465c87579eSArjan van de Ven static struct notifier_block acpi_processor_latency_notifier = { 13475c87579eSArjan van de Ven .notifier_call = acpi_processor_latency_notify, 13485c87579eSArjan van de Ven }; 13494f86d3a8SLen Brown 13501fec74a9SAndrew Morton #endif 13515c87579eSArjan van de Ven 13524f86d3a8SLen Brown #else /* CONFIG_CPU_IDLE */ 13534f86d3a8SLen Brown 13544f86d3a8SLen Brown /** 13554f86d3a8SLen Brown * acpi_idle_bm_check - checks if bus master activity was detected 13564f86d3a8SLen Brown */ 13574f86d3a8SLen Brown static int acpi_idle_bm_check(void) 13584f86d3a8SLen Brown { 13594f86d3a8SLen Brown u32 bm_status = 0; 13604f86d3a8SLen Brown 13614f86d3a8SLen Brown acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); 13624f86d3a8SLen Brown if (bm_status) 13634f86d3a8SLen Brown acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1); 13644f86d3a8SLen Brown /* 13654f86d3a8SLen Brown * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect 13664f86d3a8SLen Brown * the true state of bus mastering activity; forcing us to 13674f86d3a8SLen Brown * manually check the BMIDEA bit of each IDE channel. 
13684f86d3a8SLen Brown */ 13694f86d3a8SLen Brown else if (errata.piix4.bmisx) { 13704f86d3a8SLen Brown if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) 13714f86d3a8SLen Brown || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01)) 13724f86d3a8SLen Brown bm_status = 1; 13734f86d3a8SLen Brown } 13744f86d3a8SLen Brown return bm_status; 13754f86d3a8SLen Brown } 13764f86d3a8SLen Brown 13774f86d3a8SLen Brown /** 13784f86d3a8SLen Brown * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state 13794f86d3a8SLen Brown * @pr: the processor 13804f86d3a8SLen Brown * @target: the new target state 13814f86d3a8SLen Brown */ 13824f86d3a8SLen Brown static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr, 13834f86d3a8SLen Brown struct acpi_processor_cx *target) 13844f86d3a8SLen Brown { 13854f86d3a8SLen Brown if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) { 13864f86d3a8SLen Brown acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); 13874f86d3a8SLen Brown pr->flags.bm_rld_set = 0; 13884f86d3a8SLen Brown } 13894f86d3a8SLen Brown 13904f86d3a8SLen Brown if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) { 13914f86d3a8SLen Brown acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1); 13924f86d3a8SLen Brown pr->flags.bm_rld_set = 1; 13934f86d3a8SLen Brown } 13944f86d3a8SLen Brown } 13954f86d3a8SLen Brown 13964f86d3a8SLen Brown /** 13974f86d3a8SLen Brown * acpi_idle_do_entry - a helper function that does C2 and C3 type entry 13984f86d3a8SLen Brown * @cx: cstate data 1399bc71bec9Svenkatesh.pallipadi@intel.com * 1400bc71bec9Svenkatesh.pallipadi@intel.com * Caller disables interrupt before call and enables interrupt after return. 
14014f86d3a8SLen Brown */ 14024f86d3a8SLen Brown static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) 14034f86d3a8SLen Brown { 1404bc71bec9Svenkatesh.pallipadi@intel.com if (cx->entry_method == ACPI_CSTATE_FFH) { 14054f86d3a8SLen Brown /* Call into architectural FFH based C-state */ 14064f86d3a8SLen Brown acpi_processor_ffh_cstate_enter(cx); 1407bc71bec9Svenkatesh.pallipadi@intel.com } else if (cx->entry_method == ACPI_CSTATE_HALT) { 1408bc71bec9Svenkatesh.pallipadi@intel.com acpi_safe_halt(); 14094f86d3a8SLen Brown } else { 14104f86d3a8SLen Brown int unused; 14114f86d3a8SLen Brown /* IO port based C-state */ 14124f86d3a8SLen Brown inb(cx->address); 14134f86d3a8SLen Brown /* Dummy wait op - must do something useless after P_LVL2 read 14144f86d3a8SLen Brown because chipsets cannot guarantee that STPCLK# signal 14154f86d3a8SLen Brown gets asserted in time to freeze execution properly. */ 14164f86d3a8SLen Brown unused = inl(acpi_gbl_FADT.xpm_timer_block.address); 14174f86d3a8SLen Brown } 14184f86d3a8SLen Brown } 14194f86d3a8SLen Brown 14204f86d3a8SLen Brown /** 14214f86d3a8SLen Brown * acpi_idle_enter_c1 - enters an ACPI C1 state-type 14224f86d3a8SLen Brown * @dev: the target CPU 14234f86d3a8SLen Brown * @state: the state data 14244f86d3a8SLen Brown * 14254f86d3a8SLen Brown * This is equivalent to the HALT instruction. 
14264f86d3a8SLen Brown */ 14274f86d3a8SLen Brown static int acpi_idle_enter_c1(struct cpuidle_device *dev, 14284f86d3a8SLen Brown struct cpuidle_state *state) 14294f86d3a8SLen Brown { 14309b12e18cSvenkatesh.pallipadi@intel.com u32 t1, t2; 14314f86d3a8SLen Brown struct acpi_processor *pr; 14324f86d3a8SLen Brown struct acpi_processor_cx *cx = cpuidle_get_statedata(state); 14339b12e18cSvenkatesh.pallipadi@intel.com 14344f86d3a8SLen Brown pr = processors[smp_processor_id()]; 14354f86d3a8SLen Brown 14364f86d3a8SLen Brown if (unlikely(!pr)) 14374f86d3a8SLen Brown return 0; 14384f86d3a8SLen Brown 14392e906655Svenkatesh.pallipadi@intel.com local_irq_disable(); 1440b077fbadSVenkatesh Pallipadi 1441b077fbadSVenkatesh Pallipadi /* Do not access any ACPI IO ports in suspend path */ 1442b077fbadSVenkatesh Pallipadi if (acpi_idle_suspend) { 1443b077fbadSVenkatesh Pallipadi acpi_safe_halt(); 1444b077fbadSVenkatesh Pallipadi local_irq_enable(); 1445b077fbadSVenkatesh Pallipadi return 0; 1446b077fbadSVenkatesh Pallipadi } 1447b077fbadSVenkatesh Pallipadi 14484f86d3a8SLen Brown if (pr->flags.bm_check) 14494f86d3a8SLen Brown acpi_idle_update_bm_rld(pr, cx); 14504f86d3a8SLen Brown 14519b12e18cSvenkatesh.pallipadi@intel.com t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); 1452bc71bec9Svenkatesh.pallipadi@intel.com acpi_idle_do_entry(cx); 14539b12e18cSvenkatesh.pallipadi@intel.com t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); 14544f86d3a8SLen Brown 14552e906655Svenkatesh.pallipadi@intel.com local_irq_enable(); 14564f86d3a8SLen Brown cx->usage++; 14574f86d3a8SLen Brown 14589b12e18cSvenkatesh.pallipadi@intel.com return ticks_elapsed_in_us(t1, t2); 14594f86d3a8SLen Brown } 14604f86d3a8SLen Brown 14614f86d3a8SLen Brown /** 14624f86d3a8SLen Brown * acpi_idle_enter_simple - enters an ACPI state without BM handling 14634f86d3a8SLen Brown * @dev: the target CPU 14644f86d3a8SLen Brown * @state: the state data 14654f86d3a8SLen Brown */ 14664f86d3a8SLen Brown static int 
acpi_idle_enter_simple(struct cpuidle_device *dev, 14674f86d3a8SLen Brown struct cpuidle_state *state) 14684f86d3a8SLen Brown { 14694f86d3a8SLen Brown struct acpi_processor *pr; 14704f86d3a8SLen Brown struct acpi_processor_cx *cx = cpuidle_get_statedata(state); 14714f86d3a8SLen Brown u32 t1, t2; 147250629118SVenkatesh Pallipadi int sleep_ticks = 0; 147350629118SVenkatesh Pallipadi 14744f86d3a8SLen Brown pr = processors[smp_processor_id()]; 14754f86d3a8SLen Brown 14764f86d3a8SLen Brown if (unlikely(!pr)) 14774f86d3a8SLen Brown return 0; 14784f86d3a8SLen Brown 1479e196441bSLen Brown if (acpi_idle_suspend) 1480e196441bSLen Brown return(acpi_idle_enter_c1(dev, state)); 1481e196441bSLen Brown 14824f86d3a8SLen Brown local_irq_disable(); 14834f86d3a8SLen Brown current_thread_info()->status &= ~TS_POLLING; 14844f86d3a8SLen Brown /* 14854f86d3a8SLen Brown * TS_POLLING-cleared state must be visible before we test 14864f86d3a8SLen Brown * NEED_RESCHED: 14874f86d3a8SLen Brown */ 14884f86d3a8SLen Brown smp_mb(); 14894f86d3a8SLen Brown 14904f86d3a8SLen Brown if (unlikely(need_resched())) { 14914f86d3a8SLen Brown current_thread_info()->status |= TS_POLLING; 14924f86d3a8SLen Brown local_irq_enable(); 14934f86d3a8SLen Brown return 0; 14944f86d3a8SLen Brown } 14954f86d3a8SLen Brown 1496e17bcb43SThomas Gleixner /* 1497e17bcb43SThomas Gleixner * Must be done before busmaster disable as we might need to 1498e17bcb43SThomas Gleixner * access HPET ! 
1499e17bcb43SThomas Gleixner */ 1500e17bcb43SThomas Gleixner acpi_state_timer_broadcast(pr, cx, 1); 1501e17bcb43SThomas Gleixner 1502e17bcb43SThomas Gleixner if (pr->flags.bm_check) 1503e17bcb43SThomas Gleixner acpi_idle_update_bm_rld(pr, cx); 1504e17bcb43SThomas Gleixner 15054f86d3a8SLen Brown if (cx->type == ACPI_STATE_C3) 15064f86d3a8SLen Brown ACPI_FLUSH_CPU_CACHE(); 15074f86d3a8SLen Brown 15084f86d3a8SLen Brown t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); 150950629118SVenkatesh Pallipadi /* Tell the scheduler that we are going deep-idle: */ 151050629118SVenkatesh Pallipadi sched_clock_idle_sleep_event(); 15114f86d3a8SLen Brown acpi_idle_do_entry(cx); 15124f86d3a8SLen Brown t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); 15134f86d3a8SLen Brown 151461331168SPavel Machek #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86) 15154f86d3a8SLen Brown /* TSC could halt in idle, so notify users */ 1516ddb25f9aSAndi Kleen if (tsc_halts_in_c(cx->type)) 15174f86d3a8SLen Brown mark_tsc_unstable("TSC halts in idle");; 15184f86d3a8SLen Brown #endif 151950629118SVenkatesh Pallipadi sleep_ticks = ticks_elapsed(t1, t2); 152050629118SVenkatesh Pallipadi 152150629118SVenkatesh Pallipadi /* Tell the scheduler how much we idled: */ 152250629118SVenkatesh Pallipadi sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); 15234f86d3a8SLen Brown 15244f86d3a8SLen Brown local_irq_enable(); 15254f86d3a8SLen Brown current_thread_info()->status |= TS_POLLING; 15264f86d3a8SLen Brown 15274f86d3a8SLen Brown cx->usage++; 15284f86d3a8SLen Brown 15294f86d3a8SLen Brown acpi_state_timer_broadcast(pr, cx, 0); 153050629118SVenkatesh Pallipadi cx->time += sleep_ticks; 15314f86d3a8SLen Brown return ticks_elapsed_in_us(t1, t2); 15324f86d3a8SLen Brown } 15334f86d3a8SLen Brown 15344f86d3a8SLen Brown static int c3_cpu_count; 15354f86d3a8SLen Brown static DEFINE_SPINLOCK(c3_lock); 15364f86d3a8SLen Brown 15374f86d3a8SLen Brown /** 15384f86d3a8SLen Brown * acpi_idle_enter_bm - enters C3 with 
proper BM handling 15394f86d3a8SLen Brown * @dev: the target CPU 15404f86d3a8SLen Brown * @state: the state data 15414f86d3a8SLen Brown * 15424f86d3a8SLen Brown * If BM is detected, the deepest non-C3 idle state is entered instead. 15434f86d3a8SLen Brown */ 15444f86d3a8SLen Brown static int acpi_idle_enter_bm(struct cpuidle_device *dev, 15454f86d3a8SLen Brown struct cpuidle_state *state) 15464f86d3a8SLen Brown { 15474f86d3a8SLen Brown struct acpi_processor *pr; 15484f86d3a8SLen Brown struct acpi_processor_cx *cx = cpuidle_get_statedata(state); 15494f86d3a8SLen Brown u32 t1, t2; 155050629118SVenkatesh Pallipadi int sleep_ticks = 0; 155150629118SVenkatesh Pallipadi 15524f86d3a8SLen Brown pr = processors[smp_processor_id()]; 15534f86d3a8SLen Brown 15544f86d3a8SLen Brown if (unlikely(!pr)) 15554f86d3a8SLen Brown return 0; 15564f86d3a8SLen Brown 1557e196441bSLen Brown if (acpi_idle_suspend) 1558e196441bSLen Brown return(acpi_idle_enter_c1(dev, state)); 1559e196441bSLen Brown 1560ddc081a1SVenkatesh Pallipadi if (acpi_idle_bm_check()) { 1561ddc081a1SVenkatesh Pallipadi if (dev->safe_state) { 1562ddc081a1SVenkatesh Pallipadi return dev->safe_state->enter(dev, dev->safe_state); 1563ddc081a1SVenkatesh Pallipadi } else { 15642e906655Svenkatesh.pallipadi@intel.com local_irq_disable(); 1565ddc081a1SVenkatesh Pallipadi acpi_safe_halt(); 15662e906655Svenkatesh.pallipadi@intel.com local_irq_enable(); 1567ddc081a1SVenkatesh Pallipadi return 0; 1568ddc081a1SVenkatesh Pallipadi } 1569ddc081a1SVenkatesh Pallipadi } 1570ddc081a1SVenkatesh Pallipadi 15714f86d3a8SLen Brown local_irq_disable(); 15724f86d3a8SLen Brown current_thread_info()->status &= ~TS_POLLING; 15734f86d3a8SLen Brown /* 15744f86d3a8SLen Brown * TS_POLLING-cleared state must be visible before we test 15754f86d3a8SLen Brown * NEED_RESCHED: 15764f86d3a8SLen Brown */ 15774f86d3a8SLen Brown smp_mb(); 15784f86d3a8SLen Brown 15794f86d3a8SLen Brown if (unlikely(need_resched())) { 15804f86d3a8SLen Brown 
current_thread_info()->status |= TS_POLLING; 15814f86d3a8SLen Brown local_irq_enable(); 15824f86d3a8SLen Brown return 0; 15834f86d3a8SLen Brown } 15844f86d3a8SLen Brown 1585996520c1SVenki Pallipadi acpi_unlazy_tlb(smp_processor_id()); 1586996520c1SVenki Pallipadi 158750629118SVenkatesh Pallipadi /* Tell the scheduler that we are going deep-idle: */ 158850629118SVenkatesh Pallipadi sched_clock_idle_sleep_event(); 15894f86d3a8SLen Brown /* 15904f86d3a8SLen Brown * Must be done before busmaster disable as we might need to 15914f86d3a8SLen Brown * access HPET ! 15924f86d3a8SLen Brown */ 15934f86d3a8SLen Brown acpi_state_timer_broadcast(pr, cx, 1); 15944f86d3a8SLen Brown 15954f86d3a8SLen Brown acpi_idle_update_bm_rld(pr, cx); 15964f86d3a8SLen Brown 1597c9c860e5SVenkatesh Pallipadi /* 1598c9c860e5SVenkatesh Pallipadi * disable bus master 1599c9c860e5SVenkatesh Pallipadi * bm_check implies we need ARB_DIS 1600c9c860e5SVenkatesh Pallipadi * !bm_check implies we need cache flush 1601c9c860e5SVenkatesh Pallipadi * bm_control implies whether we can do ARB_DIS 1602c9c860e5SVenkatesh Pallipadi * 1603c9c860e5SVenkatesh Pallipadi * That leaves a case where bm_check is set and bm_control is 1604c9c860e5SVenkatesh Pallipadi * not set. In that case we cannot do much, we enter C3 1605c9c860e5SVenkatesh Pallipadi * without doing anything. 
1606c9c860e5SVenkatesh Pallipadi */ 1607c9c860e5SVenkatesh Pallipadi if (pr->flags.bm_check && pr->flags.bm_control) { 16084f86d3a8SLen Brown spin_lock(&c3_lock); 16094f86d3a8SLen Brown c3_cpu_count++; 16104f86d3a8SLen Brown /* Disable bus master arbitration when all CPUs are in C3 */ 16114f86d3a8SLen Brown if (c3_cpu_count == num_online_cpus()) 16124f86d3a8SLen Brown acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); 16134f86d3a8SLen Brown spin_unlock(&c3_lock); 1614c9c860e5SVenkatesh Pallipadi } else if (!pr->flags.bm_check) { 1615c9c860e5SVenkatesh Pallipadi ACPI_FLUSH_CPU_CACHE(); 1616c9c860e5SVenkatesh Pallipadi } 16174f86d3a8SLen Brown 16184f86d3a8SLen Brown t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); 16194f86d3a8SLen Brown acpi_idle_do_entry(cx); 16204f86d3a8SLen Brown t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); 16214f86d3a8SLen Brown 16224f86d3a8SLen Brown /* Re-enable bus master arbitration */ 1623c9c860e5SVenkatesh Pallipadi if (pr->flags.bm_check && pr->flags.bm_control) { 1624c9c860e5SVenkatesh Pallipadi spin_lock(&c3_lock); 16254f86d3a8SLen Brown acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); 16264f86d3a8SLen Brown c3_cpu_count--; 16274f86d3a8SLen Brown spin_unlock(&c3_lock); 16284f86d3a8SLen Brown } 16294f86d3a8SLen Brown 163061331168SPavel Machek #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86) 16314f86d3a8SLen Brown /* TSC could halt in idle, so notify users */ 1632ddb25f9aSAndi Kleen if (tsc_halts_in_c(ACPI_STATE_C3)) 16334f86d3a8SLen Brown mark_tsc_unstable("TSC halts in idle"); 16344f86d3a8SLen Brown #endif 163550629118SVenkatesh Pallipadi sleep_ticks = ticks_elapsed(t1, t2); 163650629118SVenkatesh Pallipadi /* Tell the scheduler how much we idled: */ 163750629118SVenkatesh Pallipadi sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); 16384f86d3a8SLen Brown 16394f86d3a8SLen Brown local_irq_enable(); 16404f86d3a8SLen Brown current_thread_info()->status |= TS_POLLING; 16414f86d3a8SLen Brown 16424f86d3a8SLen Brown 
cx->usage++; 16434f86d3a8SLen Brown 16444f86d3a8SLen Brown acpi_state_timer_broadcast(pr, cx, 0); 164550629118SVenkatesh Pallipadi cx->time += sleep_ticks; 16464f86d3a8SLen Brown return ticks_elapsed_in_us(t1, t2); 16474f86d3a8SLen Brown } 16484f86d3a8SLen Brown 16494f86d3a8SLen Brown struct cpuidle_driver acpi_idle_driver = { 16504f86d3a8SLen Brown .name = "acpi_idle", 16514f86d3a8SLen Brown .owner = THIS_MODULE, 16524f86d3a8SLen Brown }; 16534f86d3a8SLen Brown 16544f86d3a8SLen Brown /** 16554f86d3a8SLen Brown * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE 16564f86d3a8SLen Brown * @pr: the ACPI processor 16574f86d3a8SLen Brown */ 16584f86d3a8SLen Brown static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) 16594f86d3a8SLen Brown { 16609a0b8415Svenkatesh.pallipadi@intel.com int i, count = CPUIDLE_DRIVER_STATE_START; 16614f86d3a8SLen Brown struct acpi_processor_cx *cx; 16624f86d3a8SLen Brown struct cpuidle_state *state; 16634f86d3a8SLen Brown struct cpuidle_device *dev = &pr->power.dev; 16644f86d3a8SLen Brown 16654f86d3a8SLen Brown if (!pr->flags.power_setup_done) 16664f86d3a8SLen Brown return -EINVAL; 16674f86d3a8SLen Brown 16684f86d3a8SLen Brown if (pr->flags.power == 0) { 16694f86d3a8SLen Brown return -EINVAL; 16704f86d3a8SLen Brown } 16714f86d3a8SLen Brown 1672dcb84f33SVenkatesh Pallipadi dev->cpu = pr->id; 16734fcb2fcdSVenkatesh Pallipadi for (i = 0; i < CPUIDLE_STATE_MAX; i++) { 16744fcb2fcdSVenkatesh Pallipadi dev->states[i].name[0] = '\0'; 16754fcb2fcdSVenkatesh Pallipadi dev->states[i].desc[0] = '\0'; 16764fcb2fcdSVenkatesh Pallipadi } 16774fcb2fcdSVenkatesh Pallipadi 16784f86d3a8SLen Brown for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { 16794f86d3a8SLen Brown cx = &pr->power.states[i]; 16804f86d3a8SLen Brown state = &dev->states[count]; 16814f86d3a8SLen Brown 16824f86d3a8SLen Brown if (!cx->valid) 16834f86d3a8SLen Brown continue; 16844f86d3a8SLen Brown 16854f86d3a8SLen Brown #ifdef CONFIG_HOTPLUG_CPU 
16864f86d3a8SLen Brown if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && 16874f86d3a8SLen Brown !pr->flags.has_cst && 16884f86d3a8SLen Brown !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) 16894f86d3a8SLen Brown continue; 16904f86d3a8SLen Brown #endif 16914f86d3a8SLen Brown cpuidle_set_statedata(state, cx); 16924f86d3a8SLen Brown 16934f86d3a8SLen Brown snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); 16944fcb2fcdSVenkatesh Pallipadi strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); 16954f86d3a8SLen Brown state->exit_latency = cx->latency; 16964963f620SLen Brown state->target_residency = cx->latency * latency_factor; 16974f86d3a8SLen Brown state->power_usage = cx->power; 16984f86d3a8SLen Brown 16994f86d3a8SLen Brown state->flags = 0; 17004f86d3a8SLen Brown switch (cx->type) { 17014f86d3a8SLen Brown case ACPI_STATE_C1: 17024f86d3a8SLen Brown state->flags |= CPUIDLE_FLAG_SHALLOW; 17038e92b660SVenki Pallipadi if (cx->entry_method == ACPI_CSTATE_FFH) 17049b12e18cSvenkatesh.pallipadi@intel.com state->flags |= CPUIDLE_FLAG_TIME_VALID; 17058e92b660SVenki Pallipadi 17064f86d3a8SLen Brown state->enter = acpi_idle_enter_c1; 1707ddc081a1SVenkatesh Pallipadi dev->safe_state = state; 17084f86d3a8SLen Brown break; 17094f86d3a8SLen Brown 17104f86d3a8SLen Brown case ACPI_STATE_C2: 17114f86d3a8SLen Brown state->flags |= CPUIDLE_FLAG_BALANCED; 17124f86d3a8SLen Brown state->flags |= CPUIDLE_FLAG_TIME_VALID; 17134f86d3a8SLen Brown state->enter = acpi_idle_enter_simple; 1714ddc081a1SVenkatesh Pallipadi dev->safe_state = state; 17154f86d3a8SLen Brown break; 17164f86d3a8SLen Brown 17174f86d3a8SLen Brown case ACPI_STATE_C3: 17184f86d3a8SLen Brown state->flags |= CPUIDLE_FLAG_DEEP; 17194f86d3a8SLen Brown state->flags |= CPUIDLE_FLAG_TIME_VALID; 17204f86d3a8SLen Brown state->flags |= CPUIDLE_FLAG_CHECK_BM; 17214f86d3a8SLen Brown state->enter = pr->flags.bm_check ? 
17224f86d3a8SLen Brown acpi_idle_enter_bm : 17234f86d3a8SLen Brown acpi_idle_enter_simple; 17244f86d3a8SLen Brown break; 17254f86d3a8SLen Brown } 17264f86d3a8SLen Brown 17274f86d3a8SLen Brown count++; 17289a0b8415Svenkatesh.pallipadi@intel.com if (count == CPUIDLE_STATE_MAX) 17299a0b8415Svenkatesh.pallipadi@intel.com break; 17304f86d3a8SLen Brown } 17314f86d3a8SLen Brown 17324f86d3a8SLen Brown dev->state_count = count; 17334f86d3a8SLen Brown 17344f86d3a8SLen Brown if (!count) 17354f86d3a8SLen Brown return -EINVAL; 17364f86d3a8SLen Brown 17374f86d3a8SLen Brown return 0; 17384f86d3a8SLen Brown } 17394f86d3a8SLen Brown 17404f86d3a8SLen Brown int acpi_processor_cst_has_changed(struct acpi_processor *pr) 17414f86d3a8SLen Brown { 1742dcb84f33SVenkatesh Pallipadi int ret = 0; 17434f86d3a8SLen Brown 174436a91358SVenkatesh Pallipadi if (boot_option_idle_override) 174536a91358SVenkatesh Pallipadi return 0; 174636a91358SVenkatesh Pallipadi 17474f86d3a8SLen Brown if (!pr) 17484f86d3a8SLen Brown return -EINVAL; 17494f86d3a8SLen Brown 17504f86d3a8SLen Brown if (nocst) { 17514f86d3a8SLen Brown return -ENODEV; 17524f86d3a8SLen Brown } 17534f86d3a8SLen Brown 17544f86d3a8SLen Brown if (!pr->flags.power_setup_done) 17554f86d3a8SLen Brown return -ENODEV; 17564f86d3a8SLen Brown 17574f86d3a8SLen Brown cpuidle_pause_and_lock(); 17584f86d3a8SLen Brown cpuidle_disable_device(&pr->power.dev); 17594f86d3a8SLen Brown acpi_processor_get_power_info(pr); 1760dcb84f33SVenkatesh Pallipadi if (pr->flags.power) { 17614f86d3a8SLen Brown acpi_processor_setup_cpuidle(pr); 17624f86d3a8SLen Brown ret = cpuidle_enable_device(&pr->power.dev); 1763dcb84f33SVenkatesh Pallipadi } 17644f86d3a8SLen Brown cpuidle_resume_and_unlock(); 17654f86d3a8SLen Brown 17664f86d3a8SLen Brown return ret; 17674f86d3a8SLen Brown } 17684f86d3a8SLen Brown 17694f86d3a8SLen Brown #endif /* CONFIG_CPU_IDLE */ 17704f86d3a8SLen Brown 17717af8b660SPierre Ossman int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, 
17724be44fcdSLen Brown struct acpi_device *device) 17731da177e4SLinus Torvalds { 17741da177e4SLinus Torvalds acpi_status status = 0; 1775b6835052SAndreas Mohr static int first_run; 17761da177e4SLinus Torvalds struct proc_dir_entry *entry = NULL; 17771da177e4SLinus Torvalds unsigned int i; 17781da177e4SLinus Torvalds 177936a91358SVenkatesh Pallipadi if (boot_option_idle_override) 178036a91358SVenkatesh Pallipadi return 0; 17811da177e4SLinus Torvalds 17821da177e4SLinus Torvalds if (!first_run) { 17831da177e4SLinus Torvalds dmi_check_system(processor_power_dmi_table); 1784c1c30634SAlexey Starikovskiy max_cstate = acpi_processor_cstate_check(max_cstate); 17851da177e4SLinus Torvalds if (max_cstate < ACPI_C_STATES_MAX) 17864be44fcdSLen Brown printk(KERN_NOTICE 17874be44fcdSLen Brown "ACPI: processor limited to max C-state %d\n", 17884be44fcdSLen Brown max_cstate); 17891da177e4SLinus Torvalds first_run++; 17904f86d3a8SLen Brown #if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP) 1791f011e2e2SMark Gross pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, 1792f011e2e2SMark Gross &acpi_processor_latency_notifier); 17931fec74a9SAndrew Morton #endif 17941da177e4SLinus Torvalds } 17951da177e4SLinus Torvalds 179602df8b93SVenkatesh Pallipadi if (!pr) 1797d550d98dSPatrick Mochel return -EINVAL; 179802df8b93SVenkatesh Pallipadi 1799cee324b1SAlexey Starikovskiy if (acpi_gbl_FADT.cst_control && !nocst) { 18004be44fcdSLen Brown status = 1801cee324b1SAlexey Starikovskiy acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8); 18021da177e4SLinus Torvalds if (ACPI_FAILURE(status)) { 1803a6fc6720SThomas Renninger ACPI_EXCEPTION((AE_INFO, status, 1804a6fc6720SThomas Renninger "Notifying BIOS of _CST ability failed")); 18051da177e4SLinus Torvalds } 18061da177e4SLinus Torvalds } 18071da177e4SLinus Torvalds 18081da177e4SLinus Torvalds acpi_processor_get_power_info(pr); 18094f86d3a8SLen Brown pr->flags.power_setup_done = 1; 18101da177e4SLinus Torvalds 18111da177e4SLinus Torvalds 
/* 18121da177e4SLinus Torvalds * Install the idle handler if processor power management is supported. 18131da177e4SLinus Torvalds * Note that we use previously set idle handler will be used on 18141da177e4SLinus Torvalds * platforms that only support C1. 18151da177e4SLinus Torvalds */ 181636a91358SVenkatesh Pallipadi if (pr->flags.power) { 18174f86d3a8SLen Brown #ifdef CONFIG_CPU_IDLE 18184f86d3a8SLen Brown acpi_processor_setup_cpuidle(pr); 18194f86d3a8SLen Brown if (cpuidle_register_device(&pr->power.dev)) 18204f86d3a8SLen Brown return -EIO; 18214f86d3a8SLen Brown #endif 18224f86d3a8SLen Brown 18231da177e4SLinus Torvalds printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id); 18241da177e4SLinus Torvalds for (i = 1; i <= pr->power.count; i++) 18251da177e4SLinus Torvalds if (pr->power.states[i].valid) 18264be44fcdSLen Brown printk(" C%d[C%d]", i, 18274be44fcdSLen Brown pr->power.states[i].type); 18281da177e4SLinus Torvalds printk(")\n"); 18291da177e4SLinus Torvalds 18304f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE 18311da177e4SLinus Torvalds if (pr->id == 0) { 18321da177e4SLinus Torvalds pm_idle_save = pm_idle; 18331da177e4SLinus Torvalds pm_idle = acpi_processor_idle; 18341da177e4SLinus Torvalds } 18354f86d3a8SLen Brown #endif 18361da177e4SLinus Torvalds } 18371da177e4SLinus Torvalds 18381da177e4SLinus Torvalds /* 'power' [R] */ 1839cf7acfabSDenis V. Lunev entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER, 1840cf7acfabSDenis V. Lunev S_IRUGO, acpi_device_dir(device), 1841cf7acfabSDenis V. Lunev &acpi_processor_power_fops, 1842cf7acfabSDenis V. 
Lunev acpi_driver_data(device)); 18431da177e4SLinus Torvalds if (!entry) 1844a6fc6720SThomas Renninger return -EIO; 1845d550d98dSPatrick Mochel return 0; 18461da177e4SLinus Torvalds } 18471da177e4SLinus Torvalds 18484be44fcdSLen Brown int acpi_processor_power_exit(struct acpi_processor *pr, 18494be44fcdSLen Brown struct acpi_device *device) 18501da177e4SLinus Torvalds { 185136a91358SVenkatesh Pallipadi if (boot_option_idle_override) 185236a91358SVenkatesh Pallipadi return 0; 185336a91358SVenkatesh Pallipadi 18544f86d3a8SLen Brown #ifdef CONFIG_CPU_IDLE 18554f86d3a8SLen Brown cpuidle_unregister_device(&pr->power.dev); 18564f86d3a8SLen Brown #endif 18571da177e4SLinus Torvalds pr->flags.power_setup_done = 0; 18581da177e4SLinus Torvalds 18591da177e4SLinus Torvalds if (acpi_device_dir(device)) 18604be44fcdSLen Brown remove_proc_entry(ACPI_PROCESSOR_FILE_POWER, 18614be44fcdSLen Brown acpi_device_dir(device)); 18621da177e4SLinus Torvalds 18634f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE 18644f86d3a8SLen Brown 18651da177e4SLinus Torvalds /* Unregister the idle handler when processor #0 is removed. */ 18661da177e4SLinus Torvalds if (pr->id == 0) { 18671da177e4SLinus Torvalds pm_idle = pm_idle_save; 18681da177e4SLinus Torvalds 18691da177e4SLinus Torvalds /* 18701da177e4SLinus Torvalds * We are about to unload the current idle thread pm callback 18711da177e4SLinus Torvalds * (pm_idle), Wait for all processors to update cached/local 18721da177e4SLinus Torvalds * copies of pm_idle before proceeding. 18731da177e4SLinus Torvalds */ 18741da177e4SLinus Torvalds cpu_idle_wait(); 18751fec74a9SAndrew Morton #ifdef CONFIG_SMP 1876f011e2e2SMark Gross pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY, 1877f011e2e2SMark Gross &acpi_processor_latency_notifier); 18781fec74a9SAndrew Morton #endif 18791da177e4SLinus Torvalds } 18804f86d3a8SLen Brown #endif 18811da177e4SLinus Torvalds 1882d550d98dSPatrick Mochel return 0; 18831da177e4SLinus Torvalds } 1884