11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * processor_idle - idle state submodule to the ACPI processor driver 31da177e4SLinus Torvalds * 41da177e4SLinus Torvalds * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 51da177e4SLinus Torvalds * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 6c5ab81caSDominik Brodowski * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de> 71da177e4SLinus Torvalds * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 81da177e4SLinus Torvalds * - Added processor hotplug support 902df8b93SVenkatesh Pallipadi * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> 1002df8b93SVenkatesh Pallipadi * - Added support for C3 on SMP 111da177e4SLinus Torvalds * 121da177e4SLinus Torvalds * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 131da177e4SLinus Torvalds * 141da177e4SLinus Torvalds * This program is free software; you can redistribute it and/or modify 151da177e4SLinus Torvalds * it under the terms of the GNU General Public License as published by 161da177e4SLinus Torvalds * the Free Software Foundation; either version 2 of the License, or (at 171da177e4SLinus Torvalds * your option) any later version. 181da177e4SLinus Torvalds * 191da177e4SLinus Torvalds * This program is distributed in the hope that it will be useful, but 201da177e4SLinus Torvalds * WITHOUT ANY WARRANTY; without even the implied warranty of 211da177e4SLinus Torvalds * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 221da177e4SLinus Torvalds * General Public License for more details. 231da177e4SLinus Torvalds * 241da177e4SLinus Torvalds * You should have received a copy of the GNU General Public License along 251da177e4SLinus Torvalds * with this program; if not, write to the Free Software Foundation, Inc., 261da177e4SLinus Torvalds * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
271da177e4SLinus Torvalds * 281da177e4SLinus Torvalds * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 291da177e4SLinus Torvalds */ 301da177e4SLinus Torvalds 311da177e4SLinus Torvalds #include <linux/kernel.h> 321da177e4SLinus Torvalds #include <linux/module.h> 331da177e4SLinus Torvalds #include <linux/init.h> 341da177e4SLinus Torvalds #include <linux/cpufreq.h> 355a0e3ad6STejun Heo #include <linux/slab.h> 361da177e4SLinus Torvalds #include <linux/acpi.h> 371da177e4SLinus Torvalds #include <linux/dmi.h> 381da177e4SLinus Torvalds #include <linux/moduleparam.h> 394e57b681STim Schmielau #include <linux/sched.h> /* need_resched() */ 40e8db0be1SJean Pihet #include <linux/pm_qos.h> 41e9e2cdb4SThomas Gleixner #include <linux/clockchips.h> 424f86d3a8SLen Brown #include <linux/cpuidle.h> 43ba84be23SRussell King #include <linux/irqflags.h> 441da177e4SLinus Torvalds 453434933bSThomas Gleixner /* 463434933bSThomas Gleixner * Include the apic definitions for x86 to have the APIC timer related defines 473434933bSThomas Gleixner * available also for UP (on SMP it gets magically included via linux/smp.h). 483434933bSThomas Gleixner * asm/acpi.h is not an option, as it would require more include magic. Also 493434933bSThomas Gleixner * creating an empty asm-ia64/apic.h would just trade pest vs. cholera. 
503434933bSThomas Gleixner */ 513434933bSThomas Gleixner #ifdef CONFIG_X86 523434933bSThomas Gleixner #include <asm/apic.h> 533434933bSThomas Gleixner #endif 543434933bSThomas Gleixner 551da177e4SLinus Torvalds #include <asm/io.h> 561da177e4SLinus Torvalds #include <asm/uaccess.h> 571da177e4SLinus Torvalds 581da177e4SLinus Torvalds #include <acpi/acpi_bus.h> 591da177e4SLinus Torvalds #include <acpi/processor.h> 60c1e3b377SZhao Yakui #include <asm/processor.h> 611da177e4SLinus Torvalds 62a192a958SLen Brown #define PREFIX "ACPI: " 63a192a958SLen Brown 641da177e4SLinus Torvalds #define ACPI_PROCESSOR_CLASS "processor" 651da177e4SLinus Torvalds #define _COMPONENT ACPI_PROCESSOR_COMPONENT 66f52fd66dSLen Brown ACPI_MODULE_NAME("processor_idle"); 672aa44d05SIngo Molnar #define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY) 684f86d3a8SLen Brown #define C2_OVERHEAD 1 /* 1us */ 694f86d3a8SLen Brown #define C3_OVERHEAD 1 /* 1us */ 704f86d3a8SLen Brown #define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000)) 711da177e4SLinus Torvalds 724f86d3a8SLen Brown static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER; 734f86d3a8SLen Brown module_param(max_cstate, uint, 0000); 74b6835052SAndreas Mohr static unsigned int nocst __read_mostly; 751da177e4SLinus Torvalds module_param(nocst, uint, 0000); 76d3e7e99fSLen Brown static int bm_check_disable __read_mostly; 77d3e7e99fSLen Brown module_param(bm_check_disable, uint, 0000); 781da177e4SLinus Torvalds 7925de5718SLen Brown static unsigned int latency_factor __read_mostly = 2; 804963f620SLen Brown module_param(latency_factor, uint, 0644); 811da177e4SLinus Torvalds 823d339dcbSDaniel Lezcano static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device); 833d339dcbSDaniel Lezcano 84d1896049SThomas Renninger static int disabled_by_idle_boot_param(void) 85d1896049SThomas Renninger { 86d1896049SThomas Renninger return boot_option_idle_override == IDLE_POLL || 87d1896049SThomas Renninger 
boot_option_idle_override == IDLE_FORCE_MWAIT || 88d1896049SThomas Renninger boot_option_idle_override == IDLE_HALT; 89d1896049SThomas Renninger } 90d1896049SThomas Renninger 911da177e4SLinus Torvalds /* 921da177e4SLinus Torvalds * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3. 931da177e4SLinus Torvalds * For now disable this. Probably a bug somewhere else. 941da177e4SLinus Torvalds * 951da177e4SLinus Torvalds * To skip this limit, boot/load with a large max_cstate limit. 961da177e4SLinus Torvalds */ 971855256cSJeff Garzik static int set_max_cstate(const struct dmi_system_id *id) 981da177e4SLinus Torvalds { 991da177e4SLinus Torvalds if (max_cstate > ACPI_PROCESSOR_MAX_POWER) 1001da177e4SLinus Torvalds return 0; 1011da177e4SLinus Torvalds 1023d35600aSLen Brown printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate." 1031da177e4SLinus Torvalds " Override with \"processor.max_cstate=%d\"\n", id->ident, 1043d35600aSLen Brown (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1); 1051da177e4SLinus Torvalds 1063d35600aSLen Brown max_cstate = (long)id->driver_data; 1071da177e4SLinus Torvalds 1081da177e4SLinus Torvalds return 0; 1091da177e4SLinus Torvalds } 1101da177e4SLinus Torvalds 1117ded5689SAshok Raj /* Actually this shouldn't be __cpuinitdata, would be better to fix the 1127ded5689SAshok Raj callers to only run once -AK */ 1137ded5689SAshok Raj static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = { 114335f16beSDavid Shaohua Li { set_max_cstate, "Clevo 5600D", { 115876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 116876c184bSThomas Rosner DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, 117335f16beSDavid Shaohua Li (void *)2}, 118370d5cd8SArjan van de Ven { set_max_cstate, "Pavilion zv5000", { 119370d5cd8SArjan van de Ven DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 120370d5cd8SArjan van de Ven DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")}, 121370d5cd8SArjan van 
de Ven (void *)1}, 122370d5cd8SArjan van de Ven { set_max_cstate, "Asus L8400B", { 123370d5cd8SArjan van de Ven DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), 124370d5cd8SArjan van de Ven DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")}, 125370d5cd8SArjan van de Ven (void *)1}, 1261da177e4SLinus Torvalds {}, 1271da177e4SLinus Torvalds }; 1281da177e4SLinus Torvalds 1294f86d3a8SLen Brown 1302e906655Svenkatesh.pallipadi@intel.com /* 1312e906655Svenkatesh.pallipadi@intel.com * Callers should disable interrupts before the call and enable 1322e906655Svenkatesh.pallipadi@intel.com * interrupts after return. 1332e906655Svenkatesh.pallipadi@intel.com */ 134ddc081a1SVenkatesh Pallipadi static void acpi_safe_halt(void) 135ddc081a1SVenkatesh Pallipadi { 136ddc081a1SVenkatesh Pallipadi current_thread_info()->status &= ~TS_POLLING; 137ddc081a1SVenkatesh Pallipadi /* 138ddc081a1SVenkatesh Pallipadi * TS_POLLING-cleared state must be visible before we 139ddc081a1SVenkatesh Pallipadi * test NEED_RESCHED: 140ddc081a1SVenkatesh Pallipadi */ 141ddc081a1SVenkatesh Pallipadi smp_mb(); 14271e93d15SVenki Pallipadi if (!need_resched()) { 143ddc081a1SVenkatesh Pallipadi safe_halt(); 14471e93d15SVenki Pallipadi local_irq_disable(); 14571e93d15SVenki Pallipadi } 146ddc081a1SVenkatesh Pallipadi current_thread_info()->status |= TS_POLLING; 147ddc081a1SVenkatesh Pallipadi } 148ddc081a1SVenkatesh Pallipadi 149169a0abbSThomas Gleixner #ifdef ARCH_APICTIMER_STOPS_ON_C3 150169a0abbSThomas Gleixner 151169a0abbSThomas Gleixner /* 152169a0abbSThomas Gleixner * Some BIOS implementations switch to C3 in the published C2 state. 153296d93cdSLinus Torvalds * This seems to be a common problem on AMD boxen, but other vendors 154296d93cdSLinus Torvalds * are affected too. We pick the most conservative approach: we assume 155296d93cdSLinus Torvalds * that the local APIC stops in both C2 and C3. 
156169a0abbSThomas Gleixner */ 1577e275cc4SLen Brown static void lapic_timer_check_state(int state, struct acpi_processor *pr, 158169a0abbSThomas Gleixner struct acpi_processor_cx *cx) 159169a0abbSThomas Gleixner { 160169a0abbSThomas Gleixner struct acpi_processor_power *pwr = &pr->power; 161e585bef8SThomas Gleixner u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2; 162169a0abbSThomas Gleixner 163db954b58SVenkatesh Pallipadi if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT)) 164db954b58SVenkatesh Pallipadi return; 165db954b58SVenkatesh Pallipadi 16602c68a02SLen Brown if (amd_e400_c1e_detected) 16787ad57baSShaohua Li type = ACPI_STATE_C1; 16887ad57baSShaohua Li 169169a0abbSThomas Gleixner /* 170169a0abbSThomas Gleixner * Check, if one of the previous states already marked the lapic 171169a0abbSThomas Gleixner * unstable 172169a0abbSThomas Gleixner */ 173169a0abbSThomas Gleixner if (pwr->timer_broadcast_on_state < state) 174169a0abbSThomas Gleixner return; 175169a0abbSThomas Gleixner 176e585bef8SThomas Gleixner if (cx->type >= type) 177169a0abbSThomas Gleixner pr->power.timer_broadcast_on_state = state; 178169a0abbSThomas Gleixner } 179169a0abbSThomas Gleixner 180918aae42SHidetoshi Seto static void __lapic_timer_propagate_broadcast(void *arg) 181169a0abbSThomas Gleixner { 182f833bab8SSuresh Siddha struct acpi_processor *pr = (struct acpi_processor *) arg; 183e9e2cdb4SThomas Gleixner unsigned long reason; 184e9e2cdb4SThomas Gleixner 185e9e2cdb4SThomas Gleixner reason = pr->power.timer_broadcast_on_state < INT_MAX ? 
186e9e2cdb4SThomas Gleixner CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF; 187e9e2cdb4SThomas Gleixner 188e9e2cdb4SThomas Gleixner clockevents_notify(reason, &pr->id); 189e9e2cdb4SThomas Gleixner } 190e9e2cdb4SThomas Gleixner 191918aae42SHidetoshi Seto static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) 192918aae42SHidetoshi Seto { 193918aae42SHidetoshi Seto smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast, 194918aae42SHidetoshi Seto (void *)pr, 1); 195918aae42SHidetoshi Seto } 196918aae42SHidetoshi Seto 197e9e2cdb4SThomas Gleixner /* Power(C) State timer broadcast control */ 1987e275cc4SLen Brown static void lapic_timer_state_broadcast(struct acpi_processor *pr, 199e9e2cdb4SThomas Gleixner struct acpi_processor_cx *cx, 200e9e2cdb4SThomas Gleixner int broadcast) 201e9e2cdb4SThomas Gleixner { 202e9e2cdb4SThomas Gleixner int state = cx - pr->power.states; 203e9e2cdb4SThomas Gleixner 204e9e2cdb4SThomas Gleixner if (state >= pr->power.timer_broadcast_on_state) { 205e9e2cdb4SThomas Gleixner unsigned long reason; 206e9e2cdb4SThomas Gleixner 207e9e2cdb4SThomas Gleixner reason = broadcast ? 
CLOCK_EVT_NOTIFY_BROADCAST_ENTER : 208e9e2cdb4SThomas Gleixner CLOCK_EVT_NOTIFY_BROADCAST_EXIT; 209e9e2cdb4SThomas Gleixner clockevents_notify(reason, &pr->id); 210e9e2cdb4SThomas Gleixner } 211169a0abbSThomas Gleixner } 212169a0abbSThomas Gleixner 213169a0abbSThomas Gleixner #else 214169a0abbSThomas Gleixner 2157e275cc4SLen Brown static void lapic_timer_check_state(int state, struct acpi_processor *pr, 216169a0abbSThomas Gleixner struct acpi_processor_cx *cstate) { } 2177e275cc4SLen Brown static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { } 2187e275cc4SLen Brown static void lapic_timer_state_broadcast(struct acpi_processor *pr, 219e9e2cdb4SThomas Gleixner struct acpi_processor_cx *cx, 220e9e2cdb4SThomas Gleixner int broadcast) 221e9e2cdb4SThomas Gleixner { 222e9e2cdb4SThomas Gleixner } 223169a0abbSThomas Gleixner 224169a0abbSThomas Gleixner #endif 225169a0abbSThomas Gleixner 226815ab0fdSLen Brown static u32 saved_bm_rld; 227815ab0fdSLen Brown 228815ab0fdSLen Brown static void acpi_idle_bm_rld_save(void) 229815ab0fdSLen Brown { 230815ab0fdSLen Brown acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld); 231815ab0fdSLen Brown } 232815ab0fdSLen Brown static void acpi_idle_bm_rld_restore(void) 233815ab0fdSLen Brown { 234815ab0fdSLen Brown u32 resumed_bm_rld; 235815ab0fdSLen Brown 236815ab0fdSLen Brown acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld); 237815ab0fdSLen Brown 238815ab0fdSLen Brown if (resumed_bm_rld != saved_bm_rld) 239815ab0fdSLen Brown acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld); 240815ab0fdSLen Brown } 241b04e7bdbSThomas Gleixner 242e8110b64SRafael J. Wysocki int acpi_processor_suspend(struct device *dev) 243b04e7bdbSThomas Gleixner { 244815ab0fdSLen Brown acpi_idle_bm_rld_save(); 245b04e7bdbSThomas Gleixner return 0; 246b04e7bdbSThomas Gleixner } 247b04e7bdbSThomas Gleixner 248e8110b64SRafael J. 
Wysocki int acpi_processor_resume(struct device *dev) 249b04e7bdbSThomas Gleixner { 250815ab0fdSLen Brown acpi_idle_bm_rld_restore(); 251b04e7bdbSThomas Gleixner return 0; 252b04e7bdbSThomas Gleixner } 253b04e7bdbSThomas Gleixner 254592913ecSJohn Stultz #if defined(CONFIG_X86) 255520daf72SLen Brown static void tsc_check_state(int state) 256ddb25f9aSAndi Kleen { 257ddb25f9aSAndi Kleen switch (boot_cpu_data.x86_vendor) { 258ddb25f9aSAndi Kleen case X86_VENDOR_AMD: 25940fb1715SVenki Pallipadi case X86_VENDOR_INTEL: 260ddb25f9aSAndi Kleen /* 261ddb25f9aSAndi Kleen * AMD Fam10h TSC will tick in all 262ddb25f9aSAndi Kleen * C/P/S0/S1 states when this bit is set. 263ddb25f9aSAndi Kleen */ 26440fb1715SVenki Pallipadi if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) 265520daf72SLen Brown return; 26640fb1715SVenki Pallipadi 267ddb25f9aSAndi Kleen /*FALL THROUGH*/ 268ddb25f9aSAndi Kleen default: 269520daf72SLen Brown /* TSC could halt in idle, so notify users */ 270520daf72SLen Brown if (state > ACPI_STATE_C1) 271520daf72SLen Brown mark_tsc_unstable("TSC halts in idle"); 272ddb25f9aSAndi Kleen } 273ddb25f9aSAndi Kleen } 274520daf72SLen Brown #else 275520daf72SLen Brown static void tsc_check_state(int state) { return; } 276ddb25f9aSAndi Kleen #endif 277ddb25f9aSAndi Kleen 2781da177e4SLinus Torvalds static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) 2791da177e4SLinus Torvalds { 2801da177e4SLinus Torvalds 2811da177e4SLinus Torvalds if (!pr) 282d550d98dSPatrick Mochel return -EINVAL; 2831da177e4SLinus Torvalds 2841da177e4SLinus Torvalds if (!pr->pblk) 285d550d98dSPatrick Mochel return -ENODEV; 2861da177e4SLinus Torvalds 2871da177e4SLinus Torvalds /* if info is obtained from pblk/fadt, type equals state */ 2881da177e4SLinus Torvalds pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2; 2891da177e4SLinus Torvalds pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3; 2901da177e4SLinus Torvalds 2914c033552SVenkatesh Pallipadi #ifndef CONFIG_HOTPLUG_CPU 
2924c033552SVenkatesh Pallipadi /* 2934c033552SVenkatesh Pallipadi * Check for P_LVL2_UP flag before entering C2 and above on 2944c033552SVenkatesh Pallipadi * an SMP system. 2954c033552SVenkatesh Pallipadi */ 296ad71860aSAlexey Starikovskiy if ((num_online_cpus() > 1) && 297cee324b1SAlexey Starikovskiy !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) 298d550d98dSPatrick Mochel return -ENODEV; 2994c033552SVenkatesh Pallipadi #endif 3004c033552SVenkatesh Pallipadi 3011da177e4SLinus Torvalds /* determine C2 and C3 address from pblk */ 3021da177e4SLinus Torvalds pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4; 3031da177e4SLinus Torvalds pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5; 3041da177e4SLinus Torvalds 3051da177e4SLinus Torvalds /* determine latencies from FADT */ 306ba494beeSBob Moore pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency; 307ba494beeSBob Moore pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency; 3081da177e4SLinus Torvalds 3095d76b6f6SLen Brown /* 3105d76b6f6SLen Brown * FADT specified C2 latency must be less than or equal to 3115d76b6f6SLen Brown * 100 microseconds. 3125d76b6f6SLen Brown */ 313ba494beeSBob Moore if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) { 3145d76b6f6SLen Brown ACPI_DEBUG_PRINT((ACPI_DB_INFO, 315ba494beeSBob Moore "C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency)); 3165d76b6f6SLen Brown /* invalidate C2 */ 3175d76b6f6SLen Brown pr->power.states[ACPI_STATE_C2].address = 0; 3185d76b6f6SLen Brown } 3195d76b6f6SLen Brown 320a6d72c18SLen Brown /* 321a6d72c18SLen Brown * FADT supplied C3 latency must be less than or equal to 322a6d72c18SLen Brown * 1000 microseconds. 
323a6d72c18SLen Brown */ 324ba494beeSBob Moore if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) { 325a6d72c18SLen Brown ACPI_DEBUG_PRINT((ACPI_DB_INFO, 326ba494beeSBob Moore "C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency)); 327a6d72c18SLen Brown /* invalidate C3 */ 328a6d72c18SLen Brown pr->power.states[ACPI_STATE_C3].address = 0; 329a6d72c18SLen Brown } 330a6d72c18SLen Brown 3311da177e4SLinus Torvalds ACPI_DEBUG_PRINT((ACPI_DB_INFO, 3321da177e4SLinus Torvalds "lvl2[0x%08x] lvl3[0x%08x]\n", 3331da177e4SLinus Torvalds pr->power.states[ACPI_STATE_C2].address, 3341da177e4SLinus Torvalds pr->power.states[ACPI_STATE_C3].address)); 3351da177e4SLinus Torvalds 336d550d98dSPatrick Mochel return 0; 3371da177e4SLinus Torvalds } 3381da177e4SLinus Torvalds 339991528d7SVenkatesh Pallipadi static int acpi_processor_get_power_info_default(struct acpi_processor *pr) 340acf05f4bSVenkatesh Pallipadi { 341991528d7SVenkatesh Pallipadi if (!pr->power.states[ACPI_STATE_C1].valid) { 342cf824788SJanosch Machowinski /* set the first C-State to C1 */ 343991528d7SVenkatesh Pallipadi /* all processors need to support C1 */ 344acf05f4bSVenkatesh Pallipadi pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; 345acf05f4bSVenkatesh Pallipadi pr->power.states[ACPI_STATE_C1].valid = 1; 3460fda6b40SVenkatesh Pallipadi pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT; 347991528d7SVenkatesh Pallipadi } 348991528d7SVenkatesh Pallipadi /* the C0 state only exists as a filler in our array */ 349991528d7SVenkatesh Pallipadi pr->power.states[ACPI_STATE_C0].valid = 1; 350d550d98dSPatrick Mochel return 0; 351acf05f4bSVenkatesh Pallipadi } 352acf05f4bSVenkatesh Pallipadi 3531da177e4SLinus Torvalds static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) 3541da177e4SLinus Torvalds { 3551da177e4SLinus Torvalds acpi_status status = 0; 356439913ffSLin Ming u64 count; 357cf824788SJanosch Machowinski int current_count; 3581da177e4SLinus Torvalds int i; 
3591da177e4SLinus Torvalds struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 3601da177e4SLinus Torvalds union acpi_object *cst; 3611da177e4SLinus Torvalds 3621da177e4SLinus Torvalds 3631da177e4SLinus Torvalds if (nocst) 364d550d98dSPatrick Mochel return -ENODEV; 3651da177e4SLinus Torvalds 366991528d7SVenkatesh Pallipadi current_count = 0; 3671da177e4SLinus Torvalds 3681da177e4SLinus Torvalds status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer); 3691da177e4SLinus Torvalds if (ACPI_FAILURE(status)) { 3701da177e4SLinus Torvalds ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n")); 371d550d98dSPatrick Mochel return -ENODEV; 3721da177e4SLinus Torvalds } 3731da177e4SLinus Torvalds 37450dd0969SJan Engelhardt cst = buffer.pointer; 3751da177e4SLinus Torvalds 3761da177e4SLinus Torvalds /* There must be at least 2 elements */ 3771da177e4SLinus Torvalds if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) { 3786468463aSLen Brown printk(KERN_ERR PREFIX "not enough elements in _CST\n"); 3791da177e4SLinus Torvalds status = -EFAULT; 3801da177e4SLinus Torvalds goto end; 3811da177e4SLinus Torvalds } 3821da177e4SLinus Torvalds 3831da177e4SLinus Torvalds count = cst->package.elements[0].integer.value; 3841da177e4SLinus Torvalds 3851da177e4SLinus Torvalds /* Validate number of power states. */ 3861da177e4SLinus Torvalds if (count < 1 || count != cst->package.count - 1) { 3876468463aSLen Brown printk(KERN_ERR PREFIX "count given by _CST is not valid\n"); 3881da177e4SLinus Torvalds status = -EFAULT; 3891da177e4SLinus Torvalds goto end; 3901da177e4SLinus Torvalds } 3911da177e4SLinus Torvalds 3921da177e4SLinus Torvalds /* Tell driver that at least _CST is supported. 
*/ 3931da177e4SLinus Torvalds pr->flags.has_cst = 1; 3941da177e4SLinus Torvalds 3951da177e4SLinus Torvalds for (i = 1; i <= count; i++) { 3961da177e4SLinus Torvalds union acpi_object *element; 3971da177e4SLinus Torvalds union acpi_object *obj; 3981da177e4SLinus Torvalds struct acpi_power_register *reg; 3991da177e4SLinus Torvalds struct acpi_processor_cx cx; 4001da177e4SLinus Torvalds 4011da177e4SLinus Torvalds memset(&cx, 0, sizeof(cx)); 4021da177e4SLinus Torvalds 40350dd0969SJan Engelhardt element = &(cst->package.elements[i]); 4041da177e4SLinus Torvalds if (element->type != ACPI_TYPE_PACKAGE) 4051da177e4SLinus Torvalds continue; 4061da177e4SLinus Torvalds 4071da177e4SLinus Torvalds if (element->package.count != 4) 4081da177e4SLinus Torvalds continue; 4091da177e4SLinus Torvalds 41050dd0969SJan Engelhardt obj = &(element->package.elements[0]); 4111da177e4SLinus Torvalds 4121da177e4SLinus Torvalds if (obj->type != ACPI_TYPE_BUFFER) 4131da177e4SLinus Torvalds continue; 4141da177e4SLinus Torvalds 4151da177e4SLinus Torvalds reg = (struct acpi_power_register *)obj->buffer.pointer; 4161da177e4SLinus Torvalds 4171da177e4SLinus Torvalds if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO && 4181da177e4SLinus Torvalds (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) 4191da177e4SLinus Torvalds continue; 4201da177e4SLinus Torvalds 4211da177e4SLinus Torvalds /* There should be an easy way to extract an integer... 
*/ 42250dd0969SJan Engelhardt obj = &(element->package.elements[1]); 4231da177e4SLinus Torvalds if (obj->type != ACPI_TYPE_INTEGER) 4241da177e4SLinus Torvalds continue; 4251da177e4SLinus Torvalds 4261da177e4SLinus Torvalds cx.type = obj->integer.value; 427991528d7SVenkatesh Pallipadi /* 428991528d7SVenkatesh Pallipadi * Some buggy BIOSes won't list C1 in _CST - 429991528d7SVenkatesh Pallipadi * Let acpi_processor_get_power_info_default() handle them later 430991528d7SVenkatesh Pallipadi */ 431991528d7SVenkatesh Pallipadi if (i == 1 && cx.type != ACPI_STATE_C1) 432991528d7SVenkatesh Pallipadi current_count++; 4331da177e4SLinus Torvalds 434991528d7SVenkatesh Pallipadi cx.address = reg->address; 435991528d7SVenkatesh Pallipadi cx.index = current_count + 1; 4361da177e4SLinus Torvalds 437bc71bec9Svenkatesh.pallipadi@intel.com cx.entry_method = ACPI_CSTATE_SYSTEMIO; 438991528d7SVenkatesh Pallipadi if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { 439991528d7SVenkatesh Pallipadi if (acpi_processor_ffh_cstate_probe 440991528d7SVenkatesh Pallipadi (pr->id, &cx, reg) == 0) { 441bc71bec9Svenkatesh.pallipadi@intel.com cx.entry_method = ACPI_CSTATE_FFH; 442bc71bec9Svenkatesh.pallipadi@intel.com } else if (cx.type == ACPI_STATE_C1) { 443991528d7SVenkatesh Pallipadi /* 444991528d7SVenkatesh Pallipadi * C1 is a special case where FIXED_HARDWARE 445991528d7SVenkatesh Pallipadi * can be handled in non-MWAIT way as well. 446991528d7SVenkatesh Pallipadi * In that case, save this _CST entry info. 447991528d7SVenkatesh Pallipadi * Otherwise, ignore this info and continue. 
448991528d7SVenkatesh Pallipadi */ 449bc71bec9Svenkatesh.pallipadi@intel.com cx.entry_method = ACPI_CSTATE_HALT; 4504fcb2fcdSVenkatesh Pallipadi snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); 451bc71bec9Svenkatesh.pallipadi@intel.com } else { 4521da177e4SLinus Torvalds continue; 453991528d7SVenkatesh Pallipadi } 454da5e09a1SZhao Yakui if (cx.type == ACPI_STATE_C1 && 455d1896049SThomas Renninger (boot_option_idle_override == IDLE_NOMWAIT)) { 456c1e3b377SZhao Yakui /* 457c1e3b377SZhao Yakui * In most cases the C1 space_id obtained from 458c1e3b377SZhao Yakui * _CST object is FIXED_HARDWARE access mode. 459c1e3b377SZhao Yakui * But when the option of idle=halt is added, 460c1e3b377SZhao Yakui * the entry_method type should be changed from 461c1e3b377SZhao Yakui * CSTATE_FFH to CSTATE_HALT. 462da5e09a1SZhao Yakui * When the option of idle=nomwait is added, 463da5e09a1SZhao Yakui * the C1 entry_method type should be 464da5e09a1SZhao Yakui * CSTATE_HALT. 465c1e3b377SZhao Yakui */ 466c1e3b377SZhao Yakui cx.entry_method = ACPI_CSTATE_HALT; 467c1e3b377SZhao Yakui snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); 468c1e3b377SZhao Yakui } 4694fcb2fcdSVenkatesh Pallipadi } else { 4704fcb2fcdSVenkatesh Pallipadi snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x", 4714fcb2fcdSVenkatesh Pallipadi cx.address); 472991528d7SVenkatesh Pallipadi } 4731da177e4SLinus Torvalds 4740fda6b40SVenkatesh Pallipadi if (cx.type == ACPI_STATE_C1) { 4750fda6b40SVenkatesh Pallipadi cx.valid = 1; 4760fda6b40SVenkatesh Pallipadi } 4774fcb2fcdSVenkatesh Pallipadi 47850dd0969SJan Engelhardt obj = &(element->package.elements[2]); 4791da177e4SLinus Torvalds if (obj->type != ACPI_TYPE_INTEGER) 4801da177e4SLinus Torvalds continue; 4811da177e4SLinus Torvalds 4821da177e4SLinus Torvalds cx.latency = obj->integer.value; 4831da177e4SLinus Torvalds 48450dd0969SJan Engelhardt obj = &(element->package.elements[3]); 4851da177e4SLinus Torvalds if (obj->type != ACPI_TYPE_INTEGER) 4861da177e4SLinus Torvalds 
continue; 4871da177e4SLinus Torvalds 488cf824788SJanosch Machowinski current_count++; 489cf824788SJanosch Machowinski memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx)); 490cf824788SJanosch Machowinski 491cf824788SJanosch Machowinski /* 492cf824788SJanosch Machowinski * We support total ACPI_PROCESSOR_MAX_POWER - 1 493cf824788SJanosch Machowinski * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1) 494cf824788SJanosch Machowinski */ 495cf824788SJanosch Machowinski if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) { 496cf824788SJanosch Machowinski printk(KERN_WARNING 497cf824788SJanosch Machowinski "Limiting number of power states to max (%d)\n", 498cf824788SJanosch Machowinski ACPI_PROCESSOR_MAX_POWER); 499cf824788SJanosch Machowinski printk(KERN_WARNING 500cf824788SJanosch Machowinski "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); 501cf824788SJanosch Machowinski break; 502cf824788SJanosch Machowinski } 5031da177e4SLinus Torvalds } 5041da177e4SLinus Torvalds 5054be44fcdSLen Brown ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n", 506cf824788SJanosch Machowinski current_count)); 5071da177e4SLinus Torvalds 5081da177e4SLinus Torvalds /* Validate number of power states discovered */ 509cf824788SJanosch Machowinski if (current_count < 2) 5106d93c648SVenkatesh Pallipadi status = -EFAULT; 5111da177e4SLinus Torvalds 5121da177e4SLinus Torvalds end: 51302438d87SLen Brown kfree(buffer.pointer); 5141da177e4SLinus Torvalds 515d550d98dSPatrick Mochel return status; 5161da177e4SLinus Torvalds } 5171da177e4SLinus Torvalds 5184be44fcdSLen Brown static void acpi_processor_power_verify_c3(struct acpi_processor *pr, 5191da177e4SLinus Torvalds struct acpi_processor_cx *cx) 5201da177e4SLinus Torvalds { 521ee1ca48fSPallipadi, Venkatesh static int bm_check_flag = -1; 522ee1ca48fSPallipadi, Venkatesh static int bm_control_flag = -1; 52302df8b93SVenkatesh Pallipadi 5241da177e4SLinus Torvalds 5251da177e4SLinus Torvalds if (!cx->address) 526d550d98dSPatrick Mochel 
return; 5271da177e4SLinus Torvalds 5281da177e4SLinus Torvalds /* 5291da177e4SLinus Torvalds * PIIX4 Erratum #18: We don't support C3 when Type-F (fast) 5301da177e4SLinus Torvalds * DMA transfers are used by any ISA device to avoid livelock. 5311da177e4SLinus Torvalds * Note that we could disable Type-F DMA (as recommended by 5321da177e4SLinus Torvalds * the erratum), but this is known to disrupt certain ISA 5331da177e4SLinus Torvalds * devices thus we take the conservative approach. 5341da177e4SLinus Torvalds */ 5351da177e4SLinus Torvalds else if (errata.piix4.fdma) { 5361da177e4SLinus Torvalds ACPI_DEBUG_PRINT((ACPI_DB_INFO, 5371da177e4SLinus Torvalds "C3 not supported on PIIX4 with Type-F DMA\n")); 538d550d98dSPatrick Mochel return; 5391da177e4SLinus Torvalds } 5401da177e4SLinus Torvalds 54102df8b93SVenkatesh Pallipadi /* All the logic here assumes flags.bm_check is same across all CPUs */ 542ee1ca48fSPallipadi, Venkatesh if (bm_check_flag == -1) { 54302df8b93SVenkatesh Pallipadi /* Determine whether bm_check is needed based on CPU */ 54402df8b93SVenkatesh Pallipadi acpi_processor_power_init_bm_check(&(pr->flags), pr->id); 54502df8b93SVenkatesh Pallipadi bm_check_flag = pr->flags.bm_check; 546ee1ca48fSPallipadi, Venkatesh bm_control_flag = pr->flags.bm_control; 54702df8b93SVenkatesh Pallipadi } else { 54802df8b93SVenkatesh Pallipadi pr->flags.bm_check = bm_check_flag; 549ee1ca48fSPallipadi, Venkatesh pr->flags.bm_control = bm_control_flag; 55002df8b93SVenkatesh Pallipadi } 55102df8b93SVenkatesh Pallipadi 55202df8b93SVenkatesh Pallipadi if (pr->flags.bm_check) { 55302df8b93SVenkatesh Pallipadi if (!pr->flags.bm_control) { 554ed3110efSVenki Pallipadi if (pr->flags.has_cst != 1) { 555ed3110efSVenki Pallipadi /* bus mastering control is necessary */ 55602df8b93SVenkatesh Pallipadi ACPI_DEBUG_PRINT((ACPI_DB_INFO, 557ed3110efSVenki Pallipadi "C3 support requires BM control\n")); 558ed3110efSVenki Pallipadi return; 559ed3110efSVenki Pallipadi } else { 560ed3110efSVenki 
Pallipadi /* Here we enter C3 without bus mastering */
561ed3110efSVenki Pallipadi ACPI_DEBUG_PRINT((ACPI_DB_INFO,
562ed3110efSVenki Pallipadi "C3 support without BM control\n"));
563ed3110efSVenki Pallipadi }
56402df8b93SVenkatesh Pallipadi }
56502df8b93SVenkatesh Pallipadi } else {
56602df8b93SVenkatesh Pallipadi /*
56702df8b93SVenkatesh Pallipadi * WBINVD should be set in fadt, for C3 state to be
56802df8b93SVenkatesh Pallipadi * supported on when bm_check is not required.
56902df8b93SVenkatesh Pallipadi */
570cee324b1SAlexey Starikovskiy if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
57102df8b93SVenkatesh Pallipadi ACPI_DEBUG_PRINT((ACPI_DB_INFO,
57202df8b93SVenkatesh Pallipadi "Cache invalidation should work properly"
57302df8b93SVenkatesh Pallipadi " for C3 to be enabled on SMP systems\n"));
574d550d98dSPatrick Mochel return;
57502df8b93SVenkatesh Pallipadi }
57602df8b93SVenkatesh Pallipadi }
57702df8b93SVenkatesh Pallipadi
5781da177e4SLinus Torvalds /*
5791da177e4SLinus Torvalds * Otherwise we've met all of our C3 requirements.
5801da177e4SLinus Torvalds * Normalize the C3 latency to expidite policy. Enable
5811da177e4SLinus Torvalds * checking of bus mastering status (bm_check) so we can
5821da177e4SLinus Torvalds * use this in our C3 policy
5831da177e4SLinus Torvalds */
5841da177e4SLinus Torvalds cx->valid = 1;
5854f86d3a8SLen Brown
58631878dd8SLen Brown /*
58731878dd8SLen Brown * On older chipsets, BM_RLD needs to be set
58831878dd8SLen Brown * in order for Bus Master activity to wake the
58931878dd8SLen Brown * system from C3. Newer chipsets handle DMA
59031878dd8SLen Brown * during C3 automatically and BM_RLD is a NOP.
59131878dd8SLen Brown * In either case, the proper way to
59231878dd8SLen Brown * handle BM_RLD is to set it and leave it set.
59331878dd8SLen Brown */
59450ffba1bSBob Moore acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
5951da177e4SLinus Torvalds
596d550d98dSPatrick Mochel return;
5971da177e4SLinus Torvalds }
5981da177e4SLinus Torvalds
/*
 * NOTE(review): walks states[1..max_cstate] marking which C-states are
 * usable: C1 is always valid, C2 requires an I/O address, and C3 is
 * vetted by acpi_processor_power_verify_c3().  Each valid state is also
 * probed for LAPIC-timer and TSC behavior.  Returns the number of
 * valid states found.
 */
5991da177e4SLinus Torvalds static int acpi_processor_power_verify(struct acpi_processor *pr)
6001da177e4SLinus Torvalds {
6011da177e4SLinus Torvalds unsigned int i;
6021da177e4SLinus Torvalds unsigned int working = 0;
6036eb0a0fdSVenkatesh Pallipadi
604169a0abbSThomas Gleixner pr->power.timer_broadcast_on_state = INT_MAX;
6056eb0a0fdSVenkatesh Pallipadi
606a0bf284bSLen Brown for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
6071da177e4SLinus Torvalds struct acpi_processor_cx *cx = &pr->power.states[i];
6081da177e4SLinus Torvalds
6091da177e4SLinus Torvalds switch (cx->type) {
6101da177e4SLinus Torvalds case ACPI_STATE_C1:
6111da177e4SLinus Torvalds cx->valid = 1;
6121da177e4SLinus Torvalds break;
6131da177e4SLinus Torvalds
6141da177e4SLinus Torvalds case ACPI_STATE_C2:
615d22edd29SLen Brown if (!cx->address)
616d22edd29SLen Brown break;
617d22edd29SLen Brown cx->valid = 1;
6181da177e4SLinus Torvalds break;
6191da177e4SLinus Torvalds
6201da177e4SLinus Torvalds case ACPI_STATE_C3:
6211da177e4SLinus Torvalds acpi_processor_power_verify_c3(pr, cx);
6221da177e4SLinus Torvalds break;
6231da177e4SLinus Torvalds }
6247e275cc4SLen Brown if (!cx->valid)
6257e275cc4SLen Brown continue;
6261da177e4SLinus Torvalds
6277e275cc4SLen Brown lapic_timer_check_state(i, pr, cx);
6287e275cc4SLen Brown tsc_check_state(cx->type);
6291da177e4SLinus Torvalds working++;
6301da177e4SLinus Torvalds }
6311da177e4SLinus Torvalds
632918aae42SHidetoshi Seto lapic_timer_propagate_broadcast(pr);
633bd663347SAndi Kleen
6341da177e4SLinus Torvalds return (working);
6351da177e4SLinus Torvalds }
6361da177e4SLinus Torvalds
/*
 * NOTE(review): (re)builds pr->power.states from _CST, falling back to
 * FADT data on -ENODEV, then validates the result.  The pr->power.count
 * set from acpi_processor_power_verify() is overwritten by the loop
 * below with the index of the last valid state -- looks intentional,
 * but worth confirming against callers.
 */
6374be44fcdSLen Brown static int acpi_processor_get_power_info(struct acpi_processor *pr)
6381da177e4SLinus Torvalds {
6391da177e4SLinus Torvalds
unsigned int i;
6401da177e4SLinus Torvalds int result;
6411da177e4SLinus Torvalds
6421da177e4SLinus Torvalds
6431da177e4SLinus Torvalds /* NOTE: the idle thread may not be running while calling
6441da177e4SLinus Torvalds * this function */
6451da177e4SLinus Torvalds
646991528d7SVenkatesh Pallipadi /* Zero initialize all the C-states info. */
647991528d7SVenkatesh Pallipadi memset(pr->power.states, 0, sizeof(pr->power.states));
648991528d7SVenkatesh Pallipadi
6491da177e4SLinus Torvalds result = acpi_processor_get_power_info_cst(pr);
6506d93c648SVenkatesh Pallipadi if (result == -ENODEV)
651c5a114f1SDarrick J. Wong result = acpi_processor_get_power_info_fadt(pr);
6526d93c648SVenkatesh Pallipadi
653991528d7SVenkatesh Pallipadi if (result)
654991528d7SVenkatesh Pallipadi return result;
655991528d7SVenkatesh Pallipadi
656991528d7SVenkatesh Pallipadi acpi_processor_get_power_info_default(pr);
657991528d7SVenkatesh Pallipadi
658cf824788SJanosch Machowinski pr->power.count = acpi_processor_power_verify(pr);
6591da177e4SLinus Torvalds
6601da177e4SLinus Torvalds /*
6611da177e4SLinus Torvalds * if one state of type C2 or C3 is available, mark this
6621da177e4SLinus Torvalds * CPU as being "idle manageable"
6631da177e4SLinus Torvalds */
6641da177e4SLinus Torvalds for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
665acf05f4bSVenkatesh Pallipadi if (pr->power.states[i].valid) {
6661da177e4SLinus Torvalds pr->power.count = i;
6672203d6edSLinus Torvalds if (pr->power.states[i].type >= ACPI_STATE_C2)
6681da177e4SLinus Torvalds pr->flags.power = 1;
6691da177e4SLinus Torvalds }
670acf05f4bSVenkatesh Pallipadi }
6711da177e4SLinus Torvalds
672d550d98dSPatrick Mochel return 0;
6731da177e4SLinus Torvalds }
6741da177e4SLinus Torvalds
6754f86d3a8SLen Brown /**
6764f86d3a8SLen Brown * acpi_idle_bm_check - checks if bus master activity was detected
6774f86d3a8SLen Brown */
6784f86d3a8SLen Brown static int acpi_idle_bm_check(void)
6794f86d3a8SLen Brown {
6804f86d3a8SLen Brown u32 bm_status =
0;
6814f86d3a8SLen Brown
682d3e7e99fSLen Brown if (bm_check_disable)
683d3e7e99fSLen Brown return 0;
684d3e7e99fSLen Brown
68550ffba1bSBob Moore acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
6864f86d3a8SLen Brown if (bm_status)
68750ffba1bSBob Moore acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
6884f86d3a8SLen Brown /*
6894f86d3a8SLen Brown * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
6904f86d3a8SLen Brown * the true state of bus mastering activity; forcing us to
6914f86d3a8SLen Brown * manually check the BMIDEA bit of each IDE channel.
6924f86d3a8SLen Brown */
6934f86d3a8SLen Brown else if (errata.piix4.bmisx) {
6944f86d3a8SLen Brown if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
6954f86d3a8SLen Brown || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
6964f86d3a8SLen Brown bm_status = 1;
6974f86d3a8SLen Brown }
6984f86d3a8SLen Brown return bm_status;
6994f86d3a8SLen Brown }
7004f86d3a8SLen Brown
7014f86d3a8SLen Brown /**
7024f86d3a8SLen Brown * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
7034f86d3a8SLen Brown * @cx: cstate data
704bc71bec9Svenkatesh.pallipadi@intel.com *
705bc71bec9Svenkatesh.pallipadi@intel.com * Caller disables interrupt before call and enables interrupt after return.
7064f86d3a8SLen Brown */
/*
 * NOTE(review): relies on the caller having already disabled interrupts
 * (see the kernel-doc contract above); only suspends irqsoff tracing
 * around the actual hardware entry.
 */
7074f86d3a8SLen Brown static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
7084f86d3a8SLen Brown {
709dcf30997SSteven Rostedt /* Don't trace irqs off for idle */
710dcf30997SSteven Rostedt stop_critical_timings();
711bc71bec9Svenkatesh.pallipadi@intel.com if (cx->entry_method == ACPI_CSTATE_FFH) {
7124f86d3a8SLen Brown /* Call into architectural FFH based C-state */
7134f86d3a8SLen Brown acpi_processor_ffh_cstate_enter(cx);
714bc71bec9Svenkatesh.pallipadi@intel.com } else if (cx->entry_method == ACPI_CSTATE_HALT) {
715bc71bec9Svenkatesh.pallipadi@intel.com acpi_safe_halt();
7164f86d3a8SLen Brown } else {
7174f86d3a8SLen Brown /* IO port based C-state */
7184f86d3a8SLen Brown inb(cx->address);
7194f86d3a8SLen Brown /* Dummy wait op - must do something useless after P_LVL2 read
7204f86d3a8SLen Brown because chipsets cannot guarantee that STPCLK# signal
7214f86d3a8SLen Brown gets asserted in time to freeze execution properly. */
722cfa806f0SAndi Kleen inl(acpi_gbl_FADT.xpm_timer_block.address);
7234f86d3a8SLen Brown }
724dcf30997SSteven Rostedt start_critical_timings();
7254f86d3a8SLen Brown }
7264f86d3a8SLen Brown
7274f86d3a8SLen Brown /**
7284f86d3a8SLen Brown * acpi_idle_enter_c1 - enters an ACPI C1 state-type
7294f86d3a8SLen Brown * @dev: the target CPU
73046bcfad7SDeepthi Dharwar * @drv: cpuidle driver containing cpuidle state info
731e978aa7dSDeepthi Dharwar * @index: index of target state
7324f86d3a8SLen Brown *
7334f86d3a8SLen Brown * This is equivalent to the HALT instruction.
7344f86d3a8SLen Brown */
/*
 * NOTE(review): measures residency with ktime_get_real() around the
 * entry and reports it (in microseconds) via dev->last_residency;
 * returns the entered index, or -EINVAL if no processor object is
 * registered for this CPU.
 */
7354f86d3a8SLen Brown static int acpi_idle_enter_c1(struct cpuidle_device *dev,
73646bcfad7SDeepthi Dharwar struct cpuidle_driver *drv, int index)
7374f86d3a8SLen Brown {
738ff69f2bbSalex.shi ktime_t kt1, kt2;
739ff69f2bbSalex.shi s64 idle_time;
7404f86d3a8SLen Brown struct acpi_processor *pr;
7414202735eSDeepthi Dharwar struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
7424202735eSDeepthi Dharwar struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
7439b12e18cSvenkatesh.pallipadi@intel.com
7444a6f4fe8SChristoph Lameter pr = __this_cpu_read(processors);
745e978aa7dSDeepthi Dharwar dev->last_residency = 0;
7464f86d3a8SLen Brown
7474f86d3a8SLen Brown if (unlikely(!pr))
748e978aa7dSDeepthi Dharwar return -EINVAL;
7494f86d3a8SLen Brown
7502e906655Svenkatesh.pallipadi@intel.com local_irq_disable();
751b077fbadSVenkatesh Pallipadi
75275cc5235SDeepthi Dharwar
7537e275cc4SLen Brown lapic_timer_state_broadcast(pr, cx, 1);
754ff69f2bbSalex.shi kt1 = ktime_get_real();
755bc71bec9Svenkatesh.pallipadi@intel.com acpi_idle_do_entry(cx);
756ff69f2bbSalex.shi kt2 = ktime_get_real();
757ff69f2bbSalex.shi idle_time = ktime_to_us(ktime_sub(kt2, kt1));
7584f86d3a8SLen Brown
759e978aa7dSDeepthi Dharwar /* Update device last_residency*/
760e978aa7dSDeepthi Dharwar dev->last_residency = (int)idle_time;
761e978aa7dSDeepthi Dharwar
7622e906655Svenkatesh.pallipadi@intel.com local_irq_enable();
7637e275cc4SLen Brown lapic_timer_state_broadcast(pr, cx, 0);
7644f86d3a8SLen Brown
765e978aa7dSDeepthi Dharwar return index;
7664f86d3a8SLen Brown }
7674f86d3a8SLen Brown
7681a022e3fSBoris Ostrovsky
7691a022e3fSBoris Ostrovsky /**
7701a022e3fSBoris Ostrovsky * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e.
off-lining)
7711a022e3fSBoris Ostrovsky * @dev: the target CPU
7721a022e3fSBoris Ostrovsky * @index: the index of suggested state
7731a022e3fSBoris Ostrovsky */
/*
 * NOTE(review): spins forever re-entering the chosen C-state; the only
 * real return path is -ENODEV when the entry method is neither HALT
 * nor SYSTEMIO.  The trailing "return 0" exists to satisfy the
 * compiler and is never reached.
 */
7741a022e3fSBoris Ostrovsky static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
7751a022e3fSBoris Ostrovsky {
7761a022e3fSBoris Ostrovsky struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
7771a022e3fSBoris Ostrovsky struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
7781a022e3fSBoris Ostrovsky
7791a022e3fSBoris Ostrovsky ACPI_FLUSH_CPU_CACHE();
7801a022e3fSBoris Ostrovsky
7811a022e3fSBoris Ostrovsky while (1) {
7821a022e3fSBoris Ostrovsky
7831a022e3fSBoris Ostrovsky if (cx->entry_method == ACPI_CSTATE_HALT)
78454f70077SLuck, Tony safe_halt();
7851a022e3fSBoris Ostrovsky else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
7861a022e3fSBoris Ostrovsky inb(cx->address);
7871a022e3fSBoris Ostrovsky /* See comment in acpi_idle_do_entry() */
7881a022e3fSBoris Ostrovsky inl(acpi_gbl_FADT.xpm_timer_block.address);
7891a022e3fSBoris Ostrovsky } else
7901a022e3fSBoris Ostrovsky return -ENODEV;
7911a022e3fSBoris Ostrovsky }
7921a022e3fSBoris Ostrovsky
7931a022e3fSBoris Ostrovsky /* Never reached */
7941a022e3fSBoris Ostrovsky return 0;
7951a022e3fSBoris Ostrovsky }
7961a022e3fSBoris Ostrovsky
7974f86d3a8SLen Brown /**
7984f86d3a8SLen Brown * acpi_idle_enter_simple - enters an ACPI state without BM handling
7994f86d3a8SLen Brown * @dev: the target CPU
80046bcfad7SDeepthi Dharwar * @drv: cpuidle driver with cpuidle state information
801e978aa7dSDeepthi Dharwar * @index: the index of suggested state
8024f86d3a8SLen Brown */
8034f86d3a8SLen Brown static int acpi_idle_enter_simple(struct cpuidle_device *dev,
80446bcfad7SDeepthi Dharwar struct cpuidle_driver *drv, int index)
8054f86d3a8SLen Brown {
8064f86d3a8SLen Brown struct acpi_processor *pr;
8074202735eSDeepthi Dharwar struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
8084202735eSDeepthi Dharwar
struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
809ff69f2bbSalex.shi ktime_t kt1, kt2;
8102da513f5SVenkatesh Pallipadi s64 idle_time_ns;
811ff69f2bbSalex.shi s64 idle_time;
81250629118SVenkatesh Pallipadi
8134a6f4fe8SChristoph Lameter pr = __this_cpu_read(processors);
814e978aa7dSDeepthi Dharwar dev->last_residency = 0;
8154f86d3a8SLen Brown
8164f86d3a8SLen Brown if (unlikely(!pr))
817e978aa7dSDeepthi Dharwar return -EINVAL;
818e196441bSLen Brown
8194f86d3a8SLen Brown local_irq_disable();
82002cf4f98SLen Brown
82175cc5235SDeepthi Dharwar
822d306ebc2SPallipadi, Venkatesh if (cx->entry_method != ACPI_CSTATE_FFH) {
8234f86d3a8SLen Brown current_thread_info()->status &= ~TS_POLLING;
8244f86d3a8SLen Brown /*
8254f86d3a8SLen Brown * TS_POLLING-cleared state must be visible before we test
8264f86d3a8SLen Brown * NEED_RESCHED:
8274f86d3a8SLen Brown */
8284f86d3a8SLen Brown smp_mb();
8294f86d3a8SLen Brown
8304f86d3a8SLen Brown if (unlikely(need_resched())) {
8314f86d3a8SLen Brown current_thread_info()->status |= TS_POLLING;
8324f86d3a8SLen Brown local_irq_enable();
833e978aa7dSDeepthi Dharwar return -EINVAL;
8344f86d3a8SLen Brown }
83502cf4f98SLen Brown }
8364f86d3a8SLen Brown
837e17bcb43SThomas Gleixner /*
838e17bcb43SThomas Gleixner * Must be done before busmaster disable as we might need to
839e17bcb43SThomas Gleixner * access HPET !
840e17bcb43SThomas Gleixner */
8417e275cc4SLen Brown lapic_timer_state_broadcast(pr, cx, 1);
842e17bcb43SThomas Gleixner
8434f86d3a8SLen Brown if (cx->type == ACPI_STATE_C3)
8444f86d3a8SLen Brown ACPI_FLUSH_CPU_CACHE();
8454f86d3a8SLen Brown
846ff69f2bbSalex.shi kt1 = ktime_get_real();
84750629118SVenkatesh Pallipadi /* Tell the scheduler that we are going deep-idle: */
84850629118SVenkatesh Pallipadi sched_clock_idle_sleep_event();
8494f86d3a8SLen Brown acpi_idle_do_entry(cx);
850ff69f2bbSalex.shi kt2 = ktime_get_real();
8512da513f5SVenkatesh Pallipadi idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
8522da513f5SVenkatesh Pallipadi idle_time = idle_time_ns;
8532da513f5SVenkatesh Pallipadi do_div(idle_time, NSEC_PER_USEC);
8544f86d3a8SLen Brown
855e978aa7dSDeepthi Dharwar /* Update device last_residency*/
856e978aa7dSDeepthi Dharwar dev->last_residency = (int)idle_time;
857e978aa7dSDeepthi Dharwar
85850629118SVenkatesh Pallipadi /* Tell the scheduler how much we idled: */
8592da513f5SVenkatesh Pallipadi sched_clock_idle_wakeup_event(idle_time_ns);
8604f86d3a8SLen Brown
8614f86d3a8SLen Brown local_irq_enable();
86202cf4f98SLen Brown if (cx->entry_method != ACPI_CSTATE_FFH)
8634f86d3a8SLen Brown current_thread_info()->status |= TS_POLLING;
8644f86d3a8SLen Brown
8657e275cc4SLen Brown lapic_timer_state_broadcast(pr, cx, 0);
866e978aa7dSDeepthi Dharwar return index;
8674f86d3a8SLen Brown }
8684f86d3a8SLen Brown
/*
 * Count of CPUs currently inside C3 via acpi_idle_enter_bm(); guarded
 * by c3_lock and used to decide when to assert/release ARB_DIS.
 */
8694f86d3a8SLen Brown static int c3_cpu_count;
870e12f65f7SThomas Gleixner static DEFINE_RAW_SPINLOCK(c3_lock);
8714f86d3a8SLen Brown
8724f86d3a8SLen Brown /**
8734f86d3a8SLen Brown * acpi_idle_enter_bm - enters C3 with proper BM handling
8744f86d3a8SLen Brown * @dev: the target CPU
87546bcfad7SDeepthi Dharwar * @drv: cpuidle driver containing state data
876e978aa7dSDeepthi Dharwar * @index: the index of suggested state
8774f86d3a8SLen Brown *
8784f86d3a8SLen Brown * If BM is detected, the deepest non-C3 idle state is entered instead.
8794f86d3a8SLen Brown */
/*
 * NOTE(review): when bus-master activity is seen (and bm_sts_skip is
 * clear) this redirects to drv->safe_state_index if one exists,
 * otherwise falls back to a plain safe halt and returns -EBUSY.
 */
8804f86d3a8SLen Brown static int acpi_idle_enter_bm(struct cpuidle_device *dev,
88146bcfad7SDeepthi Dharwar struct cpuidle_driver *drv, int index)
8824f86d3a8SLen Brown {
8834f86d3a8SLen Brown struct acpi_processor *pr;
8844202735eSDeepthi Dharwar struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
8854202735eSDeepthi Dharwar struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
886ff69f2bbSalex.shi ktime_t kt1, kt2;
8872da513f5SVenkatesh Pallipadi s64 idle_time_ns;
888ff69f2bbSalex.shi s64 idle_time;
889ff69f2bbSalex.shi
89050629118SVenkatesh Pallipadi
8914a6f4fe8SChristoph Lameter pr = __this_cpu_read(processors);
892e978aa7dSDeepthi Dharwar dev->last_residency = 0;
8934f86d3a8SLen Brown
8944f86d3a8SLen Brown if (unlikely(!pr))
895e978aa7dSDeepthi Dharwar return -EINVAL;
8964f86d3a8SLen Brown
897718be4aaSLen Brown if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
89846bcfad7SDeepthi Dharwar if (drv->safe_state_index >= 0) {
89946bcfad7SDeepthi Dharwar return drv->states[drv->safe_state_index].enter(dev,
90046bcfad7SDeepthi Dharwar drv, drv->safe_state_index);
901ddc081a1SVenkatesh Pallipadi } else {
9022e906655Svenkatesh.pallipadi@intel.com local_irq_disable();
903ddc081a1SVenkatesh Pallipadi acpi_safe_halt();
9042e906655Svenkatesh.pallipadi@intel.com local_irq_enable();
90575cc5235SDeepthi Dharwar return -EBUSY;
906ddc081a1SVenkatesh Pallipadi }
907ddc081a1SVenkatesh Pallipadi }
908ddc081a1SVenkatesh Pallipadi
9094f86d3a8SLen Brown local_irq_disable();
91002cf4f98SLen Brown
91175cc5235SDeepthi Dharwar
912d306ebc2SPallipadi, Venkatesh if (cx->entry_method != ACPI_CSTATE_FFH) {
9134f86d3a8SLen Brown current_thread_info()->status &= ~TS_POLLING;
9144f86d3a8SLen Brown /*
9154f86d3a8SLen Brown * TS_POLLING-cleared state must be visible before we test
9164f86d3a8SLen Brown * NEED_RESCHED:
9174f86d3a8SLen Brown */
9184f86d3a8SLen Brown smp_mb();
9194f86d3a8SLen Brown
9204f86d3a8SLen Brown if
(unlikely(need_resched())) {
9214f86d3a8SLen Brown current_thread_info()->status |= TS_POLLING;
9224f86d3a8SLen Brown local_irq_enable();
923e978aa7dSDeepthi Dharwar return -EINVAL;
9244f86d3a8SLen Brown }
92502cf4f98SLen Brown }
9264f86d3a8SLen Brown
927996520c1SVenki Pallipadi acpi_unlazy_tlb(smp_processor_id());
928996520c1SVenki Pallipadi
92950629118SVenkatesh Pallipadi /* Tell the scheduler that we are going deep-idle: */
93050629118SVenkatesh Pallipadi sched_clock_idle_sleep_event();
9314f86d3a8SLen Brown /*
9324f86d3a8SLen Brown * Must be done before busmaster disable as we might need to
9334f86d3a8SLen Brown * access HPET !
9344f86d3a8SLen Brown */
9357e275cc4SLen Brown lapic_timer_state_broadcast(pr, cx, 1);
9364f86d3a8SLen Brown
937f461ddeaSLen Brown kt1 = ktime_get_real();
938c9c860e5SVenkatesh Pallipadi /*
939c9c860e5SVenkatesh Pallipadi * disable bus master
940c9c860e5SVenkatesh Pallipadi * bm_check implies we need ARB_DIS
941c9c860e5SVenkatesh Pallipadi * !bm_check implies we need cache flush
942c9c860e5SVenkatesh Pallipadi * bm_control implies whether we can do ARB_DIS
943c9c860e5SVenkatesh Pallipadi *
944c9c860e5SVenkatesh Pallipadi * That leaves a case where bm_check is set and bm_control is
945c9c860e5SVenkatesh Pallipadi * not set. In that case we cannot do much, we enter C3
946c9c860e5SVenkatesh Pallipadi * without doing anything.
947c9c860e5SVenkatesh Pallipadi */
/*
 * ARB_DIS is only asserted once every online CPU has entered C3
 * (tracked via c3_cpu_count under c3_lock) and is released again on
 * the first CPU to wake.
 */
948c9c860e5SVenkatesh Pallipadi if (pr->flags.bm_check && pr->flags.bm_control) {
949e12f65f7SThomas Gleixner raw_spin_lock(&c3_lock);
9504f86d3a8SLen Brown c3_cpu_count++;
9514f86d3a8SLen Brown /* Disable bus master arbitration when all CPUs are in C3 */
9524f86d3a8SLen Brown if (c3_cpu_count == num_online_cpus())
95350ffba1bSBob Moore acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
954e12f65f7SThomas Gleixner raw_spin_unlock(&c3_lock);
955c9c860e5SVenkatesh Pallipadi } else if (!pr->flags.bm_check) {
956c9c860e5SVenkatesh Pallipadi ACPI_FLUSH_CPU_CACHE();
957c9c860e5SVenkatesh Pallipadi }
9584f86d3a8SLen Brown
9594f86d3a8SLen Brown acpi_idle_do_entry(cx);
9604f86d3a8SLen Brown
9614f86d3a8SLen Brown /* Re-enable bus master arbitration */
962c9c860e5SVenkatesh Pallipadi if (pr->flags.bm_check && pr->flags.bm_control) {
963e12f65f7SThomas Gleixner raw_spin_lock(&c3_lock);
96450ffba1bSBob Moore acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
9654f86d3a8SLen Brown c3_cpu_count--;
966e12f65f7SThomas Gleixner raw_spin_unlock(&c3_lock);
9674f86d3a8SLen Brown }
968f461ddeaSLen Brown kt2 = ktime_get_real();
969157317baSZhao Yakui idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
9702da513f5SVenkatesh Pallipadi idle_time = idle_time_ns;
9712da513f5SVenkatesh Pallipadi do_div(idle_time, NSEC_PER_USEC);
9724f86d3a8SLen Brown
973e978aa7dSDeepthi Dharwar /* Update device last_residency*/
974e978aa7dSDeepthi Dharwar dev->last_residency = (int)idle_time;
975e978aa7dSDeepthi Dharwar
97650629118SVenkatesh Pallipadi /* Tell the scheduler how much we idled: */
9772da513f5SVenkatesh Pallipadi sched_clock_idle_wakeup_event(idle_time_ns);
9784f86d3a8SLen Brown
9794f86d3a8SLen Brown local_irq_enable();
98002cf4f98SLen Brown if (cx->entry_method != ACPI_CSTATE_FFH)
9814f86d3a8SLen Brown current_thread_info()->status |= TS_POLLING;
9824f86d3a8SLen Brown
9837e275cc4SLen Brown lapic_timer_state_broadcast(pr, cx, 0);
984e978aa7dSDeepthi Dharwar
return index;
9854f86d3a8SLen Brown }
9864f86d3a8SLen Brown
9874f86d3a8SLen Brown struct cpuidle_driver acpi_idle_driver = {
9884f86d3a8SLen Brown .name = "acpi_idle",
9894f86d3a8SLen Brown .owner = THIS_MODULE,
9904f86d3a8SLen Brown };
9914f86d3a8SLen Brown
9924f86d3a8SLen Brown /**
99346bcfad7SDeepthi Dharwar * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE
99446bcfad7SDeepthi Dharwar * device i.e. per-cpu data
99546bcfad7SDeepthi Dharwar *
9964f86d3a8SLen Brown * @pr: the ACPI processor
9974f86d3a8SLen Brown */
/*
 * NOTE(review): fills this CPU's cpuidle_device state-usage slots with
 * pointers to the processor's valid C-states.  The state-selection
 * conditions here must stay in sync with
 * acpi_processor_setup_cpuidle_states() below, which applies the same
 * filter to the driver-global state table.
 */
99846bcfad7SDeepthi Dharwar static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
9994f86d3a8SLen Brown {
10009a0b8415Svenkatesh.pallipadi@intel.com int i, count = CPUIDLE_DRIVER_STATE_START;
10014f86d3a8SLen Brown struct acpi_processor_cx *cx;
10024202735eSDeepthi Dharwar struct cpuidle_state_usage *state_usage;
10033d339dcbSDaniel Lezcano struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
10044f86d3a8SLen Brown
10054f86d3a8SLen Brown if (!pr->flags.power_setup_done)
10064f86d3a8SLen Brown return -EINVAL;
10074f86d3a8SLen Brown
10084f86d3a8SLen Brown if (pr->flags.power == 0) {
10094f86d3a8SLen Brown return -EINVAL;
10104f86d3a8SLen Brown }
10114f86d3a8SLen Brown
1012dcb84f33SVenkatesh Pallipadi dev->cpu = pr->id;
10134fcb2fcdSVenkatesh Pallipadi
1014615dfd93SLen Brown if (max_cstate == 0)
1015615dfd93SLen Brown max_cstate = 1;
1016615dfd93SLen Brown
10174f86d3a8SLen Brown for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
10184f86d3a8SLen Brown cx = &pr->power.states[i];
10194202735eSDeepthi Dharwar state_usage = &dev->states_usage[count];
10204f86d3a8SLen Brown
10214f86d3a8SLen Brown if (!cx->valid)
10224f86d3a8SLen Brown continue;
10234f86d3a8SLen Brown
10244f86d3a8SLen Brown #ifdef CONFIG_HOTPLUG_CPU
10254f86d3a8SLen Brown if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
10264f86d3a8SLen Brown !pr->flags.has_cst &&
10274f86d3a8SLen Brown !(acpi_gbl_FADT.flags &
ACPI_FADT_C2_MP_SUPPORTED))
10284f86d3a8SLen Brown continue;
10294f86d3a8SLen Brown #endif
10304f86d3a8SLen Brown
10314202735eSDeepthi Dharwar cpuidle_set_statedata(state_usage, cx);
10324f86d3a8SLen Brown
103346bcfad7SDeepthi Dharwar count++;
103446bcfad7SDeepthi Dharwar if (count == CPUIDLE_STATE_MAX)
103546bcfad7SDeepthi Dharwar break;
103646bcfad7SDeepthi Dharwar }
103746bcfad7SDeepthi Dharwar
103846bcfad7SDeepthi Dharwar dev->state_count = count;
103946bcfad7SDeepthi Dharwar
104046bcfad7SDeepthi Dharwar if (!count)
104146bcfad7SDeepthi Dharwar return -EINVAL;
104246bcfad7SDeepthi Dharwar
104346bcfad7SDeepthi Dharwar return 0;
104446bcfad7SDeepthi Dharwar }
104546bcfad7SDeepthi Dharwar
104646bcfad7SDeepthi Dharwar /**
104746bcfad7SDeepthi Dharwar * acpi_processor_setup_cpuidle states- prepares and configures cpuidle
104846bcfad7SDeepthi Dharwar * global state data i.e. idle routines
104946bcfad7SDeepthi Dharwar *
105046bcfad7SDeepthi Dharwar * @pr: the ACPI processor
105146bcfad7SDeepthi Dharwar */
/*
 * NOTE(review): populates the driver-global state table: name, latency,
 * target residency and the enter callbacks.  safe_state_index tracks
 * the deepest non-C3 state registered (C1/C2), which
 * acpi_idle_enter_bm() falls back to when bus-master activity is seen.
 */
105246bcfad7SDeepthi Dharwar static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
105346bcfad7SDeepthi Dharwar {
105446bcfad7SDeepthi Dharwar int i, count = CPUIDLE_DRIVER_STATE_START;
105546bcfad7SDeepthi Dharwar struct acpi_processor_cx *cx;
105646bcfad7SDeepthi Dharwar struct cpuidle_state *state;
105746bcfad7SDeepthi Dharwar struct cpuidle_driver *drv = &acpi_idle_driver;
105846bcfad7SDeepthi Dharwar
105946bcfad7SDeepthi Dharwar if (!pr->flags.power_setup_done)
106046bcfad7SDeepthi Dharwar return -EINVAL;
106146bcfad7SDeepthi Dharwar
106246bcfad7SDeepthi Dharwar if (pr->flags.power == 0)
106346bcfad7SDeepthi Dharwar return -EINVAL;
106446bcfad7SDeepthi Dharwar
106546bcfad7SDeepthi Dharwar drv->safe_state_index = -1;
106646bcfad7SDeepthi Dharwar for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
106746bcfad7SDeepthi Dharwar drv->states[i].name[0] = '\0';
106846bcfad7SDeepthi Dharwar drv->states[i].desc[0] = '\0';
106946bcfad7SDeepthi Dharwar }
107046bcfad7SDeepthi Dharwar
107146bcfad7SDeepthi Dharwar if (max_cstate == 0)
107246bcfad7SDeepthi Dharwar max_cstate = 1;
107346bcfad7SDeepthi Dharwar
107446bcfad7SDeepthi Dharwar for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
107546bcfad7SDeepthi Dharwar cx = &pr->power.states[i];
107646bcfad7SDeepthi Dharwar
107746bcfad7SDeepthi Dharwar if (!cx->valid)
107846bcfad7SDeepthi Dharwar continue;
107946bcfad7SDeepthi Dharwar
108046bcfad7SDeepthi Dharwar #ifdef CONFIG_HOTPLUG_CPU
108146bcfad7SDeepthi Dharwar if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
108246bcfad7SDeepthi Dharwar !pr->flags.has_cst &&
108346bcfad7SDeepthi Dharwar !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
108446bcfad7SDeepthi Dharwar continue;
108546bcfad7SDeepthi Dharwar #endif
108646bcfad7SDeepthi Dharwar
108746bcfad7SDeepthi Dharwar state = &drv->states[count];
10884f86d3a8SLen Brown snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
10894fcb2fcdSVenkatesh Pallipadi strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
10904f86d3a8SLen Brown state->exit_latency = cx->latency;
10914963f620SLen Brown state->target_residency = cx->latency * latency_factor;
10924f86d3a8SLen Brown
10934f86d3a8SLen Brown state->flags = 0;
10944f86d3a8SLen Brown switch (cx->type) {
10954f86d3a8SLen Brown case ACPI_STATE_C1:
10968e92b660SVenki Pallipadi if (cx->entry_method == ACPI_CSTATE_FFH)
10979b12e18cSvenkatesh.pallipadi@intel.com state->flags |= CPUIDLE_FLAG_TIME_VALID;
10988e92b660SVenki Pallipadi
10994f86d3a8SLen Brown state->enter = acpi_idle_enter_c1;
11001a022e3fSBoris Ostrovsky state->enter_dead = acpi_idle_play_dead;
110146bcfad7SDeepthi Dharwar drv->safe_state_index = count;
11024f86d3a8SLen Brown break;
11034f86d3a8SLen Brown
11044f86d3a8SLen Brown case ACPI_STATE_C2:
11054f86d3a8SLen Brown state->flags |= CPUIDLE_FLAG_TIME_VALID;
11064f86d3a8SLen Brown state->enter = acpi_idle_enter_simple;
11071a022e3fSBoris Ostrovsky state->enter_dead = acpi_idle_play_dead;
110846bcfad7SDeepthi Dharwar drv->safe_state_index = count;
11094f86d3a8SLen Brown break;
11104f86d3a8SLen Brown
11114f86d3a8SLen Brown case ACPI_STATE_C3:
11124f86d3a8SLen Brown state->flags |= CPUIDLE_FLAG_TIME_VALID;
11134f86d3a8SLen Brown state->enter = pr->flags.bm_check ?
11144f86d3a8SLen Brown acpi_idle_enter_bm :
11154f86d3a8SLen Brown acpi_idle_enter_simple;
11164f86d3a8SLen Brown break;
11174f86d3a8SLen Brown }
11184f86d3a8SLen Brown
11194f86d3a8SLen Brown count++;
11209a0b8415Svenkatesh.pallipadi@intel.com if (count == CPUIDLE_STATE_MAX)
11219a0b8415Svenkatesh.pallipadi@intel.com break;
11224f86d3a8SLen Brown }
11234f86d3a8SLen Brown
112446bcfad7SDeepthi Dharwar drv->state_count = count;
11254f86d3a8SLen Brown
11264f86d3a8SLen Brown if (!count)
11274f86d3a8SLen Brown return -EINVAL;
11284f86d3a8SLen Brown
11294f86d3a8SLen Brown return 0;
11304f86d3a8SLen Brown }
11314f86d3a8SLen Brown
/*
 * NOTE(review): re-reads this processor's C-state data after a hotplug
 * event and re-enables its (paused) cpuidle device if power management
 * is still supported.
 */
113246bcfad7SDeepthi Dharwar int acpi_processor_hotplug(struct acpi_processor *pr)
11334f86d3a8SLen Brown {
1134dcb84f33SVenkatesh Pallipadi int ret = 0;
1135e8b1b59dSWei Yongjun struct cpuidle_device *dev;
11364f86d3a8SLen Brown
1137d1896049SThomas Renninger if (disabled_by_idle_boot_param())
113836a91358SVenkatesh Pallipadi return 0;
113936a91358SVenkatesh Pallipadi
11404f86d3a8SLen Brown if (!pr)
11414f86d3a8SLen Brown return -EINVAL;
11424f86d3a8SLen Brown
11434f86d3a8SLen Brown if (nocst) {
11444f86d3a8SLen Brown return -ENODEV;
11454f86d3a8SLen Brown }
11464f86d3a8SLen Brown
11474f86d3a8SLen Brown if (!pr->flags.power_setup_done)
11484f86d3a8SLen Brown return -ENODEV;
11494f86d3a8SLen Brown
1150e8b1b59dSWei Yongjun dev = per_cpu(acpi_cpuidle_device, pr->id);
11514f86d3a8SLen Brown cpuidle_pause_and_lock();
11523d339dcbSDaniel Lezcano cpuidle_disable_device(dev);
11534f86d3a8SLen Brown acpi_processor_get_power_info(pr);
1154dcb84f33SVenkatesh Pallipadi if (pr->flags.power) {
115546bcfad7SDeepthi Dharwar acpi_processor_setup_cpuidle_cx(pr);
11563d339dcbSDaniel Lezcano
ret = cpuidle_enable_device(dev);
1157dcb84f33SVenkatesh Pallipadi }
11584f86d3a8SLen Brown cpuidle_resume_and_unlock();
11594f86d3a8SLen Brown
11604f86d3a8SLen Brown return ret;
11614f86d3a8SLen Brown }
11624f86d3a8SLen Brown
/*
 * NOTE(review): handles an ACPI _CST-changed notification.  Work is
 * only performed on CPU 0 while acpi_idle is the active driver: all
 * per-cpu devices are disabled, the global state table is rebuilt from
 * the notifying processor, then each CPU's data is refreshed and its
 * device re-enabled -- all under cpuidle_pause_and_lock() and with
 * hotplug excluded via get_online_cpus().
 */
116346bcfad7SDeepthi Dharwar int acpi_processor_cst_has_changed(struct acpi_processor *pr)
116446bcfad7SDeepthi Dharwar {
116546bcfad7SDeepthi Dharwar int cpu;
116646bcfad7SDeepthi Dharwar struct acpi_processor *_pr;
11673d339dcbSDaniel Lezcano struct cpuidle_device *dev;
116846bcfad7SDeepthi Dharwar
116946bcfad7SDeepthi Dharwar if (disabled_by_idle_boot_param())
117046bcfad7SDeepthi Dharwar return 0;
117146bcfad7SDeepthi Dharwar
117246bcfad7SDeepthi Dharwar if (!pr)
117346bcfad7SDeepthi Dharwar return -EINVAL;
117446bcfad7SDeepthi Dharwar
117546bcfad7SDeepthi Dharwar if (nocst)
117646bcfad7SDeepthi Dharwar return -ENODEV;
117746bcfad7SDeepthi Dharwar
117846bcfad7SDeepthi Dharwar if (!pr->flags.power_setup_done)
117946bcfad7SDeepthi Dharwar return -ENODEV;
118046bcfad7SDeepthi Dharwar
118146bcfad7SDeepthi Dharwar /*
118246bcfad7SDeepthi Dharwar * FIXME: Design the ACPI notification to make it once per
118346bcfad7SDeepthi Dharwar * system instead of once per-cpu. This condition is a hack
118446bcfad7SDeepthi Dharwar * to make the code that updates C-States be called once.
118546bcfad7SDeepthi Dharwar */
118646bcfad7SDeepthi Dharwar
11879505626dSPaul E.
McKenney if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
118846bcfad7SDeepthi Dharwar
118946bcfad7SDeepthi Dharwar cpuidle_pause_and_lock();
119046bcfad7SDeepthi Dharwar /* Protect against cpu-hotplug */
119146bcfad7SDeepthi Dharwar get_online_cpus();
119246bcfad7SDeepthi Dharwar
119346bcfad7SDeepthi Dharwar /* Disable all cpuidle devices */
119446bcfad7SDeepthi Dharwar for_each_online_cpu(cpu) {
119546bcfad7SDeepthi Dharwar _pr = per_cpu(processors, cpu);
119646bcfad7SDeepthi Dharwar if (!_pr || !_pr->flags.power_setup_done)
119746bcfad7SDeepthi Dharwar continue;
11983d339dcbSDaniel Lezcano dev = per_cpu(acpi_cpuidle_device, cpu);
11993d339dcbSDaniel Lezcano cpuidle_disable_device(dev);
120046bcfad7SDeepthi Dharwar }
120146bcfad7SDeepthi Dharwar
120246bcfad7SDeepthi Dharwar /* Populate Updated C-state information */
120346bcfad7SDeepthi Dharwar acpi_processor_setup_cpuidle_states(pr);
120446bcfad7SDeepthi Dharwar
120546bcfad7SDeepthi Dharwar /* Enable all cpuidle devices */
120646bcfad7SDeepthi Dharwar for_each_online_cpu(cpu) {
120746bcfad7SDeepthi Dharwar _pr = per_cpu(processors, cpu);
120846bcfad7SDeepthi Dharwar if (!_pr || !_pr->flags.power_setup_done)
120946bcfad7SDeepthi Dharwar continue;
121046bcfad7SDeepthi Dharwar acpi_processor_get_power_info(_pr);
121146bcfad7SDeepthi Dharwar if (_pr->flags.power) {
121246bcfad7SDeepthi Dharwar acpi_processor_setup_cpuidle_cx(_pr);
12133d339dcbSDaniel Lezcano dev = per_cpu(acpi_cpuidle_device, cpu);
12143d339dcbSDaniel Lezcano cpuidle_enable_device(dev);
121546bcfad7SDeepthi Dharwar }
121646bcfad7SDeepthi Dharwar }
121746bcfad7SDeepthi Dharwar put_online_cpus();
121846bcfad7SDeepthi Dharwar cpuidle_resume_and_unlock();
121946bcfad7SDeepthi Dharwar }
122046bcfad7SDeepthi Dharwar
122146bcfad7SDeepthi Dharwar return 0;
122246bcfad7SDeepthi Dharwar }
122346bcfad7SDeepthi Dharwar
/* Number of processors registered with cpuidle; driver is unregistered when it drops to 0. */
122446bcfad7SDeepthi Dharwar static int acpi_processor_registered;
122546bcfad7SDeepthi Dharwar
/*
 * NOTE(review): per-processor init.  First caller performs one-time DMI
 * checks and, if the FADT advertises _CST SMI support (and "nocst" is
 * not set), notifies the BIOS of _CST capability.  Registers the
 * acpi_idle driver on first use, then allocates and registers this
 * CPU's cpuidle device.  On device-registration failure the driver is
 * unregistered if no other processor holds it.
 */
122638a991b6SDaniel Lezcano int
__cpuinit acpi_processor_power_init(struct acpi_processor *pr)
12271da177e4SLinus Torvalds {
12281da177e4SLinus Torvalds acpi_status status = 0;
122946bcfad7SDeepthi Dharwar int retval;
12303d339dcbSDaniel Lezcano struct cpuidle_device *dev;
1231b6835052SAndreas Mohr static int first_run;
12321da177e4SLinus Torvalds
1233d1896049SThomas Renninger if (disabled_by_idle_boot_param())
123436a91358SVenkatesh Pallipadi return 0;
12351da177e4SLinus Torvalds
12361da177e4SLinus Torvalds if (!first_run) {
12371da177e4SLinus Torvalds dmi_check_system(processor_power_dmi_table);
1238c1c30634SAlexey Starikovskiy max_cstate = acpi_processor_cstate_check(max_cstate);
12391da177e4SLinus Torvalds if (max_cstate < ACPI_C_STATES_MAX)
12404be44fcdSLen Brown printk(KERN_NOTICE
12414be44fcdSLen Brown "ACPI: processor limited to max C-state %d\n",
12424be44fcdSLen Brown max_cstate);
12431da177e4SLinus Torvalds first_run++;
12441da177e4SLinus Torvalds }
12451da177e4SLinus Torvalds
124602df8b93SVenkatesh Pallipadi if (!pr)
1247d550d98dSPatrick Mochel return -EINVAL;
124802df8b93SVenkatesh Pallipadi
1249cee324b1SAlexey Starikovskiy if (acpi_gbl_FADT.cst_control && !nocst) {
12504be44fcdSLen Brown status =
1251cee324b1SAlexey Starikovskiy acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
12521da177e4SLinus Torvalds if (ACPI_FAILURE(status)) {
1253a6fc6720SThomas Renninger ACPI_EXCEPTION((AE_INFO, status,
1254a6fc6720SThomas Renninger "Notifying BIOS of _CST ability failed"));
12551da177e4SLinus Torvalds }
12561da177e4SLinus Torvalds }
12571da177e4SLinus Torvalds
12581da177e4SLinus Torvalds acpi_processor_get_power_info(pr);
12594f86d3a8SLen Brown pr->flags.power_setup_done = 1;
12601da177e4SLinus Torvalds
12611da177e4SLinus Torvalds /*
12621da177e4SLinus Torvalds * Install the idle handler if processor power management is supported.
12631da177e4SLinus Torvalds * Note that we use previously set idle handler will be used on
12641da177e4SLinus Torvalds * platforms that only support C1.
12651da177e4SLinus Torvalds */
126636a91358SVenkatesh Pallipadi if (pr->flags.power) {
126746bcfad7SDeepthi Dharwar /* Register acpi_idle_driver if not already registered */
126846bcfad7SDeepthi Dharwar if (!acpi_processor_registered) {
126946bcfad7SDeepthi Dharwar acpi_processor_setup_cpuidle_states(pr);
127046bcfad7SDeepthi Dharwar retval = cpuidle_register_driver(&acpi_idle_driver);
127146bcfad7SDeepthi Dharwar if (retval)
127246bcfad7SDeepthi Dharwar return retval;
127346bcfad7SDeepthi Dharwar printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
127446bcfad7SDeepthi Dharwar acpi_idle_driver.name);
127546bcfad7SDeepthi Dharwar }
12763d339dcbSDaniel Lezcano
12773d339dcbSDaniel Lezcano dev = kzalloc(sizeof(*dev), GFP_KERNEL);
12783d339dcbSDaniel Lezcano if (!dev)
12793d339dcbSDaniel Lezcano return -ENOMEM;
12803d339dcbSDaniel Lezcano per_cpu(acpi_cpuidle_device, pr->id) = dev;
12813d339dcbSDaniel Lezcano
12823d339dcbSDaniel Lezcano acpi_processor_setup_cpuidle_cx(pr);
12833d339dcbSDaniel Lezcano
128446bcfad7SDeepthi Dharwar /* Register per-cpu cpuidle_device.
Cpuidle driver
128546bcfad7SDeepthi Dharwar * must already be registered before registering device
128646bcfad7SDeepthi Dharwar */
12873d339dcbSDaniel Lezcano retval = cpuidle_register_device(dev);
128846bcfad7SDeepthi Dharwar if (retval) {
128946bcfad7SDeepthi Dharwar if (acpi_processor_registered == 0)
129046bcfad7SDeepthi Dharwar cpuidle_unregister_driver(&acpi_idle_driver);
129146bcfad7SDeepthi Dharwar return retval;
129246bcfad7SDeepthi Dharwar }
129346bcfad7SDeepthi Dharwar acpi_processor_registered++;
12941da177e4SLinus Torvalds }
1295d550d98dSPatrick Mochel return 0;
12961da177e4SLinus Torvalds }
12971da177e4SLinus Torvalds
/*
 * NOTE(review): teardown counterpart of acpi_processor_power_init():
 * unregisters this CPU's cpuidle device and drops the driver reference
 * count, unregistering the driver when the last processor goes away.
 * The kzalloc'd device itself is not freed here -- presumably freed
 * elsewhere or leaked on exit; worth confirming.
 */
129838a991b6SDaniel Lezcano int acpi_processor_power_exit(struct acpi_processor *pr)
12991da177e4SLinus Torvalds {
13003d339dcbSDaniel Lezcano struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
13013d339dcbSDaniel Lezcano
1302d1896049SThomas Renninger if (disabled_by_idle_boot_param())
130336a91358SVenkatesh Pallipadi return 0;
130436a91358SVenkatesh Pallipadi
130546bcfad7SDeepthi Dharwar if (pr->flags.power) {
13063d339dcbSDaniel Lezcano cpuidle_unregister_device(dev);
130746bcfad7SDeepthi Dharwar acpi_processor_registered--;
130846bcfad7SDeepthi Dharwar if (acpi_processor_registered == 0)
130946bcfad7SDeepthi Dharwar cpuidle_unregister_driver(&acpi_idle_driver);
131046bcfad7SDeepthi Dharwar }
13111da177e4SLinus Torvalds
131246bcfad7SDeepthi Dharwar pr->flags.power_setup_done = 0;
1313d550d98dSPatrick Mochel return 0;
13141da177e4SLinus Torvalds }
1315