xref: /openbmc/linux/drivers/acpi/processor_idle.c (revision da5e09a1)
/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  			- Added processor hotplug support
 *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *  			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <asm/processor.h>

#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER	"power"
#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
#ifndef CONFIG_CPU_IDLE
#define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
#define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
static void (*pm_idle_save) (void) __read_mostly;
#else
#define C2_OVERHEAD			1	/* 1us */
#define C3_OVERHEAD			1	/* 1us */
#endif
#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
#ifdef CONFIG_CPU_IDLE
module_param(max_cstate, uint, 0000);
#else
module_param(max_cstate, uint, 0644);
#endif
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);

#ifndef CONFIG_CPU_IDLE
/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 * 100 HZ: 0x0000000F: 4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history __read_mostly =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);

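/*
 * Illustration of the arithmetic above (example values, not from the spec):
 * with HZ=250 the default mask is (1U << 10) - 1 = 0x3FF, i.e. 10 jiffies
 * = 40ms of history.  acpi_processor_idle() shifts pr->power.bm_activity
 * left by the number of jiffies since the last check and ORs in bit 0
 * whenever BM_STS (or the PIIX4 BMIDEA workaround) reports bus-master
 * activity, so a set bit N means "activity seen N jiffies ago".  The C3
 * promotion/demotion decisions then test that mask against the thresholds
 * seeded from bm_history in acpi_processor_set_power_policy().
 */
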
static int acpi_processor_set_power_policy(struct acpi_processor *pr);

#else	/* CONFIG_CPU_IDLE */
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
#endif

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void*)1},
	{ set_max_cstate, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};

static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return ((0xFFFFFFFF - t1) + t2);
}

static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return PM_TIMER_TICKS_TO_US(t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}

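/*
 * Worked example of the wrap handling above (illustrative numbers): with a
 * 24-bit PM timer, t1 = 0x00FFFFF0 and t2 = 0x00000010 give
 * ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF = 0x1F = 31 ticks, and
 * PM_TIMER_TICKS_TO_US(31) = 31 * 1000 / (PM_TIMER_FREQUENCY / 1000) ~= 8 us
 * at the 3.579545 MHz ACPI PM timer rate.  Only a single wrap can be
 * compensated for, which is sufficient for the short idle residencies
 * measured here.
 */
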
/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched()) {
		safe_halt();
		local_irq_disable();
	}
	current_thread_info()->status |= TS_POLLING;
}

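/*
 * Background on the TS_POLLING dance above (general kernel behaviour, not
 * specific to this file): while TS_POLLING is set, the scheduler assumes
 * the idle task is polling TIF_NEED_RESCHED and skips the reschedule IPI.
 * Clearing the flag before the need_resched() test, with the barrier in
 * between, guarantees that a wakeup arriving after the test still sends an
 * interrupt that breaks the CPU out of the halt.
 */
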
#ifndef CONFIG_CPU_IDLE

static void
acpi_processor_power_activate(struct acpi_processor *pr,
			      struct acpi_processor_cx *new)
{
	struct acpi_processor_cx *old;

	if (!pr || !new)
		return;

	old = pr->power.state;

	if (old)
		old->promotion.count = 0;
	new->demotion.count = 0;

	/* Cleanup from old state. */
	if (old) {
		switch (old->type) {
		case ACPI_STATE_C3:
			/* Disable bus master reload */
			if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
				acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
			break;
		}
	}

	/* Prepare to use new state. */
	switch (new->type) {
	case ACPI_STATE_C3:
		/* Enable bus master reload */
		if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		break;
	}

	pr->power.state = new;

	return;
}

static atomic_t c3_cpu_count;

/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
	if (cstate->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cstate);
	} else {
		int unused;
		/* IO port based C-state */
		inb(cstate->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}
#endif /* !CONFIG_CPU_IDLE */

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	/*
	 * Check if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}

/* Power(C) State timer broadcast control */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ?  CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}

#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
}

#endif

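/*
 * Usage sketch (this is how acpi_processor_idle() below brackets a C2/C3
 * entry): acpi_state_timer_broadcast(pr, cx, 1) hands timekeeping to the
 * broadcast device before the local APIC timer can stop, the C-state is
 * entered, and acpi_state_timer_broadcast(pr, cx, 0) switches back to the
 * local timer on wakeup.
 */
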
/*
 * Suspend / resume control
 */
static int acpi_idle_suspend;

int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
	acpi_idle_suspend = 1;
	return 0;
}

int acpi_processor_resume(struct acpi_device * device)
{
	acpi_idle_suspend = 0;
	return 0;
}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
static int tsc_halts_in_c(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
			return 0;
		/*FALL THROUGH*/
	case X86_VENDOR_INTEL:
		/* Several cases known where TSC halts in C2 too */
	default:
		return state > ACPI_STATE_C1;
	}
}
#endif

#ifndef CONFIG_CPU_IDLE
static void acpi_processor_idle(void)
{
	struct acpi_processor *pr = NULL;
	struct acpi_processor_cx *cx = NULL;
	struct acpi_processor_cx *next_state = NULL;
	int sleep_ticks = 0;
	u32 t1, t2 = 0;

	/*
	 * Interrupts must be disabled during bus mastering calculations and
	 * for C2/C3 transitions.
	 */
	local_irq_disable();

	pr = __get_cpu_var(processors);
	if (!pr) {
		local_irq_enable();
		return;
	}

	/*
	 * Check whether we truly need to go idle, or should
	 * reschedule:
	 */
	if (unlikely(need_resched())) {
		local_irq_enable();
		return;
	}

	cx = pr->power.state;
	if (!cx || acpi_idle_suspend) {
		if (pm_idle_save) {
			pm_idle_save(); /* enables IRQs */
		} else {
			acpi_safe_halt();
			local_irq_enable();
		}

		return;
	}

	/*
	 * Check BM Activity
	 * -----------------
	 * Check for bus mastering activity (if required), record, and check
	 * for demotion.
	 */
	if (pr->flags.bm_check) {
		u32 bm_status = 0;
		unsigned long diff = jiffies - pr->power.bm_check_timestamp;

		if (diff > 31)
			diff = 31;

		pr->power.bm_activity <<= diff;

		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
		if (bm_status) {
			pr->power.bm_activity |= 0x1;
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
		}
		/*
		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
		 * the true state of bus mastering activity; forcing us to
		 * manually check the BMIDEA bit of each IDE channel.
		 */
		else if (errata.piix4.bmisx) {
			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
				pr->power.bm_activity |= 0x1;
		}

		pr->power.bm_check_timestamp = jiffies;

		/*
		 * If bus mastering is or was active this jiffy, demote
		 * to avoid a faulty transition.  Note that the processor
		 * won't enter a low-power state during this call (to this
		 * function) but should upon the next.
		 *
		 * TBD: A better policy might be to fall back to the demotion
		 *      state (use it for this quantum only) instead of
		 *      demoting -- and rely on duration as our sole demotion
		 *      qualification.  This may, however, introduce DMA
		 *      issues (e.g. floppy DMA transfer overrun/underrun).
		 */
		if ((pr->power.bm_activity & 0x1) &&
		    cx->demotion.threshold.bm) {
			local_irq_enable();
			next_state = cx->demotion.state;
			goto end;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
	 * detection phase, to work cleanly with logical CPU hotplug.
	 */
	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		cx = &pr->power.states[ACPI_STATE_C1];
#endif

	/*
	 * Sleep:
	 * ------
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();
		if (need_resched()) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return;
		}
	}

	switch (cx->type) {

	case ACPI_STATE_C1:
		/*
		 * Invoke C1.
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save) {
			pm_idle_save(); /* enables IRQs */
		} else {
			acpi_safe_halt();
			local_irq_enable();
		}

		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 *
		 * Note: the TSC better not stop in C1, sched_clock() will
		 *       skew otherwise.
		 */
		sleep_ticks = 0xFFFFFFFF;

		break;

	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		/* Invoke C2 */
		acpi_state_timer_broadcast(pr, cx, 1);
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
		/* TSC halts in C2, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C2))
			mark_tsc_unstable("possible TSC halt in C2");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);

		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	case ACPI_STATE_C3:
		acpi_unlazy_tlb(smp_processor_id());
		/*
		 * Must be done before busmaster disable as we might
		 * need to access HPET !
		 */
		acpi_state_timer_broadcast(pr, cx, 1);
		/*
		 * disable bus master
		 * bm_check implies we need ARB_DIS
		 * !bm_check implies we need cache flush
		 * bm_control implies whether we can do ARB_DIS
		 *
		 * That leaves a case where bm_check is set and bm_control is
		 * not set. In that case we cannot do much, we enter C3
		 * without doing anything.
		 */
		if (pr->flags.bm_check && pr->flags.bm_control) {
			if (atomic_inc_return(&c3_cpu_count) ==
			    num_online_cpus()) {
				/*
				 * All CPUs are trying to go to C3
				 * Disable bus master arbitration
				 */
				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
			}
		} else if (!pr->flags.bm_check) {
			/* SMP with no shared cache... Invalidate cache  */
			ACPI_FLUSH_CPU_CACHE();
		}

		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Invoke C3 */
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		if (pr->flags.bm_check && pr->flags.bm_control) {
			/* Enable bus master arbitration */
			atomic_dec(&c3_cpu_count);
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
		/* TSC halts in C3, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C3))
			mark_tsc_unstable("TSC halts in C3");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);
		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	default:
		local_irq_enable();
		return;
	}
	cx->usage++;
	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
		cx->time += sleep_ticks;

	next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
	/* Don't do promotion/demotion */
	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
		next_state = cx;
		goto end;
	}
#endif

	/*
	 * Promotion?
	 * ----------
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that bus
	 * mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks &&
		  cx->promotion.state->latency <=
				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >=
			    cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					if (!
					    (pr->power.bm_activity & cx->
					     promotion.threshold.bm)) {
						next_state =
						    cx->promotion.state;
						goto end;
					}
				} else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}

	/*
	 * Demotion?
	 * ---------
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}

      end:
	/*
	 * Demote if current state exceeds max_cstate
	 * or if the latency of the current state is unacceptable
	 */
	if ((pr->power.state - pr->power.states) > max_cstate ||
		pr->power.state->latency >
				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}

	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);
}

static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;


	if (!pr)
		return -EINVAL;

	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3.  We're favoring C2 for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */

	/* startup state */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return -ENODEV;

	/* demotion */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}

	/* promotion */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return 0;
}
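
/*
 * Example of the chains built above when C1, C2 and C3 are all valid
 * (illustrative summary, derived from the loops above): C3 demotes to C2
 * and C2 to C1 after a single too-short sleep; C1 promotes to C2 after 10
 * long-enough sleeps and C2 to C3 after 4, with promotion into C3
 * additionally gated on bm_history showing no recent bus-master activity.
 */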
#endif /* !CONFIG_CPU_IDLE */

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

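	/*
	 * Layout note (per the ACPI specification): the processor block
	 * referenced by pr->pblk is P_CNT (4 bytes) followed by the one-byte
	 * P_LVL2 and P_LVL3 command registers, which is why C2 and C3 read
	 * from pblk + 4 and pblk + 5 below.
	 */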
	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;


	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

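	/*
	 * Expected _CST layout (per the ACPI specification): element 0 is the
	 * C-state count, and every following element is itself a 4-element
	 * package of { Register (Buffer), Type (Integer), Latency in us
	 * (Integer), Power in mW (Integer) }, which is what the loop below
	 * unpacks and sanity-checks.
	 */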
	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
					(idle_halt || idle_nomwait)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}

10261da177e4SLinus Torvalds static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
10271da177e4SLinus Torvalds {
10281da177e4SLinus Torvalds 
10291da177e4SLinus Torvalds 	if (!cx->address)
1030d550d98dSPatrick Mochel 		return;
10311da177e4SLinus Torvalds 
10321da177e4SLinus Torvalds 	/*
10331da177e4SLinus Torvalds 	 * C2 latency must be less than or equal to 100
10341da177e4SLinus Torvalds 	 * microseconds.
10351da177e4SLinus Torvalds 	 */
10361da177e4SLinus Torvalds 	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
10371da177e4SLinus Torvalds 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
10384be44fcdSLen Brown 				  "latency too large [%d]\n", cx->latency));
1039d550d98dSPatrick Mochel 		return;
10401da177e4SLinus Torvalds 	}
10411da177e4SLinus Torvalds 
10421da177e4SLinus Torvalds 	/*
10431da177e4SLinus Torvalds 	 * Otherwise we've met all of our C2 requirements.
10441da177e4SLinus Torvalds 	 * Normalize the C2 latency to expedite policy.
10451da177e4SLinus Torvalds 	 */
10461da177e4SLinus Torvalds 	cx->valid = 1;
10474f86d3a8SLen Brown 
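	/*
	 * The legacy idle loop compares PM-timer ticks, so pre-convert
	 * the latency there; the cpuidle path works in microseconds and
	 * keeps the value as-is.
	 */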
10484f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
10491da177e4SLinus Torvalds 	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
10504f86d3a8SLen Brown #else
10514f86d3a8SLen Brown 	cx->latency_ticks = cx->latency;
10524f86d3a8SLen Brown #endif
10531da177e4SLinus Torvalds 
1054d550d98dSPatrick Mochel 	return;
10551da177e4SLinus Torvalds }
10561da177e4SLinus Torvalds 
10574be44fcdSLen Brown static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
10581da177e4SLinus Torvalds 					   struct acpi_processor_cx *cx)
10591da177e4SLinus Torvalds {
106002df8b93SVenkatesh Pallipadi 	static int bm_check_flag;
106102df8b93SVenkatesh Pallipadi 
10621da177e4SLinus Torvalds 
10631da177e4SLinus Torvalds 	if (!cx->address)
1064d550d98dSPatrick Mochel 		return;
10651da177e4SLinus Torvalds 
10661da177e4SLinus Torvalds 	/*
10671da177e4SLinus Torvalds 	 * C3 latency must be less than or equal to 1000
10681da177e4SLinus Torvalds 	 * microseconds.
10691da177e4SLinus Torvalds 	 */
10701da177e4SLinus Torvalds 	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
10711da177e4SLinus Torvalds 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
10724be44fcdSLen Brown 				  "latency too large [%d]\n", cx->latency));
1073d550d98dSPatrick Mochel 		return;
10741da177e4SLinus Torvalds 	}
10751da177e4SLinus Torvalds 
10761da177e4SLinus Torvalds 	/*
10771da177e4SLinus Torvalds 	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
10781da177e4SLinus Torvalds 	 * DMA transfers are used by any ISA device to avoid livelock.
10791da177e4SLinus Torvalds 	 * Note that we could disable Type-F DMA (as recommended by
10801da177e4SLinus Torvalds 	 * the erratum), but this is known to disrupt certain ISA
10811da177e4SLinus Torvalds 	 * devices, so we take the conservative approach.
10821da177e4SLinus Torvalds 	 */
10831da177e4SLinus Torvalds 	else if (errata.piix4.fdma) {
10841da177e4SLinus Torvalds 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
10851da177e4SLinus Torvalds 				  "C3 not supported on PIIX4 with Type-F DMA\n"));
1086d550d98dSPatrick Mochel 		return;
10871da177e4SLinus Torvalds 	}
10881da177e4SLinus Torvalds 
108902df8b93SVenkatesh Pallipadi 	/* All the logic here assumes flags.bm_check is the same across all CPUs */
109002df8b93SVenkatesh Pallipadi 	if (!bm_check_flag) {
109102df8b93SVenkatesh Pallipadi 		/* Determine whether bm_check is needed based on CPU  */
109202df8b93SVenkatesh Pallipadi 		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
109302df8b93SVenkatesh Pallipadi 		bm_check_flag = pr->flags.bm_check;
109402df8b93SVenkatesh Pallipadi 	} else {
109502df8b93SVenkatesh Pallipadi 		pr->flags.bm_check = bm_check_flag;
109602df8b93SVenkatesh Pallipadi 	}
109702df8b93SVenkatesh Pallipadi 
109802df8b93SVenkatesh Pallipadi 	if (pr->flags.bm_check) {
109902df8b93SVenkatesh Pallipadi 		if (!pr->flags.bm_control) {
1100ed3110efSVenki Pallipadi 			if (pr->flags.has_cst != 1) {
1101ed3110efSVenki Pallipadi 				/* bus mastering control is necessary */
110202df8b93SVenkatesh Pallipadi 				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1103ed3110efSVenki Pallipadi 					"C3 support requires BM control\n"));
1104ed3110efSVenki Pallipadi 				return;
1105ed3110efSVenki Pallipadi 			} else {
1106ed3110efSVenki Pallipadi 				/* Here we enter C3 without bus mastering */
1107ed3110efSVenki Pallipadi 				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1108ed3110efSVenki Pallipadi 					"C3 support without BM control\n"));
1109ed3110efSVenki Pallipadi 			}
111002df8b93SVenkatesh Pallipadi 		}
111102df8b93SVenkatesh Pallipadi 	} else {
111202df8b93SVenkatesh Pallipadi 		/*
111302df8b93SVenkatesh Pallipadi 		 * The WBINVD flag must be set in the FADT for C3 to be
111402df8b93SVenkatesh Pallipadi 		 * supported when bm_check is not required.
111502df8b93SVenkatesh Pallipadi 		 */
1116cee324b1SAlexey Starikovskiy 		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
111702df8b93SVenkatesh Pallipadi 			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
111802df8b93SVenkatesh Pallipadi 					  "Cache invalidation should work properly"
111902df8b93SVenkatesh Pallipadi 					  " for C3 to be enabled on SMP systems\n"));
1120d550d98dSPatrick Mochel 			return;
112102df8b93SVenkatesh Pallipadi 		}
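		/*
		 * BM_RLD, when set, lets a bus-master request take the CPU
		 * back out of C3; without bm_check we do not demote on
		 * bus-master activity, so clear it here.
		 */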
1122d8c71b6dSBob Moore 		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
112302df8b93SVenkatesh Pallipadi 	}
112402df8b93SVenkatesh Pallipadi 
11251da177e4SLinus Torvalds 	/*
11261da177e4SLinus Torvalds 	 * Otherwise we've met all of our C3 requirements.
11271da177e4SLinus Torvalds 	 * Normalize the C3 latency to expedite policy.  Enable
11281da177e4SLinus Torvalds 	 * checking of bus mastering status (bm_check) so we can
11291da177e4SLinus Torvalds 	 * use this in our C3 policy
11301da177e4SLinus Torvalds 	 */
11311da177e4SLinus Torvalds 	cx->valid = 1;
11324f86d3a8SLen Brown 
11334f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
11341da177e4SLinus Torvalds 	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
11354f86d3a8SLen Brown #else
11364f86d3a8SLen Brown 	cx->latency_ticks = cx->latency;
11374f86d3a8SLen Brown #endif
11381da177e4SLinus Torvalds 
1139d550d98dSPatrick Mochel 	return;
11401da177e4SLinus Torvalds }
11411da177e4SLinus Torvalds 
11421da177e4SLinus Torvalds static int acpi_processor_power_verify(struct acpi_processor *pr)
11431da177e4SLinus Torvalds {
11441da177e4SLinus Torvalds 	unsigned int i;
11451da177e4SLinus Torvalds 	unsigned int working = 0;
11466eb0a0fdSVenkatesh Pallipadi 
1147169a0abbSThomas Gleixner 	pr->power.timer_broadcast_on_state = INT_MAX;
11486eb0a0fdSVenkatesh Pallipadi 
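	/*
	 * For each valid C2/C3 state, acpi_timer_check_state() notes
	 * whether the local APIC timer may stop there;
	 * acpi_propagate_timer_broadcast() then arranges for a broadcast
	 * clockevent to cover those states.
	 */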
11491da177e4SLinus Torvalds 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
11501da177e4SLinus Torvalds 		struct acpi_processor_cx *cx = &pr->power.states[i];
11511da177e4SLinus Torvalds 
11521da177e4SLinus Torvalds 		switch (cx->type) {
11531da177e4SLinus Torvalds 		case ACPI_STATE_C1:
11541da177e4SLinus Torvalds 			cx->valid = 1;
11551da177e4SLinus Torvalds 			break;
11561da177e4SLinus Torvalds 
11571da177e4SLinus Torvalds 		case ACPI_STATE_C2:
11581da177e4SLinus Torvalds 			acpi_processor_power_verify_c2(cx);
1159296d93cdSLinus Torvalds 			if (cx->valid)
1160169a0abbSThomas Gleixner 				acpi_timer_check_state(i, pr, cx);
11611da177e4SLinus Torvalds 			break;
11621da177e4SLinus Torvalds 
11631da177e4SLinus Torvalds 		case ACPI_STATE_C3:
11641da177e4SLinus Torvalds 			acpi_processor_power_verify_c3(pr, cx);
1165296d93cdSLinus Torvalds 			if (cx->valid)
1166169a0abbSThomas Gleixner 				acpi_timer_check_state(i, pr, cx);
11671da177e4SLinus Torvalds 			break;
11681da177e4SLinus Torvalds 		}
11691da177e4SLinus Torvalds 
11701da177e4SLinus Torvalds 		if (cx->valid)
11711da177e4SLinus Torvalds 			working++;
11721da177e4SLinus Torvalds 	}
11731da177e4SLinus Torvalds 
1174169a0abbSThomas Gleixner 	acpi_propagate_timer_broadcast(pr);
1175bd663347SAndi Kleen 
11761da177e4SLinus Torvalds 	return (working);
11771da177e4SLinus Torvalds }
11781da177e4SLinus Torvalds 
11794be44fcdSLen Brown static int acpi_processor_get_power_info(struct acpi_processor *pr)
11801da177e4SLinus Torvalds {
11811da177e4SLinus Torvalds 	unsigned int i;
11821da177e4SLinus Torvalds 	int result;
11831da177e4SLinus Torvalds 
11841da177e4SLinus Torvalds 
11851da177e4SLinus Torvalds 	/* NOTE: the idle thread may not be running when this
11861da177e4SLinus Torvalds 	 * function is called */
11871da177e4SLinus Torvalds 
1188991528d7SVenkatesh Pallipadi 	/* Zero initialize all the C-states info. */
1189991528d7SVenkatesh Pallipadi 	memset(pr->power.states, 0, sizeof(pr->power.states));
1190991528d7SVenkatesh Pallipadi 
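	/* Prefer _CST; fall back to the FADT P_BLK values if it is absent */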
11911da177e4SLinus Torvalds 	result = acpi_processor_get_power_info_cst(pr);
11926d93c648SVenkatesh Pallipadi 	if (result == -ENODEV)
1193c5a114f1SDarrick J. Wong 		result = acpi_processor_get_power_info_fadt(pr);
11946d93c648SVenkatesh Pallipadi 
1195991528d7SVenkatesh Pallipadi 	if (result)
1196991528d7SVenkatesh Pallipadi 		return result;
1197991528d7SVenkatesh Pallipadi 
1198991528d7SVenkatesh Pallipadi 	acpi_processor_get_power_info_default(pr);
1199991528d7SVenkatesh Pallipadi 
1200cf824788SJanosch Machowinski 	pr->power.count = acpi_processor_power_verify(pr);
12011da177e4SLinus Torvalds 
12024f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
12031da177e4SLinus Torvalds 	/*
12041da177e4SLinus Torvalds 	 * Set Default Policy
12051da177e4SLinus Torvalds 	 * ------------------
12061da177e4SLinus Torvalds 	 * Now that we know which states are supported, set the default
12071da177e4SLinus Torvalds 	 * policy.  Note that this policy can be changed dynamically
12081da177e4SLinus Torvalds 	 * (e.g. encourage deeper sleeps to conserve battery life when
12091da177e4SLinus Torvalds 	 * not on AC).
12101da177e4SLinus Torvalds 	 */
12111da177e4SLinus Torvalds 	result = acpi_processor_set_power_policy(pr);
12121da177e4SLinus Torvalds 	if (result)
1213d550d98dSPatrick Mochel 		return result;
12144f86d3a8SLen Brown #endif
12151da177e4SLinus Torvalds 
12161da177e4SLinus Torvalds 	/*
12171da177e4SLinus Torvalds 	 * if one state of type C2 or C3 is available, mark this
12181da177e4SLinus Torvalds 	 * CPU as being "idle manageable"
12191da177e4SLinus Torvalds 	 */
12201da177e4SLinus Torvalds 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
1221acf05f4bSVenkatesh Pallipadi 		if (pr->power.states[i].valid) {
12221da177e4SLinus Torvalds 			pr->power.count = i;
12232203d6edSLinus Torvalds 			if (pr->power.states[i].type >= ACPI_STATE_C2)
12241da177e4SLinus Torvalds 				pr->flags.power = 1;
12251da177e4SLinus Torvalds 		}
1226acf05f4bSVenkatesh Pallipadi 	}
12271da177e4SLinus Torvalds 
1228d550d98dSPatrick Mochel 	return 0;
12291da177e4SLinus Torvalds }
12301da177e4SLinus Torvalds 
12311da177e4SLinus Torvalds static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
12321da177e4SLinus Torvalds {
123350dd0969SJan Engelhardt 	struct acpi_processor *pr = seq->private;
12341da177e4SLinus Torvalds 	unsigned int i;
12351da177e4SLinus Torvalds 
12361da177e4SLinus Torvalds 
12371da177e4SLinus Torvalds 	if (!pr)
12381da177e4SLinus Torvalds 		goto end;
12391da177e4SLinus Torvalds 
12401da177e4SLinus Torvalds 	seq_printf(seq, "active state:            C%zd\n"
12411da177e4SLinus Torvalds 		   "max_cstate:              C%d\n"
12425c87579eSArjan van de Ven 		   "bus master activity:     %08x\n"
12435c87579eSArjan van de Ven 		   "maximum allowed latency: %d usec\n",
12441da177e4SLinus Torvalds 		   pr->power.state ? pr->power.state - pr->power.states : 0,
12455c87579eSArjan van de Ven 		   max_cstate, (unsigned)pr->power.bm_activity,
1246f011e2e2SMark Gross 		   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));
12471da177e4SLinus Torvalds 
12481da177e4SLinus Torvalds 	seq_puts(seq, "states:\n");
12491da177e4SLinus Torvalds 
12501da177e4SLinus Torvalds 	for (i = 1; i <= pr->power.count; i++) {
12511da177e4SLinus Torvalds 		seq_printf(seq, "   %cC%d:                  ",
12524be44fcdSLen Brown 			   (&pr->power.states[i] ==
12534be44fcdSLen Brown 			    pr->power.state ? '*' : ' '), i);
12541da177e4SLinus Torvalds 
12551da177e4SLinus Torvalds 		if (!pr->power.states[i].valid) {
12561da177e4SLinus Torvalds 			seq_puts(seq, "<not supported>\n");
12571da177e4SLinus Torvalds 			continue;
12581da177e4SLinus Torvalds 		}
12591da177e4SLinus Torvalds 
12601da177e4SLinus Torvalds 		switch (pr->power.states[i].type) {
12611da177e4SLinus Torvalds 		case ACPI_STATE_C1:
12621da177e4SLinus Torvalds 			seq_printf(seq, "type[C1] ");
12631da177e4SLinus Torvalds 			break;
12641da177e4SLinus Torvalds 		case ACPI_STATE_C2:
12651da177e4SLinus Torvalds 			seq_printf(seq, "type[C2] ");
12661da177e4SLinus Torvalds 			break;
12671da177e4SLinus Torvalds 		case ACPI_STATE_C3:
12681da177e4SLinus Torvalds 			seq_printf(seq, "type[C3] ");
12691da177e4SLinus Torvalds 			break;
12701da177e4SLinus Torvalds 		default:
12711da177e4SLinus Torvalds 			seq_printf(seq, "type[--] ");
12721da177e4SLinus Torvalds 			break;
12731da177e4SLinus Torvalds 		}
12741da177e4SLinus Torvalds 
12751da177e4SLinus Torvalds 		if (pr->power.states[i].promotion.state)
12761da177e4SLinus Torvalds 			seq_printf(seq, "promotion[C%zd] ",
12771da177e4SLinus Torvalds 				   (pr->power.states[i].promotion.state -
12781da177e4SLinus Torvalds 				    pr->power.states));
12791da177e4SLinus Torvalds 		else
12801da177e4SLinus Torvalds 			seq_puts(seq, "promotion[--] ");
12811da177e4SLinus Torvalds 
12821da177e4SLinus Torvalds 		if (pr->power.states[i].demotion.state)
12831da177e4SLinus Torvalds 			seq_printf(seq, "demotion[C%zd] ",
12841da177e4SLinus Torvalds 				   (pr->power.states[i].demotion.state -
12851da177e4SLinus Torvalds 				    pr->power.states));
12861da177e4SLinus Torvalds 		else
12871da177e4SLinus Torvalds 			seq_puts(seq, "demotion[--] ");
12881da177e4SLinus Torvalds 
1289a3c6598fSDominik Brodowski 		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
12901da177e4SLinus Torvalds 			   pr->power.states[i].latency,
1291a3c6598fSDominik Brodowski 			   pr->power.states[i].usage,
1292b0b7eaafSAlexey Starikovskiy 			   (unsigned long long)pr->power.states[i].time);
12931da177e4SLinus Torvalds 	}
12941da177e4SLinus Torvalds 
12951da177e4SLinus Torvalds       end:
1296d550d98dSPatrick Mochel 	return 0;
12971da177e4SLinus Torvalds }
12981da177e4SLinus Torvalds 
12991da177e4SLinus Torvalds static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
13001da177e4SLinus Torvalds {
13011da177e4SLinus Torvalds 	return single_open(file, acpi_processor_power_seq_show,
13021da177e4SLinus Torvalds 			   PDE(inode)->data);
13031da177e4SLinus Torvalds }
13041da177e4SLinus Torvalds 
1305d7508032SArjan van de Ven static const struct file_operations acpi_processor_power_fops = {
1306cf7acfabSDenis V. Lunev 	.owner = THIS_MODULE,
13071da177e4SLinus Torvalds 	.open = acpi_processor_power_open_fs,
13081da177e4SLinus Torvalds 	.read = seq_read,
13091da177e4SLinus Torvalds 	.llseek = seq_lseek,
13101da177e4SLinus Torvalds 	.release = single_release,
13111da177e4SLinus Torvalds };
13121da177e4SLinus Torvalds 
13134f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
13144f86d3a8SLen Brown 
13154f86d3a8SLen Brown int acpi_processor_cst_has_changed(struct acpi_processor *pr)
13164f86d3a8SLen Brown {
13174f86d3a8SLen Brown 	int result = 0;
13184f86d3a8SLen Brown 
131936a91358SVenkatesh Pallipadi 	if (boot_option_idle_override)
132036a91358SVenkatesh Pallipadi 		return 0;
13214f86d3a8SLen Brown 
13224f86d3a8SLen Brown 	if (!pr)
13234f86d3a8SLen Brown 		return -EINVAL;
13244f86d3a8SLen Brown 
13254f86d3a8SLen Brown 	if (nocst) {
13264f86d3a8SLen Brown 		return -ENODEV;
13274f86d3a8SLen Brown 	}
13284f86d3a8SLen Brown 
13294f86d3a8SLen Brown 	if (!pr->flags.power_setup_done)
13304f86d3a8SLen Brown 		return -ENODEV;
13314f86d3a8SLen Brown 
13324f86d3a8SLen Brown 	/* Fall back to the default idle loop */
13334f86d3a8SLen Brown 	pm_idle = pm_idle_save;
13344f86d3a8SLen Brown 	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */
13354f86d3a8SLen Brown 
13364f86d3a8SLen Brown 	pr->flags.power = 0;
13374f86d3a8SLen Brown 	result = acpi_processor_get_power_info(pr);
13384f86d3a8SLen Brown 	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
13394f86d3a8SLen Brown 		pm_idle = acpi_processor_idle;
13404f86d3a8SLen Brown 
13414f86d3a8SLen Brown 	return result;
13424f86d3a8SLen Brown }
13434f86d3a8SLen Brown 
13441fec74a9SAndrew Morton #ifdef CONFIG_SMP
13455c87579eSArjan van de Ven static void smp_callback(void *v)
13465c87579eSArjan van de Ven {
13475c87579eSArjan van de Ven 	/* we already woke the CPU up, nothing more to do */
13485c87579eSArjan van de Ven }
13495c87579eSArjan van de Ven 
13505c87579eSArjan van de Ven /*
13515c87579eSArjan van de Ven  * This function gets called when a part of the kernel has a new latency
13525c87579eSArjan van de Ven  * requirement.  This means we need to get all processors out of their C-state,
13535c87579eSArjan van de Ven  * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
13545c87579eSArjan van de Ven  * wakes them all right up.
13555c87579eSArjan van de Ven  */
13565c87579eSArjan van de Ven static int acpi_processor_latency_notify(struct notifier_block *b,
13575c87579eSArjan van de Ven 		unsigned long l, void *v)
13585c87579eSArjan van de Ven {
13598691e5a8SJens Axboe 	smp_call_function(smp_callback, NULL, 1);
13605c87579eSArjan van de Ven 	return NOTIFY_OK;
13615c87579eSArjan van de Ven }
13625c87579eSArjan van de Ven 
13635c87579eSArjan van de Ven static struct notifier_block acpi_processor_latency_notifier = {
13645c87579eSArjan van de Ven 	.notifier_call = acpi_processor_latency_notify,
13655c87579eSArjan van de Ven };
13664f86d3a8SLen Brown 
13671fec74a9SAndrew Morton #endif
13685c87579eSArjan van de Ven 
13694f86d3a8SLen Brown #else /* CONFIG_CPU_IDLE */
13704f86d3a8SLen Brown 
13714f86d3a8SLen Brown /**
13724f86d3a8SLen Brown  * acpi_idle_bm_check - checks if bus master activity was detected
13734f86d3a8SLen Brown  */
13744f86d3a8SLen Brown static int acpi_idle_bm_check(void)
13754f86d3a8SLen Brown {
13764f86d3a8SLen Brown 	u32 bm_status = 0;
13774f86d3a8SLen Brown 
13784f86d3a8SLen Brown 	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
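	/* BM_STS is write-1-to-clear: reset it so the next check only
	   sees bus-master activity that happened after this point. */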
13794f86d3a8SLen Brown 	if (bm_status)
13804f86d3a8SLen Brown 		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
13814f86d3a8SLen Brown 	/*
13824f86d3a8SLen Brown 	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
13834f86d3a8SLen Brown 	 * the true state of bus mastering activity, forcing us to
13844f86d3a8SLen Brown 	 * manually check the BMIDEA bit of each IDE channel.
13854f86d3a8SLen Brown 	 */
13864f86d3a8SLen Brown 	else if (errata.piix4.bmisx) {
13874f86d3a8SLen Brown 		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
13884f86d3a8SLen Brown 		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
13894f86d3a8SLen Brown 			bm_status = 1;
13904f86d3a8SLen Brown 	}
13914f86d3a8SLen Brown 	return bm_status;
13924f86d3a8SLen Brown }
13934f86d3a8SLen Brown 
13944f86d3a8SLen Brown /**
13954f86d3a8SLen Brown  * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
13964f86d3a8SLen Brown  * @pr: the processor
13974f86d3a8SLen Brown  * @target: the new target state
13984f86d3a8SLen Brown  */
13994f86d3a8SLen Brown static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
14004f86d3a8SLen Brown 					   struct acpi_processor_cx *target)
14014f86d3a8SLen Brown {
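	/*
	 * BM_RLD makes bus-master requests wake the CPU from C3, so keep
	 * it set only while a C3-type state is the target.
	 */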
14024f86d3a8SLen Brown 	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
14034f86d3a8SLen Brown 		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
14044f86d3a8SLen Brown 		pr->flags.bm_rld_set = 0;
14054f86d3a8SLen Brown 	}
14064f86d3a8SLen Brown 
14074f86d3a8SLen Brown 	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
14084f86d3a8SLen Brown 		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
14094f86d3a8SLen Brown 		pr->flags.bm_rld_set = 1;
14104f86d3a8SLen Brown 	}
14114f86d3a8SLen Brown }
14124f86d3a8SLen Brown 
14134f86d3a8SLen Brown /**
14144f86d3a8SLen Brown  * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
14154f86d3a8SLen Brown  * @cx: cstate data
1416bc71bec9Svenkatesh.pallipadi@intel.com  *
1417bc71bec9Svenkatesh.pallipadi@intel.com  * Caller disables interrupts before the call and enables them after return.
14184f86d3a8SLen Brown  */
14194f86d3a8SLen Brown static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
14204f86d3a8SLen Brown {
1421bc71bec9Svenkatesh.pallipadi@intel.com 	if (cx->entry_method == ACPI_CSTATE_FFH) {
14224f86d3a8SLen Brown 		/* Call into architectural FFH based C-state */
14234f86d3a8SLen Brown 		acpi_processor_ffh_cstate_enter(cx);
1424bc71bec9Svenkatesh.pallipadi@intel.com 	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
1425bc71bec9Svenkatesh.pallipadi@intel.com 		acpi_safe_halt();
14264f86d3a8SLen Brown 	} else {
14274f86d3a8SLen Brown 		int unused;
14284f86d3a8SLen Brown 		/* IO port based C-state */
14294f86d3a8SLen Brown 		inb(cx->address);
14304f86d3a8SLen Brown 		/* Dummy wait op - must do something useless after P_LVL2 read
14314f86d3a8SLen Brown 		   because chipsets cannot guarantee that STPCLK# signal
14324f86d3a8SLen Brown 		   gets asserted in time to freeze execution properly. */
14334f86d3a8SLen Brown 		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
14344f86d3a8SLen Brown 	}
14354f86d3a8SLen Brown }
14364f86d3a8SLen Brown 
14374f86d3a8SLen Brown /**
14384f86d3a8SLen Brown  * acpi_idle_enter_c1 - enters an ACPI C1 state-type
14394f86d3a8SLen Brown  * @dev: the target CPU
14404f86d3a8SLen Brown  * @state: the state data
14414f86d3a8SLen Brown  *
14424f86d3a8SLen Brown  * This is equivalent to the HALT instruction.
14434f86d3a8SLen Brown  */
14444f86d3a8SLen Brown static int acpi_idle_enter_c1(struct cpuidle_device *dev,
14454f86d3a8SLen Brown 			      struct cpuidle_state *state)
14464f86d3a8SLen Brown {
14479b12e18cSvenkatesh.pallipadi@intel.com 	u32 t1, t2;
14484f86d3a8SLen Brown 	struct acpi_processor *pr;
14494f86d3a8SLen Brown 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
14509b12e18cSvenkatesh.pallipadi@intel.com 
1451706546d0SMike Travis 	pr = __get_cpu_var(processors);
14524f86d3a8SLen Brown 
14534f86d3a8SLen Brown 	if (unlikely(!pr))
14544f86d3a8SLen Brown 		return 0;
14554f86d3a8SLen Brown 
14562e906655Svenkatesh.pallipadi@intel.com 	local_irq_disable();
1457b077fbadSVenkatesh Pallipadi 
1458b077fbadSVenkatesh Pallipadi 	/* Do not access any ACPI IO ports in suspend path */
1459b077fbadSVenkatesh Pallipadi 	if (acpi_idle_suspend) {
1460b077fbadSVenkatesh Pallipadi 		acpi_safe_halt();
1461b077fbadSVenkatesh Pallipadi 		local_irq_enable();
1462b077fbadSVenkatesh Pallipadi 		return 0;
1463b077fbadSVenkatesh Pallipadi 	}
1464b077fbadSVenkatesh Pallipadi 
14654f86d3a8SLen Brown 	if (pr->flags.bm_check)
14664f86d3a8SLen Brown 		acpi_idle_update_bm_rld(pr, cx);
14674f86d3a8SLen Brown 
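	/* Bracket the entry with PM-timer reads to measure C1 residency */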
14689b12e18cSvenkatesh.pallipadi@intel.com 	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1469bc71bec9Svenkatesh.pallipadi@intel.com 	acpi_idle_do_entry(cx);
14709b12e18cSvenkatesh.pallipadi@intel.com 	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
14714f86d3a8SLen Brown 
14722e906655Svenkatesh.pallipadi@intel.com 	local_irq_enable();
14734f86d3a8SLen Brown 	cx->usage++;
14744f86d3a8SLen Brown 
14759b12e18cSvenkatesh.pallipadi@intel.com 	return ticks_elapsed_in_us(t1, t2);
14764f86d3a8SLen Brown }
14774f86d3a8SLen Brown 
14784f86d3a8SLen Brown /**
14794f86d3a8SLen Brown  * acpi_idle_enter_simple - enters an ACPI state without BM handling
14804f86d3a8SLen Brown  * @dev: the target CPU
14814f86d3a8SLen Brown  * @state: the state data
14824f86d3a8SLen Brown  */
14834f86d3a8SLen Brown static int acpi_idle_enter_simple(struct cpuidle_device *dev,
14844f86d3a8SLen Brown 				  struct cpuidle_state *state)
14854f86d3a8SLen Brown {
14864f86d3a8SLen Brown 	struct acpi_processor *pr;
14874f86d3a8SLen Brown 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
14884f86d3a8SLen Brown 	u32 t1, t2;
148950629118SVenkatesh Pallipadi 	int sleep_ticks = 0;
149050629118SVenkatesh Pallipadi 
1491706546d0SMike Travis 	pr = __get_cpu_var(processors);
14924f86d3a8SLen Brown 
14934f86d3a8SLen Brown 	if (unlikely(!pr))
14944f86d3a8SLen Brown 		return 0;
14954f86d3a8SLen Brown 
1496e196441bSLen Brown 	if (acpi_idle_suspend)
1497e196441bSLen Brown 		return(acpi_idle_enter_c1(dev, state));
1498e196441bSLen Brown 
14994f86d3a8SLen Brown 	local_irq_disable();
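	/*
	 * Clearing TS_POLLING tells the scheduler it must send an IPI,
	 * rather than rely on this CPU polling NEED_RESCHED, to wake it.
	 */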
15004f86d3a8SLen Brown 	current_thread_info()->status &= ~TS_POLLING;
15014f86d3a8SLen Brown 	/*
15024f86d3a8SLen Brown 	 * TS_POLLING-cleared state must be visible before we test
15034f86d3a8SLen Brown 	 * NEED_RESCHED:
15044f86d3a8SLen Brown 	 */
15054f86d3a8SLen Brown 	smp_mb();
15064f86d3a8SLen Brown 
15074f86d3a8SLen Brown 	if (unlikely(need_resched())) {
15084f86d3a8SLen Brown 		current_thread_info()->status |= TS_POLLING;
15094f86d3a8SLen Brown 		local_irq_enable();
15104f86d3a8SLen Brown 		return 0;
15114f86d3a8SLen Brown 	}
15124f86d3a8SLen Brown 
1513e17bcb43SThomas Gleixner 	/*
1514e17bcb43SThomas Gleixner 	 * Must be done before busmaster disable as we might need to
1515e17bcb43SThomas Gleixner 	 * access HPET !
1516e17bcb43SThomas Gleixner 	 */
1517e17bcb43SThomas Gleixner 	acpi_state_timer_broadcast(pr, cx, 1);
1518e17bcb43SThomas Gleixner 
1519e17bcb43SThomas Gleixner 	if (pr->flags.bm_check)
1520e17bcb43SThomas Gleixner 		acpi_idle_update_bm_rld(pr, cx);
1521e17bcb43SThomas Gleixner 
15224f86d3a8SLen Brown 	if (cx->type == ACPI_STATE_C3)
15234f86d3a8SLen Brown 		ACPI_FLUSH_CPU_CACHE();
15244f86d3a8SLen Brown 
15254f86d3a8SLen Brown 	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
152650629118SVenkatesh Pallipadi 	/* Tell the scheduler that we are going deep-idle: */
152750629118SVenkatesh Pallipadi 	sched_clock_idle_sleep_event();
15284f86d3a8SLen Brown 	acpi_idle_do_entry(cx);
15294f86d3a8SLen Brown 	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
15304f86d3a8SLen Brown 
153161331168SPavel Machek #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
15324f86d3a8SLen Brown 	/* TSC could halt in idle, so notify users */
1533ddb25f9aSAndi Kleen 	if (tsc_halts_in_c(cx->type))
15344f86d3a8SLen Brown 		mark_tsc_unstable("TSC halts in idle");;
15344f86d3a8SLen Brown 		mark_tsc_unstable("TSC halts in idle");
153650629118SVenkatesh Pallipadi 	sleep_ticks = ticks_elapsed(t1, t2);
153750629118SVenkatesh Pallipadi 
153850629118SVenkatesh Pallipadi 	/* Tell the scheduler how much we idled: */
153950629118SVenkatesh Pallipadi 	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
15404f86d3a8SLen Brown 
15414f86d3a8SLen Brown 	local_irq_enable();
15424f86d3a8SLen Brown 	current_thread_info()->status |= TS_POLLING;
15434f86d3a8SLen Brown 
15444f86d3a8SLen Brown 	cx->usage++;
15454f86d3a8SLen Brown 
15464f86d3a8SLen Brown 	acpi_state_timer_broadcast(pr, cx, 0);
154750629118SVenkatesh Pallipadi 	cx->time += sleep_ticks;
15484f86d3a8SLen Brown 	return ticks_elapsed_in_us(t1, t2);
15494f86d3a8SLen Brown }
15504f86d3a8SLen Brown 
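/*
 * c3_cpu_count and c3_lock track how many CPUs are currently in a
 * C3-type state so that bus-master arbitration is disabled only once
 * every online CPU has entered C3, and re-enabled as soon as one exits.
 */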
15514f86d3a8SLen Brown static int c3_cpu_count;
15524f86d3a8SLen Brown static DEFINE_SPINLOCK(c3_lock);
15534f86d3a8SLen Brown 
15544f86d3a8SLen Brown /**
15554f86d3a8SLen Brown  * acpi_idle_enter_bm - enters C3 with proper BM handling
15564f86d3a8SLen Brown  * @dev: the target CPU
15574f86d3a8SLen Brown  * @state: the state data
15584f86d3a8SLen Brown  *
15594f86d3a8SLen Brown  * If BM is detected, the deepest non-C3 idle state is entered instead.
15604f86d3a8SLen Brown  */
15614f86d3a8SLen Brown static int acpi_idle_enter_bm(struct cpuidle_device *dev,
15624f86d3a8SLen Brown 			      struct cpuidle_state *state)
15634f86d3a8SLen Brown {
15644f86d3a8SLen Brown 	struct acpi_processor *pr;
15654f86d3a8SLen Brown 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
15664f86d3a8SLen Brown 	u32 t1, t2;
156750629118SVenkatesh Pallipadi 	int sleep_ticks = 0;
156850629118SVenkatesh Pallipadi 
1569706546d0SMike Travis 	pr = __get_cpu_var(processors);
15704f86d3a8SLen Brown 
15714f86d3a8SLen Brown 	if (unlikely(!pr))
15724f86d3a8SLen Brown 		return 0;
15734f86d3a8SLen Brown 
1574e196441bSLen Brown 	if (acpi_idle_suspend)
1575e196441bSLen Brown 		return(acpi_idle_enter_c1(dev, state));
1576e196441bSLen Brown 
1577ddc081a1SVenkatesh Pallipadi 	if (acpi_idle_bm_check()) {
1578ddc081a1SVenkatesh Pallipadi 		if (dev->safe_state) {
1579ddc081a1SVenkatesh Pallipadi 			return dev->safe_state->enter(dev, dev->safe_state);
1580ddc081a1SVenkatesh Pallipadi 		} else {
15812e906655Svenkatesh.pallipadi@intel.com 			local_irq_disable();
1582ddc081a1SVenkatesh Pallipadi 			acpi_safe_halt();
15832e906655Svenkatesh.pallipadi@intel.com 			local_irq_enable();
1584ddc081a1SVenkatesh Pallipadi 			return 0;
1585ddc081a1SVenkatesh Pallipadi 		}
1586ddc081a1SVenkatesh Pallipadi 	}
1587ddc081a1SVenkatesh Pallipadi 
15884f86d3a8SLen Brown 	local_irq_disable();
15894f86d3a8SLen Brown 	current_thread_info()->status &= ~TS_POLLING;
15904f86d3a8SLen Brown 	/*
15914f86d3a8SLen Brown 	 * TS_POLLING-cleared state must be visible before we test
15924f86d3a8SLen Brown 	 * NEED_RESCHED:
15934f86d3a8SLen Brown 	 */
15944f86d3a8SLen Brown 	smp_mb();
15954f86d3a8SLen Brown 
15964f86d3a8SLen Brown 	if (unlikely(need_resched())) {
15974f86d3a8SLen Brown 		current_thread_info()->status |= TS_POLLING;
15984f86d3a8SLen Brown 		local_irq_enable();
15994f86d3a8SLen Brown 		return 0;
16004f86d3a8SLen Brown 	}
16014f86d3a8SLen Brown 
1602996520c1SVenki Pallipadi 	acpi_unlazy_tlb(smp_processor_id());
1603996520c1SVenki Pallipadi 
160450629118SVenkatesh Pallipadi 	/* Tell the scheduler that we are going deep-idle: */
160550629118SVenkatesh Pallipadi 	sched_clock_idle_sleep_event();
16064f86d3a8SLen Brown 	/*
16074f86d3a8SLen Brown 	 * Must be done before busmaster disable as we might need to
16084f86d3a8SLen Brown 	 * access HPET !
16094f86d3a8SLen Brown 	 */
16104f86d3a8SLen Brown 	acpi_state_timer_broadcast(pr, cx, 1);
16114f86d3a8SLen Brown 
16124f86d3a8SLen Brown 	acpi_idle_update_bm_rld(pr, cx);
16134f86d3a8SLen Brown 
1614c9c860e5SVenkatesh Pallipadi 	/*
1615c9c860e5SVenkatesh Pallipadi 	 * disable bus master
1616c9c860e5SVenkatesh Pallipadi 	 * bm_check implies we need ARB_DIS
1617c9c860e5SVenkatesh Pallipadi 	 * !bm_check implies we need cache flush
1618c9c860e5SVenkatesh Pallipadi 	 * bm_control determines whether we can do ARB_DIS
1619c9c860e5SVenkatesh Pallipadi 	 *
1620c9c860e5SVenkatesh Pallipadi 	 * That leaves a case where bm_check is set and bm_control is
1621c9c860e5SVenkatesh Pallipadi 	 * not set. In that case we cannot do much, we enter C3
1622c9c860e5SVenkatesh Pallipadi 	 * without doing anything.
1623c9c860e5SVenkatesh Pallipadi 	 */
1624c9c860e5SVenkatesh Pallipadi 	if (pr->flags.bm_check && pr->flags.bm_control) {
16254f86d3a8SLen Brown 		spin_lock(&c3_lock);
16264f86d3a8SLen Brown 		c3_cpu_count++;
16274f86d3a8SLen Brown 		/* Disable bus master arbitration when all CPUs are in C3 */
16284f86d3a8SLen Brown 		if (c3_cpu_count == num_online_cpus())
16294f86d3a8SLen Brown 			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
16304f86d3a8SLen Brown 		spin_unlock(&c3_lock);
1631c9c860e5SVenkatesh Pallipadi 	} else if (!pr->flags.bm_check) {
1632c9c860e5SVenkatesh Pallipadi 		ACPI_FLUSH_CPU_CACHE();
1633c9c860e5SVenkatesh Pallipadi 	}
16344f86d3a8SLen Brown 
16354f86d3a8SLen Brown 	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
16364f86d3a8SLen Brown 	acpi_idle_do_entry(cx);
16374f86d3a8SLen Brown 	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
16384f86d3a8SLen Brown 
16394f86d3a8SLen Brown 	/* Re-enable bus master arbitration */
1640c9c860e5SVenkatesh Pallipadi 	if (pr->flags.bm_check && pr->flags.bm_control) {
1641c9c860e5SVenkatesh Pallipadi 		spin_lock(&c3_lock);
16424f86d3a8SLen Brown 		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
16434f86d3a8SLen Brown 		c3_cpu_count--;
16444f86d3a8SLen Brown 		spin_unlock(&c3_lock);
16454f86d3a8SLen Brown 	}
16464f86d3a8SLen Brown 
164761331168SPavel Machek #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
16484f86d3a8SLen Brown 	/* TSC could halt in idle, so notify users */
1649ddb25f9aSAndi Kleen 	if (tsc_halts_in_c(ACPI_STATE_C3))
16504f86d3a8SLen Brown 		mark_tsc_unstable("TSC halts in idle");
16514f86d3a8SLen Brown #endif
165250629118SVenkatesh Pallipadi 	sleep_ticks = ticks_elapsed(t1, t2);
165350629118SVenkatesh Pallipadi 	/* Tell the scheduler how much we idled: */
165450629118SVenkatesh Pallipadi 	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
16554f86d3a8SLen Brown 
16564f86d3a8SLen Brown 	local_irq_enable();
16574f86d3a8SLen Brown 	current_thread_info()->status |= TS_POLLING;
16584f86d3a8SLen Brown 
16594f86d3a8SLen Brown 	cx->usage++;
16604f86d3a8SLen Brown 
16614f86d3a8SLen Brown 	acpi_state_timer_broadcast(pr, cx, 0);
166250629118SVenkatesh Pallipadi 	cx->time += sleep_ticks;
16634f86d3a8SLen Brown 	return ticks_elapsed_in_us(t1, t2);
16644f86d3a8SLen Brown }
16654f86d3a8SLen Brown 
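/*
 * Driver structure handed to the cpuidle core; the per-CPU devices
 * (pr->power.dev) are registered separately from
 * acpi_processor_power_init() below.
 */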
16664f86d3a8SLen Brown struct cpuidle_driver acpi_idle_driver = {
16674f86d3a8SLen Brown 	.name =		"acpi_idle",
16684f86d3a8SLen Brown 	.owner =	THIS_MODULE,
16694f86d3a8SLen Brown };
16704f86d3a8SLen Brown 
16714f86d3a8SLen Brown /**
16724f86d3a8SLen Brown  * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
16734f86d3a8SLen Brown  * @pr: the ACPI processor
16744f86d3a8SLen Brown  */
16754f86d3a8SLen Brown static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
16764f86d3a8SLen Brown {
16779a0b8415Svenkatesh.pallipadi@intel.com 	int i, count = CPUIDLE_DRIVER_STATE_START;
16784f86d3a8SLen Brown 	struct acpi_processor_cx *cx;
16794f86d3a8SLen Brown 	struct cpuidle_state *state;
16804f86d3a8SLen Brown 	struct cpuidle_device *dev = &pr->power.dev;
16814f86d3a8SLen Brown 
16824f86d3a8SLen Brown 	if (!pr->flags.power_setup_done)
16834f86d3a8SLen Brown 		return -EINVAL;
16844f86d3a8SLen Brown 
16854f86d3a8SLen Brown 	if (pr->flags.power == 0) {
16864f86d3a8SLen Brown 		return -EINVAL;
16874f86d3a8SLen Brown 	}
16884f86d3a8SLen Brown 
1689dcb84f33SVenkatesh Pallipadi 	dev->cpu = pr->id;
16904fcb2fcdSVenkatesh Pallipadi 	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
16914fcb2fcdSVenkatesh Pallipadi 		dev->states[i].name[0] = '\0';
16924fcb2fcdSVenkatesh Pallipadi 		dev->states[i].desc[0] = '\0';
16934fcb2fcdSVenkatesh Pallipadi 	}
16944fcb2fcdSVenkatesh Pallipadi 
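	/*
	 * Fill in states starting at CPUIDLE_DRIVER_STATE_START so that
	 * slot 0 remains free for the cpuidle core's polling state on
	 * architectures that provide one.
	 */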
16954f86d3a8SLen Brown 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
16964f86d3a8SLen Brown 		cx = &pr->power.states[i];
16974f86d3a8SLen Brown 		state = &dev->states[count];
16984f86d3a8SLen Brown 
16994f86d3a8SLen Brown 		if (!cx->valid)
17004f86d3a8SLen Brown 			continue;
17014f86d3a8SLen Brown 
17024f86d3a8SLen Brown #ifdef CONFIG_HOTPLUG_CPU
17034f86d3a8SLen Brown 		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
17044f86d3a8SLen Brown 		    !pr->flags.has_cst &&
17054f86d3a8SLen Brown 		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
17064f86d3a8SLen Brown 			continue;
17074f86d3a8SLen Brown #endif
17084f86d3a8SLen Brown 		cpuidle_set_statedata(state, cx);
17094f86d3a8SLen Brown 
17104f86d3a8SLen Brown 		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
17114fcb2fcdSVenkatesh Pallipadi 		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
17124f86d3a8SLen Brown 		state->exit_latency = cx->latency;
17134963f620SLen Brown 		state->target_residency = cx->latency * latency_factor;
17144f86d3a8SLen Brown 		state->power_usage = cx->power;
17154f86d3a8SLen Brown 
17164f86d3a8SLen Brown 		state->flags = 0;
17174f86d3a8SLen Brown 		switch (cx->type) {
17184f86d3a8SLen Brown 			case ACPI_STATE_C1:
17194f86d3a8SLen Brown 			state->flags |= CPUIDLE_FLAG_SHALLOW;
17208e92b660SVenki Pallipadi 			if (cx->entry_method == ACPI_CSTATE_FFH)
17219b12e18cSvenkatesh.pallipadi@intel.com 				state->flags |= CPUIDLE_FLAG_TIME_VALID;
17228e92b660SVenki Pallipadi 
17234f86d3a8SLen Brown 			state->enter = acpi_idle_enter_c1;
1724ddc081a1SVenkatesh Pallipadi 			dev->safe_state = state;
17254f86d3a8SLen Brown 			break;
17264f86d3a8SLen Brown 
17274f86d3a8SLen Brown 			case ACPI_STATE_C2:
17284f86d3a8SLen Brown 			state->flags |= CPUIDLE_FLAG_BALANCED;
17294f86d3a8SLen Brown 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
17304f86d3a8SLen Brown 			state->enter = acpi_idle_enter_simple;
1731ddc081a1SVenkatesh Pallipadi 			dev->safe_state = state;
17324f86d3a8SLen Brown 			break;
17334f86d3a8SLen Brown 
17344f86d3a8SLen Brown 			case ACPI_STATE_C3:
17354f86d3a8SLen Brown 			state->flags |= CPUIDLE_FLAG_DEEP;
17364f86d3a8SLen Brown 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
17374f86d3a8SLen Brown 			state->flags |= CPUIDLE_FLAG_CHECK_BM;
17384f86d3a8SLen Brown 			state->enter = pr->flags.bm_check ?
17394f86d3a8SLen Brown 					acpi_idle_enter_bm :
17404f86d3a8SLen Brown 					acpi_idle_enter_simple;
17414f86d3a8SLen Brown 			break;
17424f86d3a8SLen Brown 		}
17434f86d3a8SLen Brown 
17444f86d3a8SLen Brown 		count++;
17459a0b8415Svenkatesh.pallipadi@intel.com 		if (count == CPUIDLE_STATE_MAX)
17469a0b8415Svenkatesh.pallipadi@intel.com 			break;
17474f86d3a8SLen Brown 	}
17484f86d3a8SLen Brown 
17494f86d3a8SLen Brown 	dev->state_count = count;
17504f86d3a8SLen Brown 
17514f86d3a8SLen Brown 	if (!count)
17524f86d3a8SLen Brown 		return -EINVAL;
17534f86d3a8SLen Brown 
17544f86d3a8SLen Brown 	return 0;
17554f86d3a8SLen Brown }
17564f86d3a8SLen Brown 
17574f86d3a8SLen Brown int acpi_processor_cst_has_changed(struct acpi_processor *pr)
17584f86d3a8SLen Brown {
1759dcb84f33SVenkatesh Pallipadi 	int ret = 0;
17604f86d3a8SLen Brown 
176136a91358SVenkatesh Pallipadi 	if (boot_option_idle_override)
176236a91358SVenkatesh Pallipadi 		return 0;
176336a91358SVenkatesh Pallipadi 
17644f86d3a8SLen Brown 	if (!pr)
17654f86d3a8SLen Brown 		return -EINVAL;
17664f86d3a8SLen Brown 
17674f86d3a8SLen Brown 	if (nocst) {
17684f86d3a8SLen Brown 		return -ENODEV;
17694f86d3a8SLen Brown 	}
17704f86d3a8SLen Brown 
17714f86d3a8SLen Brown 	if (!pr->flags.power_setup_done)
17724f86d3a8SLen Brown 		return -ENODEV;
17734f86d3a8SLen Brown 
17744f86d3a8SLen Brown 	cpuidle_pause_and_lock();
17754f86d3a8SLen Brown 	cpuidle_disable_device(&pr->power.dev);
17764f86d3a8SLen Brown 	acpi_processor_get_power_info(pr);
1777dcb84f33SVenkatesh Pallipadi 	if (pr->flags.power) {
17784f86d3a8SLen Brown 		acpi_processor_setup_cpuidle(pr);
17794f86d3a8SLen Brown 		ret = cpuidle_enable_device(&pr->power.dev);
1780dcb84f33SVenkatesh Pallipadi 	}
17814f86d3a8SLen Brown 	cpuidle_resume_and_unlock();
17824f86d3a8SLen Brown 
17834f86d3a8SLen Brown 	return ret;
17844f86d3a8SLen Brown }
17854f86d3a8SLen Brown 
17864f86d3a8SLen Brown #endif /* CONFIG_CPU_IDLE */
17874f86d3a8SLen Brown 
17887af8b660SPierre Ossman int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
17894be44fcdSLen Brown 			      struct acpi_device *device)
17901da177e4SLinus Torvalds {
17911da177e4SLinus Torvalds 	acpi_status status = 0;
1792b6835052SAndreas Mohr 	static int first_run;
17931da177e4SLinus Torvalds 	struct proc_dir_entry *entry = NULL;
17941da177e4SLinus Torvalds 	unsigned int i;
17951da177e4SLinus Torvalds 
179636a91358SVenkatesh Pallipadi 	if (boot_option_idle_override)
179736a91358SVenkatesh Pallipadi 		return 0;
17981da177e4SLinus Torvalds 
17991da177e4SLinus Torvalds 	if (!first_run) {
1800c1e3b377SZhao Yakui 		if (idle_halt) {
1801c1e3b377SZhao Yakui 			/*
1802c1e3b377SZhao Yakui 			 * When the boot option of "idle=halt" is added, halt
1803c1e3b377SZhao Yakui 			 * is used for CPU IDLE.
1804c1e3b377SZhao Yakui 			 * In such case C2/C3 is meaningless. So the max_cstate
1805c1e3b377SZhao Yakui 			 * is set to one.
1806c1e3b377SZhao Yakui 			 */
1807c1e3b377SZhao Yakui 			max_cstate = 1;
1808c1e3b377SZhao Yakui 		}
18091da177e4SLinus Torvalds 		dmi_check_system(processor_power_dmi_table);
1810c1c30634SAlexey Starikovskiy 		max_cstate = acpi_processor_cstate_check(max_cstate);
18111da177e4SLinus Torvalds 		if (max_cstate < ACPI_C_STATES_MAX)
18124be44fcdSLen Brown 			printk(KERN_NOTICE
18134be44fcdSLen Brown 			       "ACPI: processor limited to max C-state %d\n",
18144be44fcdSLen Brown 			       max_cstate);
18151da177e4SLinus Torvalds 		first_run++;
18164f86d3a8SLen Brown #if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
1817f011e2e2SMark Gross 		pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
1818f011e2e2SMark Gross 				&acpi_processor_latency_notifier);
18191fec74a9SAndrew Morton #endif
18201da177e4SLinus Torvalds 	}
18211da177e4SLinus Torvalds 
182202df8b93SVenkatesh Pallipadi 	if (!pr)
1823d550d98dSPatrick Mochel 		return -EINVAL;
182402df8b93SVenkatesh Pallipadi 
1825cee324b1SAlexey Starikovskiy 	if (acpi_gbl_FADT.cst_control && !nocst) {
18264be44fcdSLen Brown 		status =
1827cee324b1SAlexey Starikovskiy 		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
18281da177e4SLinus Torvalds 		if (ACPI_FAILURE(status)) {
1829a6fc6720SThomas Renninger 			ACPI_EXCEPTION((AE_INFO, status,
1830a6fc6720SThomas Renninger 					"Notifying BIOS of _CST ability failed"));
18311da177e4SLinus Torvalds 		}
18321da177e4SLinus Torvalds 	}
18331da177e4SLinus Torvalds 
18341da177e4SLinus Torvalds 	acpi_processor_get_power_info(pr);
18354f86d3a8SLen Brown 	pr->flags.power_setup_done = 1;
18361da177e4SLinus Torvalds 
18371da177e4SLinus Torvalds 	/*
18381da177e4SLinus Torvalds 	 * Install the idle handler if processor power management is supported.
18391da177e4SLinus Torvalds 	 * Note that the previously set idle handler will be used on
18401da177e4SLinus Torvalds 	 * platforms that only support C1.
18411da177e4SLinus Torvalds 	 */
184236a91358SVenkatesh Pallipadi 	if (pr->flags.power) {
18434f86d3a8SLen Brown #ifdef CONFIG_CPU_IDLE
18444f86d3a8SLen Brown 		acpi_processor_setup_cpuidle(pr);
18454f86d3a8SLen Brown 		if (cpuidle_register_device(&pr->power.dev))
18464f86d3a8SLen Brown 			return -EIO;
18474f86d3a8SLen Brown #endif
18484f86d3a8SLen Brown 
18491da177e4SLinus Torvalds 		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
18501da177e4SLinus Torvalds 		for (i = 1; i <= pr->power.count; i++)
18511da177e4SLinus Torvalds 			if (pr->power.states[i].valid)
18524be44fcdSLen Brown 				printk(" C%d[C%d]", i,
18534be44fcdSLen Brown 				       pr->power.states[i].type);
18541da177e4SLinus Torvalds 		printk(")\n");
18551da177e4SLinus Torvalds 
18564f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
18571da177e4SLinus Torvalds 		if (pr->id == 0) {
18581da177e4SLinus Torvalds 			pm_idle_save = pm_idle;
18591da177e4SLinus Torvalds 			pm_idle = acpi_processor_idle;
18601da177e4SLinus Torvalds 		}
18614f86d3a8SLen Brown #endif
18621da177e4SLinus Torvalds 	}
18631da177e4SLinus Torvalds 
18641da177e4SLinus Torvalds 	/* 'power' [R] */
1865cf7acfabSDenis V. Lunev 	entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
1866cf7acfabSDenis V. Lunev 				 S_IRUGO, acpi_device_dir(device),
1867cf7acfabSDenis V. Lunev 				 &acpi_processor_power_fops,
1868cf7acfabSDenis V. Lunev 				 acpi_driver_data(device));
18691da177e4SLinus Torvalds 	if (!entry)
1870a6fc6720SThomas Renninger 		return -EIO;
1871d550d98dSPatrick Mochel 	return 0;
18721da177e4SLinus Torvalds }
18731da177e4SLinus Torvalds 
18744be44fcdSLen Brown int acpi_processor_power_exit(struct acpi_processor *pr,
18754be44fcdSLen Brown 			      struct acpi_device *device)
18761da177e4SLinus Torvalds {
187736a91358SVenkatesh Pallipadi 	if (boot_option_idle_override)
187836a91358SVenkatesh Pallipadi 		return 0;
187936a91358SVenkatesh Pallipadi 
18804f86d3a8SLen Brown #ifdef CONFIG_CPU_IDLE
18814f86d3a8SLen Brown 	cpuidle_unregister_device(&pr->power.dev);
18824f86d3a8SLen Brown #endif
18831da177e4SLinus Torvalds 	pr->flags.power_setup_done = 0;
18841da177e4SLinus Torvalds 
18851da177e4SLinus Torvalds 	if (acpi_device_dir(device))
18864be44fcdSLen Brown 		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
18874be44fcdSLen Brown 				  acpi_device_dir(device));
18881da177e4SLinus Torvalds 
18894f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
18904f86d3a8SLen Brown 
18911da177e4SLinus Torvalds 	/* Unregister the idle handler when processor #0 is removed. */
18921da177e4SLinus Torvalds 	if (pr->id == 0) {
18931da177e4SLinus Torvalds 		pm_idle = pm_idle_save;
18941da177e4SLinus Torvalds 
18951da177e4SLinus Torvalds 		/*
18961da177e4SLinus Torvalds 		 * We are about to unload the current idle thread pm callback
18971da177e4SLinus Torvalds 		 * (pm_idle), Wait for all processors to update cached/local
18981da177e4SLinus Torvalds 		 * copies of pm_idle before proceeding.
18991da177e4SLinus Torvalds 		 */
19001da177e4SLinus Torvalds 		cpu_idle_wait();
19011fec74a9SAndrew Morton #ifdef CONFIG_SMP
1902f011e2e2SMark Gross 		pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
1903f011e2e2SMark Gross 				&acpi_processor_latency_notifier);
19041fec74a9SAndrew Morton #endif
19051da177e4SLinus Torvalds 	}
19064f86d3a8SLen Brown #endif
19071da177e4SLinus Torvalds 
1908d550d98dSPatrick Mochel 	return 0;
19091da177e4SLinus Torvalds }
1910