xref: /openbmc/linux/drivers/acpi/processor_idle.c (revision 4fcb2fcd)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * processor_idle - idle state submodule to the ACPI processor driver
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
51da177e4SLinus Torvalds  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6c5ab81caSDominik Brodowski  *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
71da177e4SLinus Torvalds  *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
81da177e4SLinus Torvalds  *  			- Added processor hotplug support
902df8b93SVenkatesh Pallipadi  *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
1002df8b93SVenkatesh Pallipadi  *  			- Added support for C3 on SMP
111da177e4SLinus Torvalds  *
121da177e4SLinus Torvalds  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
131da177e4SLinus Torvalds  *
141da177e4SLinus Torvalds  *  This program is free software; you can redistribute it and/or modify
151da177e4SLinus Torvalds  *  it under the terms of the GNU General Public License as published by
161da177e4SLinus Torvalds  *  the Free Software Foundation; either version 2 of the License, or (at
171da177e4SLinus Torvalds  *  your option) any later version.
181da177e4SLinus Torvalds  *
191da177e4SLinus Torvalds  *  This program is distributed in the hope that it will be useful, but
201da177e4SLinus Torvalds  *  WITHOUT ANY WARRANTY; without even the implied warranty of
211da177e4SLinus Torvalds  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
221da177e4SLinus Torvalds  *  General Public License for more details.
231da177e4SLinus Torvalds  *
241da177e4SLinus Torvalds  *  You should have received a copy of the GNU General Public License along
251da177e4SLinus Torvalds  *  with this program; if not, write to the Free Software Foundation, Inc.,
261da177e4SLinus Torvalds  *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
271da177e4SLinus Torvalds  *
281da177e4SLinus Torvalds  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
291da177e4SLinus Torvalds  */
301da177e4SLinus Torvalds 
311da177e4SLinus Torvalds #include <linux/kernel.h>
321da177e4SLinus Torvalds #include <linux/module.h>
331da177e4SLinus Torvalds #include <linux/init.h>
341da177e4SLinus Torvalds #include <linux/cpufreq.h>
351da177e4SLinus Torvalds #include <linux/proc_fs.h>
361da177e4SLinus Torvalds #include <linux/seq_file.h>
371da177e4SLinus Torvalds #include <linux/acpi.h>
381da177e4SLinus Torvalds #include <linux/dmi.h>
391da177e4SLinus Torvalds #include <linux/moduleparam.h>
404e57b681STim Schmielau #include <linux/sched.h>	/* need_resched() */
41f011e2e2SMark Gross #include <linux/pm_qos_params.h>
42e9e2cdb4SThomas Gleixner #include <linux/clockchips.h>
434f86d3a8SLen Brown #include <linux/cpuidle.h>
441da177e4SLinus Torvalds 
453434933bSThomas Gleixner /*
463434933bSThomas Gleixner  * Include the APIC definitions for x86 so that the APIC timer related defines
473434933bSThomas Gleixner  * are available for UP as well (on SMP they get magically included via linux/smp.h).
483434933bSThomas Gleixner  * asm/acpi.h is not an option, as it would require more include magic. Also
493434933bSThomas Gleixner  * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
503434933bSThomas Gleixner  */
513434933bSThomas Gleixner #ifdef CONFIG_X86
523434933bSThomas Gleixner #include <asm/apic.h>
533434933bSThomas Gleixner #endif
543434933bSThomas Gleixner 
551da177e4SLinus Torvalds #include <asm/io.h>
561da177e4SLinus Torvalds #include <asm/uaccess.h>
571da177e4SLinus Torvalds 
581da177e4SLinus Torvalds #include <acpi/acpi_bus.h>
591da177e4SLinus Torvalds #include <acpi/processor.h>
601da177e4SLinus Torvalds 
611da177e4SLinus Torvalds #define ACPI_PROCESSOR_COMPONENT        0x01000000
621da177e4SLinus Torvalds #define ACPI_PROCESSOR_CLASS            "processor"
631da177e4SLinus Torvalds #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
64f52fd66dSLen Brown ACPI_MODULE_NAME("processor_idle");
651da177e4SLinus Torvalds #define ACPI_PROCESSOR_FILE_POWER	"power"
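/*
 * The ACPI PM timer runs at PM_TIMER_FREQUENCY (nominally 3.579545 MHz),
 * so the macros below convert between microseconds, PM timer ticks and
 * nanoseconds per tick.
 */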
661da177e4SLinus Torvalds #define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
672aa44d05SIngo Molnar #define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
684f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
691da177e4SLinus Torvalds #define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
701da177e4SLinus Torvalds #define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
71b6835052SAndreas Mohr static void (*pm_idle_save) (void) __read_mostly;
724f86d3a8SLen Brown #else
734f86d3a8SLen Brown #define C2_OVERHEAD			1	/* 1us */
744f86d3a8SLen Brown #define C3_OVERHEAD			1	/* 1us */
754f86d3a8SLen Brown #endif
764f86d3a8SLen Brown #define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
771da177e4SLinus Torvalds 
784f86d3a8SLen Brown static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
795b3f0e6cSVenki Pallipadi #ifdef CONFIG_CPU_IDLE
804f86d3a8SLen Brown module_param(max_cstate, uint, 0000);
815b3f0e6cSVenki Pallipadi #else
825b3f0e6cSVenki Pallipadi module_param(max_cstate, uint, 0644);
835b3f0e6cSVenki Pallipadi #endif
84b6835052SAndreas Mohr static unsigned int nocst __read_mostly;
851da177e4SLinus Torvalds module_param(nocst, uint, 0000);
861da177e4SLinus Torvalds 
874f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
881da177e4SLinus Torvalds /*
891da177e4SLinus Torvalds  * bm_history -- bit-mask with a bit per jiffy of bus-master activity
901da177e4SLinus Torvalds  * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
911da177e4SLinus Torvalds  * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
921da177e4SLinus Torvalds  * 100 HZ: 0x0000000F: 4 jiffies = 40ms
931da177e4SLinus Torvalds  * reduce history for more aggressive entry into C3
941da177e4SLinus Torvalds  */
95b6835052SAndreas Mohr static unsigned int bm_history __read_mostly =
964be44fcdSLen Brown     (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
971da177e4SLinus Torvalds module_param(bm_history, uint, 0644);
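/*
 * With the default above, the history window covers roughly 32-40ms of wall
 * time regardless of HZ: e.g. HZ=250 gives (1U << 10) - 1 = 10 jiffies = 40ms,
 * while HZ >= 800 gives the full 32-bit (32 sample) mask.
 */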
984f86d3a8SLen Brown 
994f86d3a8SLen Brown static int acpi_processor_set_power_policy(struct acpi_processor *pr);
1004f86d3a8SLen Brown 
1014963f620SLen Brown #else	/* CONFIG_CPU_IDLE */
10225de5718SLen Brown static unsigned int latency_factor __read_mostly = 2;
1034963f620SLen Brown module_param(latency_factor, uint, 0644);
1044f86d3a8SLen Brown #endif
1051da177e4SLinus Torvalds 
1061da177e4SLinus Torvalds /*
1071da177e4SLinus Torvalds  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
1081da177e4SLinus Torvalds  * For now, limit such machines to C1 via the DMI table below. Probably a bug somewhere else.
1091da177e4SLinus Torvalds  *
1101da177e4SLinus Torvalds  * To skip this limit, boot/load with a large max_cstate limit.
1111da177e4SLinus Torvalds  */
1121855256cSJeff Garzik static int set_max_cstate(const struct dmi_system_id *id)
1131da177e4SLinus Torvalds {
1141da177e4SLinus Torvalds 	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
1151da177e4SLinus Torvalds 		return 0;
1161da177e4SLinus Torvalds 
1173d35600aSLen Brown 	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
1181da177e4SLinus Torvalds 	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
1193d35600aSLen Brown 	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
1201da177e4SLinus Torvalds 
1213d35600aSLen Brown 	max_cstate = (long)id->driver_data;
1221da177e4SLinus Torvalds 
1231da177e4SLinus Torvalds 	return 0;
1241da177e4SLinus Torvalds }
1251da177e4SLinus Torvalds 
1267ded5689SAshok Raj /* Actually this shouldn't be __cpuinitdata; it would be better to fix the
1277ded5689SAshok Raj    callers to only run once. -AK */
1287ded5689SAshok Raj static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
129335f16beSDavid Shaohua Li 	{ set_max_cstate, "IBM ThinkPad R40e", {
130876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
131f831335dSBartlomiej Swiercz 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
132f831335dSBartlomiej Swiercz 	{ set_max_cstate, "IBM ThinkPad R40e", {
133f831335dSBartlomiej Swiercz 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
134876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
135876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
136876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
137876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void*)1},
138876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
139876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
140876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void*)1},
141876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
142876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
143876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void*)1},
144876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
145876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
146876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void*)1},
147876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
148876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
149876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void*)1},
150876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
151876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
152876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void*)1},
153876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
154876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
155876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void*)1},
156876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
157876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
158876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void*)1},
159876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
160876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
161876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void*)1},
162876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
163876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
164876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void*)1},
165876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
166876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
167876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void*)1},
168876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
169876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
170876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void*)1},
171876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
172876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
173876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void*)1},
174876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
175876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
176876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void*)1},
177335f16beSDavid Shaohua Li 	{ set_max_cstate, "Medion 41700", {
178876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
179876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
180335f16beSDavid Shaohua Li 	{ set_max_cstate, "Clevo 5600D", {
181876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
182876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
183335f16beSDavid Shaohua Li 	 (void *)2},
1841da177e4SLinus Torvalds 	{},
1851da177e4SLinus Torvalds };
1861da177e4SLinus Torvalds 
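/*
 * The ACPI PM timer is either 24 or 32 bits wide (ACPI_FADT_32BIT_TIMER
 * flag). The helpers below compute the elapsed tick count between two
 * reads, allowing for at most one timer wraparound.
 */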
1874be44fcdSLen Brown static inline u32 ticks_elapsed(u32 t1, u32 t2)
1881da177e4SLinus Torvalds {
1891da177e4SLinus Torvalds 	if (t2 >= t1)
1901da177e4SLinus Torvalds 		return (t2 - t1);
191cee324b1SAlexey Starikovskiy 	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
1921da177e4SLinus Torvalds 		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
1931da177e4SLinus Torvalds 	else
1941da177e4SLinus Torvalds 		return ((0xFFFFFFFF - t1) + t2);
1951da177e4SLinus Torvalds }
1961da177e4SLinus Torvalds 
1974f86d3a8SLen Brown static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
1984f86d3a8SLen Brown {
1994f86d3a8SLen Brown 	if (t2 >= t1)
2004f86d3a8SLen Brown 		return PM_TIMER_TICKS_TO_US(t2 - t1);
2014f86d3a8SLen Brown 	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
2024f86d3a8SLen Brown 		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
2034f86d3a8SLen Brown 	else
2044f86d3a8SLen Brown 		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
2054f86d3a8SLen Brown }
2064f86d3a8SLen Brown 
2072e906655Svenkatesh.pallipadi@intel.com /*
2082e906655Svenkatesh.pallipadi@intel.com  * Callers should disable interrupts before the call and enable
2092e906655Svenkatesh.pallipadi@intel.com  * interrupts after return.
2102e906655Svenkatesh.pallipadi@intel.com  */
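/*
 * Clearing TS_POLLING tells the scheduler that this CPU is no longer polling
 * need_resched() and must be woken with a reschedule interrupt; the memory
 * barrier orders the flag update against the need_resched() check so that a
 * wakeup cannot be missed before the halt.
 */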
211ddc081a1SVenkatesh Pallipadi static void acpi_safe_halt(void)
212ddc081a1SVenkatesh Pallipadi {
213ddc081a1SVenkatesh Pallipadi 	current_thread_info()->status &= ~TS_POLLING;
214ddc081a1SVenkatesh Pallipadi 	/*
215ddc081a1SVenkatesh Pallipadi 	 * TS_POLLING-cleared state must be visible before we
216ddc081a1SVenkatesh Pallipadi 	 * test NEED_RESCHED:
217ddc081a1SVenkatesh Pallipadi 	 */
218ddc081a1SVenkatesh Pallipadi 	smp_mb();
219ddc081a1SVenkatesh Pallipadi 	if (!need_resched())
220ddc081a1SVenkatesh Pallipadi 		safe_halt();
221ddc081a1SVenkatesh Pallipadi 	current_thread_info()->status |= TS_POLLING;
222ddc081a1SVenkatesh Pallipadi }
223ddc081a1SVenkatesh Pallipadi 
2244f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
2254f86d3a8SLen Brown 
2261da177e4SLinus Torvalds static void
2274be44fcdSLen Brown acpi_processor_power_activate(struct acpi_processor *pr,
2281da177e4SLinus Torvalds 			      struct acpi_processor_cx *new)
2291da177e4SLinus Torvalds {
2301da177e4SLinus Torvalds 	struct acpi_processor_cx *old;
2311da177e4SLinus Torvalds 
2321da177e4SLinus Torvalds 	if (!pr || !new)
2331da177e4SLinus Torvalds 		return;
2341da177e4SLinus Torvalds 
2351da177e4SLinus Torvalds 	old = pr->power.state;
2361da177e4SLinus Torvalds 
2371da177e4SLinus Torvalds 	if (old)
2381da177e4SLinus Torvalds 		old->promotion.count = 0;
2391da177e4SLinus Torvalds 	new->demotion.count = 0;
2401da177e4SLinus Torvalds 
2411da177e4SLinus Torvalds 	/* Cleanup from old state. */
2421da177e4SLinus Torvalds 	if (old) {
2431da177e4SLinus Torvalds 		switch (old->type) {
2441da177e4SLinus Torvalds 		case ACPI_STATE_C3:
2451da177e4SLinus Torvalds 			/* Disable bus master reload */
24602df8b93SVenkatesh Pallipadi 			if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
247d8c71b6dSBob Moore 				acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
2481da177e4SLinus Torvalds 			break;
2491da177e4SLinus Torvalds 		}
2501da177e4SLinus Torvalds 	}
2511da177e4SLinus Torvalds 
2521da177e4SLinus Torvalds 	/* Prepare to use new state. */
2531da177e4SLinus Torvalds 	switch (new->type) {
2541da177e4SLinus Torvalds 	case ACPI_STATE_C3:
2551da177e4SLinus Torvalds 		/* Enable bus master reload */
25602df8b93SVenkatesh Pallipadi 		if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
257d8c71b6dSBob Moore 			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
2581da177e4SLinus Torvalds 		break;
2591da177e4SLinus Torvalds 	}
2601da177e4SLinus Torvalds 
2611da177e4SLinus Torvalds 	pr->power.state = new;
2621da177e4SLinus Torvalds 
2631da177e4SLinus Torvalds 	return;
2641da177e4SLinus Torvalds }
2651da177e4SLinus Torvalds 
26602df8b93SVenkatesh Pallipadi static atomic_t c3_cpu_count;
26702df8b93SVenkatesh Pallipadi 
268991528d7SVenkatesh Pallipadi /* Common C-state entry for C2, C3, .. */
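/*
 * Entry is either via the architectural FFH mechanism (MONITOR/MWAIT on x86)
 * or via a read of the state's P_LVL I/O port; in the I/O case the PM timer
 * read afterwards is only a harmless dummy access (see the comment below).
 */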
269991528d7SVenkatesh Pallipadi static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
270991528d7SVenkatesh Pallipadi {
271bc71bec9Svenkatesh.pallipadi@intel.com 	if (cstate->entry_method == ACPI_CSTATE_FFH) {
272991528d7SVenkatesh Pallipadi 		/* Call into architectural FFH based C-state */
273991528d7SVenkatesh Pallipadi 		acpi_processor_ffh_cstate_enter(cstate);
274991528d7SVenkatesh Pallipadi 	} else {
275991528d7SVenkatesh Pallipadi 		int unused;
276991528d7SVenkatesh Pallipadi 		/* IO port based C-state */
277991528d7SVenkatesh Pallipadi 		inb(cstate->address);
278991528d7SVenkatesh Pallipadi 		/* Dummy wait op - must do something useless after the P_LVL2 read
279991528d7SVenkatesh Pallipadi 		   because chipsets cannot guarantee that the STPCLK# signal
280991528d7SVenkatesh Pallipadi 		   gets asserted in time to freeze execution properly. */
281cee324b1SAlexey Starikovskiy 		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
282991528d7SVenkatesh Pallipadi 	}
283991528d7SVenkatesh Pallipadi }
2844f86d3a8SLen Brown #endif /* !CONFIG_CPU_IDLE */
285991528d7SVenkatesh Pallipadi 
286169a0abbSThomas Gleixner #ifdef ARCH_APICTIMER_STOPS_ON_C3
287169a0abbSThomas Gleixner 
288169a0abbSThomas Gleixner /*
289169a0abbSThomas Gleixner  * Some BIOS implementations switch to C3 in the published C2 state.
290296d93cdSLinus Torvalds  * This seems to be a common problem on AMD boxen, but other vendors
291296d93cdSLinus Torvalds  * are affected too. We pick the most conservative approach: we assume
292296d93cdSLinus Torvalds  * that the local APIC stops in both C2 and C3.
293169a0abbSThomas Gleixner  */
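/*
 * When the local APIC timer is expected to stop in a given C-state, tick
 * duties are handed over to a broadcast clockevent device (e.g. PIT/HPET)
 * for the duration of the idle period; see acpi_state_timer_broadcast().
 */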
294169a0abbSThomas Gleixner static void acpi_timer_check_state(int state, struct acpi_processor *pr,
295169a0abbSThomas Gleixner 				   struct acpi_processor_cx *cx)
296169a0abbSThomas Gleixner {
297169a0abbSThomas Gleixner 	struct acpi_processor_power *pwr = &pr->power;
298e585bef8SThomas Gleixner 	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
299169a0abbSThomas Gleixner 
300169a0abbSThomas Gleixner 	/*
301169a0abbSThomas Gleixner 	 * Check if one of the previous states already marked the local APIC
302169a0abbSThomas Gleixner 	 * timer unstable
303169a0abbSThomas Gleixner 	 */
304169a0abbSThomas Gleixner 	if (pwr->timer_broadcast_on_state < state)
305169a0abbSThomas Gleixner 		return;
306169a0abbSThomas Gleixner 
307e585bef8SThomas Gleixner 	if (cx->type >= type)
308169a0abbSThomas Gleixner 		pr->power.timer_broadcast_on_state = state;
309169a0abbSThomas Gleixner }
310169a0abbSThomas Gleixner 
311169a0abbSThomas Gleixner static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
312169a0abbSThomas Gleixner {
313e9e2cdb4SThomas Gleixner 	unsigned long reason;
314e9e2cdb4SThomas Gleixner 
315e9e2cdb4SThomas Gleixner 	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
316e9e2cdb4SThomas Gleixner 		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
317e9e2cdb4SThomas Gleixner 
318e9e2cdb4SThomas Gleixner 	clockevents_notify(reason, &pr->id);
319e9e2cdb4SThomas Gleixner }
320e9e2cdb4SThomas Gleixner 
321e9e2cdb4SThomas Gleixner /* Power(C) State timer broadcast control */
322e9e2cdb4SThomas Gleixner static void acpi_state_timer_broadcast(struct acpi_processor *pr,
323e9e2cdb4SThomas Gleixner 				       struct acpi_processor_cx *cx,
324e9e2cdb4SThomas Gleixner 				       int broadcast)
325e9e2cdb4SThomas Gleixner {
326e9e2cdb4SThomas Gleixner 	int state = cx - pr->power.states;
327e9e2cdb4SThomas Gleixner 
328e9e2cdb4SThomas Gleixner 	if (state >= pr->power.timer_broadcast_on_state) {
329e9e2cdb4SThomas Gleixner 		unsigned long reason;
330e9e2cdb4SThomas Gleixner 
331e9e2cdb4SThomas Gleixner 		reason = broadcast ?  CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
332e9e2cdb4SThomas Gleixner 			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
333e9e2cdb4SThomas Gleixner 		clockevents_notify(reason, &pr->id);
334e9e2cdb4SThomas Gleixner 	}
335169a0abbSThomas Gleixner }
336169a0abbSThomas Gleixner 
337169a0abbSThomas Gleixner #else
338169a0abbSThomas Gleixner 
339169a0abbSThomas Gleixner static void acpi_timer_check_state(int state, struct acpi_processor *pr,
340169a0abbSThomas Gleixner 				   struct acpi_processor_cx *cstate) { }
341169a0abbSThomas Gleixner static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
342e9e2cdb4SThomas Gleixner static void acpi_state_timer_broadcast(struct acpi_processor *pr,
343e9e2cdb4SThomas Gleixner 				       struct acpi_processor_cx *cx,
344e9e2cdb4SThomas Gleixner 				       int broadcast)
345e9e2cdb4SThomas Gleixner {
346e9e2cdb4SThomas Gleixner }
347169a0abbSThomas Gleixner 
348169a0abbSThomas Gleixner #endif
349169a0abbSThomas Gleixner 
350b04e7bdbSThomas Gleixner /*
351b04e7bdbSThomas Gleixner  * Suspend / resume control
352b04e7bdbSThomas Gleixner  */
353b04e7bdbSThomas Gleixner static int acpi_idle_suspend;
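/*
 * While acpi_idle_suspend is set, the idle handler bypasses C2/C3 and
 * falls back to the saved pm_idle handler or a plain safe halt.
 */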
354b04e7bdbSThomas Gleixner 
355b04e7bdbSThomas Gleixner int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
356b04e7bdbSThomas Gleixner {
357b04e7bdbSThomas Gleixner 	acpi_idle_suspend = 1;
358b04e7bdbSThomas Gleixner 	return 0;
359b04e7bdbSThomas Gleixner }
360b04e7bdbSThomas Gleixner 
361b04e7bdbSThomas Gleixner int acpi_processor_resume(struct acpi_device * device)
362b04e7bdbSThomas Gleixner {
363b04e7bdbSThomas Gleixner 	acpi_idle_suspend = 0;
364b04e7bdbSThomas Gleixner 	return 0;
365b04e7bdbSThomas Gleixner }
366b04e7bdbSThomas Gleixner 
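/*
 * tsc_halts_in_c() reports whether the TSC stops in the given C-state on
 * this CPU; the idle paths below use it to mark the TSC unstable after
 * C2/C3 residency, since a stopped TSC skews sched_clock().
 */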
367ddb25f9aSAndi Kleen #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
368ddb25f9aSAndi Kleen static int tsc_halts_in_c(int state)
369ddb25f9aSAndi Kleen {
370ddb25f9aSAndi Kleen 	switch (boot_cpu_data.x86_vendor) {
371ddb25f9aSAndi Kleen 	case X86_VENDOR_AMD:
372ddb25f9aSAndi Kleen 		/*
373ddb25f9aSAndi Kleen 		 * AMD Fam10h TSC will tick in all
374ddb25f9aSAndi Kleen 		 * C/P/S0/S1 states when this bit is set.
375ddb25f9aSAndi Kleen 		 */
376ddb25f9aSAndi Kleen 		if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
377ddb25f9aSAndi Kleen 			return 0;
378ddb25f9aSAndi Kleen 		/*FALL THROUGH*/
379ddb25f9aSAndi Kleen 	case X86_VENDOR_INTEL:
380ddb25f9aSAndi Kleen 		/* Several cases known where TSC halts in C2 too */
381ddb25f9aSAndi Kleen 	default:
382ddb25f9aSAndi Kleen 		return state > ACPI_STATE_C1;
383ddb25f9aSAndi Kleen 	}
384ddb25f9aSAndi Kleen }
385ddb25f9aSAndi Kleen #endif
386ddb25f9aSAndi Kleen 
3874f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
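/*
 * Legacy idle handler (used when CONFIG_CPU_IDLE is not set): check
 * bus-master activity, enter the currently selected C-state, measure the
 * time actually slept, and then apply the promotion/demotion heuristics
 * below to pick the next state.
 */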
3881da177e4SLinus Torvalds static void acpi_processor_idle(void)
3891da177e4SLinus Torvalds {
3901da177e4SLinus Torvalds 	struct acpi_processor *pr = NULL;
3911da177e4SLinus Torvalds 	struct acpi_processor_cx *cx = NULL;
3921da177e4SLinus Torvalds 	struct acpi_processor_cx *next_state = NULL;
3931da177e4SLinus Torvalds 	int sleep_ticks = 0;
3941da177e4SLinus Torvalds 	u32 t1, t2 = 0;
3951da177e4SLinus Torvalds 
3961da177e4SLinus Torvalds 	/*
3971da177e4SLinus Torvalds 	 * Interrupts must be disabled during bus mastering calculations and
3981da177e4SLinus Torvalds 	 * for C2/C3 transitions.
3991da177e4SLinus Torvalds 	 */
4001da177e4SLinus Torvalds 	local_irq_disable();
4011da177e4SLinus Torvalds 
402d5a3d32aSVenkatesh Pallipadi 	pr = processors[smp_processor_id()];
403d5a3d32aSVenkatesh Pallipadi 	if (!pr) {
404d5a3d32aSVenkatesh Pallipadi 		local_irq_enable();
405d5a3d32aSVenkatesh Pallipadi 		return;
406d5a3d32aSVenkatesh Pallipadi 	}
407d5a3d32aSVenkatesh Pallipadi 
4081da177e4SLinus Torvalds 	/*
4091da177e4SLinus Torvalds 	 * Check whether we truly need to go idle, or should
4101da177e4SLinus Torvalds 	 * reschedule:
4111da177e4SLinus Torvalds 	 */
4121da177e4SLinus Torvalds 	if (unlikely(need_resched())) {
4131da177e4SLinus Torvalds 		local_irq_enable();
4141da177e4SLinus Torvalds 		return;
4151da177e4SLinus Torvalds 	}
4161da177e4SLinus Torvalds 
4171da177e4SLinus Torvalds 	cx = pr->power.state;
418b04e7bdbSThomas Gleixner 	if (!cx || acpi_idle_suspend) {
41964c7c8f8SNick Piggin 		if (pm_idle_save)
42064c7c8f8SNick Piggin 			pm_idle_save();
42164c7c8f8SNick Piggin 		else
42264c7c8f8SNick Piggin 			acpi_safe_halt();
4232e906655Svenkatesh.pallipadi@intel.com 
4242e906655Svenkatesh.pallipadi@intel.com 		local_irq_enable();
42564c7c8f8SNick Piggin 		return;
42664c7c8f8SNick Piggin 	}
4271da177e4SLinus Torvalds 
4281da177e4SLinus Torvalds 	/*
4291da177e4SLinus Torvalds 	 * Check BM Activity
4301da177e4SLinus Torvalds 	 * -----------------
4311da177e4SLinus Torvalds 	 * Check for bus mastering activity (if required), record, and check
4321da177e4SLinus Torvalds 	 * for demotion.
4331da177e4SLinus Torvalds 	 */
4341da177e4SLinus Torvalds 	if (pr->flags.bm_check) {
4351da177e4SLinus Torvalds 		u32 bm_status = 0;
4361da177e4SLinus Torvalds 		unsigned long diff = jiffies - pr->power.bm_check_timestamp;
4371da177e4SLinus Torvalds 
438c5ab81caSDominik Brodowski 		if (diff > 31)
439c5ab81caSDominik Brodowski 			diff = 31;
4401da177e4SLinus Torvalds 
441c5ab81caSDominik Brodowski 		pr->power.bm_activity <<= diff;
4421da177e4SLinus Torvalds 
443d8c71b6dSBob Moore 		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
4441da177e4SLinus Torvalds 		if (bm_status) {
445c5ab81caSDominik Brodowski 			pr->power.bm_activity |= 0x1;
446d8c71b6dSBob Moore 			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
4471da177e4SLinus Torvalds 		}
4481da177e4SLinus Torvalds 		/*
4491da177e4SLinus Torvalds 		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
4501da177e4SLinus Torvalds 		 * the true state of bus mastering activity; forcing us to
4511da177e4SLinus Torvalds 		 * manually check the BMIDEA bit of each IDE channel.
4521da177e4SLinus Torvalds 		 */
4531da177e4SLinus Torvalds 		else if (errata.piix4.bmisx) {
4541da177e4SLinus Torvalds 			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
4551da177e4SLinus Torvalds 			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
456c5ab81caSDominik Brodowski 				pr->power.bm_activity |= 0x1;
4571da177e4SLinus Torvalds 		}
4581da177e4SLinus Torvalds 
4591da177e4SLinus Torvalds 		pr->power.bm_check_timestamp = jiffies;
4601da177e4SLinus Torvalds 
4611da177e4SLinus Torvalds 		/*
462c4a001b1SDominik Brodowski 		 * If bus mastering is or was active this jiffy, demote
4631da177e4SLinus Torvalds 		 * to avoid a faulty transition.  Note that the processor
4641da177e4SLinus Torvalds 		 * won't enter a low-power state during this call (to this
465c4a001b1SDominik Brodowski 		 * function) but should upon the next.
4661da177e4SLinus Torvalds 		 *
4671da177e4SLinus Torvalds 		 * TBD: A better policy might be to fall back to the demotion
4681da177e4SLinus Torvalds 		 *      state (use it for this quantum only) instead of
4691da177e4SLinus Torvalds 		 *      demoting -- and rely on duration as our sole demotion
4701da177e4SLinus Torvalds 		 *      qualification.  This may, however, introduce DMA
4711da177e4SLinus Torvalds 		 *      issues (e.g. floppy DMA transfer overrun/underrun).
4721da177e4SLinus Torvalds 		 */
473c4a001b1SDominik Brodowski 		if ((pr->power.bm_activity & 0x1) &&
474c4a001b1SDominik Brodowski 		    cx->demotion.threshold.bm) {
4751da177e4SLinus Torvalds 			local_irq_enable();
4761da177e4SLinus Torvalds 			next_state = cx->demotion.state;
4771da177e4SLinus Torvalds 			goto end;
4781da177e4SLinus Torvalds 		}
4791da177e4SLinus Torvalds 	}
4801da177e4SLinus Torvalds 
4814c033552SVenkatesh Pallipadi #ifdef CONFIG_HOTPLUG_CPU
4824c033552SVenkatesh Pallipadi 	/*
4834c033552SVenkatesh Pallipadi 	 * Check for P_LVL2_UP flag before entering C2 and above on
4844c033552SVenkatesh Pallipadi 	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
4854c033552SVenkatesh Pallipadi 	 * an SMP system. We do it here instead of at the _CST/P_LVL
4864c033552SVenkatesh Pallipadi 	 */
4874c033552SVenkatesh Pallipadi 	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
488cee324b1SAlexey Starikovskiy 	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
4891e483969SDavid Shaohua Li 		cx = &pr->power.states[ACPI_STATE_C1];
4904c033552SVenkatesh Pallipadi #endif
4911e483969SDavid Shaohua Li 
4921da177e4SLinus Torvalds 	/*
4931da177e4SLinus Torvalds 	 * Sleep:
4941da177e4SLinus Torvalds 	 * ------
4951da177e4SLinus Torvalds 	 * Invoke the current Cx state to put the processor to sleep.
4961da177e4SLinus Torvalds 	 */
4972a298a35SNick Piggin 	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
498495ab9c0SAndi Kleen 		current_thread_info()->status &= ~TS_POLLING;
4990888f06aSIngo Molnar 		/*
5000888f06aSIngo Molnar 		 * TS_POLLING-cleared state must be visible before we
5010888f06aSIngo Molnar 		 * test NEED_RESCHED:
5020888f06aSIngo Molnar 		 */
5030888f06aSIngo Molnar 		smp_mb();
5042a298a35SNick Piggin 		if (need_resched()) {
505495ab9c0SAndi Kleen 			current_thread_info()->status |= TS_POLLING;
506af2eb17bSLinus Torvalds 			local_irq_enable();
5072a298a35SNick Piggin 			return;
5082a298a35SNick Piggin 		}
5092a298a35SNick Piggin 	}
5102a298a35SNick Piggin 
5111da177e4SLinus Torvalds 	switch (cx->type) {
5121da177e4SLinus Torvalds 
5131da177e4SLinus Torvalds 	case ACPI_STATE_C1:
5141da177e4SLinus Torvalds 		/*
5151da177e4SLinus Torvalds 		 * Invoke C1.
5161da177e4SLinus Torvalds 		 * Use the appropriate idle routine, the one that would
5171da177e4SLinus Torvalds 		 * be used without acpi C-states.
5181da177e4SLinus Torvalds 		 */
5191da177e4SLinus Torvalds 		if (pm_idle_save)
5201da177e4SLinus Torvalds 			pm_idle_save();
5211da177e4SLinus Torvalds 		else
52264c7c8f8SNick Piggin 			acpi_safe_halt();
52364c7c8f8SNick Piggin 
5241da177e4SLinus Torvalds 		/*
5251da177e4SLinus Torvalds 		 * TBD: Can't get time duration while in C1, as resumes
5261da177e4SLinus Torvalds 		 *      go to an ISR rather than here.  Need to instrument
5271da177e4SLinus Torvalds 		 *      base interrupt handler.
5282aa44d05SIngo Molnar 		 *
5292aa44d05SIngo Molnar 		 * Note: the TSC better not stop in C1, sched_clock() will
5302aa44d05SIngo Molnar 		 *       skew otherwise.
5311da177e4SLinus Torvalds 		 */
5321da177e4SLinus Torvalds 		sleep_ticks = 0xFFFFFFFF;
5332e906655Svenkatesh.pallipadi@intel.com 		local_irq_enable();
5341da177e4SLinus Torvalds 		break;
5351da177e4SLinus Torvalds 
5361da177e4SLinus Torvalds 	case ACPI_STATE_C2:
5371da177e4SLinus Torvalds 		/* Get start time (ticks) */
538cee324b1SAlexey Starikovskiy 		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
5392aa44d05SIngo Molnar 		/* Tell the scheduler that we are going deep-idle: */
5402aa44d05SIngo Molnar 		sched_clock_idle_sleep_event();
5411da177e4SLinus Torvalds 		/* Invoke C2 */
542e9e2cdb4SThomas Gleixner 		acpi_state_timer_broadcast(pr, cx, 1);
543991528d7SVenkatesh Pallipadi 		acpi_cstate_enter(cx);
5441da177e4SLinus Torvalds 		/* Get end time (ticks) */
545cee324b1SAlexey Starikovskiy 		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
546539eb11eSjohn stultz 
5470aa366f3STony Luck #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
548539eb11eSjohn stultz 		/* TSC halts in C2, so notify users */
549ddb25f9aSAndi Kleen 		if (tsc_halts_in_c(ACPI_STATE_C2))
5505a90cf20Sjohn stultz 			mark_tsc_unstable("possible TSC halt in C2");
551539eb11eSjohn stultz #endif
5522aa44d05SIngo Molnar 		/* Compute time (ticks) that we were actually asleep */
5532aa44d05SIngo Molnar 		sleep_ticks = ticks_elapsed(t1, t2);
5542aa44d05SIngo Molnar 
5552aa44d05SIngo Molnar 		/* Tell the scheduler how much we idled: */
5562aa44d05SIngo Molnar 		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
5572aa44d05SIngo Molnar 
5581da177e4SLinus Torvalds 		/* Re-enable interrupts */
5591da177e4SLinus Torvalds 		local_irq_enable();
5602aa44d05SIngo Molnar 		/* Do not account our idle-switching overhead: */
5612aa44d05SIngo Molnar 		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;
5622aa44d05SIngo Molnar 
563495ab9c0SAndi Kleen 		current_thread_info()->status |= TS_POLLING;
564e9e2cdb4SThomas Gleixner 		acpi_state_timer_broadcast(pr, cx, 0);
5651da177e4SLinus Torvalds 		break;
5661da177e4SLinus Torvalds 
5671da177e4SLinus Torvalds 	case ACPI_STATE_C3:
568bde6f5f5SVenki Pallipadi 		acpi_unlazy_tlb(smp_processor_id());
56918eab855SVenkatesh Pallipadi 		/*
570e17bcb43SThomas Gleixner 		 * Must be done before busmaster disable as we might
571e17bcb43SThomas Gleixner 		 * need to access HPET !
572e17bcb43SThomas Gleixner 		 */
573e17bcb43SThomas Gleixner 		acpi_state_timer_broadcast(pr, cx, 1);
574e17bcb43SThomas Gleixner 		/*
57518eab855SVenkatesh Pallipadi 		 * disable bus master
57618eab855SVenkatesh Pallipadi 		 * bm_check implies we need ARB_DIS
57718eab855SVenkatesh Pallipadi 		 * !bm_check implies we need cache flush
57818eab855SVenkatesh Pallipadi 		 * bm_control implies whether we can do ARB_DIS
57918eab855SVenkatesh Pallipadi 		 *
58018eab855SVenkatesh Pallipadi 		 * That leaves a case where bm_check is set and bm_control is
58118eab855SVenkatesh Pallipadi 		 * not set. In that case we cannot do much, we enter C3
58218eab855SVenkatesh Pallipadi 		 * without doing anything.
58318eab855SVenkatesh Pallipadi 		 */
58418eab855SVenkatesh Pallipadi 		if (pr->flags.bm_check && pr->flags.bm_control) {
58502df8b93SVenkatesh Pallipadi 			if (atomic_inc_return(&c3_cpu_count) ==
58602df8b93SVenkatesh Pallipadi 			    num_online_cpus()) {
58702df8b93SVenkatesh Pallipadi 				/*
58802df8b93SVenkatesh Pallipadi 				 * All CPUs are trying to go to C3
58902df8b93SVenkatesh Pallipadi 				 * Disable bus master arbitration
59002df8b93SVenkatesh Pallipadi 				 */
591d8c71b6dSBob Moore 				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
59202df8b93SVenkatesh Pallipadi 			}
59318eab855SVenkatesh Pallipadi 		} else if (!pr->flags.bm_check) {
59402df8b93SVenkatesh Pallipadi 			/* SMP with no shared cache... Invalidate cache  */
59502df8b93SVenkatesh Pallipadi 			ACPI_FLUSH_CPU_CACHE();
59602df8b93SVenkatesh Pallipadi 		}
59702df8b93SVenkatesh Pallipadi 
5981da177e4SLinus Torvalds 		/* Get start time (ticks) */
599cee324b1SAlexey Starikovskiy 		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
6001da177e4SLinus Torvalds 		/* Invoke C3 */
6012aa44d05SIngo Molnar 		/* Tell the scheduler that we are going deep-idle: */
6022aa44d05SIngo Molnar 		sched_clock_idle_sleep_event();
603991528d7SVenkatesh Pallipadi 		acpi_cstate_enter(cx);
6041da177e4SLinus Torvalds 		/* Get end time (ticks) */
605cee324b1SAlexey Starikovskiy 		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
60618eab855SVenkatesh Pallipadi 		if (pr->flags.bm_check && pr->flags.bm_control) {
6071da177e4SLinus Torvalds 			/* Enable bus master arbitration */
60802df8b93SVenkatesh Pallipadi 			atomic_dec(&c3_cpu_count);
609d8c71b6dSBob Moore 			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
61002df8b93SVenkatesh Pallipadi 		}
61102df8b93SVenkatesh Pallipadi 
6120aa366f3STony Luck #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
613539eb11eSjohn stultz 		/* TSC halts in C3, so notify users */
614ddb25f9aSAndi Kleen 		if (tsc_halts_in_c(ACPI_STATE_C3))
6155a90cf20Sjohn stultz 			mark_tsc_unstable("TSC halts in C3");
616539eb11eSjohn stultz #endif
6172aa44d05SIngo Molnar 		/* Compute time (ticks) that we were actually asleep */
6182aa44d05SIngo Molnar 		sleep_ticks = ticks_elapsed(t1, t2);
6192aa44d05SIngo Molnar 		/* Tell the scheduler how much we idled: */
6202aa44d05SIngo Molnar 		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
6212aa44d05SIngo Molnar 
6221da177e4SLinus Torvalds 		/* Re-enable interrupts */
6231da177e4SLinus Torvalds 		local_irq_enable();
6242aa44d05SIngo Molnar 		/* Do not account our idle-switching overhead: */
6252aa44d05SIngo Molnar 		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;
6262aa44d05SIngo Molnar 
627495ab9c0SAndi Kleen 		current_thread_info()->status |= TS_POLLING;
628e9e2cdb4SThomas Gleixner 		acpi_state_timer_broadcast(pr, cx, 0);
6291da177e4SLinus Torvalds 		break;
6301da177e4SLinus Torvalds 
6311da177e4SLinus Torvalds 	default:
6321da177e4SLinus Torvalds 		local_irq_enable();
6331da177e4SLinus Torvalds 		return;
6341da177e4SLinus Torvalds 	}
635a3c6598fSDominik Brodowski 	cx->usage++;
636a3c6598fSDominik Brodowski 	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
637a3c6598fSDominik Brodowski 		cx->time += sleep_ticks;
6381da177e4SLinus Torvalds 
6391da177e4SLinus Torvalds 	next_state = pr->power.state;
6401da177e4SLinus Torvalds 
6411e483969SDavid Shaohua Li #ifdef CONFIG_HOTPLUG_CPU
6421e483969SDavid Shaohua Li 	/* Don't do promotion/demotion */
6431e483969SDavid Shaohua Li 	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
644cee324b1SAlexey Starikovskiy 	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
6451e483969SDavid Shaohua Li 		next_state = cx;
6461e483969SDavid Shaohua Li 		goto end;
6471e483969SDavid Shaohua Li 	}
6481e483969SDavid Shaohua Li #endif
6491e483969SDavid Shaohua Li 
6501da177e4SLinus Torvalds 	/*
6511da177e4SLinus Torvalds 	 * Promotion?
6521da177e4SLinus Torvalds 	 * ----------
6531da177e4SLinus Torvalds 	 * Track the number of longs (time asleep is greater than threshold)
6541da177e4SLinus Torvalds 	 * and promote when the count threshold is reached.  Note that bus
6551da177e4SLinus Torvalds 	 * mastering activity may prevent promotions.
6561da177e4SLinus Torvalds 	 * Do not promote above max_cstate.
6571da177e4SLinus Torvalds 	 */
6581da177e4SLinus Torvalds 	if (cx->promotion.state &&
6591da177e4SLinus Torvalds 	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
6605c87579eSArjan van de Ven 		if (sleep_ticks > cx->promotion.threshold.ticks &&
661f011e2e2SMark Gross 		  cx->promotion.state->latency <=
662f011e2e2SMark Gross 				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
6631da177e4SLinus Torvalds 			cx->promotion.count++;
6641da177e4SLinus Torvalds 			cx->demotion.count = 0;
6654be44fcdSLen Brown 			if (cx->promotion.count >=
6664be44fcdSLen Brown 			    cx->promotion.threshold.count) {
6671da177e4SLinus Torvalds 				if (pr->flags.bm_check) {
6684be44fcdSLen Brown 					if (!
6694be44fcdSLen Brown 					    (pr->power.bm_activity & cx->
6704be44fcdSLen Brown 					     promotion.threshold.bm)) {
6714be44fcdSLen Brown 						next_state =
6724be44fcdSLen Brown 						    cx->promotion.state;
6731da177e4SLinus Torvalds 						goto end;
6741da177e4SLinus Torvalds 					}
6754be44fcdSLen Brown 				} else {
6761da177e4SLinus Torvalds 					next_state = cx->promotion.state;
6771da177e4SLinus Torvalds 					goto end;
6781da177e4SLinus Torvalds 				}
6791da177e4SLinus Torvalds 			}
6801da177e4SLinus Torvalds 		}
6811da177e4SLinus Torvalds 	}
6821da177e4SLinus Torvalds 
6831da177e4SLinus Torvalds 	/*
6841da177e4SLinus Torvalds 	 * Demotion?
6851da177e4SLinus Torvalds 	 * ---------
6861da177e4SLinus Torvalds 	 * Track the number of shorts (time asleep is less than time threshold)
6871da177e4SLinus Torvalds 	 * and demote when the usage threshold is reached.
6881da177e4SLinus Torvalds 	 */
6891da177e4SLinus Torvalds 	if (cx->demotion.state) {
6901da177e4SLinus Torvalds 		if (sleep_ticks < cx->demotion.threshold.ticks) {
6911da177e4SLinus Torvalds 			cx->demotion.count++;
6921da177e4SLinus Torvalds 			cx->promotion.count = 0;
6931da177e4SLinus Torvalds 			if (cx->demotion.count >= cx->demotion.threshold.count) {
6941da177e4SLinus Torvalds 				next_state = cx->demotion.state;
6951da177e4SLinus Torvalds 				goto end;
6961da177e4SLinus Torvalds 			}
6971da177e4SLinus Torvalds 		}
6981da177e4SLinus Torvalds 	}
6991da177e4SLinus Torvalds 
7001da177e4SLinus Torvalds       end:
7011da177e4SLinus Torvalds 	/*
7021da177e4SLinus Torvalds 	 * Demote if current state exceeds max_cstate
7035c87579eSArjan van de Ven 	 * or if the latency of the current state is unacceptable
7041da177e4SLinus Torvalds 	 */
7055c87579eSArjan van de Ven 	if ((pr->power.state - pr->power.states) > max_cstate ||
706f011e2e2SMark Gross 		pr->power.state->latency >
707f011e2e2SMark Gross 				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
7081da177e4SLinus Torvalds 		if (cx->demotion.state)
7091da177e4SLinus Torvalds 			next_state = cx->demotion.state;
7101da177e4SLinus Torvalds 	}
7111da177e4SLinus Torvalds 
7121da177e4SLinus Torvalds 	/*
7131da177e4SLinus Torvalds 	 * New Cx State?
7141da177e4SLinus Torvalds 	 * -------------
7151da177e4SLinus Torvalds 	 * If we're going to start using a new Cx state we must clean up
7161da177e4SLinus Torvalds 	 * from the previous and prepare to use the new.
7171da177e4SLinus Torvalds 	 */
7181da177e4SLinus Torvalds 	if (next_state != pr->power.state)
7191da177e4SLinus Torvalds 		acpi_processor_power_activate(pr, next_state);
7201da177e4SLinus Torvalds }
7211da177e4SLinus Torvalds 
7224be44fcdSLen Brown static int acpi_processor_set_power_policy(struct acpi_processor *pr)
7231da177e4SLinus Torvalds {
7241da177e4SLinus Torvalds 	unsigned int i;
7251da177e4SLinus Torvalds 	unsigned int state_is_set = 0;
7261da177e4SLinus Torvalds 	struct acpi_processor_cx *lower = NULL;
7271da177e4SLinus Torvalds 	struct acpi_processor_cx *higher = NULL;
7281da177e4SLinus Torvalds 	struct acpi_processor_cx *cx;
7291da177e4SLinus Torvalds 
7301da177e4SLinus Torvalds 
7311da177e4SLinus Torvalds 	if (!pr)
732d550d98dSPatrick Mochel 		return -EINVAL;
7331da177e4SLinus Torvalds 
7341da177e4SLinus Torvalds 	/*
7351da177e4SLinus Torvalds 	 * This function sets the default Cx state policy (OS idle handler).
7361da177e4SLinus Torvalds 	 * Our scheme is to promote quickly to C2 but more conservatively
7371da177e4SLinus Torvalds 	 * to C3.  We're favoring C2  for its characteristics of low latency
7381da177e4SLinus Torvalds 	 * (quick response), good power savings, and ability to allow bus
7391da177e4SLinus Torvalds 	 * mastering activity.  Note that the Cx state policy is completely
7401da177e4SLinus Torvalds 	 * customizable and can be altered dynamically.
7411da177e4SLinus Torvalds 	 */
7421da177e4SLinus Torvalds 
7431da177e4SLinus Torvalds 	/* startup state */
7441da177e4SLinus Torvalds 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
7451da177e4SLinus Torvalds 		cx = &pr->power.states[i];
7461da177e4SLinus Torvalds 		if (!cx->valid)
7471da177e4SLinus Torvalds 			continue;
7481da177e4SLinus Torvalds 
7491da177e4SLinus Torvalds 		if (!state_is_set)
7501da177e4SLinus Torvalds 			pr->power.state = cx;
7511da177e4SLinus Torvalds 		state_is_set++;
7521da177e4SLinus Torvalds 		break;
7531da177e4SLinus Torvalds 	}
7541da177e4SLinus Torvalds 
7551da177e4SLinus Torvalds 	if (!state_is_set)
756d550d98dSPatrick Mochel 		return -ENODEV;
7571da177e4SLinus Torvalds 
7581da177e4SLinus Torvalds 	/* demotion */
7591da177e4SLinus Torvalds 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
7601da177e4SLinus Torvalds 		cx = &pr->power.states[i];
7611da177e4SLinus Torvalds 		if (!cx->valid)
7621da177e4SLinus Torvalds 			continue;
7631da177e4SLinus Torvalds 
7641da177e4SLinus Torvalds 		if (lower) {
7651da177e4SLinus Torvalds 			cx->demotion.state = lower;
7661da177e4SLinus Torvalds 			cx->demotion.threshold.ticks = cx->latency_ticks;
7671da177e4SLinus Torvalds 			cx->demotion.threshold.count = 1;
7681da177e4SLinus Torvalds 			if (cx->type == ACPI_STATE_C3)
7691da177e4SLinus Torvalds 				cx->demotion.threshold.bm = bm_history;
7701da177e4SLinus Torvalds 		}
7711da177e4SLinus Torvalds 
7721da177e4SLinus Torvalds 		lower = cx;
7731da177e4SLinus Torvalds 	}
7741da177e4SLinus Torvalds 
7751da177e4SLinus Torvalds 	/* promotion */
7761da177e4SLinus Torvalds 	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
7771da177e4SLinus Torvalds 		cx = &pr->power.states[i];
7781da177e4SLinus Torvalds 		if (!cx->valid)
7791da177e4SLinus Torvalds 			continue;
7801da177e4SLinus Torvalds 
7811da177e4SLinus Torvalds 		if (higher) {
7821da177e4SLinus Torvalds 			cx->promotion.state = higher;
7831da177e4SLinus Torvalds 			cx->promotion.threshold.ticks = cx->latency_ticks;
7841da177e4SLinus Torvalds 			if (cx->type >= ACPI_STATE_C2)
7851da177e4SLinus Torvalds 				cx->promotion.threshold.count = 4;
7861da177e4SLinus Torvalds 			else
7871da177e4SLinus Torvalds 				cx->promotion.threshold.count = 10;
7881da177e4SLinus Torvalds 			if (higher->type == ACPI_STATE_C3)
7891da177e4SLinus Torvalds 				cx->promotion.threshold.bm = bm_history;
7901da177e4SLinus Torvalds 		}
7911da177e4SLinus Torvalds 
7921da177e4SLinus Torvalds 		higher = cx;
7931da177e4SLinus Torvalds 	}
7941da177e4SLinus Torvalds 
795d550d98dSPatrick Mochel 	return 0;
7961da177e4SLinus Torvalds }
7974f86d3a8SLen Brown #endif /* !CONFIG_CPU_IDLE */
7981da177e4SLinus Torvalds 
7991da177e4SLinus Torvalds static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
8001da177e4SLinus Torvalds {
8011da177e4SLinus Torvalds 
8021da177e4SLinus Torvalds 	if (!pr)
803d550d98dSPatrick Mochel 		return -EINVAL;
8041da177e4SLinus Torvalds 
8051da177e4SLinus Torvalds 	if (!pr->pblk)
806d550d98dSPatrick Mochel 		return -ENODEV;
8071da177e4SLinus Torvalds 
8081da177e4SLinus Torvalds 	/* if info is obtained from pblk/fadt, type equals state */
8091da177e4SLinus Torvalds 	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
8101da177e4SLinus Torvalds 	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
8111da177e4SLinus Torvalds 
8124c033552SVenkatesh Pallipadi #ifndef CONFIG_HOTPLUG_CPU
8134c033552SVenkatesh Pallipadi 	/*
8144c033552SVenkatesh Pallipadi 	 * Check for P_LVL2_UP flag before entering C2 and above on
8154c033552SVenkatesh Pallipadi 	 * an SMP system.
8164c033552SVenkatesh Pallipadi 	 */
817ad71860aSAlexey Starikovskiy 	if ((num_online_cpus() > 1) &&
818cee324b1SAlexey Starikovskiy 	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
819d550d98dSPatrick Mochel 		return -ENODEV;
8204c033552SVenkatesh Pallipadi #endif
8214c033552SVenkatesh Pallipadi 
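	/*
	 * Per the ACPI spec the processor block (P_BLK) is laid out as the
	 * 4-byte P_CNT register followed by the 1-byte P_LVL2 and P_LVL3
	 * entry registers, hence the +4/+5 offsets below.
	 */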
8221da177e4SLinus Torvalds 	/* determine C2 and C3 address from pblk */
8231da177e4SLinus Torvalds 	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
8241da177e4SLinus Torvalds 	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
8251da177e4SLinus Torvalds 
8261da177e4SLinus Torvalds 	/* determine latencies from FADT */
827cee324b1SAlexey Starikovskiy 	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
828cee324b1SAlexey Starikovskiy 	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
8291da177e4SLinus Torvalds 
8301da177e4SLinus Torvalds 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
8311da177e4SLinus Torvalds 			  "lvl2[0x%08x] lvl3[0x%08x]\n",
8321da177e4SLinus Torvalds 			  pr->power.states[ACPI_STATE_C2].address,
8331da177e4SLinus Torvalds 			  pr->power.states[ACPI_STATE_C3].address));
8341da177e4SLinus Torvalds 
835d550d98dSPatrick Mochel 	return 0;
8361da177e4SLinus Torvalds }
8371da177e4SLinus Torvalds 
838991528d7SVenkatesh Pallipadi static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
839acf05f4bSVenkatesh Pallipadi {
840991528d7SVenkatesh Pallipadi 	if (!pr->power.states[ACPI_STATE_C1].valid) {
841cf824788SJanosch Machowinski 		/* set the first C-State to C1 */
842991528d7SVenkatesh Pallipadi 		/* all processors need to support C1 */
843acf05f4bSVenkatesh Pallipadi 		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
844acf05f4bSVenkatesh Pallipadi 		pr->power.states[ACPI_STATE_C1].valid = 1;
845991528d7SVenkatesh Pallipadi 	}
846991528d7SVenkatesh Pallipadi 	/* the C0 state only exists as a filler in our array */
847991528d7SVenkatesh Pallipadi 	pr->power.states[ACPI_STATE_C0].valid = 1;
848d550d98dSPatrick Mochel 	return 0;
849acf05f4bSVenkatesh Pallipadi }
850acf05f4bSVenkatesh Pallipadi 
8511da177e4SLinus Torvalds static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
8521da177e4SLinus Torvalds {
8531da177e4SLinus Torvalds 	acpi_status status = 0;
8541da177e4SLinus Torvalds 	acpi_integer count;
855cf824788SJanosch Machowinski 	int current_count;
8561da177e4SLinus Torvalds 	int i;
8571da177e4SLinus Torvalds 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
8581da177e4SLinus Torvalds 	union acpi_object *cst;
8591da177e4SLinus Torvalds 
8601da177e4SLinus Torvalds 
8611da177e4SLinus Torvalds 	if (nocst)
862d550d98dSPatrick Mochel 		return -ENODEV;
8631da177e4SLinus Torvalds 
864991528d7SVenkatesh Pallipadi 	current_count = 0;
8651da177e4SLinus Torvalds 
8661da177e4SLinus Torvalds 	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
8671da177e4SLinus Torvalds 	if (ACPI_FAILURE(status)) {
8681da177e4SLinus Torvalds 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
869d550d98dSPatrick Mochel 		return -ENODEV;
8701da177e4SLinus Torvalds 	}
8711da177e4SLinus Torvalds 
87250dd0969SJan Engelhardt 	cst = buffer.pointer;
8731da177e4SLinus Torvalds 
8741da177e4SLinus Torvalds 	/* There must be at least 2 elements */
8751da177e4SLinus Torvalds 	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
8766468463aSLen Brown 		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
8771da177e4SLinus Torvalds 		status = -EFAULT;
8781da177e4SLinus Torvalds 		goto end;
8791da177e4SLinus Torvalds 	}
8801da177e4SLinus Torvalds 
8811da177e4SLinus Torvalds 	count = cst->package.elements[0].integer.value;
8821da177e4SLinus Torvalds 
8831da177e4SLinus Torvalds 	/* Validate number of power states. */
8841da177e4SLinus Torvalds 	if (count < 1 || count != cst->package.count - 1) {
8856468463aSLen Brown 		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
8861da177e4SLinus Torvalds 		status = -EFAULT;
8871da177e4SLinus Torvalds 		goto end;
8881da177e4SLinus Torvalds 	}
8891da177e4SLinus Torvalds 
8901da177e4SLinus Torvalds 	/* Tell driver that at least _CST is supported. */
8911da177e4SLinus Torvalds 	pr->flags.has_cst = 1;
8921da177e4SLinus Torvalds 
8931da177e4SLinus Torvalds 	for (i = 1; i <= count; i++) {
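	/*
	 * Each _CST entry is a 4-element package: the entry-method register
	 * (a buffer wrapping struct acpi_power_register), the C-state type,
	 * the worst-case latency in microseconds and the average power.
	 */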
8941da177e4SLinus Torvalds 		union acpi_object *element;
8951da177e4SLinus Torvalds 		union acpi_object *obj;
8961da177e4SLinus Torvalds 		struct acpi_power_register *reg;
8971da177e4SLinus Torvalds 		struct acpi_processor_cx cx;
8981da177e4SLinus Torvalds 
8991da177e4SLinus Torvalds 		memset(&cx, 0, sizeof(cx));
9001da177e4SLinus Torvalds 
90150dd0969SJan Engelhardt 		element = &(cst->package.elements[i]);
9021da177e4SLinus Torvalds 		if (element->type != ACPI_TYPE_PACKAGE)
9031da177e4SLinus Torvalds 			continue;
9041da177e4SLinus Torvalds 
9051da177e4SLinus Torvalds 		if (element->package.count != 4)
9061da177e4SLinus Torvalds 			continue;
9071da177e4SLinus Torvalds 
90850dd0969SJan Engelhardt 		obj = &(element->package.elements[0]);
9091da177e4SLinus Torvalds 
9101da177e4SLinus Torvalds 		if (obj->type != ACPI_TYPE_BUFFER)
9111da177e4SLinus Torvalds 			continue;
9121da177e4SLinus Torvalds 
9131da177e4SLinus Torvalds 		reg = (struct acpi_power_register *)obj->buffer.pointer;
9141da177e4SLinus Torvalds 
9151da177e4SLinus Torvalds 		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
9161da177e4SLinus Torvalds 		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
9171da177e4SLinus Torvalds 			continue;
9181da177e4SLinus Torvalds 
9191da177e4SLinus Torvalds 		/* There should be an easy way to extract an integer... */
92050dd0969SJan Engelhardt 		obj = &(element->package.elements[1]);
9211da177e4SLinus Torvalds 		if (obj->type != ACPI_TYPE_INTEGER)
9221da177e4SLinus Torvalds 			continue;
9231da177e4SLinus Torvalds 
9241da177e4SLinus Torvalds 		cx.type = obj->integer.value;
925991528d7SVenkatesh Pallipadi 		/*
926991528d7SVenkatesh Pallipadi 		 * Some buggy BIOSes won't list C1 in _CST -
927991528d7SVenkatesh Pallipadi 		 * Let acpi_processor_get_power_info_default() handle them later
928991528d7SVenkatesh Pallipadi 		 */
929991528d7SVenkatesh Pallipadi 		if (i == 1 && cx.type != ACPI_STATE_C1)
930991528d7SVenkatesh Pallipadi 			current_count++;
9311da177e4SLinus Torvalds 
932991528d7SVenkatesh Pallipadi 		cx.address = reg->address;
933991528d7SVenkatesh Pallipadi 		cx.index = current_count + 1;
9341da177e4SLinus Torvalds 
935bc71bec9Svenkatesh.pallipadi@intel.com 		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
936991528d7SVenkatesh Pallipadi 		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
937991528d7SVenkatesh Pallipadi 			if (acpi_processor_ffh_cstate_probe
938991528d7SVenkatesh Pallipadi 					(pr->id, &cx, reg) == 0) {
939bc71bec9Svenkatesh.pallipadi@intel.com 				cx.entry_method = ACPI_CSTATE_FFH;
940bc71bec9Svenkatesh.pallipadi@intel.com 			} else if (cx.type == ACPI_STATE_C1) {
941991528d7SVenkatesh Pallipadi 				/*
942991528d7SVenkatesh Pallipadi 				 * C1 is a special case where FIXED_HARDWARE
943991528d7SVenkatesh Pallipadi 				 * can be handled in a non-MWAIT way as well.
944991528d7SVenkatesh Pallipadi 				 * In that case, save this _CST entry info.
945991528d7SVenkatesh Pallipadi 				 * Otherwise, ignore this info and continue.
946991528d7SVenkatesh Pallipadi 				 */
947bc71bec9Svenkatesh.pallipadi@intel.com 				cx.entry_method = ACPI_CSTATE_HALT;
9484fcb2fcdSVenkatesh Pallipadi 				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
949bc71bec9Svenkatesh.pallipadi@intel.com 			} else {
9501da177e4SLinus Torvalds 				continue;
951991528d7SVenkatesh Pallipadi 			}
9524fcb2fcdSVenkatesh Pallipadi 		} else {
9534fcb2fcdSVenkatesh Pallipadi 			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
9544fcb2fcdSVenkatesh Pallipadi 				 cx.address);
955991528d7SVenkatesh Pallipadi 		}
9561da177e4SLinus Torvalds 
9574fcb2fcdSVenkatesh Pallipadi 
95850dd0969SJan Engelhardt 		obj = &(element->package.elements[2]);
9591da177e4SLinus Torvalds 		if (obj->type != ACPI_TYPE_INTEGER)
9601da177e4SLinus Torvalds 			continue;
9611da177e4SLinus Torvalds 
9621da177e4SLinus Torvalds 		cx.latency = obj->integer.value;
9631da177e4SLinus Torvalds 
96450dd0969SJan Engelhardt 		obj = &(element->package.elements[3]);
9651da177e4SLinus Torvalds 		if (obj->type != ACPI_TYPE_INTEGER)
9661da177e4SLinus Torvalds 			continue;
9671da177e4SLinus Torvalds 
9681da177e4SLinus Torvalds 		cx.power = obj->integer.value;
9691da177e4SLinus Torvalds 
970cf824788SJanosch Machowinski 		current_count++;
971cf824788SJanosch Machowinski 		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
972cf824788SJanosch Machowinski 
973cf824788SJanosch Machowinski 		/*
974cf824788SJanosch Machowinski 		 * We support a total of ACPI_PROCESSOR_MAX_POWER - 1 states
975cf824788SJanosch Machowinski 		 * (indices 1 through ACPI_PROCESSOR_MAX_POWER - 1).
976cf824788SJanosch Machowinski 		 */
977cf824788SJanosch Machowinski 		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
978cf824788SJanosch Machowinski 			printk(KERN_WARNING
979cf824788SJanosch Machowinski 			       "Limiting number of power states to max (%d)\n",
980cf824788SJanosch Machowinski 			       ACPI_PROCESSOR_MAX_POWER);
981cf824788SJanosch Machowinski 			printk(KERN_WARNING
982cf824788SJanosch Machowinski 			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
983cf824788SJanosch Machowinski 			break;
984cf824788SJanosch Machowinski 		}
9851da177e4SLinus Torvalds 	}
9861da177e4SLinus Torvalds 
9874be44fcdSLen Brown 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
988cf824788SJanosch Machowinski 			  current_count));
9891da177e4SLinus Torvalds 
9901da177e4SLinus Torvalds 	/* Validate number of power states discovered */
991cf824788SJanosch Machowinski 	if (current_count < 2)
9926d93c648SVenkatesh Pallipadi 		status = -EFAULT;
9931da177e4SLinus Torvalds 
9941da177e4SLinus Torvalds       end:
99502438d87SLen Brown 	kfree(buffer.pointer);
9961da177e4SLinus Torvalds 
997d550d98dSPatrick Mochel 	return status;
9981da177e4SLinus Torvalds }
9991da177e4SLinus Torvalds 
10001da177e4SLinus Torvalds static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
10011da177e4SLinus Torvalds {
10021da177e4SLinus Torvalds 
10031da177e4SLinus Torvalds 	if (!cx->address)
1004d550d98dSPatrick Mochel 		return;
10051da177e4SLinus Torvalds 
10061da177e4SLinus Torvalds 	/*
10071da177e4SLinus Torvalds 	 * C2 latency must be less than or equal to 100
10081da177e4SLinus Torvalds 	 * microseconds.
10091da177e4SLinus Torvalds 	 */
10101da177e4SLinus Torvalds 	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
10111da177e4SLinus Torvalds 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
10124be44fcdSLen Brown 				  "latency too large [%d]\n", cx->latency));
1013d550d98dSPatrick Mochel 		return;
10141da177e4SLinus Torvalds 	}
10151da177e4SLinus Torvalds 
10161da177e4SLinus Torvalds 	/*
10171da177e4SLinus Torvalds 	 * Otherwise we've met all of our C2 requirements.
10181da177e4SLinus Torvalds 	 * Normalize the C2 latency to expedite policy
10191da177e4SLinus Torvalds 	 */
10201da177e4SLinus Torvalds 	cx->valid = 1;
10214f86d3a8SLen Brown 
10224f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
10231da177e4SLinus Torvalds 	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
10244f86d3a8SLen Brown #else
10254f86d3a8SLen Brown 	cx->latency_ticks = cx->latency;
10264f86d3a8SLen Brown #endif
10271da177e4SLinus Torvalds 
1028d550d98dSPatrick Mochel 	return;
10291da177e4SLinus Torvalds }
10301da177e4SLinus Torvalds 
10314be44fcdSLen Brown static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
10321da177e4SLinus Torvalds 					   struct acpi_processor_cx *cx)
10331da177e4SLinus Torvalds {
103402df8b93SVenkatesh Pallipadi 	static int bm_check_flag;
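	/* Cached result of the first bm_check probe, reused for every other CPU. */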
103502df8b93SVenkatesh Pallipadi 
10361da177e4SLinus Torvalds 
10371da177e4SLinus Torvalds 	if (!cx->address)
1038d550d98dSPatrick Mochel 		return;
10391da177e4SLinus Torvalds 
10401da177e4SLinus Torvalds 	/*
10411da177e4SLinus Torvalds 	 * C3 latency must be less than or equal to 1000
10421da177e4SLinus Torvalds 	 * microseconds.
10431da177e4SLinus Torvalds 	 */
10441da177e4SLinus Torvalds 	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
10451da177e4SLinus Torvalds 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
10464be44fcdSLen Brown 				  "latency too large [%d]\n", cx->latency));
1047d550d98dSPatrick Mochel 		return;
10481da177e4SLinus Torvalds 	}
10491da177e4SLinus Torvalds 
10501da177e4SLinus Torvalds 	/*
10511da177e4SLinus Torvalds 	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
10521da177e4SLinus Torvalds 	 * DMA transfers are used by any ISA device to avoid livelock.
10531da177e4SLinus Torvalds 	 * Note that we could disable Type-F DMA (as recommended by
10541da177e4SLinus Torvalds 	 * the erratum), but this is known to disrupt certain ISA
10551da177e4SLinus Torvalds 	 * devices, so we take the conservative approach.
10561da177e4SLinus Torvalds 	 */
10571da177e4SLinus Torvalds 	else if (errata.piix4.fdma) {
10581da177e4SLinus Torvalds 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
10591da177e4SLinus Torvalds 				  "C3 not supported on PIIX4 with Type-F DMA\n"));
1060d550d98dSPatrick Mochel 		return;
10611da177e4SLinus Torvalds 	}
10621da177e4SLinus Torvalds 
106302df8b93SVenkatesh Pallipadi 	/* All the logic here assumes flags.bm_check is same across all CPUs */
106402df8b93SVenkatesh Pallipadi 	if (!bm_check_flag) {
106502df8b93SVenkatesh Pallipadi 		/* Determine whether bm_check is needed based on CPU  */
106602df8b93SVenkatesh Pallipadi 		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
106702df8b93SVenkatesh Pallipadi 		bm_check_flag = pr->flags.bm_check;
106802df8b93SVenkatesh Pallipadi 	} else {
106902df8b93SVenkatesh Pallipadi 		pr->flags.bm_check = bm_check_flag;
107002df8b93SVenkatesh Pallipadi 	}
107102df8b93SVenkatesh Pallipadi 
107202df8b93SVenkatesh Pallipadi 	if (pr->flags.bm_check) {
107302df8b93SVenkatesh Pallipadi 		if (!pr->flags.bm_control) {
1074ed3110efSVenki Pallipadi 			if (pr->flags.has_cst != 1) {
1075ed3110efSVenki Pallipadi 				/* bus mastering control is necessary */
107602df8b93SVenkatesh Pallipadi 				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1077ed3110efSVenki Pallipadi 					"C3 support requires BM control\n"));
1078ed3110efSVenki Pallipadi 				return;
1079ed3110efSVenki Pallipadi 			} else {
1080ed3110efSVenki Pallipadi 				/* Here we enter C3 without bus mastering */
1081ed3110efSVenki Pallipadi 				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1082ed3110efSVenki Pallipadi 					"C3 support without BM control\n"));
1083ed3110efSVenki Pallipadi 			}
108402df8b93SVenkatesh Pallipadi 		}
108502df8b93SVenkatesh Pallipadi 	} else {
108602df8b93SVenkatesh Pallipadi 		/*
108702df8b93SVenkatesh Pallipadi 		 * The WBINVD flag must be set in the FADT for C3 to be
108802df8b93SVenkatesh Pallipadi 		 * supported when bm_check is not required.
108902df8b93SVenkatesh Pallipadi 		 */
1090cee324b1SAlexey Starikovskiy 		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
109102df8b93SVenkatesh Pallipadi 			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
109202df8b93SVenkatesh Pallipadi 					  "Cache invalidation should work properly"
109302df8b93SVenkatesh Pallipadi 					  " for C3 to be enabled on SMP systems\n"));
1094d550d98dSPatrick Mochel 			return;
109502df8b93SVenkatesh Pallipadi 		}
1096d8c71b6dSBob Moore 		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
109702df8b93SVenkatesh Pallipadi 	}
109802df8b93SVenkatesh Pallipadi 
10991da177e4SLinus Torvalds 	/*
11001da177e4SLinus Torvalds 	 * Otherwise we've met all of our C3 requirements.
11011da177e4SLinus Torvalds 	 * Normalize the C3 latency to expedite policy.  Enable
11021da177e4SLinus Torvalds 	 * checking of bus mastering status (bm_check) so we can
11031da177e4SLinus Torvalds 	 * use this in our C3 policy
11041da177e4SLinus Torvalds 	 */
11051da177e4SLinus Torvalds 	cx->valid = 1;
11064f86d3a8SLen Brown 
11074f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
11081da177e4SLinus Torvalds 	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
11094f86d3a8SLen Brown #else
11104f86d3a8SLen Brown 	cx->latency_ticks = cx->latency;
11114f86d3a8SLen Brown #endif
11121da177e4SLinus Torvalds 
1113d550d98dSPatrick Mochel 	return;
11141da177e4SLinus Torvalds }
11151da177e4SLinus Torvalds 
11161da177e4SLinus Torvalds static int acpi_processor_power_verify(struct acpi_processor *pr)
11171da177e4SLinus Torvalds {
11181da177e4SLinus Torvalds 	unsigned int i;
11191da177e4SLinus Torvalds 	unsigned int working = 0;
11206eb0a0fdSVenkatesh Pallipadi 
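	/*
	 * Assume no C-state needs the broadcast timer; acpi_timer_check_state()
	 * lowers this if a state may stop the local APIC timer.
	 */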
1121169a0abbSThomas Gleixner 	pr->power.timer_broadcast_on_state = INT_MAX;
11226eb0a0fdSVenkatesh Pallipadi 
11231da177e4SLinus Torvalds 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
11241da177e4SLinus Torvalds 		struct acpi_processor_cx *cx = &pr->power.states[i];
11251da177e4SLinus Torvalds 
11261da177e4SLinus Torvalds 		switch (cx->type) {
11271da177e4SLinus Torvalds 		case ACPI_STATE_C1:
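			/* C1 (halt) is always usable; no latency or chipset checks needed. */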
11281da177e4SLinus Torvalds 			cx->valid = 1;
11291da177e4SLinus Torvalds 			break;
11301da177e4SLinus Torvalds 
11311da177e4SLinus Torvalds 		case ACPI_STATE_C2:
11321da177e4SLinus Torvalds 			acpi_processor_power_verify_c2(cx);
1133296d93cdSLinus Torvalds 			if (cx->valid)
1134169a0abbSThomas Gleixner 				acpi_timer_check_state(i, pr, cx);
11351da177e4SLinus Torvalds 			break;
11361da177e4SLinus Torvalds 
11371da177e4SLinus Torvalds 		case ACPI_STATE_C3:
11381da177e4SLinus Torvalds 			acpi_processor_power_verify_c3(pr, cx);
1139296d93cdSLinus Torvalds 			if (cx->valid)
1140169a0abbSThomas Gleixner 				acpi_timer_check_state(i, pr, cx);
11411da177e4SLinus Torvalds 			break;
11421da177e4SLinus Torvalds 		}
11431da177e4SLinus Torvalds 
11441da177e4SLinus Torvalds 		if (cx->valid)
11451da177e4SLinus Torvalds 			working++;
11461da177e4SLinus Torvalds 	}
11471da177e4SLinus Torvalds 
1148169a0abbSThomas Gleixner 	acpi_propagate_timer_broadcast(pr);
1149bd663347SAndi Kleen 
11501da177e4SLinus Torvalds 	return (working);
11511da177e4SLinus Torvalds }
11521da177e4SLinus Torvalds 
11534be44fcdSLen Brown static int acpi_processor_get_power_info(struct acpi_processor *pr)
11541da177e4SLinus Torvalds {
11551da177e4SLinus Torvalds 	unsigned int i;
11561da177e4SLinus Torvalds 	int result;
11571da177e4SLinus Torvalds 
11581da177e4SLinus Torvalds 
11591da177e4SLinus Torvalds 	/* NOTE: the idle thread may not be running while calling
11601da177e4SLinus Torvalds 	 * this function */
11611da177e4SLinus Torvalds 
1162991528d7SVenkatesh Pallipadi 	/* Zero initialize all the C-states info. */
1163991528d7SVenkatesh Pallipadi 	memset(pr->power.states, 0, sizeof(pr->power.states));
1164991528d7SVenkatesh Pallipadi 
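	/* Prefer _CST; fall back to the FADT (P_LVL2/P_LVL3) values if _CST is absent. */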
11651da177e4SLinus Torvalds 	result = acpi_processor_get_power_info_cst(pr);
11666d93c648SVenkatesh Pallipadi 	if (result == -ENODEV)
1167c5a114f1SDarrick J. Wong 		result = acpi_processor_get_power_info_fadt(pr);
11686d93c648SVenkatesh Pallipadi 
1169991528d7SVenkatesh Pallipadi 	if (result)
1170991528d7SVenkatesh Pallipadi 		return result;
1171991528d7SVenkatesh Pallipadi 
1172991528d7SVenkatesh Pallipadi 	acpi_processor_get_power_info_default(pr);
1173991528d7SVenkatesh Pallipadi 
1174cf824788SJanosch Machowinski 	pr->power.count = acpi_processor_power_verify(pr);
11751da177e4SLinus Torvalds 
11764f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
11771da177e4SLinus Torvalds 	/*
11781da177e4SLinus Torvalds 	 * Set Default Policy
11791da177e4SLinus Torvalds 	 * ------------------
11801da177e4SLinus Torvalds 	 * Now that we know which states are supported, set the default
11811da177e4SLinus Torvalds 	 * policy.  Note that this policy can be changed dynamically
11821da177e4SLinus Torvalds 	 * (e.g. encourage deeper sleeps to conserve battery life when
11831da177e4SLinus Torvalds 	 * not on AC).
11841da177e4SLinus Torvalds 	 */
11851da177e4SLinus Torvalds 	result = acpi_processor_set_power_policy(pr);
11861da177e4SLinus Torvalds 	if (result)
1187d550d98dSPatrick Mochel 		return result;
11884f86d3a8SLen Brown #endif
11891da177e4SLinus Torvalds 
11901da177e4SLinus Torvalds 	/*
11911da177e4SLinus Torvalds 	 * if one state of type C2 or C3 is available, mark this
11921da177e4SLinus Torvalds 	 * CPU as being "idle manageable"
11931da177e4SLinus Torvalds 	 */
11941da177e4SLinus Torvalds 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
1195acf05f4bSVenkatesh Pallipadi 		if (pr->power.states[i].valid) {
11961da177e4SLinus Torvalds 			pr->power.count = i;
11972203d6edSLinus Torvalds 			if (pr->power.states[i].type >= ACPI_STATE_C2)
11981da177e4SLinus Torvalds 				pr->flags.power = 1;
11991da177e4SLinus Torvalds 		}
1200acf05f4bSVenkatesh Pallipadi 	}
12011da177e4SLinus Torvalds 
1202d550d98dSPatrick Mochel 	return 0;
12031da177e4SLinus Torvalds }
12041da177e4SLinus Torvalds 
12051da177e4SLinus Torvalds static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
12061da177e4SLinus Torvalds {
120750dd0969SJan Engelhardt 	struct acpi_processor *pr = seq->private;
12081da177e4SLinus Torvalds 	unsigned int i;
12091da177e4SLinus Torvalds 
12101da177e4SLinus Torvalds 
12111da177e4SLinus Torvalds 	if (!pr)
12121da177e4SLinus Torvalds 		goto end;
12131da177e4SLinus Torvalds 
12141da177e4SLinus Torvalds 	seq_printf(seq, "active state:            C%zd\n"
12151da177e4SLinus Torvalds 		   "max_cstate:              C%d\n"
12165c87579eSArjan van de Ven 		   "bus master activity:     %08x\n"
12175c87579eSArjan van de Ven 		   "maximum allowed latency: %d usec\n",
12181da177e4SLinus Torvalds 		   pr->power.state ? pr->power.state - pr->power.states : 0,
12195c87579eSArjan van de Ven 		   max_cstate, (unsigned)pr->power.bm_activity,
1220f011e2e2SMark Gross 		   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));
12211da177e4SLinus Torvalds 
12221da177e4SLinus Torvalds 	seq_puts(seq, "states:\n");
12231da177e4SLinus Torvalds 
12241da177e4SLinus Torvalds 	for (i = 1; i <= pr->power.count; i++) {
12251da177e4SLinus Torvalds 		seq_printf(seq, "   %cC%d:                  ",
12264be44fcdSLen Brown 			   (&pr->power.states[i] ==
12274be44fcdSLen Brown 			    pr->power.state ? '*' : ' '), i);
12281da177e4SLinus Torvalds 
12291da177e4SLinus Torvalds 		if (!pr->power.states[i].valid) {
12301da177e4SLinus Torvalds 			seq_puts(seq, "<not supported>\n");
12311da177e4SLinus Torvalds 			continue;
12321da177e4SLinus Torvalds 		}
12331da177e4SLinus Torvalds 
12341da177e4SLinus Torvalds 		switch (pr->power.states[i].type) {
12351da177e4SLinus Torvalds 		case ACPI_STATE_C1:
12361da177e4SLinus Torvalds 			seq_printf(seq, "type[C1] ");
12371da177e4SLinus Torvalds 			break;
12381da177e4SLinus Torvalds 		case ACPI_STATE_C2:
12391da177e4SLinus Torvalds 			seq_printf(seq, "type[C2] ");
12401da177e4SLinus Torvalds 			break;
12411da177e4SLinus Torvalds 		case ACPI_STATE_C3:
12421da177e4SLinus Torvalds 			seq_printf(seq, "type[C3] ");
12431da177e4SLinus Torvalds 			break;
12441da177e4SLinus Torvalds 		default:
12451da177e4SLinus Torvalds 			seq_printf(seq, "type[--] ");
12461da177e4SLinus Torvalds 			break;
12471da177e4SLinus Torvalds 		}
12481da177e4SLinus Torvalds 
12491da177e4SLinus Torvalds 		if (pr->power.states[i].promotion.state)
12501da177e4SLinus Torvalds 			seq_printf(seq, "promotion[C%zd] ",
12511da177e4SLinus Torvalds 				   (pr->power.states[i].promotion.state -
12521da177e4SLinus Torvalds 				    pr->power.states));
12531da177e4SLinus Torvalds 		else
12541da177e4SLinus Torvalds 			seq_puts(seq, "promotion[--] ");
12551da177e4SLinus Torvalds 
12561da177e4SLinus Torvalds 		if (pr->power.states[i].demotion.state)
12571da177e4SLinus Torvalds 			seq_printf(seq, "demotion[C%zd] ",
12581da177e4SLinus Torvalds 				   (pr->power.states[i].demotion.state -
12591da177e4SLinus Torvalds 				    pr->power.states));
12601da177e4SLinus Torvalds 		else
12611da177e4SLinus Torvalds 			seq_puts(seq, "demotion[--] ");
12621da177e4SLinus Torvalds 
1263a3c6598fSDominik Brodowski 		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
12641da177e4SLinus Torvalds 			   pr->power.states[i].latency,
1265a3c6598fSDominik Brodowski 			   pr->power.states[i].usage,
1266b0b7eaafSAlexey Starikovskiy 			   (unsigned long long)pr->power.states[i].time);
12671da177e4SLinus Torvalds 	}
12681da177e4SLinus Torvalds 
12691da177e4SLinus Torvalds       end:
1270d550d98dSPatrick Mochel 	return 0;
12711da177e4SLinus Torvalds }
12721da177e4SLinus Torvalds 
12731da177e4SLinus Torvalds static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
12741da177e4SLinus Torvalds {
12751da177e4SLinus Torvalds 	return single_open(file, acpi_processor_power_seq_show,
12761da177e4SLinus Torvalds 			   PDE(inode)->data);
12771da177e4SLinus Torvalds }
12781da177e4SLinus Torvalds 
1279d7508032SArjan van de Ven static const struct file_operations acpi_processor_power_fops = {
12801da177e4SLinus Torvalds 	.open = acpi_processor_power_open_fs,
12811da177e4SLinus Torvalds 	.read = seq_read,
12821da177e4SLinus Torvalds 	.llseek = seq_lseek,
12831da177e4SLinus Torvalds 	.release = single_release,
12841da177e4SLinus Torvalds };
12851da177e4SLinus Torvalds 
12864f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
12874f86d3a8SLen Brown 
12884f86d3a8SLen Brown int acpi_processor_cst_has_changed(struct acpi_processor *pr)
12894f86d3a8SLen Brown {
12904f86d3a8SLen Brown 	int result = 0;
12914f86d3a8SLen Brown 
12924f86d3a8SLen Brown 
12934f86d3a8SLen Brown 	if (!pr)
12944f86d3a8SLen Brown 		return -EINVAL;
12954f86d3a8SLen Brown 
12964f86d3a8SLen Brown 	if (nocst) {
12974f86d3a8SLen Brown 		return -ENODEV;
12984f86d3a8SLen Brown 	}
12994f86d3a8SLen Brown 
13004f86d3a8SLen Brown 	if (!pr->flags.power_setup_done)
13014f86d3a8SLen Brown 		return -ENODEV;
13024f86d3a8SLen Brown 
13034f86d3a8SLen Brown 	/* Fall back to the default idle loop */
13044f86d3a8SLen Brown 	pm_idle = pm_idle_save;
13054f86d3a8SLen Brown 	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */
13064f86d3a8SLen Brown 
13074f86d3a8SLen Brown 	pr->flags.power = 0;
13084f86d3a8SLen Brown 	result = acpi_processor_get_power_info(pr);
13094f86d3a8SLen Brown 	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
13104f86d3a8SLen Brown 		pm_idle = acpi_processor_idle;
13114f86d3a8SLen Brown 
13124f86d3a8SLen Brown 	return result;
13134f86d3a8SLen Brown }
13144f86d3a8SLen Brown 
13151fec74a9SAndrew Morton #ifdef CONFIG_SMP
13165c87579eSArjan van de Ven static void smp_callback(void *v)
13175c87579eSArjan van de Ven {
13185c87579eSArjan van de Ven 	/* we already woke the CPU up, nothing more to do */
13195c87579eSArjan van de Ven }
13205c87579eSArjan van de Ven 
13215c87579eSArjan van de Ven /*
13225c87579eSArjan van de Ven  * This function gets called when a part of the kernel has a new latency
13235c87579eSArjan van de Ven  * requirement.  This means we need to get all processors out of their C-state,
13245c87579eSArjan van de Ven  * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
13255c87579eSArjan van de Ven  * wakes them all right up.
13265c87579eSArjan van de Ven  */
13275c87579eSArjan van de Ven static int acpi_processor_latency_notify(struct notifier_block *b,
13285c87579eSArjan van de Ven 		unsigned long l, void *v)
13295c87579eSArjan van de Ven {
13305c87579eSArjan van de Ven 	smp_call_function(smp_callback, NULL, 0, 1);
13315c87579eSArjan van de Ven 	return NOTIFY_OK;
13325c87579eSArjan van de Ven }
13335c87579eSArjan van de Ven 
13345c87579eSArjan van de Ven static struct notifier_block acpi_processor_latency_notifier = {
13355c87579eSArjan van de Ven 	.notifier_call = acpi_processor_latency_notify,
13365c87579eSArjan van de Ven };
13374f86d3a8SLen Brown 
13381fec74a9SAndrew Morton #endif
13395c87579eSArjan van de Ven 
13404f86d3a8SLen Brown #else /* CONFIG_CPU_IDLE */
13414f86d3a8SLen Brown 
13424f86d3a8SLen Brown /**
13434f86d3a8SLen Brown  * acpi_idle_bm_check - checks if bus master activity was detected
13444f86d3a8SLen Brown  */
13454f86d3a8SLen Brown static int acpi_idle_bm_check(void)
13464f86d3a8SLen Brown {
13474f86d3a8SLen Brown 	u32 bm_status = 0;
13484f86d3a8SLen Brown 
13494f86d3a8SLen Brown 	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
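	/* BM_STS is sticky (write-1-to-clear); clear it so the next check starts fresh. */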
13504f86d3a8SLen Brown 	if (bm_status)
13514f86d3a8SLen Brown 		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
13524f86d3a8SLen Brown 	/*
13534f86d3a8SLen Brown 	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
13544f86d3a8SLen Brown 	 * the true state of bus mastering activity, forcing us to
13554f86d3a8SLen Brown 	 * manually check the BMIDEA bit of each IDE channel.
13564f86d3a8SLen Brown 	 */
13574f86d3a8SLen Brown 	else if (errata.piix4.bmisx) {
13584f86d3a8SLen Brown 		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
13594f86d3a8SLen Brown 		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
13604f86d3a8SLen Brown 			bm_status = 1;
13614f86d3a8SLen Brown 	}
13624f86d3a8SLen Brown 	return bm_status;
13634f86d3a8SLen Brown }
13644f86d3a8SLen Brown 
13654f86d3a8SLen Brown /**
13664f86d3a8SLen Brown  * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
13674f86d3a8SLen Brown  * @pr: the processor
13684f86d3a8SLen Brown  * @target: the new target state
13694f86d3a8SLen Brown  */
13704f86d3a8SLen Brown static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
13714f86d3a8SLen Brown 					   struct acpi_processor_cx *target)
13724f86d3a8SLen Brown {
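	/*
	 * BM_RLD makes the chipset return to C0 on bus-master requests,
	 * so keep it set only while a C3-type state is the target.
	 */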
13734f86d3a8SLen Brown 	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
13744f86d3a8SLen Brown 		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
13754f86d3a8SLen Brown 		pr->flags.bm_rld_set = 0;
13764f86d3a8SLen Brown 	}
13774f86d3a8SLen Brown 
13784f86d3a8SLen Brown 	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
13794f86d3a8SLen Brown 		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
13804f86d3a8SLen Brown 		pr->flags.bm_rld_set = 1;
13814f86d3a8SLen Brown 	}
13824f86d3a8SLen Brown }
13834f86d3a8SLen Brown 
13844f86d3a8SLen Brown /**
13854f86d3a8SLen Brown  * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
13864f86d3a8SLen Brown  * @cx: cstate data
1387bc71bec9Svenkatesh.pallipadi@intel.com  *
1388bc71bec9Svenkatesh.pallipadi@intel.com  * Caller disables interrupt before call and enables interrupt after return.
13894f86d3a8SLen Brown  */
13904f86d3a8SLen Brown static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
13914f86d3a8SLen Brown {
1392bc71bec9Svenkatesh.pallipadi@intel.com 	if (cx->entry_method == ACPI_CSTATE_FFH) {
13934f86d3a8SLen Brown 		/* Call into architectural FFH based C-state */
13944f86d3a8SLen Brown 		acpi_processor_ffh_cstate_enter(cx);
1395bc71bec9Svenkatesh.pallipadi@intel.com 	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
1396bc71bec9Svenkatesh.pallipadi@intel.com 		acpi_safe_halt();
13974f86d3a8SLen Brown 	} else {
13984f86d3a8SLen Brown 		int unused;
13994f86d3a8SLen Brown 		/* IO port based C-state */
14004f86d3a8SLen Brown 		inb(cx->address);
14014f86d3a8SLen Brown 		/* Dummy wait op - must do something useless after P_LVL2 read
14024f86d3a8SLen Brown 		   because chipsets cannot guarantee that STPCLK# signal
14034f86d3a8SLen Brown 		   gets asserted in time to freeze execution properly. */
14044f86d3a8SLen Brown 		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
14054f86d3a8SLen Brown 	}
14064f86d3a8SLen Brown }
14074f86d3a8SLen Brown 
14084f86d3a8SLen Brown /**
14094f86d3a8SLen Brown  * acpi_idle_enter_c1 - enters an ACPI C1 state-type
14104f86d3a8SLen Brown  * @dev: the target CPU
14114f86d3a8SLen Brown  * @state: the state data
14124f86d3a8SLen Brown  *
14134f86d3a8SLen Brown  * This is equivalent to the HALT instruction.
14144f86d3a8SLen Brown  */
14154f86d3a8SLen Brown static int acpi_idle_enter_c1(struct cpuidle_device *dev,
14164f86d3a8SLen Brown 			      struct cpuidle_state *state)
14174f86d3a8SLen Brown {
14189b12e18cSvenkatesh.pallipadi@intel.com 	u32 t1, t2;
14194f86d3a8SLen Brown 	struct acpi_processor *pr;
14204f86d3a8SLen Brown 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
14219b12e18cSvenkatesh.pallipadi@intel.com 
14224f86d3a8SLen Brown 	pr = processors[smp_processor_id()];
14234f86d3a8SLen Brown 
14244f86d3a8SLen Brown 	if (unlikely(!pr))
14254f86d3a8SLen Brown 		return 0;
14264f86d3a8SLen Brown 
14272e906655Svenkatesh.pallipadi@intel.com 	local_irq_disable();
14284f86d3a8SLen Brown 	if (pr->flags.bm_check)
14294f86d3a8SLen Brown 		acpi_idle_update_bm_rld(pr, cx);
14304f86d3a8SLen Brown 
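	/* Bracket the C1 entry with PM-timer reads so the residency can be reported in microseconds. */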
14319b12e18cSvenkatesh.pallipadi@intel.com 	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1432bc71bec9Svenkatesh.pallipadi@intel.com 	acpi_idle_do_entry(cx);
14339b12e18cSvenkatesh.pallipadi@intel.com 	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
14344f86d3a8SLen Brown 
14352e906655Svenkatesh.pallipadi@intel.com 	local_irq_enable();
14364f86d3a8SLen Brown 	cx->usage++;
14374f86d3a8SLen Brown 
14389b12e18cSvenkatesh.pallipadi@intel.com 	return ticks_elapsed_in_us(t1, t2);
14394f86d3a8SLen Brown }
14404f86d3a8SLen Brown 
14414f86d3a8SLen Brown /**
14424f86d3a8SLen Brown  * acpi_idle_enter_simple - enters an ACPI state without BM handling
14434f86d3a8SLen Brown  * @dev: the target CPU
14444f86d3a8SLen Brown  * @state: the state data
14454f86d3a8SLen Brown  */
14464f86d3a8SLen Brown static int acpi_idle_enter_simple(struct cpuidle_device *dev,
14474f86d3a8SLen Brown 				  struct cpuidle_state *state)
14484f86d3a8SLen Brown {
14494f86d3a8SLen Brown 	struct acpi_processor *pr;
14504f86d3a8SLen Brown 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
14514f86d3a8SLen Brown 	u32 t1, t2;
145250629118SVenkatesh Pallipadi 	int sleep_ticks = 0;
145350629118SVenkatesh Pallipadi 
14544f86d3a8SLen Brown 	pr = processors[smp_processor_id()];
14554f86d3a8SLen Brown 
14564f86d3a8SLen Brown 	if (unlikely(!pr))
14574f86d3a8SLen Brown 		return 0;
14584f86d3a8SLen Brown 
1459e196441bSLen Brown 	if (acpi_idle_suspend)
1460e196441bSLen Brown 		return(acpi_idle_enter_c1(dev, state));
1461e196441bSLen Brown 
14624f86d3a8SLen Brown 	local_irq_disable();
14634f86d3a8SLen Brown 	current_thread_info()->status &= ~TS_POLLING;
14644f86d3a8SLen Brown 	/*
14654f86d3a8SLen Brown 	 * TS_POLLING-cleared state must be visible before we test
14664f86d3a8SLen Brown 	 * NEED_RESCHED:
14674f86d3a8SLen Brown 	 */
14684f86d3a8SLen Brown 	smp_mb();
14694f86d3a8SLen Brown 
14704f86d3a8SLen Brown 	if (unlikely(need_resched())) {
14714f86d3a8SLen Brown 		current_thread_info()->status |= TS_POLLING;
14724f86d3a8SLen Brown 		local_irq_enable();
14734f86d3a8SLen Brown 		return 0;
14744f86d3a8SLen Brown 	}
14754f86d3a8SLen Brown 
1476bde6f5f5SVenki Pallipadi 	acpi_unlazy_tlb(smp_processor_id());
1477e17bcb43SThomas Gleixner 	/*
1478e17bcb43SThomas Gleixner 	 * Must be done before busmaster disable as we might need to
1479e17bcb43SThomas Gleixner 	 * access HPET !
1480e17bcb43SThomas Gleixner 	 */
1481e17bcb43SThomas Gleixner 	acpi_state_timer_broadcast(pr, cx, 1);
1482e17bcb43SThomas Gleixner 
1483e17bcb43SThomas Gleixner 	if (pr->flags.bm_check)
1484e17bcb43SThomas Gleixner 		acpi_idle_update_bm_rld(pr, cx);
1485e17bcb43SThomas Gleixner 
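	/* Caches are not guaranteed to be snooped in C3, so flush them before entry. */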
14864f86d3a8SLen Brown 	if (cx->type == ACPI_STATE_C3)
14874f86d3a8SLen Brown 		ACPI_FLUSH_CPU_CACHE();
14884f86d3a8SLen Brown 
14894f86d3a8SLen Brown 	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
149050629118SVenkatesh Pallipadi 	/* Tell the scheduler that we are going deep-idle: */
149150629118SVenkatesh Pallipadi 	sched_clock_idle_sleep_event();
14924f86d3a8SLen Brown 	acpi_idle_do_entry(cx);
14934f86d3a8SLen Brown 	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
14944f86d3a8SLen Brown 
14954f86d3a8SLen Brown #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
14964f86d3a8SLen Brown 	/* TSC could halt in idle, so notify users */
1497ddb25f9aSAndi Kleen 	if (tsc_halts_in_c(cx->type))
14984f86d3a8SLen Brown 		mark_tsc_unstable("TSC halts in idle");
14994f86d3a8SLen Brown #endif
150050629118SVenkatesh Pallipadi 	sleep_ticks = ticks_elapsed(t1, t2);
150150629118SVenkatesh Pallipadi 
150250629118SVenkatesh Pallipadi 	/* Tell the scheduler how much we idled: */
150350629118SVenkatesh Pallipadi 	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
15044f86d3a8SLen Brown 
15054f86d3a8SLen Brown 	local_irq_enable();
15064f86d3a8SLen Brown 	current_thread_info()->status |= TS_POLLING;
15074f86d3a8SLen Brown 
15084f86d3a8SLen Brown 	cx->usage++;
15094f86d3a8SLen Brown 
15104f86d3a8SLen Brown 	acpi_state_timer_broadcast(pr, cx, 0);
151150629118SVenkatesh Pallipadi 	cx->time += sleep_ticks;
15124f86d3a8SLen Brown 	return ticks_elapsed_in_us(t1, t2);
15134f86d3a8SLen Brown }
15144f86d3a8SLen Brown 
15154f86d3a8SLen Brown static int c3_cpu_count;
15164f86d3a8SLen Brown static DEFINE_SPINLOCK(c3_lock);
15174f86d3a8SLen Brown 
15184f86d3a8SLen Brown /**
15194f86d3a8SLen Brown  * acpi_idle_enter_bm - enters C3 with proper BM handling
15204f86d3a8SLen Brown  * @dev: the target CPU
15214f86d3a8SLen Brown  * @state: the state data
15224f86d3a8SLen Brown  *
15234f86d3a8SLen Brown  * If BM is detected, the deepest non-C3 idle state is entered instead.
15244f86d3a8SLen Brown  */
15254f86d3a8SLen Brown static int acpi_idle_enter_bm(struct cpuidle_device *dev,
15264f86d3a8SLen Brown 			      struct cpuidle_state *state)
15274f86d3a8SLen Brown {
15284f86d3a8SLen Brown 	struct acpi_processor *pr;
15294f86d3a8SLen Brown 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
15304f86d3a8SLen Brown 	u32 t1, t2;
153150629118SVenkatesh Pallipadi 	int sleep_ticks = 0;
153250629118SVenkatesh Pallipadi 
15334f86d3a8SLen Brown 	pr = processors[smp_processor_id()];
15344f86d3a8SLen Brown 
15354f86d3a8SLen Brown 	if (unlikely(!pr))
15364f86d3a8SLen Brown 		return 0;
15374f86d3a8SLen Brown 
1538e196441bSLen Brown 	if (acpi_idle_suspend)
1539e196441bSLen Brown 		return(acpi_idle_enter_c1(dev, state));
1540e196441bSLen Brown 
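	/*
	 * Bus-master activity detected: demote to the deepest non-C3 state
	 * if one was registered as safe_state, otherwise just halt.
	 */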
1541ddc081a1SVenkatesh Pallipadi 	if (acpi_idle_bm_check()) {
1542ddc081a1SVenkatesh Pallipadi 		if (dev->safe_state) {
1543ddc081a1SVenkatesh Pallipadi 			return dev->safe_state->enter(dev, dev->safe_state);
1544ddc081a1SVenkatesh Pallipadi 		} else {
15452e906655Svenkatesh.pallipadi@intel.com 			local_irq_disable();
1546ddc081a1SVenkatesh Pallipadi 			acpi_safe_halt();
15472e906655Svenkatesh.pallipadi@intel.com 			local_irq_enable();
1548ddc081a1SVenkatesh Pallipadi 			return 0;
1549ddc081a1SVenkatesh Pallipadi 		}
1550ddc081a1SVenkatesh Pallipadi 	}
1551ddc081a1SVenkatesh Pallipadi 
15524f86d3a8SLen Brown 	local_irq_disable();
15534f86d3a8SLen Brown 	current_thread_info()->status &= ~TS_POLLING;
15544f86d3a8SLen Brown 	/*
15554f86d3a8SLen Brown 	 * TS_POLLING-cleared state must be visible before we test
15564f86d3a8SLen Brown 	 * NEED_RESCHED:
15574f86d3a8SLen Brown 	 */
15584f86d3a8SLen Brown 	smp_mb();
15594f86d3a8SLen Brown 
15604f86d3a8SLen Brown 	if (unlikely(need_resched())) {
15614f86d3a8SLen Brown 		current_thread_info()->status |= TS_POLLING;
15624f86d3a8SLen Brown 		local_irq_enable();
15634f86d3a8SLen Brown 		return 0;
15644f86d3a8SLen Brown 	}
15654f86d3a8SLen Brown 
156650629118SVenkatesh Pallipadi 	/* Tell the scheduler that we are going deep-idle: */
156750629118SVenkatesh Pallipadi 	sched_clock_idle_sleep_event();
15684f86d3a8SLen Brown 	/*
15694f86d3a8SLen Brown 	 * Must be done before busmaster disable as we might need to
15704f86d3a8SLen Brown 	 * access HPET !
15714f86d3a8SLen Brown 	 */
15724f86d3a8SLen Brown 	acpi_state_timer_broadcast(pr, cx, 1);
15734f86d3a8SLen Brown 
15744f86d3a8SLen Brown 	acpi_idle_update_bm_rld(pr, cx);
15754f86d3a8SLen Brown 
1576c9c860e5SVenkatesh Pallipadi 	/*
1577c9c860e5SVenkatesh Pallipadi 	 * disable bus master
1578c9c860e5SVenkatesh Pallipadi 	 * bm_check implies we need ARB_DIS
1579c9c860e5SVenkatesh Pallipadi 	 * !bm_check implies we need cache flush
1580c9c860e5SVenkatesh Pallipadi 	 * bm_control implies whether we can do ARB_DIS
1581c9c860e5SVenkatesh Pallipadi 	 *
1582c9c860e5SVenkatesh Pallipadi 	 * That leaves a case where bm_check is set and bm_control is
1583c9c860e5SVenkatesh Pallipadi 	 * not set. In that case we cannot do much, we enter C3
1584c9c860e5SVenkatesh Pallipadi 	 * without doing anything.
1585c9c860e5SVenkatesh Pallipadi 	 */
1586c9c860e5SVenkatesh Pallipadi 	if (pr->flags.bm_check && pr->flags.bm_control) {
15874f86d3a8SLen Brown 		spin_lock(&c3_lock);
15884f86d3a8SLen Brown 		c3_cpu_count++;
15894f86d3a8SLen Brown 		/* Disable bus master arbitration when all CPUs are in C3 */
15904f86d3a8SLen Brown 		if (c3_cpu_count == num_online_cpus())
15914f86d3a8SLen Brown 			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
15924f86d3a8SLen Brown 		spin_unlock(&c3_lock);
1593c9c860e5SVenkatesh Pallipadi 	} else if (!pr->flags.bm_check) {
1594c9c860e5SVenkatesh Pallipadi 		ACPI_FLUSH_CPU_CACHE();
1595c9c860e5SVenkatesh Pallipadi 	}
15964f86d3a8SLen Brown 
15974f86d3a8SLen Brown 	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
15984f86d3a8SLen Brown 	acpi_idle_do_entry(cx);
15994f86d3a8SLen Brown 	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
16004f86d3a8SLen Brown 
16014f86d3a8SLen Brown 	/* Re-enable bus master arbitration */
1602c9c860e5SVenkatesh Pallipadi 	if (pr->flags.bm_check && pr->flags.bm_control) {
1603c9c860e5SVenkatesh Pallipadi 		spin_lock(&c3_lock);
16044f86d3a8SLen Brown 		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
16054f86d3a8SLen Brown 		c3_cpu_count--;
16064f86d3a8SLen Brown 		spin_unlock(&c3_lock);
16074f86d3a8SLen Brown 	}
16084f86d3a8SLen Brown 
16094f86d3a8SLen Brown #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
16104f86d3a8SLen Brown 	/* TSC could halt in idle, so notify users */
1611ddb25f9aSAndi Kleen 	if (tsc_halts_in_c(ACPI_STATE_C3))
16124f86d3a8SLen Brown 		mark_tsc_unstable("TSC halts in idle");
16134f86d3a8SLen Brown #endif
161450629118SVenkatesh Pallipadi 	sleep_ticks = ticks_elapsed(t1, t2);
161550629118SVenkatesh Pallipadi 	/* Tell the scheduler how much we idled: */
161650629118SVenkatesh Pallipadi 	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
16174f86d3a8SLen Brown 
16184f86d3a8SLen Brown 	local_irq_enable();
16194f86d3a8SLen Brown 	current_thread_info()->status |= TS_POLLING;
16204f86d3a8SLen Brown 
16214f86d3a8SLen Brown 	cx->usage++;
16224f86d3a8SLen Brown 
16234f86d3a8SLen Brown 	acpi_state_timer_broadcast(pr, cx, 0);
162450629118SVenkatesh Pallipadi 	cx->time += sleep_ticks;
16254f86d3a8SLen Brown 	return ticks_elapsed_in_us(t1, t2);
16264f86d3a8SLen Brown }
16274f86d3a8SLen Brown 
16284f86d3a8SLen Brown struct cpuidle_driver acpi_idle_driver = {
16294f86d3a8SLen Brown 	.name =		"acpi_idle",
16304f86d3a8SLen Brown 	.owner =	THIS_MODULE,
16314f86d3a8SLen Brown };
16324f86d3a8SLen Brown 
16334f86d3a8SLen Brown /**
16344f86d3a8SLen Brown  * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
16354f86d3a8SLen Brown  * @pr: the ACPI processor
16364f86d3a8SLen Brown  */
16374f86d3a8SLen Brown static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
16384f86d3a8SLen Brown {
16399a0b8415Svenkatesh.pallipadi@intel.com 	int i, count = CPUIDLE_DRIVER_STATE_START;
16404f86d3a8SLen Brown 	struct acpi_processor_cx *cx;
16414f86d3a8SLen Brown 	struct cpuidle_state *state;
16424f86d3a8SLen Brown 	struct cpuidle_device *dev = &pr->power.dev;
16434f86d3a8SLen Brown 
16444f86d3a8SLen Brown 	if (!pr->flags.power_setup_done)
16454f86d3a8SLen Brown 		return -EINVAL;
16464f86d3a8SLen Brown 
16474f86d3a8SLen Brown 	if (pr->flags.power == 0) {
16484f86d3a8SLen Brown 		return -EINVAL;
16494f86d3a8SLen Brown 	}
16504f86d3a8SLen Brown 
16514fcb2fcdSVenkatesh Pallipadi 	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
16524fcb2fcdSVenkatesh Pallipadi 		dev->states[i].name[0] = '\0';
16534fcb2fcdSVenkatesh Pallipadi 		dev->states[i].desc[0] = '\0';
16544fcb2fcdSVenkatesh Pallipadi 	}
16554fcb2fcdSVenkatesh Pallipadi 
16564f86d3a8SLen Brown 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
16574f86d3a8SLen Brown 		cx = &pr->power.states[i];
16584f86d3a8SLen Brown 		state = &dev->states[count];
16594f86d3a8SLen Brown 
16604f86d3a8SLen Brown 		if (!cx->valid)
16614f86d3a8SLen Brown 			continue;
16624f86d3a8SLen Brown 
16634f86d3a8SLen Brown #ifdef CONFIG_HOTPLUG_CPU
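		/* On SMP without _CST, only use states deeper than C1 if the FADT declares C2 MP-safe. */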
16644f86d3a8SLen Brown 		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
16654f86d3a8SLen Brown 		    !pr->flags.has_cst &&
16664f86d3a8SLen Brown 		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
16674f86d3a8SLen Brown 			continue;
16684f86d3a8SLen Brown #endif
16694f86d3a8SLen Brown 		cpuidle_set_statedata(state, cx);
16704f86d3a8SLen Brown 
16714f86d3a8SLen Brown 		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
16724fcb2fcdSVenkatesh Pallipadi 		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
16734f86d3a8SLen Brown 		state->exit_latency = cx->latency;
16744963f620SLen Brown 		state->target_residency = cx->latency * latency_factor;
16754f86d3a8SLen Brown 		state->power_usage = cx->power;
16764f86d3a8SLen Brown 
16774f86d3a8SLen Brown 		state->flags = 0;
16784f86d3a8SLen Brown 		switch (cx->type) {
16794f86d3a8SLen Brown 			case ACPI_STATE_C1:
16804f86d3a8SLen Brown 			state->flags |= CPUIDLE_FLAG_SHALLOW;
16819b12e18cSvenkatesh.pallipadi@intel.com 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
16824f86d3a8SLen Brown 			state->enter = acpi_idle_enter_c1;
1683ddc081a1SVenkatesh Pallipadi 			dev->safe_state = state;
16844f86d3a8SLen Brown 			break;
16854f86d3a8SLen Brown 
16864f86d3a8SLen Brown 			case ACPI_STATE_C2:
16874f86d3a8SLen Brown 			state->flags |= CPUIDLE_FLAG_BALANCED;
16884f86d3a8SLen Brown 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
16894f86d3a8SLen Brown 			state->enter = acpi_idle_enter_simple;
1690ddc081a1SVenkatesh Pallipadi 			dev->safe_state = state;
16914f86d3a8SLen Brown 			break;
16924f86d3a8SLen Brown 
16934f86d3a8SLen Brown 			case ACPI_STATE_C3:
16944f86d3a8SLen Brown 			state->flags |= CPUIDLE_FLAG_DEEP;
16954f86d3a8SLen Brown 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
16964f86d3a8SLen Brown 			state->flags |= CPUIDLE_FLAG_CHECK_BM;
16974f86d3a8SLen Brown 			state->enter = pr->flags.bm_check ?
16984f86d3a8SLen Brown 					acpi_idle_enter_bm :
16994f86d3a8SLen Brown 					acpi_idle_enter_simple;
17004f86d3a8SLen Brown 			break;
17014f86d3a8SLen Brown 		}
17024f86d3a8SLen Brown 
17034f86d3a8SLen Brown 		count++;
17049a0b8415Svenkatesh.pallipadi@intel.com 		if (count == CPUIDLE_STATE_MAX)
17059a0b8415Svenkatesh.pallipadi@intel.com 			break;
17064f86d3a8SLen Brown 	}
17074f86d3a8SLen Brown 
17084f86d3a8SLen Brown 	dev->state_count = count;
17094f86d3a8SLen Brown 
17104f86d3a8SLen Brown 	if (!count)
17114f86d3a8SLen Brown 		return -EINVAL;
17124f86d3a8SLen Brown 
17134f86d3a8SLen Brown 	return 0;
17144f86d3a8SLen Brown }
17154f86d3a8SLen Brown 
17164f86d3a8SLen Brown int acpi_processor_cst_has_changed(struct acpi_processor *pr)
17174f86d3a8SLen Brown {
17184f86d3a8SLen Brown 	int ret;
17194f86d3a8SLen Brown 
17204f86d3a8SLen Brown 	if (!pr)
17214f86d3a8SLen Brown 		return -EINVAL;
17224f86d3a8SLen Brown 
17234f86d3a8SLen Brown 	if (nocst) {
17244f86d3a8SLen Brown 		return -ENODEV;
17254f86d3a8SLen Brown 	}
17264f86d3a8SLen Brown 
17274f86d3a8SLen Brown 	if (!pr->flags.power_setup_done)
17284f86d3a8SLen Brown 		return -ENODEV;
17294f86d3a8SLen Brown 
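	/* Pause cpuidle so no CPU runs states from the old table while it is rebuilt from the new _CST data. */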
17304f86d3a8SLen Brown 	cpuidle_pause_and_lock();
17314f86d3a8SLen Brown 	cpuidle_disable_device(&pr->power.dev);
17324f86d3a8SLen Brown 	acpi_processor_get_power_info(pr);
17334f86d3a8SLen Brown 	acpi_processor_setup_cpuidle(pr);
17344f86d3a8SLen Brown 	ret = cpuidle_enable_device(&pr->power.dev);
17354f86d3a8SLen Brown 	cpuidle_resume_and_unlock();
17364f86d3a8SLen Brown 
17374f86d3a8SLen Brown 	return ret;
17384f86d3a8SLen Brown }
17394f86d3a8SLen Brown 
17404f86d3a8SLen Brown #endif /* CONFIG_CPU_IDLE */
17414f86d3a8SLen Brown 
17427af8b660SPierre Ossman int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
17434be44fcdSLen Brown 			      struct acpi_device *device)
17441da177e4SLinus Torvalds {
17451da177e4SLinus Torvalds 	acpi_status status = 0;
1746b6835052SAndreas Mohr 	static int first_run;
17471da177e4SLinus Torvalds 	struct proc_dir_entry *entry = NULL;
17481da177e4SLinus Torvalds 	unsigned int i;
17491da177e4SLinus Torvalds 
17501da177e4SLinus Torvalds 
17511da177e4SLinus Torvalds 	if (!first_run) {
17521da177e4SLinus Torvalds 		dmi_check_system(processor_power_dmi_table);
1753c1c30634SAlexey Starikovskiy 		max_cstate = acpi_processor_cstate_check(max_cstate);
17541da177e4SLinus Torvalds 		if (max_cstate < ACPI_C_STATES_MAX)
17554be44fcdSLen Brown 			printk(KERN_NOTICE
17564be44fcdSLen Brown 			       "ACPI: processor limited to max C-state %d\n",
17574be44fcdSLen Brown 			       max_cstate);
17581da177e4SLinus Torvalds 		first_run++;
17594f86d3a8SLen Brown #if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
1760f011e2e2SMark Gross 		pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
1761f011e2e2SMark Gross 				&acpi_processor_latency_notifier);
17621fec74a9SAndrew Morton #endif
17631da177e4SLinus Torvalds 	}
17641da177e4SLinus Torvalds 
176502df8b93SVenkatesh Pallipadi 	if (!pr)
1766d550d98dSPatrick Mochel 		return -EINVAL;
176702df8b93SVenkatesh Pallipadi 
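	/*
	 * Write the _CST enable value to the SMI command port to tell the
	 * BIOS that the OS supports _CST, unless disabled with "nocst".
	 */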
1768cee324b1SAlexey Starikovskiy 	if (acpi_gbl_FADT.cst_control && !nocst) {
17694be44fcdSLen Brown 		status =
1770cee324b1SAlexey Starikovskiy 		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
17711da177e4SLinus Torvalds 		if (ACPI_FAILURE(status)) {
1772a6fc6720SThomas Renninger 			ACPI_EXCEPTION((AE_INFO, status,
1773a6fc6720SThomas Renninger 					"Notifying BIOS of _CST ability failed"));
17741da177e4SLinus Torvalds 		}
17751da177e4SLinus Torvalds 	}
17761da177e4SLinus Torvalds 
17771da177e4SLinus Torvalds 	acpi_processor_get_power_info(pr);
17784f86d3a8SLen Brown 	pr->flags.power_setup_done = 1;
17791da177e4SLinus Torvalds 
17801da177e4SLinus Torvalds 	/*
17811da177e4SLinus Torvalds 	 * Install the idle handler if processor power management is supported.
17821da177e4SLinus Torvalds 	 * Note that the previously set idle handler will be used on
17831da177e4SLinus Torvalds 	 * platforms that only support C1.
17841da177e4SLinus Torvalds 	 */
17851da177e4SLinus Torvalds 	if ((pr->flags.power) && (!boot_option_idle_override)) {
17864f86d3a8SLen Brown #ifdef CONFIG_CPU_IDLE
17874f86d3a8SLen Brown 		acpi_processor_setup_cpuidle(pr);
17884f86d3a8SLen Brown 		pr->power.dev.cpu = pr->id;
17894f86d3a8SLen Brown 		if (cpuidle_register_device(&pr->power.dev))
17904f86d3a8SLen Brown 			return -EIO;
17914f86d3a8SLen Brown #endif
17924f86d3a8SLen Brown 
17931da177e4SLinus Torvalds 		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
17941da177e4SLinus Torvalds 		for (i = 1; i <= pr->power.count; i++)
17951da177e4SLinus Torvalds 			if (pr->power.states[i].valid)
17964be44fcdSLen Brown 				printk(" C%d[C%d]", i,
17974be44fcdSLen Brown 				       pr->power.states[i].type);
17981da177e4SLinus Torvalds 		printk(")\n");
17991da177e4SLinus Torvalds 
18004f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
18011da177e4SLinus Torvalds 		if (pr->id == 0) {
18021da177e4SLinus Torvalds 			pm_idle_save = pm_idle;
18031da177e4SLinus Torvalds 			pm_idle = acpi_processor_idle;
18041da177e4SLinus Torvalds 		}
18054f86d3a8SLen Brown #endif
18061da177e4SLinus Torvalds 	}
18071da177e4SLinus Torvalds 
18081da177e4SLinus Torvalds 	/* 'power' [R] */
18091da177e4SLinus Torvalds 	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
18101da177e4SLinus Torvalds 				  S_IRUGO, acpi_device_dir(device));
18111da177e4SLinus Torvalds 	if (!entry)
1812a6fc6720SThomas Renninger 		return -EIO;
18131da177e4SLinus Torvalds 	else {
18141da177e4SLinus Torvalds 		entry->proc_fops = &acpi_processor_power_fops;
18151da177e4SLinus Torvalds 		entry->data = acpi_driver_data(device);
18161da177e4SLinus Torvalds 		entry->owner = THIS_MODULE;
18171da177e4SLinus Torvalds 	}
18181da177e4SLinus Torvalds 
1819d550d98dSPatrick Mochel 	return 0;
18201da177e4SLinus Torvalds }
18211da177e4SLinus Torvalds 
18224be44fcdSLen Brown int acpi_processor_power_exit(struct acpi_processor *pr,
18234be44fcdSLen Brown 			      struct acpi_device *device)
18241da177e4SLinus Torvalds {
18254f86d3a8SLen Brown #ifdef CONFIG_CPU_IDLE
18264f86d3a8SLen Brown 	if ((pr->flags.power) && (!boot_option_idle_override))
18274f86d3a8SLen Brown 		cpuidle_unregister_device(&pr->power.dev);
18284f86d3a8SLen Brown #endif
18291da177e4SLinus Torvalds 	pr->flags.power_setup_done = 0;
18301da177e4SLinus Torvalds 
18311da177e4SLinus Torvalds 	if (acpi_device_dir(device))
18324be44fcdSLen Brown 		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
18334be44fcdSLen Brown 				  acpi_device_dir(device));
18341da177e4SLinus Torvalds 
18354f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
18364f86d3a8SLen Brown 
18371da177e4SLinus Torvalds 	/* Unregister the idle handler when processor #0 is removed. */
18381da177e4SLinus Torvalds 	if (pr->id == 0) {
18391da177e4SLinus Torvalds 		pm_idle = pm_idle_save;
18401da177e4SLinus Torvalds 
18411da177e4SLinus Torvalds 		/*
18421da177e4SLinus Torvalds 		 * We are about to unload the current idle thread pm callback
18431da177e4SLinus Torvalds 		 * (pm_idle), Wait for all processors to update cached/local
18441da177e4SLinus Torvalds 		 * copies of pm_idle before proceeding.
18451da177e4SLinus Torvalds 		 */
18461da177e4SLinus Torvalds 		cpu_idle_wait();
18471fec74a9SAndrew Morton #ifdef CONFIG_SMP
1848f011e2e2SMark Gross 		pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
1849f011e2e2SMark Gross 				&acpi_processor_latency_notifier);
18501fec74a9SAndrew Morton #endif
18511da177e4SLinus Torvalds 	}
18524f86d3a8SLen Brown #endif
18531da177e4SLinus Torvalds 
1854d550d98dSPatrick Mochel 	return 0;
18551da177e4SLinus Torvalds }
1856