xref: /openbmc/linux/drivers/acpi/processor_idle.c (revision 7f424a8b)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * processor_idle - idle state submodule to the ACPI processor driver
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
51da177e4SLinus Torvalds  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6c5ab81caSDominik Brodowski  *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
71da177e4SLinus Torvalds  *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
81da177e4SLinus Torvalds  *  			- Added processor hotplug support
902df8b93SVenkatesh Pallipadi  *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
1002df8b93SVenkatesh Pallipadi  *  			- Added support for C3 on SMP
111da177e4SLinus Torvalds  *
121da177e4SLinus Torvalds  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
131da177e4SLinus Torvalds  *
141da177e4SLinus Torvalds  *  This program is free software; you can redistribute it and/or modify
151da177e4SLinus Torvalds  *  it under the terms of the GNU General Public License as published by
161da177e4SLinus Torvalds  *  the Free Software Foundation; either version 2 of the License, or (at
171da177e4SLinus Torvalds  *  your option) any later version.
181da177e4SLinus Torvalds  *
191da177e4SLinus Torvalds  *  This program is distributed in the hope that it will be useful, but
201da177e4SLinus Torvalds  *  WITHOUT ANY WARRANTY; without even the implied warranty of
211da177e4SLinus Torvalds  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
221da177e4SLinus Torvalds  *  General Public License for more details.
231da177e4SLinus Torvalds  *
241da177e4SLinus Torvalds  *  You should have received a copy of the GNU General Public License along
251da177e4SLinus Torvalds  *  with this program; if not, write to the Free Software Foundation, Inc.,
261da177e4SLinus Torvalds  *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
271da177e4SLinus Torvalds  *
281da177e4SLinus Torvalds  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
291da177e4SLinus Torvalds  */
301da177e4SLinus Torvalds 
311da177e4SLinus Torvalds #include <linux/kernel.h>
321da177e4SLinus Torvalds #include <linux/module.h>
331da177e4SLinus Torvalds #include <linux/init.h>
341da177e4SLinus Torvalds #include <linux/cpufreq.h>
351da177e4SLinus Torvalds #include <linux/proc_fs.h>
361da177e4SLinus Torvalds #include <linux/seq_file.h>
371da177e4SLinus Torvalds #include <linux/acpi.h>
381da177e4SLinus Torvalds #include <linux/dmi.h>
391da177e4SLinus Torvalds #include <linux/moduleparam.h>
404e57b681STim Schmielau #include <linux/sched.h>	/* need_resched() */
41f011e2e2SMark Gross #include <linux/pm_qos_params.h>
42e9e2cdb4SThomas Gleixner #include <linux/clockchips.h>
434f86d3a8SLen Brown #include <linux/cpuidle.h>
441da177e4SLinus Torvalds 
453434933bSThomas Gleixner /*
463434933bSThomas Gleixner  * Include the apic definitions for x86 to have the APIC timer related defines
463434933bSThomas Gleixner  * Include the APIC definitions for x86 so that the APIC timer-related defines
473434933bSThomas Gleixner  * are also available on UP (on SMP they get included via linux/smp.h).
483434933bSThomas Gleixner  * asm/acpi.h is not an option, as it would require more include magic. Also,
493434933bSThomas Gleixner  * creating an empty asm-ia64/apic.h would just trade one problem for another.
513434933bSThomas Gleixner #ifdef CONFIG_X86
523434933bSThomas Gleixner #include <asm/apic.h>
533434933bSThomas Gleixner #endif
543434933bSThomas Gleixner 
551da177e4SLinus Torvalds #include <asm/io.h>
561da177e4SLinus Torvalds #include <asm/uaccess.h>
571da177e4SLinus Torvalds 
581da177e4SLinus Torvalds #include <acpi/acpi_bus.h>
591da177e4SLinus Torvalds #include <acpi/processor.h>
601da177e4SLinus Torvalds 
611da177e4SLinus Torvalds #define ACPI_PROCESSOR_COMPONENT        0x01000000
621da177e4SLinus Torvalds #define ACPI_PROCESSOR_CLASS            "processor"
631da177e4SLinus Torvalds #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
64f52fd66dSLen Brown ACPI_MODULE_NAME("processor_idle");
651da177e4SLinus Torvalds #define ACPI_PROCESSOR_FILE_POWER	"power"
661da177e4SLinus Torvalds #define US_TO_PM_TIMER_TICKS(t)		(((t) * (PM_TIMER_FREQUENCY/1000)) / 1000)
672aa44d05SIngo Molnar #define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
684f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
691da177e4SLinus Torvalds #define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
701da177e4SLinus Torvalds #define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
71b6835052SAndreas Mohr static void (*pm_idle_save) (void) __read_mostly;
724f86d3a8SLen Brown #else
734f86d3a8SLen Brown #define C2_OVERHEAD			1	/* 1us */
744f86d3a8SLen Brown #define C3_OVERHEAD			1	/* 1us */
754f86d3a8SLen Brown #endif
764f86d3a8SLen Brown #define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
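/*
 * The ACPI PM timer runs at a nominal 3.579545 MHz (PM_TIMER_FREQUENCY),
 * i.e. roughly 3.58 ticks per microsecond.  As a worked example,
 * PM_TIMER_TICKS_TO_US(3580) = (3580 * 1000) / 3579 ~= 1000 us, and
 * US_TO_PM_TIMER_TICKS(1000) = (1000 * 3579) / 1000 = 3579 ticks.
 */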
771da177e4SLinus Torvalds 
784f86d3a8SLen Brown static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
795b3f0e6cSVenki Pallipadi #ifdef CONFIG_CPU_IDLE
804f86d3a8SLen Brown module_param(max_cstate, uint, 0000);
815b3f0e6cSVenki Pallipadi #else
825b3f0e6cSVenki Pallipadi module_param(max_cstate, uint, 0644);
835b3f0e6cSVenki Pallipadi #endif
84b6835052SAndreas Mohr static unsigned int nocst __read_mostly;
851da177e4SLinus Torvalds module_param(nocst, uint, 0000);
861da177e4SLinus Torvalds 
874f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
881da177e4SLinus Torvalds /*
891da177e4SLinus Torvalds  * bm_history -- bit-mask with a bit per jiffy of bus-master activity
901da177e4SLinus Torvalds  * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
911da177e4SLinus Torvalds  * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
921da177e4SLinus Torvalds  * 100 HZ: 0x0000000F: 4 jiffies = 40ms
931da177e4SLinus Torvalds  * reduce history for more aggressive entry into C3
941da177e4SLinus Torvalds  */
95b6835052SAndreas Mohr static unsigned int bm_history __read_mostly =
964be44fcdSLen Brown     (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
971da177e4SLinus Torvalds module_param(bm_history, uint, 0644);
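/*
 * Worked example for the default above: with HZ = 250, HZ / 25 = 10, so the
 * mask is (1U << 10) - 1 = 0x3FF, i.e. 10 jiffies = 40 ms of bus-master
 * history; with HZ >= 800 the full 32-bit mask is used instead.
 */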
984f86d3a8SLen Brown 
994f86d3a8SLen Brown static int acpi_processor_set_power_policy(struct acpi_processor *pr);
1004f86d3a8SLen Brown 
1014963f620SLen Brown #else	/* CONFIG_CPU_IDLE */
10225de5718SLen Brown static unsigned int latency_factor __read_mostly = 2;
1034963f620SLen Brown module_param(latency_factor, uint, 0644);
1044f86d3a8SLen Brown #endif
1051da177e4SLinus Torvalds 
1061da177e4SLinus Torvalds /*
1071da177e4SLinus Torvalds  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
1081da177e4SLinus Torvalds  * For now, disable this; the bug is probably somewhere else.
1091da177e4SLinus Torvalds  *
1101da177e4SLinus Torvalds  * To skip this limit, boot/load with a large max_cstate limit.
1111da177e4SLinus Torvalds  */
1121855256cSJeff Garzik static int set_max_cstate(const struct dmi_system_id *id)
1131da177e4SLinus Torvalds {
1141da177e4SLinus Torvalds 	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
1151da177e4SLinus Torvalds 		return 0;
1161da177e4SLinus Torvalds 
1173d35600aSLen Brown 	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
1181da177e4SLinus Torvalds 	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
1193d35600aSLen Brown 	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
1201da177e4SLinus Torvalds 
1213d35600aSLen Brown 	max_cstate = (long)id->driver_data;
1221da177e4SLinus Torvalds 
1231da177e4SLinus Torvalds 	return 0;
1241da177e4SLinus Torvalds }
1251da177e4SLinus Torvalds 
1267ded5689SAshok Raj /* Actually this shouldn't be __cpuinitdata; it would be better to fix the
1277ded5689SAshok Raj    callers to only run once. -AK */
1287ded5689SAshok Raj static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
129335f16beSDavid Shaohua Li 	{ set_max_cstate, "IBM ThinkPad R40e", {
130876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
131f831335dSBartlomiej Swiercz 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
132f831335dSBartlomiej Swiercz 	{ set_max_cstate, "IBM ThinkPad R40e", {
133f831335dSBartlomiej Swiercz 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
134876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
135876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
136876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
137876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void*)1},
138876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
139876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
140876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void*)1},
141876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
142876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
143876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void*)1},
144876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
145876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
146876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void*)1},
147876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
148876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
149876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void*)1},
150876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
151876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
152876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void*)1},
153876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
154876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
155876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void*)1},
156876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
157876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
158876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void*)1},
159876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
160876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
161876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void*)1},
162876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
163876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
164876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void*)1},
165876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
166876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
167876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void*)1},
168876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
169876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
170876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void*)1},
171876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
172876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
173876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void*)1},
174876c184bSThomas Rosner 	{ set_max_cstate, "IBM ThinkPad R40e", {
175876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
176876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void*)1},
177335f16beSDavid Shaohua Li 	{ set_max_cstate, "Medion 41700", {
178876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
179876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
180335f16beSDavid Shaohua Li 	{ set_max_cstate, "Clevo 5600D", {
181876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
182876c184bSThomas Rosner 	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
183335f16beSDavid Shaohua Li 	 (void *)2},
1841da177e4SLinus Torvalds 	{},
1851da177e4SLinus Torvalds };
1861da177e4SLinus Torvalds 
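/*
 * PM timer tick arithmetic.  The ACPI PM timer is a free-running counter
 * that is either 24 or 32 bits wide, as indicated by the FADT's
 * ACPI_FADT_32BIT_TIMER flag, so the elapsed-time helpers below must
 * account for a wrap-around between the two reads.
 */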
1874be44fcdSLen Brown static inline u32 ticks_elapsed(u32 t1, u32 t2)
1881da177e4SLinus Torvalds {
1891da177e4SLinus Torvalds 	if (t2 >= t1)
1901da177e4SLinus Torvalds 		return (t2 - t1);
191cee324b1SAlexey Starikovskiy 	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
1921da177e4SLinus Torvalds 		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
1931da177e4SLinus Torvalds 	else
1941da177e4SLinus Torvalds 		return ((0xFFFFFFFF - t1) + t2);
1951da177e4SLinus Torvalds }
1961da177e4SLinus Torvalds 
1974f86d3a8SLen Brown static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
1984f86d3a8SLen Brown {
1994f86d3a8SLen Brown 	if (t2 >= t1)
2004f86d3a8SLen Brown 		return PM_TIMER_TICKS_TO_US(t2 - t1);
2014f86d3a8SLen Brown 	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
2024f86d3a8SLen Brown 		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
2034f86d3a8SLen Brown 	else
2044f86d3a8SLen Brown 		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
2054f86d3a8SLen Brown }
2064f86d3a8SLen Brown 
2072e906655Svenkatesh.pallipadi@intel.com /*
2082e906655Svenkatesh.pallipadi@intel.com  * Callers should disable interrupts before the call and enable
2092e906655Svenkatesh.pallipadi@intel.com  * interrupts after return.
2102e906655Svenkatesh.pallipadi@intel.com  */
211ddc081a1SVenkatesh Pallipadi static void acpi_safe_halt(void)
212ddc081a1SVenkatesh Pallipadi {
213ddc081a1SVenkatesh Pallipadi 	current_thread_info()->status &= ~TS_POLLING;
214ddc081a1SVenkatesh Pallipadi 	/*
215ddc081a1SVenkatesh Pallipadi 	 * TS_POLLING-cleared state must be visible before we
216ddc081a1SVenkatesh Pallipadi 	 * test NEED_RESCHED:
217ddc081a1SVenkatesh Pallipadi 	 */
218ddc081a1SVenkatesh Pallipadi 	smp_mb();
21971e93d15SVenki Pallipadi 	if (!need_resched()) {
220ddc081a1SVenkatesh Pallipadi 		safe_halt();
22171e93d15SVenki Pallipadi 		local_irq_disable();
22271e93d15SVenki Pallipadi 	}
223ddc081a1SVenkatesh Pallipadi 	current_thread_info()->status |= TS_POLLING;
224ddc081a1SVenkatesh Pallipadi }
225ddc081a1SVenkatesh Pallipadi 
2264f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
2274f86d3a8SLen Brown 
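/*
 * Switch the legacy idle loop to a new C-state: reset the promotion and
 * demotion counters and, when bm_check is set, turn bus-master reload
 * (BM_RLD) off when leaving C3 and back on when entering it.
 */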
2281da177e4SLinus Torvalds static void
2294be44fcdSLen Brown acpi_processor_power_activate(struct acpi_processor *pr,
2301da177e4SLinus Torvalds 			      struct acpi_processor_cx *new)
2311da177e4SLinus Torvalds {
2321da177e4SLinus Torvalds 	struct acpi_processor_cx *old;
2331da177e4SLinus Torvalds 
2341da177e4SLinus Torvalds 	if (!pr || !new)
2351da177e4SLinus Torvalds 		return;
2361da177e4SLinus Torvalds 
2371da177e4SLinus Torvalds 	old = pr->power.state;
2381da177e4SLinus Torvalds 
2391da177e4SLinus Torvalds 	if (old)
2401da177e4SLinus Torvalds 		old->promotion.count = 0;
2411da177e4SLinus Torvalds 	new->demotion.count = 0;
2421da177e4SLinus Torvalds 
2431da177e4SLinus Torvalds 	/* Cleanup from old state. */
2441da177e4SLinus Torvalds 	if (old) {
2451da177e4SLinus Torvalds 		switch (old->type) {
2461da177e4SLinus Torvalds 		case ACPI_STATE_C3:
2471da177e4SLinus Torvalds 			/* Disable bus master reload */
24802df8b93SVenkatesh Pallipadi 			if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
249d8c71b6dSBob Moore 				acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
2501da177e4SLinus Torvalds 			break;
2511da177e4SLinus Torvalds 		}
2521da177e4SLinus Torvalds 	}
2531da177e4SLinus Torvalds 
2541da177e4SLinus Torvalds 	/* Prepare to use new state. */
2551da177e4SLinus Torvalds 	switch (new->type) {
2561da177e4SLinus Torvalds 	case ACPI_STATE_C3:
2571da177e4SLinus Torvalds 		/* Enable bus master reload */
25802df8b93SVenkatesh Pallipadi 		if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
259d8c71b6dSBob Moore 			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
2601da177e4SLinus Torvalds 		break;
2611da177e4SLinus Torvalds 	}
2621da177e4SLinus Torvalds 
2631da177e4SLinus Torvalds 	pr->power.state = new;
2641da177e4SLinus Torvalds 
2651da177e4SLinus Torvalds 	return;
2661da177e4SLinus Torvalds }
2671da177e4SLinus Torvalds 
26802df8b93SVenkatesh Pallipadi static atomic_t c3_cpu_count;
26902df8b93SVenkatesh Pallipadi 
270991528d7SVenkatesh Pallipadi /* Common C-state entry for C2, C3, .. */
271991528d7SVenkatesh Pallipadi static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
272991528d7SVenkatesh Pallipadi {
273bc71bec9Svenkatesh.pallipadi@intel.com 	if (cstate->entry_method == ACPI_CSTATE_FFH) {
274991528d7SVenkatesh Pallipadi 		/* Call into architectural FFH based C-state */
275991528d7SVenkatesh Pallipadi 		acpi_processor_ffh_cstate_enter(cstate);
276991528d7SVenkatesh Pallipadi 	} else {
277991528d7SVenkatesh Pallipadi 		int unused;
278991528d7SVenkatesh Pallipadi 		/* IO port based C-state */
279991528d7SVenkatesh Pallipadi 		inb(cstate->address);
280991528d7SVenkatesh Pallipadi 		/* Dummy wait op - must do something useless after P_LVL2 read
281991528d7SVenkatesh Pallipadi 		   because chipsets cannot guarantee that STPCLK# signal
282991528d7SVenkatesh Pallipadi 		   gets asserted in time to freeze execution properly. */
283cee324b1SAlexey Starikovskiy 		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
284991528d7SVenkatesh Pallipadi 	}
285991528d7SVenkatesh Pallipadi }
2864f86d3a8SLen Brown #endif /* !CONFIG_CPU_IDLE */
287991528d7SVenkatesh Pallipadi 
288169a0abbSThomas Gleixner #ifdef ARCH_APICTIMER_STOPS_ON_C3
289169a0abbSThomas Gleixner 
290169a0abbSThomas Gleixner /*
291169a0abbSThomas Gleixner  * Some BIOS implementations switch to C3 in the published C2 state.
292296d93cdSLinus Torvalds  * This seems to be a common problem on AMD boxen, but other vendors
293296d93cdSLinus Torvalds  * are affected too. We pick the most conservative approach: we assume
294296d93cdSLinus Torvalds  * that the local APIC stops in both C2 and C3.
295169a0abbSThomas Gleixner  */
296169a0abbSThomas Gleixner static void acpi_timer_check_state(int state, struct acpi_processor *pr,
297169a0abbSThomas Gleixner 				   struct acpi_processor_cx *cx)
298169a0abbSThomas Gleixner {
299169a0abbSThomas Gleixner 	struct acpi_processor_power *pwr = &pr->power;
300e585bef8SThomas Gleixner 	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
301169a0abbSThomas Gleixner 
302169a0abbSThomas Gleixner 	/*
303169a0abbSThomas Gleixner 	 * Check whether one of the shallower states has already marked the
304169a0abbSThomas Gleixner 	 * local APIC timer as unstable.
305169a0abbSThomas Gleixner 	 */
306169a0abbSThomas Gleixner 	if (pwr->timer_broadcast_on_state < state)
307169a0abbSThomas Gleixner 		return;
308169a0abbSThomas Gleixner 
309e585bef8SThomas Gleixner 	if (cx->type >= type)
310169a0abbSThomas Gleixner 		pr->power.timer_broadcast_on_state = state;
311169a0abbSThomas Gleixner }
312169a0abbSThomas Gleixner 
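/*
 * Switch the clockevents broadcast mechanism on if any usable C-state
 * stops the local APIC timer, and off otherwise.
 */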
313169a0abbSThomas Gleixner static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
314169a0abbSThomas Gleixner {
315e9e2cdb4SThomas Gleixner 	unsigned long reason;
316e9e2cdb4SThomas Gleixner 
317e9e2cdb4SThomas Gleixner 	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
318e9e2cdb4SThomas Gleixner 		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
319e9e2cdb4SThomas Gleixner 
320e9e2cdb4SThomas Gleixner 	clockevents_notify(reason, &pr->id);
321e9e2cdb4SThomas Gleixner }
322e9e2cdb4SThomas Gleixner 
323e9e2cdb4SThomas Gleixner /* Power(C) State timer broadcast control */
324e9e2cdb4SThomas Gleixner static void acpi_state_timer_broadcast(struct acpi_processor *pr,
325e9e2cdb4SThomas Gleixner 				       struct acpi_processor_cx *cx,
326e9e2cdb4SThomas Gleixner 				       int broadcast)
327e9e2cdb4SThomas Gleixner {
328e9e2cdb4SThomas Gleixner 	int state = cx - pr->power.states;
329e9e2cdb4SThomas Gleixner 
330e9e2cdb4SThomas Gleixner 	if (state >= pr->power.timer_broadcast_on_state) {
331e9e2cdb4SThomas Gleixner 		unsigned long reason;
332e9e2cdb4SThomas Gleixner 
333e9e2cdb4SThomas Gleixner 		reason = broadcast ?  CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
334e9e2cdb4SThomas Gleixner 			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
335e9e2cdb4SThomas Gleixner 		clockevents_notify(reason, &pr->id);
336e9e2cdb4SThomas Gleixner 	}
337169a0abbSThomas Gleixner }
338169a0abbSThomas Gleixner 
339169a0abbSThomas Gleixner #else
340169a0abbSThomas Gleixner 
341169a0abbSThomas Gleixner static void acpi_timer_check_state(int state, struct acpi_processor *pr,
342169a0abbSThomas Gleixner 				   struct acpi_processor_cx *cstate) { }
343169a0abbSThomas Gleixner static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
344e9e2cdb4SThomas Gleixner static void acpi_state_timer_broadcast(struct acpi_processor *pr,
345e9e2cdb4SThomas Gleixner 				       struct acpi_processor_cx *cx,
346e9e2cdb4SThomas Gleixner 				       int broadcast)
347e9e2cdb4SThomas Gleixner {
348e9e2cdb4SThomas Gleixner }
349169a0abbSThomas Gleixner 
350169a0abbSThomas Gleixner #endif
351169a0abbSThomas Gleixner 
352b04e7bdbSThomas Gleixner /*
353b04e7bdbSThomas Gleixner  * Suspend / resume control
354b04e7bdbSThomas Gleixner  */
355b04e7bdbSThomas Gleixner static int acpi_idle_suspend;
356b04e7bdbSThomas Gleixner 
357b04e7bdbSThomas Gleixner int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
358b04e7bdbSThomas Gleixner {
359b04e7bdbSThomas Gleixner 	acpi_idle_suspend = 1;
360b04e7bdbSThomas Gleixner 	return 0;
361b04e7bdbSThomas Gleixner }
362b04e7bdbSThomas Gleixner 
363b04e7bdbSThomas Gleixner int acpi_processor_resume(struct acpi_device * device)
364b04e7bdbSThomas Gleixner {
365b04e7bdbSThomas Gleixner 	acpi_idle_suspend = 0;
366b04e7bdbSThomas Gleixner 	return 0;
367b04e7bdbSThomas Gleixner }
368b04e7bdbSThomas Gleixner 
36961331168SPavel Machek #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
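/*
 * Return nonzero if the TSC may stop in the given C-state on this CPU.
 * AMD parts with the CONSTANT_TSC feature keep the TSC ticking in all
 * C-states; everything else is assumed to halt it in C2 and deeper.
 */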
370ddb25f9aSAndi Kleen static int tsc_halts_in_c(int state)
371ddb25f9aSAndi Kleen {
372ddb25f9aSAndi Kleen 	switch (boot_cpu_data.x86_vendor) {
373ddb25f9aSAndi Kleen 	case X86_VENDOR_AMD:
374ddb25f9aSAndi Kleen 		/*
375ddb25f9aSAndi Kleen 		 * AMD Fam10h TSC will tick in all
376ddb25f9aSAndi Kleen 		 * C/P/S0/S1 states when this bit is set.
377ddb25f9aSAndi Kleen 		 */
378ddb25f9aSAndi Kleen 		if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
379ddb25f9aSAndi Kleen 			return 0;
380ddb25f9aSAndi Kleen 		/*FALL THROUGH*/
381ddb25f9aSAndi Kleen 	case X86_VENDOR_INTEL:
382ddb25f9aSAndi Kleen 		/* Several cases known where TSC halts in C2 too */
383ddb25f9aSAndi Kleen 	default:
384ddb25f9aSAndi Kleen 		return state > ACPI_STATE_C1;
385ddb25f9aSAndi Kleen 	}
386ddb25f9aSAndi Kleen }
387ddb25f9aSAndi Kleen #endif
388ddb25f9aSAndi Kleen 
3894f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
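/*
 * Legacy ACPI idle loop, used when CONFIG_CPU_IDLE is not set.  It samples
 * bus-master activity, enters the currently selected C-state, measures the
 * time spent asleep with the PM timer, and then applies the promotion /
 * demotion policy to pick the state for the next idle period.
 */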
3901da177e4SLinus Torvalds static void acpi_processor_idle(void)
3911da177e4SLinus Torvalds {
3921da177e4SLinus Torvalds 	struct acpi_processor *pr = NULL;
3931da177e4SLinus Torvalds 	struct acpi_processor_cx *cx = NULL;
3941da177e4SLinus Torvalds 	struct acpi_processor_cx *next_state = NULL;
3951da177e4SLinus Torvalds 	int sleep_ticks = 0;
3961da177e4SLinus Torvalds 	u32 t1, t2 = 0;
3971da177e4SLinus Torvalds 
3981da177e4SLinus Torvalds 	/*
3991da177e4SLinus Torvalds 	 * Interrupts must be disabled during bus mastering calculations and
4001da177e4SLinus Torvalds 	 * for C2/C3 transitions.
4011da177e4SLinus Torvalds 	 */
4021da177e4SLinus Torvalds 	local_irq_disable();
4031da177e4SLinus Torvalds 
404d5a3d32aSVenkatesh Pallipadi 	pr = processors[smp_processor_id()];
405d5a3d32aSVenkatesh Pallipadi 	if (!pr) {
406d5a3d32aSVenkatesh Pallipadi 		local_irq_enable();
407d5a3d32aSVenkatesh Pallipadi 		return;
408d5a3d32aSVenkatesh Pallipadi 	}
409d5a3d32aSVenkatesh Pallipadi 
4101da177e4SLinus Torvalds 	/*
4111da177e4SLinus Torvalds 	 * Check whether we truly need to go idle, or should
4121da177e4SLinus Torvalds 	 * reschedule:
4131da177e4SLinus Torvalds 	 */
4141da177e4SLinus Torvalds 	if (unlikely(need_resched())) {
4151da177e4SLinus Torvalds 		local_irq_enable();
4161da177e4SLinus Torvalds 		return;
4171da177e4SLinus Torvalds 	}
4181da177e4SLinus Torvalds 
4191da177e4SLinus Torvalds 	cx = pr->power.state;
420b04e7bdbSThomas Gleixner 	if (!cx || acpi_idle_suspend) {
4217f424a8bSPeter Zijlstra 		if (pm_idle_save) {
4227f424a8bSPeter Zijlstra 			pm_idle_save(); /* enables IRQs */
4237f424a8bSPeter Zijlstra 		} else {
42464c7c8f8SNick Piggin 			acpi_safe_halt();
4252e906655Svenkatesh.pallipadi@intel.com 			local_irq_enable();
4267f424a8bSPeter Zijlstra 		}
42771e93d15SVenki Pallipadi 
42864c7c8f8SNick Piggin 		return;
42964c7c8f8SNick Piggin 	}
4301da177e4SLinus Torvalds 
4311da177e4SLinus Torvalds 	/*
4321da177e4SLinus Torvalds 	 * Check BM Activity
4331da177e4SLinus Torvalds 	 * -----------------
4341da177e4SLinus Torvalds 	 * Check for bus mastering activity (if required), record, and check
4351da177e4SLinus Torvalds 	 * for demotion.
4361da177e4SLinus Torvalds 	 */
4371da177e4SLinus Torvalds 	if (pr->flags.bm_check) {
4381da177e4SLinus Torvalds 		u32 bm_status = 0;
4391da177e4SLinus Torvalds 		unsigned long diff = jiffies - pr->power.bm_check_timestamp;
4401da177e4SLinus Torvalds 
441c5ab81caSDominik Brodowski 		if (diff > 31)
442c5ab81caSDominik Brodowski 			diff = 31;
4431da177e4SLinus Torvalds 
444c5ab81caSDominik Brodowski 		pr->power.bm_activity <<= diff;
4451da177e4SLinus Torvalds 
446d8c71b6dSBob Moore 		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
4471da177e4SLinus Torvalds 		if (bm_status) {
448c5ab81caSDominik Brodowski 			pr->power.bm_activity |= 0x1;
449d8c71b6dSBob Moore 			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
4501da177e4SLinus Torvalds 		}
4511da177e4SLinus Torvalds 		/*
4521da177e4SLinus Torvalds 		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
4531da177e4SLinus Torvalds 		 * the true state of bus mastering activity, forcing us to
4541da177e4SLinus Torvalds 		 * check the BMIDEA bit of each IDE channel manually.
4551da177e4SLinus Torvalds 		 */
4561da177e4SLinus Torvalds 		else if (errata.piix4.bmisx) {
4571da177e4SLinus Torvalds 			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
4581da177e4SLinus Torvalds 			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
459c5ab81caSDominik Brodowski 				pr->power.bm_activity |= 0x1;
4601da177e4SLinus Torvalds 		}
4611da177e4SLinus Torvalds 
4621da177e4SLinus Torvalds 		pr->power.bm_check_timestamp = jiffies;
4631da177e4SLinus Torvalds 
4641da177e4SLinus Torvalds 		/*
465c4a001b1SDominik Brodowski 		 * If bus mastering is or was active this jiffy, demote
4661da177e4SLinus Torvalds 		 * to avoid a faulty transition.  Note that the processor
4671da177e4SLinus Torvalds 		 * won't enter a low-power state during this call (to this
468c4a001b1SDominik Brodowski 		 * function) but should upon the next.
4691da177e4SLinus Torvalds 		 *
4701da177e4SLinus Torvalds 		 * TBD: A better policy might be to fall back to the demotion
4711da177e4SLinus Torvalds 		 *      state (use it for this quantum only) instead of
4721da177e4SLinus Torvalds 		 *      demoting -- and rely on duration as our sole demotion
4731da177e4SLinus Torvalds 		 *      qualification.  This may, however, introduce DMA
4741da177e4SLinus Torvalds 		 *      issues (e.g. floppy DMA transfer overrun/underrun).
4751da177e4SLinus Torvalds 		 */
476c4a001b1SDominik Brodowski 		if ((pr->power.bm_activity & 0x1) &&
477c4a001b1SDominik Brodowski 		    cx->demotion.threshold.bm) {
4781da177e4SLinus Torvalds 			local_irq_enable();
4791da177e4SLinus Torvalds 			next_state = cx->demotion.state;
4801da177e4SLinus Torvalds 			goto end;
4811da177e4SLinus Torvalds 		}
4821da177e4SLinus Torvalds 	}
4831da177e4SLinus Torvalds 
4844c033552SVenkatesh Pallipadi #ifdef CONFIG_HOTPLUG_CPU
4854c033552SVenkatesh Pallipadi 	/*
4864c033552SVenkatesh Pallipadi 	 * Check for P_LVL2_UP flag before entering C2 and above on
4874c033552SVenkatesh Pallipadi 	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
4884c033552SVenkatesh Pallipadi 	 * detection phase, to work cleanly with logical CPU hotplug.
4894c033552SVenkatesh Pallipadi 	 */
4904c033552SVenkatesh Pallipadi 	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
491cee324b1SAlexey Starikovskiy 	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
4921e483969SDavid Shaohua Li 		cx = &pr->power.states[ACPI_STATE_C1];
4934c033552SVenkatesh Pallipadi #endif
4941e483969SDavid Shaohua Li 
4951da177e4SLinus Torvalds 	/*
4961da177e4SLinus Torvalds 	 * Sleep:
4971da177e4SLinus Torvalds 	 * ------
4981da177e4SLinus Torvalds 	 * Invoke the current Cx state to put the processor to sleep.
4991da177e4SLinus Torvalds 	 */
5002a298a35SNick Piggin 	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
501495ab9c0SAndi Kleen 		current_thread_info()->status &= ~TS_POLLING;
5020888f06aSIngo Molnar 		/*
5030888f06aSIngo Molnar 		 * TS_POLLING-cleared state must be visible before we
5040888f06aSIngo Molnar 		 * test NEED_RESCHED:
5050888f06aSIngo Molnar 		 */
5060888f06aSIngo Molnar 		smp_mb();
5072a298a35SNick Piggin 		if (need_resched()) {
508495ab9c0SAndi Kleen 			current_thread_info()->status |= TS_POLLING;
509af2eb17bSLinus Torvalds 			local_irq_enable();
5102a298a35SNick Piggin 			return;
5112a298a35SNick Piggin 		}
5122a298a35SNick Piggin 	}
5132a298a35SNick Piggin 
5141da177e4SLinus Torvalds 	switch (cx->type) {
5151da177e4SLinus Torvalds 
5161da177e4SLinus Torvalds 	case ACPI_STATE_C1:
5171da177e4SLinus Torvalds 		/*
5181da177e4SLinus Torvalds 		 * Invoke C1.
5191da177e4SLinus Torvalds 		 * Use the appropriate idle routine, the one that would
5201da177e4SLinus Torvalds 		 * be used without acpi C-states.
5211da177e4SLinus Torvalds 		 */
5227f424a8bSPeter Zijlstra 		if (pm_idle_save) {
5237f424a8bSPeter Zijlstra 			pm_idle_save(); /* enables IRQs */
5247f424a8bSPeter Zijlstra 		} else {
52564c7c8f8SNick Piggin 			acpi_safe_halt();
5267f424a8bSPeter Zijlstra 			local_irq_enable();
5277f424a8bSPeter Zijlstra 		}
52864c7c8f8SNick Piggin 
5291da177e4SLinus Torvalds 		/*
5301da177e4SLinus Torvalds 		 * TBD: Can't get time duration while in C1, as resumes
5311da177e4SLinus Torvalds 		 *      go to an ISR rather than here.  Need to instrument
5321da177e4SLinus Torvalds 		 *      base interrupt handler.
5332aa44d05SIngo Molnar 		 *
5342aa44d05SIngo Molnar 		 * Note: the TSC had better not stop in C1; sched_clock() will
5352aa44d05SIngo Molnar 		 *       skew otherwise.
5361da177e4SLinus Torvalds 		 */
5371da177e4SLinus Torvalds 		sleep_ticks = 0xFFFFFFFF;
53871e93d15SVenki Pallipadi 
5391da177e4SLinus Torvalds 		break;
5401da177e4SLinus Torvalds 
5411da177e4SLinus Torvalds 	case ACPI_STATE_C2:
5421da177e4SLinus Torvalds 		/* Get start time (ticks) */
543cee324b1SAlexey Starikovskiy 		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
5442aa44d05SIngo Molnar 		/* Tell the scheduler that we are going deep-idle: */
5452aa44d05SIngo Molnar 		sched_clock_idle_sleep_event();
5461da177e4SLinus Torvalds 		/* Invoke C2 */
547e9e2cdb4SThomas Gleixner 		acpi_state_timer_broadcast(pr, cx, 1);
548991528d7SVenkatesh Pallipadi 		acpi_cstate_enter(cx);
5491da177e4SLinus Torvalds 		/* Get end time (ticks) */
550cee324b1SAlexey Starikovskiy 		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
551539eb11eSjohn stultz 
55261331168SPavel Machek #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
553539eb11eSjohn stultz 		/* TSC halts in C2, so notify users */
554ddb25f9aSAndi Kleen 		if (tsc_halts_in_c(ACPI_STATE_C2))
5555a90cf20Sjohn stultz 			mark_tsc_unstable("possible TSC halt in C2");
556539eb11eSjohn stultz #endif
5572aa44d05SIngo Molnar 		/* Compute time (ticks) that we were actually asleep */
5582aa44d05SIngo Molnar 		sleep_ticks = ticks_elapsed(t1, t2);
5592aa44d05SIngo Molnar 
5602aa44d05SIngo Molnar 		/* Tell the scheduler how much we idled: */
5612aa44d05SIngo Molnar 		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
5622aa44d05SIngo Molnar 
5631da177e4SLinus Torvalds 		/* Re-enable interrupts */
5641da177e4SLinus Torvalds 		local_irq_enable();
5652aa44d05SIngo Molnar 		/* Do not account our idle-switching overhead: */
5662aa44d05SIngo Molnar 		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;
5672aa44d05SIngo Molnar 
568495ab9c0SAndi Kleen 		current_thread_info()->status |= TS_POLLING;
569e9e2cdb4SThomas Gleixner 		acpi_state_timer_broadcast(pr, cx, 0);
5701da177e4SLinus Torvalds 		break;
5711da177e4SLinus Torvalds 
5721da177e4SLinus Torvalds 	case ACPI_STATE_C3:
573bde6f5f5SVenki Pallipadi 		acpi_unlazy_tlb(smp_processor_id());
57418eab855SVenkatesh Pallipadi 		/*
575e17bcb43SThomas Gleixner 		 * Must be done before busmaster disable as we might
576e17bcb43SThomas Gleixner 		 * need to access HPET !
577e17bcb43SThomas Gleixner 		 */
578e17bcb43SThomas Gleixner 		acpi_state_timer_broadcast(pr, cx, 1);
579e17bcb43SThomas Gleixner 		/*
58018eab855SVenkatesh Pallipadi 		 * disable bus master
58118eab855SVenkatesh Pallipadi 		 * bm_check implies we need ARB_DIS
58218eab855SVenkatesh Pallipadi 		 * !bm_check implies we need cache flush
58318eab855SVenkatesh Pallipadi 		 * bm_control implies whether we can do ARB_DIS
58418eab855SVenkatesh Pallipadi 		 *
58518eab855SVenkatesh Pallipadi 		 * That leaves a case where bm_check is set and bm_control is
58618eab855SVenkatesh Pallipadi 		 * not set. In that case we cannot do much; we enter C3
58718eab855SVenkatesh Pallipadi 		 * without doing anything.
58818eab855SVenkatesh Pallipadi 		 */
58918eab855SVenkatesh Pallipadi 		if (pr->flags.bm_check && pr->flags.bm_control) {
59002df8b93SVenkatesh Pallipadi 			if (atomic_inc_return(&c3_cpu_count) ==
59102df8b93SVenkatesh Pallipadi 			    num_online_cpus()) {
59202df8b93SVenkatesh Pallipadi 				/*
59302df8b93SVenkatesh Pallipadi 				 * All CPUs are trying to go to C3
59402df8b93SVenkatesh Pallipadi 				 * Disable bus master arbitration
59502df8b93SVenkatesh Pallipadi 				 */
596d8c71b6dSBob Moore 				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
59702df8b93SVenkatesh Pallipadi 			}
59818eab855SVenkatesh Pallipadi 		} else if (!pr->flags.bm_check) {
59902df8b93SVenkatesh Pallipadi 			/* SMP with no shared cache... Invalidate cache  */
60002df8b93SVenkatesh Pallipadi 			ACPI_FLUSH_CPU_CACHE();
60102df8b93SVenkatesh Pallipadi 		}
60202df8b93SVenkatesh Pallipadi 
6031da177e4SLinus Torvalds 		/* Get start time (ticks) */
604cee324b1SAlexey Starikovskiy 		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
6051da177e4SLinus Torvalds 		/* Invoke C3 */
6062aa44d05SIngo Molnar 		/* Tell the scheduler that we are going deep-idle: */
6072aa44d05SIngo Molnar 		sched_clock_idle_sleep_event();
608991528d7SVenkatesh Pallipadi 		acpi_cstate_enter(cx);
6091da177e4SLinus Torvalds 		/* Get end time (ticks) */
610cee324b1SAlexey Starikovskiy 		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
61118eab855SVenkatesh Pallipadi 		if (pr->flags.bm_check && pr->flags.bm_control) {
6121da177e4SLinus Torvalds 			/* Enable bus master arbitration */
61302df8b93SVenkatesh Pallipadi 			atomic_dec(&c3_cpu_count);
614d8c71b6dSBob Moore 			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
61502df8b93SVenkatesh Pallipadi 		}
61602df8b93SVenkatesh Pallipadi 
61761331168SPavel Machek #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
618539eb11eSjohn stultz 		/* TSC halts in C3, so notify users */
619ddb25f9aSAndi Kleen 		if (tsc_halts_in_c(ACPI_STATE_C3))
6205a90cf20Sjohn stultz 			mark_tsc_unstable("TSC halts in C3");
621539eb11eSjohn stultz #endif
6222aa44d05SIngo Molnar 		/* Compute time (ticks) that we were actually asleep */
6232aa44d05SIngo Molnar 		sleep_ticks = ticks_elapsed(t1, t2);
6242aa44d05SIngo Molnar 		/* Tell the scheduler how much we idled: */
6252aa44d05SIngo Molnar 		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
6262aa44d05SIngo Molnar 
6271da177e4SLinus Torvalds 		/* Re-enable interrupts */
6281da177e4SLinus Torvalds 		local_irq_enable();
6292aa44d05SIngo Molnar 		/* Do not account our idle-switching overhead: */
6302aa44d05SIngo Molnar 		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;
6312aa44d05SIngo Molnar 
632495ab9c0SAndi Kleen 		current_thread_info()->status |= TS_POLLING;
633e9e2cdb4SThomas Gleixner 		acpi_state_timer_broadcast(pr, cx, 0);
6341da177e4SLinus Torvalds 		break;
6351da177e4SLinus Torvalds 
6361da177e4SLinus Torvalds 	default:
6371da177e4SLinus Torvalds 		local_irq_enable();
6381da177e4SLinus Torvalds 		return;
6391da177e4SLinus Torvalds 	}
640a3c6598fSDominik Brodowski 	cx->usage++;
641a3c6598fSDominik Brodowski 	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
642a3c6598fSDominik Brodowski 		cx->time += sleep_ticks;
6431da177e4SLinus Torvalds 
6441da177e4SLinus Torvalds 	next_state = pr->power.state;
6451da177e4SLinus Torvalds 
6461e483969SDavid Shaohua Li #ifdef CONFIG_HOTPLUG_CPU
6471e483969SDavid Shaohua Li 	/* Don't do promotion/demotion */
6481e483969SDavid Shaohua Li 	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
649cee324b1SAlexey Starikovskiy 	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
6501e483969SDavid Shaohua Li 		next_state = cx;
6511e483969SDavid Shaohua Li 		goto end;
6521e483969SDavid Shaohua Li 	}
6531e483969SDavid Shaohua Li #endif
6541e483969SDavid Shaohua Li 
6551da177e4SLinus Torvalds 	/*
6561da177e4SLinus Torvalds 	 * Promotion?
6571da177e4SLinus Torvalds 	 * ----------
6581da177e4SLinus Torvalds 	 * Track the number of long sleeps (time asleep greater than the threshold)
6591da177e4SLinus Torvalds 	 * and promote when the count threshold is reached.  Note that bus
6601da177e4SLinus Torvalds 	 * mastering activity may prevent promotions.
6611da177e4SLinus Torvalds 	 * Do not promote above max_cstate.
6621da177e4SLinus Torvalds 	 */
6631da177e4SLinus Torvalds 	if (cx->promotion.state &&
6641da177e4SLinus Torvalds 	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
6655c87579eSArjan van de Ven 		if (sleep_ticks > cx->promotion.threshold.ticks &&
666f011e2e2SMark Gross 		  cx->promotion.state->latency <=
667f011e2e2SMark Gross 				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
6681da177e4SLinus Torvalds 			cx->promotion.count++;
6691da177e4SLinus Torvalds 			cx->demotion.count = 0;
6704be44fcdSLen Brown 			if (cx->promotion.count >=
6714be44fcdSLen Brown 			    cx->promotion.threshold.count) {
6721da177e4SLinus Torvalds 				if (pr->flags.bm_check) {
6734be44fcdSLen Brown 					if (!
6744be44fcdSLen Brown 					    (pr->power.bm_activity & cx->
6754be44fcdSLen Brown 					     promotion.threshold.bm)) {
6764be44fcdSLen Brown 						next_state =
6774be44fcdSLen Brown 						    cx->promotion.state;
6781da177e4SLinus Torvalds 						goto end;
6791da177e4SLinus Torvalds 					}
6804be44fcdSLen Brown 				} else {
6811da177e4SLinus Torvalds 					next_state = cx->promotion.state;
6821da177e4SLinus Torvalds 					goto end;
6831da177e4SLinus Torvalds 				}
6841da177e4SLinus Torvalds 			}
6851da177e4SLinus Torvalds 		}
6861da177e4SLinus Torvalds 	}
6871da177e4SLinus Torvalds 
6881da177e4SLinus Torvalds 	/*
6891da177e4SLinus Torvalds 	 * Demotion?
6901da177e4SLinus Torvalds 	 * ---------
6911da177e4SLinus Torvalds 	 * Track the number of short sleeps (time asleep less than the threshold)
6921da177e4SLinus Torvalds 	 * and demote when the usage threshold is reached.
6931da177e4SLinus Torvalds 	 */
6941da177e4SLinus Torvalds 	if (cx->demotion.state) {
6951da177e4SLinus Torvalds 		if (sleep_ticks < cx->demotion.threshold.ticks) {
6961da177e4SLinus Torvalds 			cx->demotion.count++;
6971da177e4SLinus Torvalds 			cx->promotion.count = 0;
6981da177e4SLinus Torvalds 			if (cx->demotion.count >= cx->demotion.threshold.count) {
6991da177e4SLinus Torvalds 				next_state = cx->demotion.state;
7001da177e4SLinus Torvalds 				goto end;
7011da177e4SLinus Torvalds 			}
7021da177e4SLinus Torvalds 		}
7031da177e4SLinus Torvalds 	}
7041da177e4SLinus Torvalds 
7051da177e4SLinus Torvalds       end:
7061da177e4SLinus Torvalds 	/*
7071da177e4SLinus Torvalds 	 * Demote if current state exceeds max_cstate
7085c87579eSArjan van de Ven 	 * or if the latency of the current state is unacceptable
7091da177e4SLinus Torvalds 	 */
7105c87579eSArjan van de Ven 	if ((pr->power.state - pr->power.states) > max_cstate ||
711f011e2e2SMark Gross 		pr->power.state->latency >
712f011e2e2SMark Gross 				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
7131da177e4SLinus Torvalds 		if (cx->demotion.state)
7141da177e4SLinus Torvalds 			next_state = cx->demotion.state;
7151da177e4SLinus Torvalds 	}
7161da177e4SLinus Torvalds 
7171da177e4SLinus Torvalds 	/*
7181da177e4SLinus Torvalds 	 * New Cx State?
7191da177e4SLinus Torvalds 	 * -------------
7201da177e4SLinus Torvalds 	 * If we're going to start using a new Cx state we must clean up
7211da177e4SLinus Torvalds 	 * from the previous and prepare to use the new.
7221da177e4SLinus Torvalds 	 */
7231da177e4SLinus Torvalds 	if (next_state != pr->power.state)
7241da177e4SLinus Torvalds 		acpi_processor_power_activate(pr, next_state);
7251da177e4SLinus Torvalds }
7261da177e4SLinus Torvalds 
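/*
 * Build the default promotion/demotion chains: each valid state demotes to
 * the next shallower valid state and promotes to the next deeper one, with
 * thresholds derived from the state's latency ticks and, for C3, the
 * bus-master history mask.
 */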
7274be44fcdSLen Brown static int acpi_processor_set_power_policy(struct acpi_processor *pr)
7281da177e4SLinus Torvalds {
7291da177e4SLinus Torvalds 	unsigned int i;
7301da177e4SLinus Torvalds 	unsigned int state_is_set = 0;
7311da177e4SLinus Torvalds 	struct acpi_processor_cx *lower = NULL;
7321da177e4SLinus Torvalds 	struct acpi_processor_cx *higher = NULL;
7331da177e4SLinus Torvalds 	struct acpi_processor_cx *cx;
7341da177e4SLinus Torvalds 
7351da177e4SLinus Torvalds 
7361da177e4SLinus Torvalds 	if (!pr)
737d550d98dSPatrick Mochel 		return -EINVAL;
7381da177e4SLinus Torvalds 
7391da177e4SLinus Torvalds 	/*
7401da177e4SLinus Torvalds 	 * This function sets the default Cx state policy (OS idle handler).
7411da177e4SLinus Torvalds 	 * Our scheme is to promote quickly to C2 but more conservatively
7421da177e4SLinus Torvalds 	 * to C3.  We're favoring C2  for its characteristics of low latency
7431da177e4SLinus Torvalds 	 * to C3.  We favor C2 for its low latency
7441da177e4SLinus Torvalds 	 * mastering activity.  Note that the Cx state policy is completely
7451da177e4SLinus Torvalds 	 * customizable and can be altered dynamically.
7461da177e4SLinus Torvalds 	 */
7471da177e4SLinus Torvalds 
7481da177e4SLinus Torvalds 	/* startup state */
7491da177e4SLinus Torvalds 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
7501da177e4SLinus Torvalds 		cx = &pr->power.states[i];
7511da177e4SLinus Torvalds 		if (!cx->valid)
7521da177e4SLinus Torvalds 			continue;
7531da177e4SLinus Torvalds 
7541da177e4SLinus Torvalds 		if (!state_is_set)
7551da177e4SLinus Torvalds 			pr->power.state = cx;
7561da177e4SLinus Torvalds 		state_is_set++;
7571da177e4SLinus Torvalds 		break;
7581da177e4SLinus Torvalds 	}
7591da177e4SLinus Torvalds 
7601da177e4SLinus Torvalds 	if (!state_is_set)
761d550d98dSPatrick Mochel 		return -ENODEV;
7621da177e4SLinus Torvalds 
7631da177e4SLinus Torvalds 	/* demotion */
7641da177e4SLinus Torvalds 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
7651da177e4SLinus Torvalds 		cx = &pr->power.states[i];
7661da177e4SLinus Torvalds 		if (!cx->valid)
7671da177e4SLinus Torvalds 			continue;
7681da177e4SLinus Torvalds 
7691da177e4SLinus Torvalds 		if (lower) {
7701da177e4SLinus Torvalds 			cx->demotion.state = lower;
7711da177e4SLinus Torvalds 			cx->demotion.threshold.ticks = cx->latency_ticks;
7721da177e4SLinus Torvalds 			cx->demotion.threshold.count = 1;
7731da177e4SLinus Torvalds 			if (cx->type == ACPI_STATE_C3)
7741da177e4SLinus Torvalds 				cx->demotion.threshold.bm = bm_history;
7751da177e4SLinus Torvalds 		}
7761da177e4SLinus Torvalds 
7771da177e4SLinus Torvalds 		lower = cx;
7781da177e4SLinus Torvalds 	}
7791da177e4SLinus Torvalds 
7801da177e4SLinus Torvalds 	/* promotion */
7811da177e4SLinus Torvalds 	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
7821da177e4SLinus Torvalds 		cx = &pr->power.states[i];
7831da177e4SLinus Torvalds 		if (!cx->valid)
7841da177e4SLinus Torvalds 			continue;
7851da177e4SLinus Torvalds 
7861da177e4SLinus Torvalds 		if (higher) {
7871da177e4SLinus Torvalds 			cx->promotion.state = higher;
7881da177e4SLinus Torvalds 			cx->promotion.threshold.ticks = cx->latency_ticks;
7891da177e4SLinus Torvalds 			if (cx->type >= ACPI_STATE_C2)
7901da177e4SLinus Torvalds 				cx->promotion.threshold.count = 4;
7911da177e4SLinus Torvalds 			else
7921da177e4SLinus Torvalds 				cx->promotion.threshold.count = 10;
7931da177e4SLinus Torvalds 			if (higher->type == ACPI_STATE_C3)
7941da177e4SLinus Torvalds 				cx->promotion.threshold.bm = bm_history;
7951da177e4SLinus Torvalds 		}
7961da177e4SLinus Torvalds 
7971da177e4SLinus Torvalds 		higher = cx;
7981da177e4SLinus Torvalds 	}
7991da177e4SLinus Torvalds 
800d550d98dSPatrick Mochel 	return 0;
8011da177e4SLinus Torvalds }
8024f86d3a8SLen Brown #endif /* !CONFIG_CPU_IDLE */
8031da177e4SLinus Torvalds 
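/*
 * Derive C2 and C3 from the legacy P_BLK I/O block when _CST is not used:
 * the C2 and C3 entry ports live at P_BLK + 4 and P_BLK + 5, and their
 * worst-case latencies come from the FADT.
 */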
8041da177e4SLinus Torvalds static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
8051da177e4SLinus Torvalds {
8061da177e4SLinus Torvalds 
8071da177e4SLinus Torvalds 	if (!pr)
808d550d98dSPatrick Mochel 		return -EINVAL;
8091da177e4SLinus Torvalds 
8101da177e4SLinus Torvalds 	if (!pr->pblk)
811d550d98dSPatrick Mochel 		return -ENODEV;
8121da177e4SLinus Torvalds 
8131da177e4SLinus Torvalds 	/* if info is obtained from pblk/fadt, type equals state */
8141da177e4SLinus Torvalds 	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
8151da177e4SLinus Torvalds 	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
8161da177e4SLinus Torvalds 
8174c033552SVenkatesh Pallipadi #ifndef CONFIG_HOTPLUG_CPU
8184c033552SVenkatesh Pallipadi 	/*
8194c033552SVenkatesh Pallipadi 	 * Check for P_LVL2_UP flag before entering C2 and above on
8204c033552SVenkatesh Pallipadi 	 * an SMP system.
8214c033552SVenkatesh Pallipadi 	 */
822ad71860aSAlexey Starikovskiy 	if ((num_online_cpus() > 1) &&
823cee324b1SAlexey Starikovskiy 	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
824d550d98dSPatrick Mochel 		return -ENODEV;
8254c033552SVenkatesh Pallipadi #endif
8264c033552SVenkatesh Pallipadi 
8271da177e4SLinus Torvalds 	/* determine C2 and C3 address from pblk */
8281da177e4SLinus Torvalds 	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
8291da177e4SLinus Torvalds 	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
8301da177e4SLinus Torvalds 
8311da177e4SLinus Torvalds 	/* determine latencies from FADT */
832cee324b1SAlexey Starikovskiy 	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
833cee324b1SAlexey Starikovskiy 	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
8341da177e4SLinus Torvalds 
8351da177e4SLinus Torvalds 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
8361da177e4SLinus Torvalds 			  "lvl2[0x%08x] lvl3[0x%08x]\n",
8371da177e4SLinus Torvalds 			  pr->power.states[ACPI_STATE_C2].address,
8381da177e4SLinus Torvalds 			  pr->power.states[ACPI_STATE_C3].address));
8391da177e4SLinus Torvalds 
840d550d98dSPatrick Mochel 	return 0;
8411da177e4SLinus Torvalds }
8421da177e4SLinus Torvalds 
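/*
 * Guarantee that at least C1 is available (some buggy BIOSes do not list
 * C1 in _CST) and mark the C0 placeholder entry valid.
 */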
843991528d7SVenkatesh Pallipadi static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
844acf05f4bSVenkatesh Pallipadi {
845991528d7SVenkatesh Pallipadi 	if (!pr->power.states[ACPI_STATE_C1].valid) {
846cf824788SJanosch Machowinski 		/* set the first C-State to C1 */
847991528d7SVenkatesh Pallipadi 		/* all processors need to support C1 */
848acf05f4bSVenkatesh Pallipadi 		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
849acf05f4bSVenkatesh Pallipadi 		pr->power.states[ACPI_STATE_C1].valid = 1;
850991528d7SVenkatesh Pallipadi 	}
851991528d7SVenkatesh Pallipadi 	/* the C0 state only exists as a filler in our array */
852991528d7SVenkatesh Pallipadi 	pr->power.states[ACPI_STATE_C0].valid = 1;
853d550d98dSPatrick Mochel 	return 0;
854acf05f4bSVenkatesh Pallipadi }
855acf05f4bSVenkatesh Pallipadi 
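/*
 * Parse the _CST package: element 0 holds the number of C-states, and each
 * following sub-package is { register buffer, type, latency, power }.
 * Valid entries are copied into pr->power.states[]; if the BIOS does not
 * list C1 first, slot 1 is left free for the default C1 added later.
 */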
8561da177e4SLinus Torvalds static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
8571da177e4SLinus Torvalds {
8581da177e4SLinus Torvalds 	acpi_status status = 0;
8591da177e4SLinus Torvalds 	acpi_integer count;
860cf824788SJanosch Machowinski 	int current_count;
8611da177e4SLinus Torvalds 	int i;
8621da177e4SLinus Torvalds 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
8631da177e4SLinus Torvalds 	union acpi_object *cst;
8641da177e4SLinus Torvalds 
8651da177e4SLinus Torvalds 
8661da177e4SLinus Torvalds 	if (nocst)
867d550d98dSPatrick Mochel 		return -ENODEV;
8681da177e4SLinus Torvalds 
869991528d7SVenkatesh Pallipadi 	current_count = 0;
8701da177e4SLinus Torvalds 
8711da177e4SLinus Torvalds 	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
8721da177e4SLinus Torvalds 	if (ACPI_FAILURE(status)) {
8731da177e4SLinus Torvalds 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
874d550d98dSPatrick Mochel 		return -ENODEV;
8751da177e4SLinus Torvalds 	}
8761da177e4SLinus Torvalds 
87750dd0969SJan Engelhardt 	cst = buffer.pointer;
8781da177e4SLinus Torvalds 
8791da177e4SLinus Torvalds 	/* There must be at least 2 elements */
8801da177e4SLinus Torvalds 	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
8816468463aSLen Brown 		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
8821da177e4SLinus Torvalds 		status = -EFAULT;
8831da177e4SLinus Torvalds 		goto end;
8841da177e4SLinus Torvalds 	}
8851da177e4SLinus Torvalds 
8861da177e4SLinus Torvalds 	count = cst->package.elements[0].integer.value;
8871da177e4SLinus Torvalds 
8881da177e4SLinus Torvalds 	/* Validate number of power states. */
8891da177e4SLinus Torvalds 	if (count < 1 || count != cst->package.count - 1) {
8906468463aSLen Brown 		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
8911da177e4SLinus Torvalds 		status = -EFAULT;
8921da177e4SLinus Torvalds 		goto end;
8931da177e4SLinus Torvalds 	}
8941da177e4SLinus Torvalds 
8951da177e4SLinus Torvalds 	/* Tell driver that at least _CST is supported. */
8961da177e4SLinus Torvalds 	pr->flags.has_cst = 1;
8971da177e4SLinus Torvalds 
8981da177e4SLinus Torvalds 	for (i = 1; i <= count; i++) {
8991da177e4SLinus Torvalds 		union acpi_object *element;
9001da177e4SLinus Torvalds 		union acpi_object *obj;
9011da177e4SLinus Torvalds 		struct acpi_power_register *reg;
9021da177e4SLinus Torvalds 		struct acpi_processor_cx cx;
9031da177e4SLinus Torvalds 
9041da177e4SLinus Torvalds 		memset(&cx, 0, sizeof(cx));
9051da177e4SLinus Torvalds 
90650dd0969SJan Engelhardt 		element = &(cst->package.elements[i]);
9071da177e4SLinus Torvalds 		if (element->type != ACPI_TYPE_PACKAGE)
9081da177e4SLinus Torvalds 			continue;
9091da177e4SLinus Torvalds 
9101da177e4SLinus Torvalds 		if (element->package.count != 4)
9111da177e4SLinus Torvalds 			continue;
9121da177e4SLinus Torvalds 
91350dd0969SJan Engelhardt 		obj = &(element->package.elements[0]);
9141da177e4SLinus Torvalds 
9151da177e4SLinus Torvalds 		if (obj->type != ACPI_TYPE_BUFFER)
9161da177e4SLinus Torvalds 			continue;
9171da177e4SLinus Torvalds 
9181da177e4SLinus Torvalds 		reg = (struct acpi_power_register *)obj->buffer.pointer;
9191da177e4SLinus Torvalds 
9201da177e4SLinus Torvalds 		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
9211da177e4SLinus Torvalds 		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
9221da177e4SLinus Torvalds 			continue;
9231da177e4SLinus Torvalds 
9241da177e4SLinus Torvalds 		/* There should be an easy way to extract an integer... */
92550dd0969SJan Engelhardt 		obj = &(element->package.elements[1]);
9261da177e4SLinus Torvalds 		if (obj->type != ACPI_TYPE_INTEGER)
9271da177e4SLinus Torvalds 			continue;
9281da177e4SLinus Torvalds 
9291da177e4SLinus Torvalds 		cx.type = obj->integer.value;
930991528d7SVenkatesh Pallipadi 		/*
931991528d7SVenkatesh Pallipadi 		 * Some buggy BIOSes won't list C1 in _CST -
932991528d7SVenkatesh Pallipadi 		 * Let acpi_processor_get_power_info_default() handle them later
933991528d7SVenkatesh Pallipadi 		 */
934991528d7SVenkatesh Pallipadi 		if (i == 1 && cx.type != ACPI_STATE_C1)
935991528d7SVenkatesh Pallipadi 			current_count++;
9361da177e4SLinus Torvalds 
937991528d7SVenkatesh Pallipadi 		cx.address = reg->address;
938991528d7SVenkatesh Pallipadi 		cx.index = current_count + 1;
9391da177e4SLinus Torvalds 
940bc71bec9Svenkatesh.pallipadi@intel.com 		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
941991528d7SVenkatesh Pallipadi 		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
942991528d7SVenkatesh Pallipadi 			if (acpi_processor_ffh_cstate_probe
943991528d7SVenkatesh Pallipadi 					(pr->id, &cx, reg) == 0) {
944bc71bec9Svenkatesh.pallipadi@intel.com 				cx.entry_method = ACPI_CSTATE_FFH;
945bc71bec9Svenkatesh.pallipadi@intel.com 			} else if (cx.type == ACPI_STATE_C1) {
946991528d7SVenkatesh Pallipadi 				/*
947991528d7SVenkatesh Pallipadi 				 * C1 is a special case where FIXED_HARDWARE
948991528d7SVenkatesh Pallipadi 				 * can be handled in non-MWAIT way as well.
949991528d7SVenkatesh Pallipadi 				 * In that case, save this _CST entry info.
950991528d7SVenkatesh Pallipadi 				 * Otherwise, ignore this info and continue.
951991528d7SVenkatesh Pallipadi 				 */
952bc71bec9Svenkatesh.pallipadi@intel.com 				cx.entry_method = ACPI_CSTATE_HALT;
9534fcb2fcdSVenkatesh Pallipadi 				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
954bc71bec9Svenkatesh.pallipadi@intel.com 			} else {
9551da177e4SLinus Torvalds 				continue;
956991528d7SVenkatesh Pallipadi 			}
9574fcb2fcdSVenkatesh Pallipadi 		} else {
9584fcb2fcdSVenkatesh Pallipadi 			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
9594fcb2fcdSVenkatesh Pallipadi 				 cx.address);
960991528d7SVenkatesh Pallipadi 		}
9611da177e4SLinus Torvalds 
9624fcb2fcdSVenkatesh Pallipadi 
96350dd0969SJan Engelhardt 		obj = &(element->package.elements[2]);
9641da177e4SLinus Torvalds 		if (obj->type != ACPI_TYPE_INTEGER)
9651da177e4SLinus Torvalds 			continue;
9661da177e4SLinus Torvalds 
9671da177e4SLinus Torvalds 		cx.latency = obj->integer.value;
9681da177e4SLinus Torvalds 
96950dd0969SJan Engelhardt 		obj = &(element->package.elements[3]);
9701da177e4SLinus Torvalds 		if (obj->type != ACPI_TYPE_INTEGER)
9711da177e4SLinus Torvalds 			continue;
9721da177e4SLinus Torvalds 
9731da177e4SLinus Torvalds 		cx.power = obj->integer.value;
9741da177e4SLinus Torvalds 
975cf824788SJanosch Machowinski 		current_count++;
976cf824788SJanosch Machowinski 		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
977cf824788SJanosch Machowinski 
978cf824788SJanosch Machowinski 		/*
979cf824788SJanosch Machowinski 		 * We support a total of ACPI_PROCESSOR_MAX_POWER - 1 states
980cf824788SJanosch Machowinski 		 * (indices 1 through ACPI_PROCESSOR_MAX_POWER - 1).
981cf824788SJanosch Machowinski 		 */
982cf824788SJanosch Machowinski 		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
983cf824788SJanosch Machowinski 			printk(KERN_WARNING
984cf824788SJanosch Machowinski 			       "Limiting number of power states to max (%d)\n",
985cf824788SJanosch Machowinski 			       ACPI_PROCESSOR_MAX_POWER);
986cf824788SJanosch Machowinski 			printk(KERN_WARNING
987cf824788SJanosch Machowinski 			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
988cf824788SJanosch Machowinski 			break;
989cf824788SJanosch Machowinski 		}
9901da177e4SLinus Torvalds 	}
9911da177e4SLinus Torvalds 
9924be44fcdSLen Brown 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
993cf824788SJanosch Machowinski 			  current_count));
9941da177e4SLinus Torvalds 
9951da177e4SLinus Torvalds 	/* Validate number of power states discovered */
996cf824788SJanosch Machowinski 	if (current_count < 2)
9976d93c648SVenkatesh Pallipadi 		status = -EFAULT;
9981da177e4SLinus Torvalds 
9991da177e4SLinus Torvalds       end:
100002438d87SLen Brown 	kfree(buffer.pointer);
10011da177e4SLinus Torvalds 
1002d550d98dSPatrick Mochel 	return status;
10031da177e4SLinus Torvalds }
10041da177e4SLinus Torvalds 
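/*
 * Validate a C2 state: it needs an I/O address and a latency no greater
 * than ACPI_PROCESSOR_MAX_C2_LATENCY (100 us).
 */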
10051da177e4SLinus Torvalds static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
10061da177e4SLinus Torvalds {
10071da177e4SLinus Torvalds 
10081da177e4SLinus Torvalds 	if (!cx->address)
1009d550d98dSPatrick Mochel 		return;
10101da177e4SLinus Torvalds 
10111da177e4SLinus Torvalds 	/*
10121da177e4SLinus Torvalds 	 * C2 latency must be less than or equal to 100
10131da177e4SLinus Torvalds 	 * microseconds.
10141da177e4SLinus Torvalds 	 */
10151da177e4SLinus Torvalds 	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
10161da177e4SLinus Torvalds 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
10174be44fcdSLen Brown 				  "latency too large [%d]\n", cx->latency));
1018d550d98dSPatrick Mochel 		return;
10191da177e4SLinus Torvalds 	}
10201da177e4SLinus Torvalds 
10211da177e4SLinus Torvalds 	/*
10221da177e4SLinus Torvalds 	 * Otherwise we've met all of our C2 requirements.
10231da177e4SLinus Torvalds 	 * Normalize the C2 latency to expedite policy decisions.
10241da177e4SLinus Torvalds 	 */
10251da177e4SLinus Torvalds 	cx->valid = 1;
10264f86d3a8SLen Brown 
10274f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
10281da177e4SLinus Torvalds 	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
10294f86d3a8SLen Brown #else
10304f86d3a8SLen Brown 	cx->latency_ticks = cx->latency;
10314f86d3a8SLen Brown #endif
10321da177e4SLinus Torvalds 
1033d550d98dSPatrick Mochel 	return;
10341da177e4SLinus Torvalds }
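/*
 * A C2 state is accepted only if it has a non-zero address and a latency
 * within ACPI_PROCESSOR_MAX_C2_LATENCY (100 us).  latency_ticks is kept
 * in PM-timer ticks for the legacy idle loop and in plain microseconds
 * when CONFIG_CPU_IDLE is used.
 */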
10351da177e4SLinus Torvalds 
10364be44fcdSLen Brown static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
10371da177e4SLinus Torvalds 					   struct acpi_processor_cx *cx)
10381da177e4SLinus Torvalds {
103902df8b93SVenkatesh Pallipadi 	static int bm_check_flag;
104002df8b93SVenkatesh Pallipadi 
10411da177e4SLinus Torvalds 
10421da177e4SLinus Torvalds 	if (!cx->address)
1043d550d98dSPatrick Mochel 		return;
10441da177e4SLinus Torvalds 
10451da177e4SLinus Torvalds 	/*
10461da177e4SLinus Torvalds 	 * C3 latency must be less than or equal to 1000
10471da177e4SLinus Torvalds 	 * microseconds.
10481da177e4SLinus Torvalds 	 */
10491da177e4SLinus Torvalds 	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
10501da177e4SLinus Torvalds 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
10514be44fcdSLen Brown 				  "latency too large [%d]\n", cx->latency));
1052d550d98dSPatrick Mochel 		return;
10531da177e4SLinus Torvalds 	}
10541da177e4SLinus Torvalds 
10551da177e4SLinus Torvalds 	/*
10561da177e4SLinus Torvalds 	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
10571da177e4SLinus Torvalds 	 * DMA transfers are used by any ISA device to avoid livelock.
10581da177e4SLinus Torvalds 	 * Note that we could disable Type-F DMA (as recommended by
10591da177e4SLinus Torvalds 	 * the erratum), but this is known to disrupt certain ISA
10601da177e4SLinus Torvalds 	 * devices thus we take the conservative approach.
10611da177e4SLinus Torvalds 	 */
10621da177e4SLinus Torvalds 	else if (errata.piix4.fdma) {
10631da177e4SLinus Torvalds 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
10641da177e4SLinus Torvalds 				  "C3 not supported on PIIX4 with Type-F DMA\n"));
1065d550d98dSPatrick Mochel 		return;
10661da177e4SLinus Torvalds 	}
10671da177e4SLinus Torvalds 
106802df8b93SVenkatesh Pallipadi 	/* All the logic here assumes flags.bm_check is the same across all CPUs */
106902df8b93SVenkatesh Pallipadi 	if (!bm_check_flag) {
107002df8b93SVenkatesh Pallipadi 		/* Determine whether bm_check is needed based on CPU  */
107102df8b93SVenkatesh Pallipadi 		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
107202df8b93SVenkatesh Pallipadi 		bm_check_flag = pr->flags.bm_check;
107302df8b93SVenkatesh Pallipadi 	} else {
107402df8b93SVenkatesh Pallipadi 		pr->flags.bm_check = bm_check_flag;
107502df8b93SVenkatesh Pallipadi 	}
107602df8b93SVenkatesh Pallipadi 
107702df8b93SVenkatesh Pallipadi 	if (pr->flags.bm_check) {
107802df8b93SVenkatesh Pallipadi 		if (!pr->flags.bm_control) {
1079ed3110efSVenki Pallipadi 			if (pr->flags.has_cst != 1) {
1080ed3110efSVenki Pallipadi 				/* bus mastering control is necessary */
108102df8b93SVenkatesh Pallipadi 				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1082ed3110efSVenki Pallipadi 					"C3 support requires BM control\n"));
1083ed3110efSVenki Pallipadi 				return;
1084ed3110efSVenki Pallipadi 			} else {
1085ed3110efSVenki Pallipadi 				/* Here we enter C3 without bus mastering */
1086ed3110efSVenki Pallipadi 				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1087ed3110efSVenki Pallipadi 					"C3 support without BM control\n"));
1088ed3110efSVenki Pallipadi 			}
108902df8b93SVenkatesh Pallipadi 		}
109002df8b93SVenkatesh Pallipadi 	} else {
109102df8b93SVenkatesh Pallipadi 		/*
109202df8b93SVenkatesh Pallipadi 		 * WBINVD should be set in the FADT for the C3 state to be
109302df8b93SVenkatesh Pallipadi 		 * supported when bm_check is not required.
109402df8b93SVenkatesh Pallipadi 		 */
1095cee324b1SAlexey Starikovskiy 		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
109602df8b93SVenkatesh Pallipadi 			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
109702df8b93SVenkatesh Pallipadi 					  "Cache invalidation should work properly"
109802df8b93SVenkatesh Pallipadi 					  " for C3 to be enabled on SMP systems\n"));
1099d550d98dSPatrick Mochel 			return;
110002df8b93SVenkatesh Pallipadi 		}
1101d8c71b6dSBob Moore 		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
110202df8b93SVenkatesh Pallipadi 	}
110302df8b93SVenkatesh Pallipadi 
11041da177e4SLinus Torvalds 	/*
11051da177e4SLinus Torvalds 	 * Otherwise we've met all of our C3 requirements.
11061da177e4SLinus Torvalds 	 * Normalize the C3 latency to expedite policy.  Enable
11071da177e4SLinus Torvalds 	 * checking of bus mastering status (bm_check) so we can
11081da177e4SLinus Torvalds 	 * use this in our C3 policy
11091da177e4SLinus Torvalds 	 */
11101da177e4SLinus Torvalds 	cx->valid = 1;
11114f86d3a8SLen Brown 
11124f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
11131da177e4SLinus Torvalds 	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
11144f86d3a8SLen Brown #else
11154f86d3a8SLen Brown 	cx->latency_ticks = cx->latency;
11164f86d3a8SLen Brown #endif
11171da177e4SLinus Torvalds 
1118d550d98dSPatrick Mochel 	return;
11191da177e4SLinus Torvalds }
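/*
 * C3 validation above: the state needs a non-zero address, a latency within
 * ACPI_PROCESSOR_MAX_C3_LATENCY (1000 us), and no PIIX4 Type-F DMA erratum.
 * bm_check is probed once (static bm_check_flag) and then assumed identical
 * on every CPU.  With bm_check but no bm_control, C3 is only allowed when
 * the states came from _CST; without bm_check, the FADT must advertise
 * WBINVD and BM_RLD is cleared so that bus-master requests do not take the
 * CPU out of C3.
 */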
11201da177e4SLinus Torvalds 
11211da177e4SLinus Torvalds static int acpi_processor_power_verify(struct acpi_processor *pr)
11221da177e4SLinus Torvalds {
11231da177e4SLinus Torvalds 	unsigned int i;
11241da177e4SLinus Torvalds 	unsigned int working = 0;
11256eb0a0fdSVenkatesh Pallipadi 
1126169a0abbSThomas Gleixner 	pr->power.timer_broadcast_on_state = INT_MAX;
11276eb0a0fdSVenkatesh Pallipadi 
11281da177e4SLinus Torvalds 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
11291da177e4SLinus Torvalds 		struct acpi_processor_cx *cx = &pr->power.states[i];
11301da177e4SLinus Torvalds 
11311da177e4SLinus Torvalds 		switch (cx->type) {
11321da177e4SLinus Torvalds 		case ACPI_STATE_C1:
11331da177e4SLinus Torvalds 			cx->valid = 1;
11341da177e4SLinus Torvalds 			break;
11351da177e4SLinus Torvalds 
11361da177e4SLinus Torvalds 		case ACPI_STATE_C2:
11371da177e4SLinus Torvalds 			acpi_processor_power_verify_c2(cx);
1138296d93cdSLinus Torvalds 			if (cx->valid)
1139169a0abbSThomas Gleixner 				acpi_timer_check_state(i, pr, cx);
11401da177e4SLinus Torvalds 			break;
11411da177e4SLinus Torvalds 
11421da177e4SLinus Torvalds 		case ACPI_STATE_C3:
11431da177e4SLinus Torvalds 			acpi_processor_power_verify_c3(pr, cx);
1144296d93cdSLinus Torvalds 			if (cx->valid)
1145169a0abbSThomas Gleixner 				acpi_timer_check_state(i, pr, cx);
11461da177e4SLinus Torvalds 			break;
11471da177e4SLinus Torvalds 		}
11481da177e4SLinus Torvalds 
11491da177e4SLinus Torvalds 		if (cx->valid)
11501da177e4SLinus Torvalds 			working++;
11511da177e4SLinus Torvalds 	}
11521da177e4SLinus Torvalds 
1153169a0abbSThomas Gleixner 	acpi_propagate_timer_broadcast(pr);
1154bd663347SAndi Kleen 
11551da177e4SLinus Torvalds 	return (working);
11561da177e4SLinus Torvalds }
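/*
 * acpi_processor_power_verify() treats C1 as always usable, runs the C2/C3
 * checks above for the deeper states, and calls acpi_timer_check_state()
 * for each valid C2/C3 state, presumably recording the shallowest state in
 * which the local timer stops so that acpi_propagate_timer_broadcast() can
 * arrange broadcast timer events.  The return value is the number of
 * usable states.
 */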
11571da177e4SLinus Torvalds 
11584be44fcdSLen Brown static int acpi_processor_get_power_info(struct acpi_processor *pr)
11591da177e4SLinus Torvalds {
11601da177e4SLinus Torvalds 	unsigned int i;
11611da177e4SLinus Torvalds 	int result;
11621da177e4SLinus Torvalds 
11631da177e4SLinus Torvalds 
11641da177e4SLinus Torvalds 	/* NOTE: the idle thread may not be running while calling
11651da177e4SLinus Torvalds 	 * this function */
11661da177e4SLinus Torvalds 
1167991528d7SVenkatesh Pallipadi 	/* Zero initialize all the C-states info. */
1168991528d7SVenkatesh Pallipadi 	memset(pr->power.states, 0, sizeof(pr->power.states));
1169991528d7SVenkatesh Pallipadi 
11701da177e4SLinus Torvalds 	result = acpi_processor_get_power_info_cst(pr);
11716d93c648SVenkatesh Pallipadi 	if (result == -ENODEV)
1172c5a114f1SDarrick J. Wong 		result = acpi_processor_get_power_info_fadt(pr);
11736d93c648SVenkatesh Pallipadi 
1174991528d7SVenkatesh Pallipadi 	if (result)
1175991528d7SVenkatesh Pallipadi 		return result;
1176991528d7SVenkatesh Pallipadi 
1177991528d7SVenkatesh Pallipadi 	acpi_processor_get_power_info_default(pr);
1178991528d7SVenkatesh Pallipadi 
1179cf824788SJanosch Machowinski 	pr->power.count = acpi_processor_power_verify(pr);
11801da177e4SLinus Torvalds 
11814f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
11821da177e4SLinus Torvalds 	/*
11831da177e4SLinus Torvalds 	 * Set Default Policy
11841da177e4SLinus Torvalds 	 * ------------------
11851da177e4SLinus Torvalds 	 * Now that we know which states are supported, set the default
11861da177e4SLinus Torvalds 	 * policy.  Note that this policy can be changed dynamically
11871da177e4SLinus Torvalds 	 * (e.g. encourage deeper sleeps to conserve battery life when
11881da177e4SLinus Torvalds 	 * not on AC).
11891da177e4SLinus Torvalds 	 */
11901da177e4SLinus Torvalds 	result = acpi_processor_set_power_policy(pr);
11911da177e4SLinus Torvalds 	if (result)
1192d550d98dSPatrick Mochel 		return result;
11934f86d3a8SLen Brown #endif
11941da177e4SLinus Torvalds 
11951da177e4SLinus Torvalds 	/*
11961da177e4SLinus Torvalds 	 * if one state of type C2 or C3 is available, mark this
11971da177e4SLinus Torvalds 	 * CPU as being "idle manageable"
11981da177e4SLinus Torvalds 	 */
11991da177e4SLinus Torvalds 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
1200acf05f4bSVenkatesh Pallipadi 		if (pr->power.states[i].valid) {
12011da177e4SLinus Torvalds 			pr->power.count = i;
12022203d6edSLinus Torvalds 			if (pr->power.states[i].type >= ACPI_STATE_C2)
12031da177e4SLinus Torvalds 				pr->flags.power = 1;
12041da177e4SLinus Torvalds 		}
1205acf05f4bSVenkatesh Pallipadi 	}
12061da177e4SLinus Torvalds 
1207d550d98dSPatrick Mochel 	return 0;
12081da177e4SLinus Torvalds }
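/*
 * Order of discovery in acpi_processor_get_power_info(): _CST first, the
 * FADT values only as a fallback on -ENODEV, then the built-in defaults,
 * then verification.  flags.power is set only if at least one valid state
 * of type C2 or deeper survived, which is what gates idle registration in
 * acpi_processor_power_init().
 */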
12091da177e4SLinus Torvalds 
12101da177e4SLinus Torvalds static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
12111da177e4SLinus Torvalds {
121250dd0969SJan Engelhardt 	struct acpi_processor *pr = seq->private;
12131da177e4SLinus Torvalds 	unsigned int i;
12141da177e4SLinus Torvalds 
12151da177e4SLinus Torvalds 
12161da177e4SLinus Torvalds 	if (!pr)
12171da177e4SLinus Torvalds 		goto end;
12181da177e4SLinus Torvalds 
12191da177e4SLinus Torvalds 	seq_printf(seq, "active state:            C%zd\n"
12201da177e4SLinus Torvalds 		   "max_cstate:              C%d\n"
12215c87579eSArjan van de Ven 		   "bus master activity:     %08x\n"
12225c87579eSArjan van de Ven 		   "maximum allowed latency: %d usec\n",
12231da177e4SLinus Torvalds 		   pr->power.state ? pr->power.state - pr->power.states : 0,
12245c87579eSArjan van de Ven 		   max_cstate, (unsigned)pr->power.bm_activity,
1225f011e2e2SMark Gross 		   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));
12261da177e4SLinus Torvalds 
12271da177e4SLinus Torvalds 	seq_puts(seq, "states:\n");
12281da177e4SLinus Torvalds 
12291da177e4SLinus Torvalds 	for (i = 1; i <= pr->power.count; i++) {
12301da177e4SLinus Torvalds 		seq_printf(seq, "   %cC%d:                  ",
12314be44fcdSLen Brown 			   (&pr->power.states[i] ==
12324be44fcdSLen Brown 			    pr->power.state ? '*' : ' '), i);
12331da177e4SLinus Torvalds 
12341da177e4SLinus Torvalds 		if (!pr->power.states[i].valid) {
12351da177e4SLinus Torvalds 			seq_puts(seq, "<not supported>\n");
12361da177e4SLinus Torvalds 			continue;
12371da177e4SLinus Torvalds 		}
12381da177e4SLinus Torvalds 
12391da177e4SLinus Torvalds 		switch (pr->power.states[i].type) {
12401da177e4SLinus Torvalds 		case ACPI_STATE_C1:
12411da177e4SLinus Torvalds 			seq_printf(seq, "type[C1] ");
12421da177e4SLinus Torvalds 			break;
12431da177e4SLinus Torvalds 		case ACPI_STATE_C2:
12441da177e4SLinus Torvalds 			seq_printf(seq, "type[C2] ");
12451da177e4SLinus Torvalds 			break;
12461da177e4SLinus Torvalds 		case ACPI_STATE_C3:
12471da177e4SLinus Torvalds 			seq_printf(seq, "type[C3] ");
12481da177e4SLinus Torvalds 			break;
12491da177e4SLinus Torvalds 		default:
12501da177e4SLinus Torvalds 			seq_printf(seq, "type[--] ");
12511da177e4SLinus Torvalds 			break;
12521da177e4SLinus Torvalds 		}
12531da177e4SLinus Torvalds 
12541da177e4SLinus Torvalds 		if (pr->power.states[i].promotion.state)
12551da177e4SLinus Torvalds 			seq_printf(seq, "promotion[C%zd] ",
12561da177e4SLinus Torvalds 				   (pr->power.states[i].promotion.state -
12571da177e4SLinus Torvalds 				    pr->power.states));
12581da177e4SLinus Torvalds 		else
12591da177e4SLinus Torvalds 			seq_puts(seq, "promotion[--] ");
12601da177e4SLinus Torvalds 
12611da177e4SLinus Torvalds 		if (pr->power.states[i].demotion.state)
12621da177e4SLinus Torvalds 			seq_printf(seq, "demotion[C%zd] ",
12631da177e4SLinus Torvalds 				   (pr->power.states[i].demotion.state -
12641da177e4SLinus Torvalds 				    pr->power.states));
12651da177e4SLinus Torvalds 		else
12661da177e4SLinus Torvalds 			seq_puts(seq, "demotion[--] ");
12671da177e4SLinus Torvalds 
1268a3c6598fSDominik Brodowski 		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
12691da177e4SLinus Torvalds 			   pr->power.states[i].latency,
1270a3c6598fSDominik Brodowski 			   pr->power.states[i].usage,
1271b0b7eaafSAlexey Starikovskiy 			   (unsigned long long)pr->power.states[i].time);
12721da177e4SLinus Torvalds 	}
12731da177e4SLinus Torvalds 
12741da177e4SLinus Torvalds       end:
1275d550d98dSPatrick Mochel 	return 0;
12761da177e4SLinus Torvalds }
12771da177e4SLinus Torvalds 
12781da177e4SLinus Torvalds static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
12791da177e4SLinus Torvalds {
12801da177e4SLinus Torvalds 	return single_open(file, acpi_processor_power_seq_show,
12811da177e4SLinus Torvalds 			   PDE(inode)->data);
12821da177e4SLinus Torvalds }
12831da177e4SLinus Torvalds 
1284d7508032SArjan van de Ven static const struct file_operations acpi_processor_power_fops = {
12851da177e4SLinus Torvalds 	.open = acpi_processor_power_open_fs,
12861da177e4SLinus Torvalds 	.read = seq_read,
12871da177e4SLinus Torvalds 	.llseek = seq_lseek,
12881da177e4SLinus Torvalds 	.release = single_release,
12891da177e4SLinus Torvalds };
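/*
 * These file operations back the 'power' entry created in
 * acpi_processor_power_init() below (typically visible as
 * /proc/acpi/processor/CPUn/power); reading it dumps the per-state table
 * produced by acpi_processor_power_seq_show().
 */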
12901da177e4SLinus Torvalds 
12914f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
12924f86d3a8SLen Brown 
12934f86d3a8SLen Brown int acpi_processor_cst_has_changed(struct acpi_processor *pr)
12944f86d3a8SLen Brown {
12954f86d3a8SLen Brown 	int result = 0;
12964f86d3a8SLen Brown 
12974f86d3a8SLen Brown 
12984f86d3a8SLen Brown 	if (!pr)
12994f86d3a8SLen Brown 		return -EINVAL;
13004f86d3a8SLen Brown 
13014f86d3a8SLen Brown 	if (nocst) {
13024f86d3a8SLen Brown 		return -ENODEV;
13034f86d3a8SLen Brown 	}
13044f86d3a8SLen Brown 
13054f86d3a8SLen Brown 	if (!pr->flags.power_setup_done)
13064f86d3a8SLen Brown 		return -ENODEV;
13074f86d3a8SLen Brown 
13084f86d3a8SLen Brown 	/* Fall back to the default idle loop */
13094f86d3a8SLen Brown 	pm_idle = pm_idle_save;
13104f86d3a8SLen Brown 	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */
13114f86d3a8SLen Brown 
13124f86d3a8SLen Brown 	pr->flags.power = 0;
13134f86d3a8SLen Brown 	result = acpi_processor_get_power_info(pr);
13144f86d3a8SLen Brown 	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
13154f86d3a8SLen Brown 		pm_idle = acpi_processor_idle;
13164f86d3a8SLen Brown 
13174f86d3a8SLen Brown 	return result;
13184f86d3a8SLen Brown }
13194f86d3a8SLen Brown 
13201fec74a9SAndrew Morton #ifdef CONFIG_SMP
13215c87579eSArjan van de Ven static void smp_callback(void *v)
13225c87579eSArjan van de Ven {
13235c87579eSArjan van de Ven 	/* we already woke the CPU up, nothing more to do */
13245c87579eSArjan van de Ven }
13255c87579eSArjan van de Ven 
13265c87579eSArjan van de Ven /*
13275c87579eSArjan van de Ven  * This function gets called when a part of the kernel has a new latency
13285c87579eSArjan van de Ven  * requirement.  This means we need to get all processors out of their C-state,
13295c87579eSArjan van de Ven  * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
13305c87579eSArjan van de Ven  * wakes them all right up.
13315c87579eSArjan van de Ven  */
13325c87579eSArjan van de Ven static int acpi_processor_latency_notify(struct notifier_block *b,
13335c87579eSArjan van de Ven 		unsigned long l, void *v)
13345c87579eSArjan van de Ven {
13355c87579eSArjan van de Ven 	smp_call_function(smp_callback, NULL, 0, 1);
13365c87579eSArjan van de Ven 	return NOTIFY_OK;
13375c87579eSArjan van de Ven }
13385c87579eSArjan van de Ven 
13395c87579eSArjan van de Ven static struct notifier_block acpi_processor_latency_notifier = {
13405c87579eSArjan van de Ven 	.notifier_call = acpi_processor_latency_notify,
13415c87579eSArjan van de Ven };
13424f86d3a8SLen Brown 
13431fec74a9SAndrew Morton #endif
13445c87579eSArjan van de Ven 
13454f86d3a8SLen Brown #else /* CONFIG_CPU_IDLE */
13464f86d3a8SLen Brown 
13474f86d3a8SLen Brown /**
13484f86d3a8SLen Brown  * acpi_idle_bm_check - checks if bus master activity was detected
13494f86d3a8SLen Brown  */
13504f86d3a8SLen Brown static int acpi_idle_bm_check(void)
13514f86d3a8SLen Brown {
13524f86d3a8SLen Brown 	u32 bm_status = 0;
13534f86d3a8SLen Brown 
13544f86d3a8SLen Brown 	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
13554f86d3a8SLen Brown 	if (bm_status)
13564f86d3a8SLen Brown 		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
13574f86d3a8SLen Brown 	/*
13584f86d3a8SLen Brown 	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
13594f86d3a8SLen Brown 	 * the true state of bus mastering activity, forcing us to
13604f86d3a8SLen Brown 	 * manually check the BMIDEA bit of each IDE channel.
13614f86d3a8SLen Brown 	 */
13624f86d3a8SLen Brown 	else if (errata.piix4.bmisx) {
13634f86d3a8SLen Brown 		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
13644f86d3a8SLen Brown 		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
13654f86d3a8SLen Brown 			bm_status = 1;
13664f86d3a8SLen Brown 	}
13674f86d3a8SLen Brown 	return bm_status;
13684f86d3a8SLen Brown }
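/*
 * acpi_idle_bm_check() returns non-zero when BM_STS (or, on PIIX4 parts
 * with the BMISX erratum, the per-channel BMIDEA bits) indicates recent
 * bus-master activity.  Writing 1 back to BM_STS clears it, since ACPI
 * status bits are write-one-to-clear.
 */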
13694f86d3a8SLen Brown 
13704f86d3a8SLen Brown /**
13714f86d3a8SLen Brown  * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
13724f86d3a8SLen Brown  * @pr: the processor
13734f86d3a8SLen Brown  * @target: the new target state
13744f86d3a8SLen Brown  */
13754f86d3a8SLen Brown static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
13764f86d3a8SLen Brown 					   struct acpi_processor_cx *target)
13774f86d3a8SLen Brown {
13784f86d3a8SLen Brown 	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
13794f86d3a8SLen Brown 		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
13804f86d3a8SLen Brown 		pr->flags.bm_rld_set = 0;
13814f86d3a8SLen Brown 	}
13824f86d3a8SLen Brown 
13834f86d3a8SLen Brown 	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
13844f86d3a8SLen Brown 		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
13854f86d3a8SLen Brown 		pr->flags.bm_rld_set = 1;
13864f86d3a8SLen Brown 	}
13874f86d3a8SLen Brown }
13884f86d3a8SLen Brown 
13894f86d3a8SLen Brown /**
13904f86d3a8SLen Brown  * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
13914f86d3a8SLen Brown  * @cx: cstate data
1392bc71bec9Svenkatesh.pallipadi@intel.com  *
1393bc71bec9Svenkatesh.pallipadi@intel.com  * Caller disables interrupt before call and enables interrupt after return.
13944f86d3a8SLen Brown  */
13954f86d3a8SLen Brown static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
13964f86d3a8SLen Brown {
1397bc71bec9Svenkatesh.pallipadi@intel.com 	if (cx->entry_method == ACPI_CSTATE_FFH) {
13984f86d3a8SLen Brown 		/* Call into architectural FFH based C-state */
13994f86d3a8SLen Brown 		acpi_processor_ffh_cstate_enter(cx);
1400bc71bec9Svenkatesh.pallipadi@intel.com 	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
1401bc71bec9Svenkatesh.pallipadi@intel.com 		acpi_safe_halt();
14024f86d3a8SLen Brown 	} else {
14034f86d3a8SLen Brown 		int unused;
14044f86d3a8SLen Brown 		/* IO port based C-state */
14054f86d3a8SLen Brown 		inb(cx->address);
14064f86d3a8SLen Brown 		/* Dummy wait op - must do something useless after P_LVL2 read
14074f86d3a8SLen Brown 		   because chipsets cannot guarantee that STPCLK# signal
14084f86d3a8SLen Brown 		   gets asserted in time to freeze execution properly. */
14094f86d3a8SLen Brown 		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
14104f86d3a8SLen Brown 	}
14114f86d3a8SLen Brown }
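/*
 * Entry method dispatch above: ACPI_CSTATE_FFH goes through the
 * architectural fixed-function hardware path (typically MWAIT on x86),
 * ACPI_CSTATE_HALT uses acpi_safe_halt(), and everything else is an I/O
 * port read, with the dummy PM-timer read satisfying the chipset
 * requirement described in the comment above.
 */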
14124f86d3a8SLen Brown 
14134f86d3a8SLen Brown /**
14144f86d3a8SLen Brown  * acpi_idle_enter_c1 - enters an ACPI C1 state-type
14154f86d3a8SLen Brown  * @dev: the target CPU
14164f86d3a8SLen Brown  * @state: the state data
14174f86d3a8SLen Brown  *
14184f86d3a8SLen Brown  * This is equivalent to the HALT instruction.
14194f86d3a8SLen Brown  */
14204f86d3a8SLen Brown static int acpi_idle_enter_c1(struct cpuidle_device *dev,
14214f86d3a8SLen Brown 			      struct cpuidle_state *state)
14224f86d3a8SLen Brown {
14239b12e18cSvenkatesh.pallipadi@intel.com 	u32 t1, t2;
14244f86d3a8SLen Brown 	struct acpi_processor *pr;
14254f86d3a8SLen Brown 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
14269b12e18cSvenkatesh.pallipadi@intel.com 
14274f86d3a8SLen Brown 	pr = processors[smp_processor_id()];
14284f86d3a8SLen Brown 
14294f86d3a8SLen Brown 	if (unlikely(!pr))
14304f86d3a8SLen Brown 		return 0;
14314f86d3a8SLen Brown 
14322e906655Svenkatesh.pallipadi@intel.com 	local_irq_disable();
1433b077fbadSVenkatesh Pallipadi 
1434b077fbadSVenkatesh Pallipadi 	/* Do not access any ACPI IO ports in suspend path */
1435b077fbadSVenkatesh Pallipadi 	if (acpi_idle_suspend) {
1436b077fbadSVenkatesh Pallipadi 		acpi_safe_halt();
1437b077fbadSVenkatesh Pallipadi 		local_irq_enable();
1438b077fbadSVenkatesh Pallipadi 		return 0;
1439b077fbadSVenkatesh Pallipadi 	}
1440b077fbadSVenkatesh Pallipadi 
14414f86d3a8SLen Brown 	if (pr->flags.bm_check)
14424f86d3a8SLen Brown 		acpi_idle_update_bm_rld(pr, cx);
14434f86d3a8SLen Brown 
14449b12e18cSvenkatesh.pallipadi@intel.com 	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1445bc71bec9Svenkatesh.pallipadi@intel.com 	acpi_idle_do_entry(cx);
14469b12e18cSvenkatesh.pallipadi@intel.com 	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
14474f86d3a8SLen Brown 
14482e906655Svenkatesh.pallipadi@intel.com 	local_irq_enable();
14494f86d3a8SLen Brown 	cx->usage++;
14504f86d3a8SLen Brown 
14519b12e18cSvenkatesh.pallipadi@intel.com 	return ticks_elapsed_in_us(t1, t2);
14524f86d3a8SLen Brown }
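/*
 * acpi_idle_enter_c1() brackets the entry with two PM-timer reads and
 * reports the residency in microseconds via ticks_elapsed_in_us().  In the
 * suspend path it avoids all ACPI I/O and simply halts, returning 0.
 */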
14534f86d3a8SLen Brown 
14544f86d3a8SLen Brown /**
14554f86d3a8SLen Brown  * acpi_idle_enter_simple - enters an ACPI state without BM handling
14564f86d3a8SLen Brown  * @dev: the target CPU
14574f86d3a8SLen Brown  * @state: the state data
14584f86d3a8SLen Brown  */
14594f86d3a8SLen Brown static int acpi_idle_enter_simple(struct cpuidle_device *dev,
14604f86d3a8SLen Brown 				  struct cpuidle_state *state)
14614f86d3a8SLen Brown {
14624f86d3a8SLen Brown 	struct acpi_processor *pr;
14634f86d3a8SLen Brown 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
14644f86d3a8SLen Brown 	u32 t1, t2;
146550629118SVenkatesh Pallipadi 	int sleep_ticks = 0;
146650629118SVenkatesh Pallipadi 
14674f86d3a8SLen Brown 	pr = processors[smp_processor_id()];
14684f86d3a8SLen Brown 
14694f86d3a8SLen Brown 	if (unlikely(!pr))
14704f86d3a8SLen Brown 		return 0;
14714f86d3a8SLen Brown 
1472e196441bSLen Brown 	if (acpi_idle_suspend)
1473e196441bSLen Brown 		return(acpi_idle_enter_c1(dev, state));
1474e196441bSLen Brown 
14754f86d3a8SLen Brown 	local_irq_disable();
14764f86d3a8SLen Brown 	current_thread_info()->status &= ~TS_POLLING;
14774f86d3a8SLen Brown 	/*
14784f86d3a8SLen Brown 	 * TS_POLLING-cleared state must be visible before we test
14794f86d3a8SLen Brown 	 * NEED_RESCHED:
14804f86d3a8SLen Brown 	 */
14814f86d3a8SLen Brown 	smp_mb();
14824f86d3a8SLen Brown 
14834f86d3a8SLen Brown 	if (unlikely(need_resched())) {
14844f86d3a8SLen Brown 		current_thread_info()->status |= TS_POLLING;
14854f86d3a8SLen Brown 		local_irq_enable();
14864f86d3a8SLen Brown 		return 0;
14874f86d3a8SLen Brown 	}
14884f86d3a8SLen Brown 
1489e17bcb43SThomas Gleixner 	/*
1490e17bcb43SThomas Gleixner 	 * Must be done before busmaster disable as we might need to
1491e17bcb43SThomas Gleixner 	 * access HPET !
1492e17bcb43SThomas Gleixner 	 */
1493e17bcb43SThomas Gleixner 	acpi_state_timer_broadcast(pr, cx, 1);
1494e17bcb43SThomas Gleixner 
1495e17bcb43SThomas Gleixner 	if (pr->flags.bm_check)
1496e17bcb43SThomas Gleixner 		acpi_idle_update_bm_rld(pr, cx);
1497e17bcb43SThomas Gleixner 
14984f86d3a8SLen Brown 	if (cx->type == ACPI_STATE_C3)
14994f86d3a8SLen Brown 		ACPI_FLUSH_CPU_CACHE();
15004f86d3a8SLen Brown 
15014f86d3a8SLen Brown 	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
150250629118SVenkatesh Pallipadi 	/* Tell the scheduler that we are going deep-idle: */
150350629118SVenkatesh Pallipadi 	sched_clock_idle_sleep_event();
15044f86d3a8SLen Brown 	acpi_idle_do_entry(cx);
15054f86d3a8SLen Brown 	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
15064f86d3a8SLen Brown 
150761331168SPavel Machek #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
15084f86d3a8SLen Brown 	/* TSC could halt in idle, so notify users */
1509ddb25f9aSAndi Kleen 	if (tsc_halts_in_c(cx->type))
15104f86d3a8SLen Brown 		mark_tsc_unstable("TSC halts in idle");
15114f86d3a8SLen Brown #endif
151250629118SVenkatesh Pallipadi 	sleep_ticks = ticks_elapsed(t1, t2);
151350629118SVenkatesh Pallipadi 
151450629118SVenkatesh Pallipadi 	/* Tell the scheduler how much we idled: */
151550629118SVenkatesh Pallipadi 	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
15164f86d3a8SLen Brown 
15174f86d3a8SLen Brown 	local_irq_enable();
15184f86d3a8SLen Brown 	current_thread_info()->status |= TS_POLLING;
15194f86d3a8SLen Brown 
15204f86d3a8SLen Brown 	cx->usage++;
15214f86d3a8SLen Brown 
15224f86d3a8SLen Brown 	acpi_state_timer_broadcast(pr, cx, 0);
152350629118SVenkatesh Pallipadi 	cx->time += sleep_ticks;
15244f86d3a8SLen Brown 	return ticks_elapsed_in_us(t1, t2);
15254f86d3a8SLen Brown }
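/*
 * acpi_idle_enter_simple() clears TS_POLLING (with the memory barrier and
 * need_resched() recheck above), arms the broadcast timer (and BM_RLD when
 * bm_check is set) before entry, flushes caches for C3-type states, and
 * accounts the residency both to the scheduler (sched_clock_idle_*) and to
 * cx->time in PM-timer ticks, while the cpuidle return value is in
 * microseconds.
 */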
15264f86d3a8SLen Brown 
15274f86d3a8SLen Brown static int c3_cpu_count;
15284f86d3a8SLen Brown static DEFINE_SPINLOCK(c3_lock);
15294f86d3a8SLen Brown 
15304f86d3a8SLen Brown /**
15314f86d3a8SLen Brown  * acpi_idle_enter_bm - enters C3 with proper BM handling
15324f86d3a8SLen Brown  * @dev: the target CPU
15334f86d3a8SLen Brown  * @state: the state data
15344f86d3a8SLen Brown  *
15354f86d3a8SLen Brown  * If BM is detected, the deepest non-C3 idle state is entered instead.
15364f86d3a8SLen Brown  */
15374f86d3a8SLen Brown static int acpi_idle_enter_bm(struct cpuidle_device *dev,
15384f86d3a8SLen Brown 			      struct cpuidle_state *state)
15394f86d3a8SLen Brown {
15404f86d3a8SLen Brown 	struct acpi_processor *pr;
15414f86d3a8SLen Brown 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
15424f86d3a8SLen Brown 	u32 t1, t2;
154350629118SVenkatesh Pallipadi 	int sleep_ticks = 0;
154450629118SVenkatesh Pallipadi 
15454f86d3a8SLen Brown 	pr = processors[smp_processor_id()];
15464f86d3a8SLen Brown 
15474f86d3a8SLen Brown 	if (unlikely(!pr))
15484f86d3a8SLen Brown 		return 0;
15494f86d3a8SLen Brown 
1550e196441bSLen Brown 	if (acpi_idle_suspend)
1551e196441bSLen Brown 		return(acpi_idle_enter_c1(dev, state));
1552e196441bSLen Brown 
1553ddc081a1SVenkatesh Pallipadi 	if (acpi_idle_bm_check()) {
1554ddc081a1SVenkatesh Pallipadi 		if (dev->safe_state) {
1555ddc081a1SVenkatesh Pallipadi 			return dev->safe_state->enter(dev, dev->safe_state);
1556ddc081a1SVenkatesh Pallipadi 		} else {
15572e906655Svenkatesh.pallipadi@intel.com 			local_irq_disable();
1558ddc081a1SVenkatesh Pallipadi 			acpi_safe_halt();
15592e906655Svenkatesh.pallipadi@intel.com 			local_irq_enable();
1560ddc081a1SVenkatesh Pallipadi 			return 0;
1561ddc081a1SVenkatesh Pallipadi 		}
1562ddc081a1SVenkatesh Pallipadi 	}
1563ddc081a1SVenkatesh Pallipadi 
15644f86d3a8SLen Brown 	local_irq_disable();
15654f86d3a8SLen Brown 	current_thread_info()->status &= ~TS_POLLING;
15664f86d3a8SLen Brown 	/*
15674f86d3a8SLen Brown 	 * TS_POLLING-cleared state must be visible before we test
15684f86d3a8SLen Brown 	 * NEED_RESCHED:
15694f86d3a8SLen Brown 	 */
15704f86d3a8SLen Brown 	smp_mb();
15714f86d3a8SLen Brown 
15724f86d3a8SLen Brown 	if (unlikely(need_resched())) {
15734f86d3a8SLen Brown 		current_thread_info()->status |= TS_POLLING;
15744f86d3a8SLen Brown 		local_irq_enable();
15754f86d3a8SLen Brown 		return 0;
15764f86d3a8SLen Brown 	}
15774f86d3a8SLen Brown 
1578996520c1SVenki Pallipadi 	acpi_unlazy_tlb(smp_processor_id());
1579996520c1SVenki Pallipadi 
158050629118SVenkatesh Pallipadi 	/* Tell the scheduler that we are going deep-idle: */
158150629118SVenkatesh Pallipadi 	sched_clock_idle_sleep_event();
15824f86d3a8SLen Brown 	/*
15834f86d3a8SLen Brown 	 * Must be done before busmaster disable as we might need to
15844f86d3a8SLen Brown 	 * access HPET !
15854f86d3a8SLen Brown 	 */
15864f86d3a8SLen Brown 	acpi_state_timer_broadcast(pr, cx, 1);
15874f86d3a8SLen Brown 
15884f86d3a8SLen Brown 	acpi_idle_update_bm_rld(pr, cx);
15894f86d3a8SLen Brown 
1590c9c860e5SVenkatesh Pallipadi 	/*
1591c9c860e5SVenkatesh Pallipadi 	 * disable bus master
1592c9c860e5SVenkatesh Pallipadi 	 * bm_check implies we need ARB_DIS
1593c9c860e5SVenkatesh Pallipadi 	 * !bm_check implies we need cache flush
1594c9c860e5SVenkatesh Pallipadi 	 * bm_control implies whether we can do ARB_DIS
1595c9c860e5SVenkatesh Pallipadi 	 *
1596c9c860e5SVenkatesh Pallipadi 	 * That leaves a case where bm_check is set and bm_control is
1597c9c860e5SVenkatesh Pallipadi 	 * not set. In that case we cannot do much, we enter C3
1598c9c860e5SVenkatesh Pallipadi 	 * without doing anything.
1599c9c860e5SVenkatesh Pallipadi 	 */
1600c9c860e5SVenkatesh Pallipadi 	if (pr->flags.bm_check && pr->flags.bm_control) {
16014f86d3a8SLen Brown 		spin_lock(&c3_lock);
16024f86d3a8SLen Brown 		c3_cpu_count++;
16034f86d3a8SLen Brown 		/* Disable bus master arbitration when all CPUs are in C3 */
16044f86d3a8SLen Brown 		if (c3_cpu_count == num_online_cpus())
16054f86d3a8SLen Brown 			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
16064f86d3a8SLen Brown 		spin_unlock(&c3_lock);
1607c9c860e5SVenkatesh Pallipadi 	} else if (!pr->flags.bm_check) {
1608c9c860e5SVenkatesh Pallipadi 		ACPI_FLUSH_CPU_CACHE();
1609c9c860e5SVenkatesh Pallipadi 	}
16104f86d3a8SLen Brown 
16114f86d3a8SLen Brown 	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
16124f86d3a8SLen Brown 	acpi_idle_do_entry(cx);
16134f86d3a8SLen Brown 	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
16144f86d3a8SLen Brown 
16154f86d3a8SLen Brown 	/* Re-enable bus master arbitration */
1616c9c860e5SVenkatesh Pallipadi 	if (pr->flags.bm_check && pr->flags.bm_control) {
1617c9c860e5SVenkatesh Pallipadi 		spin_lock(&c3_lock);
16184f86d3a8SLen Brown 		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
16194f86d3a8SLen Brown 		c3_cpu_count--;
16204f86d3a8SLen Brown 		spin_unlock(&c3_lock);
16214f86d3a8SLen Brown 	}
16224f86d3a8SLen Brown 
162361331168SPavel Machek #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
16244f86d3a8SLen Brown 	/* TSC could halt in idle, so notify users */
1625ddb25f9aSAndi Kleen 	if (tsc_halts_in_c(ACPI_STATE_C3))
16264f86d3a8SLen Brown 		mark_tsc_unstable("TSC halts in idle");
16274f86d3a8SLen Brown #endif
162850629118SVenkatesh Pallipadi 	sleep_ticks = ticks_elapsed(t1, t2);
162950629118SVenkatesh Pallipadi 	/* Tell the scheduler how much we idled: */
163050629118SVenkatesh Pallipadi 	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
16314f86d3a8SLen Brown 
16324f86d3a8SLen Brown 	local_irq_enable();
16334f86d3a8SLen Brown 	current_thread_info()->status |= TS_POLLING;
16344f86d3a8SLen Brown 
16354f86d3a8SLen Brown 	cx->usage++;
16364f86d3a8SLen Brown 
16374f86d3a8SLen Brown 	acpi_state_timer_broadcast(pr, cx, 0);
163850629118SVenkatesh Pallipadi 	cx->time += sleep_ticks;
16394f86d3a8SLen Brown 	return ticks_elapsed_in_us(t1, t2);
16404f86d3a8SLen Brown }
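/*
 * In acpi_idle_enter_bm(), bus-master arbitration (ARB_DIS) is only
 * disabled once every online CPU has reached C3 (c3_cpu_count under
 * c3_lock) and is re-enabled by the first CPU to wake up.  If bus-master
 * activity is detected before entry, the deepest non-C3 state recorded in
 * dev->safe_state is used instead, or a plain safe halt if none exists.
 */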
16414f86d3a8SLen Brown 
16424f86d3a8SLen Brown struct cpuidle_driver acpi_idle_driver = {
16434f86d3a8SLen Brown 	.name =		"acpi_idle",
16444f86d3a8SLen Brown 	.owner =	THIS_MODULE,
16454f86d3a8SLen Brown };
16464f86d3a8SLen Brown 
16474f86d3a8SLen Brown /**
16484f86d3a8SLen Brown  * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
16494f86d3a8SLen Brown  * @pr: the ACPI processor
16504f86d3a8SLen Brown  */
16514f86d3a8SLen Brown static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
16524f86d3a8SLen Brown {
16539a0b8415Svenkatesh.pallipadi@intel.com 	int i, count = CPUIDLE_DRIVER_STATE_START;
16544f86d3a8SLen Brown 	struct acpi_processor_cx *cx;
16554f86d3a8SLen Brown 	struct cpuidle_state *state;
16564f86d3a8SLen Brown 	struct cpuidle_device *dev = &pr->power.dev;
16574f86d3a8SLen Brown 
16584f86d3a8SLen Brown 	if (!pr->flags.power_setup_done)
16594f86d3a8SLen Brown 		return -EINVAL;
16604f86d3a8SLen Brown 
16614f86d3a8SLen Brown 	if (pr->flags.power == 0) {
16624f86d3a8SLen Brown 		return -EINVAL;
16634f86d3a8SLen Brown 	}
16644f86d3a8SLen Brown 
16654fcb2fcdSVenkatesh Pallipadi 	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
16664fcb2fcdSVenkatesh Pallipadi 		dev->states[i].name[0] = '\0';
16674fcb2fcdSVenkatesh Pallipadi 		dev->states[i].desc[0] = '\0';
16684fcb2fcdSVenkatesh Pallipadi 	}
16694fcb2fcdSVenkatesh Pallipadi 
16704f86d3a8SLen Brown 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
16714f86d3a8SLen Brown 		cx = &pr->power.states[i];
16724f86d3a8SLen Brown 		state = &dev->states[count];
16734f86d3a8SLen Brown 
16744f86d3a8SLen Brown 		if (!cx->valid)
16754f86d3a8SLen Brown 			continue;
16764f86d3a8SLen Brown 
16774f86d3a8SLen Brown #ifdef CONFIG_HOTPLUG_CPU
16784f86d3a8SLen Brown 		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
16794f86d3a8SLen Brown 		    !pr->flags.has_cst &&
16804f86d3a8SLen Brown 		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
16814f86d3a8SLen Brown 			continue;
16824f86d3a8SLen Brown #endif
16834f86d3a8SLen Brown 		cpuidle_set_statedata(state, cx);
16844f86d3a8SLen Brown 
16854f86d3a8SLen Brown 		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
16864fcb2fcdSVenkatesh Pallipadi 		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
16874f86d3a8SLen Brown 		state->exit_latency = cx->latency;
16884963f620SLen Brown 		state->target_residency = cx->latency * latency_factor;
16894f86d3a8SLen Brown 		state->power_usage = cx->power;
16904f86d3a8SLen Brown 
16914f86d3a8SLen Brown 		state->flags = 0;
16924f86d3a8SLen Brown 		switch (cx->type) {
16934f86d3a8SLen Brown 			case ACPI_STATE_C1:
16944f86d3a8SLen Brown 			state->flags |= CPUIDLE_FLAG_SHALLOW;
16958e92b660SVenki Pallipadi 			if (cx->entry_method == ACPI_CSTATE_FFH)
16969b12e18cSvenkatesh.pallipadi@intel.com 				state->flags |= CPUIDLE_FLAG_TIME_VALID;
16978e92b660SVenki Pallipadi 
16984f86d3a8SLen Brown 			state->enter = acpi_idle_enter_c1;
1699ddc081a1SVenkatesh Pallipadi 			dev->safe_state = state;
17004f86d3a8SLen Brown 			break;
17014f86d3a8SLen Brown 
17024f86d3a8SLen Brown 			case ACPI_STATE_C2:
17034f86d3a8SLen Brown 			state->flags |= CPUIDLE_FLAG_BALANCED;
17044f86d3a8SLen Brown 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
17054f86d3a8SLen Brown 			state->enter = acpi_idle_enter_simple;
1706ddc081a1SVenkatesh Pallipadi 			dev->safe_state = state;
17074f86d3a8SLen Brown 			break;
17084f86d3a8SLen Brown 
17094f86d3a8SLen Brown 			case ACPI_STATE_C3:
17104f86d3a8SLen Brown 			state->flags |= CPUIDLE_FLAG_DEEP;
17114f86d3a8SLen Brown 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
17124f86d3a8SLen Brown 			state->flags |= CPUIDLE_FLAG_CHECK_BM;
17134f86d3a8SLen Brown 			state->enter = pr->flags.bm_check ?
17144f86d3a8SLen Brown 					acpi_idle_enter_bm :
17154f86d3a8SLen Brown 					acpi_idle_enter_simple;
17164f86d3a8SLen Brown 			break;
17174f86d3a8SLen Brown 		}
17184f86d3a8SLen Brown 
17194f86d3a8SLen Brown 		count++;
17209a0b8415Svenkatesh.pallipadi@intel.com 		if (count == CPUIDLE_STATE_MAX)
17219a0b8415Svenkatesh.pallipadi@intel.com 			break;
17224f86d3a8SLen Brown 	}
17234f86d3a8SLen Brown 
17244f86d3a8SLen Brown 	dev->state_count = count;
17254f86d3a8SLen Brown 
17264f86d3a8SLen Brown 	if (!count)
17274f86d3a8SLen Brown 		return -EINVAL;
17284f86d3a8SLen Brown 
17294f86d3a8SLen Brown 	return 0;
17304f86d3a8SLen Brown }
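/*
 * acpi_processor_setup_cpuidle() maps the validated ACPI states onto
 * cpuidle slots starting at CPUIDLE_DRIVER_STATE_START, skipping non-C1
 * states when CONFIG_HOTPLUG_CPU is set, more than one CPU is online,
 * there is no _CST, and the FADT does not advertise C2 MP support.
 * target_residency is latency scaled by latency_factor, dev->safe_state
 * tracks the deepest C1/C2 state for the bus-master fallback, and C1
 * timing is only marked valid for FFH entry.
 */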
17314f86d3a8SLen Brown 
17324f86d3a8SLen Brown int acpi_processor_cst_has_changed(struct acpi_processor *pr)
17334f86d3a8SLen Brown {
17344f86d3a8SLen Brown 	int ret;
17354f86d3a8SLen Brown 
17364f86d3a8SLen Brown 	if (!pr)
17374f86d3a8SLen Brown 		return -EINVAL;
17384f86d3a8SLen Brown 
17394f86d3a8SLen Brown 	if (nocst) {
17404f86d3a8SLen Brown 		return -ENODEV;
17414f86d3a8SLen Brown 	}
17424f86d3a8SLen Brown 
17434f86d3a8SLen Brown 	if (!pr->flags.power_setup_done)
17444f86d3a8SLen Brown 		return -ENODEV;
17454f86d3a8SLen Brown 
17464f86d3a8SLen Brown 	cpuidle_pause_and_lock();
17474f86d3a8SLen Brown 	cpuidle_disable_device(&pr->power.dev);
17484f86d3a8SLen Brown 	acpi_processor_get_power_info(pr);
17494f86d3a8SLen Brown 	acpi_processor_setup_cpuidle(pr);
17504f86d3a8SLen Brown 	ret = cpuidle_enable_device(&pr->power.dev);
17514f86d3a8SLen Brown 	cpuidle_resume_and_unlock();
17524f86d3a8SLen Brown 
17534f86d3a8SLen Brown 	return ret;
17544f86d3a8SLen Brown }
17554f86d3a8SLen Brown 
17564f86d3a8SLen Brown #endif /* CONFIG_CPU_IDLE */
17574f86d3a8SLen Brown 
17587af8b660SPierre Ossman int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
17594be44fcdSLen Brown 			      struct acpi_device *device)
17601da177e4SLinus Torvalds {
17611da177e4SLinus Torvalds 	acpi_status status = 0;
1762b6835052SAndreas Mohr 	static int first_run;
17631da177e4SLinus Torvalds 	struct proc_dir_entry *entry = NULL;
17641da177e4SLinus Torvalds 	unsigned int i;
17651da177e4SLinus Torvalds 
17661da177e4SLinus Torvalds 
17671da177e4SLinus Torvalds 	if (!first_run) {
17681da177e4SLinus Torvalds 		dmi_check_system(processor_power_dmi_table);
1769c1c30634SAlexey Starikovskiy 		max_cstate = acpi_processor_cstate_check(max_cstate);
17701da177e4SLinus Torvalds 		if (max_cstate < ACPI_C_STATES_MAX)
17714be44fcdSLen Brown 			printk(KERN_NOTICE
17724be44fcdSLen Brown 			       "ACPI: processor limited to max C-state %d\n",
17734be44fcdSLen Brown 			       max_cstate);
17741da177e4SLinus Torvalds 		first_run++;
17754f86d3a8SLen Brown #if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
1776f011e2e2SMark Gross 		pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
1777f011e2e2SMark Gross 				&acpi_processor_latency_notifier);
17781fec74a9SAndrew Morton #endif
17791da177e4SLinus Torvalds 	}
17801da177e4SLinus Torvalds 
178102df8b93SVenkatesh Pallipadi 	if (!pr)
1782d550d98dSPatrick Mochel 		return -EINVAL;
178302df8b93SVenkatesh Pallipadi 
1784cee324b1SAlexey Starikovskiy 	if (acpi_gbl_FADT.cst_control && !nocst) {
17854be44fcdSLen Brown 		status =
1786cee324b1SAlexey Starikovskiy 		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
17871da177e4SLinus Torvalds 		if (ACPI_FAILURE(status)) {
1788a6fc6720SThomas Renninger 			ACPI_EXCEPTION((AE_INFO, status,
1789a6fc6720SThomas Renninger 					"Notifying BIOS of _CST ability failed"));
17901da177e4SLinus Torvalds 		}
17911da177e4SLinus Torvalds 	}
17921da177e4SLinus Torvalds 
17931da177e4SLinus Torvalds 	acpi_processor_get_power_info(pr);
17944f86d3a8SLen Brown 	pr->flags.power_setup_done = 1;
17951da177e4SLinus Torvalds 
17961da177e4SLinus Torvalds 	/*
17971da177e4SLinus Torvalds 	 * Install the idle handler if processor power management is supported.
17981da177e4SLinus Torvalds 	 * Note that the previously set idle handler will be used on
17991da177e4SLinus Torvalds 	 * platforms that only support C1.
18001da177e4SLinus Torvalds 	 */
18011da177e4SLinus Torvalds 	if ((pr->flags.power) && (!boot_option_idle_override)) {
18024f86d3a8SLen Brown #ifdef CONFIG_CPU_IDLE
18034f86d3a8SLen Brown 		acpi_processor_setup_cpuidle(pr);
18044f86d3a8SLen Brown 		pr->power.dev.cpu = pr->id;
18054f86d3a8SLen Brown 		if (cpuidle_register_device(&pr->power.dev))
18064f86d3a8SLen Brown 			return -EIO;
18074f86d3a8SLen Brown #endif
18084f86d3a8SLen Brown 
18091da177e4SLinus Torvalds 		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
18101da177e4SLinus Torvalds 		for (i = 1; i <= pr->power.count; i++)
18111da177e4SLinus Torvalds 			if (pr->power.states[i].valid)
18124be44fcdSLen Brown 				printk(" C%d[C%d]", i,
18134be44fcdSLen Brown 				       pr->power.states[i].type);
18141da177e4SLinus Torvalds 		printk(")\n");
18151da177e4SLinus Torvalds 
18164f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
18171da177e4SLinus Torvalds 		if (pr->id == 0) {
18181da177e4SLinus Torvalds 			pm_idle_save = pm_idle;
18191da177e4SLinus Torvalds 			pm_idle = acpi_processor_idle;
18201da177e4SLinus Torvalds 		}
18214f86d3a8SLen Brown #endif
18221da177e4SLinus Torvalds 	}
18231da177e4SLinus Torvalds 
18241da177e4SLinus Torvalds 	/* 'power' [R] */
18251da177e4SLinus Torvalds 	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
18261da177e4SLinus Torvalds 				  S_IRUGO, acpi_device_dir(device));
18271da177e4SLinus Torvalds 	if (!entry)
1828a6fc6720SThomas Renninger 		return -EIO;
18291da177e4SLinus Torvalds 	else {
18301da177e4SLinus Torvalds 		entry->proc_fops = &acpi_processor_power_fops;
18311da177e4SLinus Torvalds 		entry->data = acpi_driver_data(device);
18321da177e4SLinus Torvalds 		entry->owner = THIS_MODULE;
18331da177e4SLinus Torvalds 	}
18341da177e4SLinus Torvalds 
1835d550d98dSPatrick Mochel 	return 0;
18361da177e4SLinus Torvalds }
18371da177e4SLinus Torvalds 
18384be44fcdSLen Brown int acpi_processor_power_exit(struct acpi_processor *pr,
18394be44fcdSLen Brown 			      struct acpi_device *device)
18401da177e4SLinus Torvalds {
18414f86d3a8SLen Brown #ifdef CONFIG_CPU_IDLE
18424f86d3a8SLen Brown 	if ((pr->flags.power) && (!boot_option_idle_override))
18434f86d3a8SLen Brown 		cpuidle_unregister_device(&pr->power.dev);
18444f86d3a8SLen Brown #endif
18451da177e4SLinus Torvalds 	pr->flags.power_setup_done = 0;
18461da177e4SLinus Torvalds 
18471da177e4SLinus Torvalds 	if (acpi_device_dir(device))
18484be44fcdSLen Brown 		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
18494be44fcdSLen Brown 				  acpi_device_dir(device));
18501da177e4SLinus Torvalds 
18514f86d3a8SLen Brown #ifndef CONFIG_CPU_IDLE
18524f86d3a8SLen Brown 
18531da177e4SLinus Torvalds 	/* Unregister the idle handler when processor #0 is removed. */
18541da177e4SLinus Torvalds 	if (pr->id == 0) {
18551da177e4SLinus Torvalds 		pm_idle = pm_idle_save;
18561da177e4SLinus Torvalds 
18571da177e4SLinus Torvalds 		/*
18581da177e4SLinus Torvalds 		 * We are about to unload the current idle thread pm callback
18591da177e4SLinus Torvalds 		 * (pm_idle), Wait for all processors to update cached/local
18601da177e4SLinus Torvalds 		 * copies of pm_idle before proceeding.
18611da177e4SLinus Torvalds 		 */
18621da177e4SLinus Torvalds 		cpu_idle_wait();
18631fec74a9SAndrew Morton #ifdef CONFIG_SMP
1864f011e2e2SMark Gross 		pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
1865f011e2e2SMark Gross 				&acpi_processor_latency_notifier);
18661fec74a9SAndrew Morton #endif
18671da177e4SLinus Torvalds 	}
18684f86d3a8SLen Brown #endif
18691da177e4SLinus Torvalds 
1870d550d98dSPatrick Mochel 	return 0;
18711da177e4SLinus Torvalds }
1872