/*
 * Copyright (C) 2005 Intel Corporation
 * 	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 * 	- Added _PDC for SMP C-states on Intel CPUs
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <acpi/processor.h>
#include <asm/mwait.h>
#include <asm/special_insns.h>

/*
 * Initialize bm_flags based on the CPU cache properties.
 * On SMP this depends on the cache configuration:
 * - When the cache is not shared among all CPUs, we flush the
 *   cache before entering C3.
 * - When the cache is shared among all CPUs, we use the bm_check
 *   mechanism, as in the UP case.
 *
 * This routine is called only after all the CPUs are online.
 */
void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
					unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	flags->bm_check = 0;
	if (num_online_cpus() == 1)
		flags->bm_check = 1;
	else if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * Today all MP CPUs that support C3 share their cache,
		 * and caches should not be flushed by software while
		 * entering a C3-type state.
		 */
		flags->bm_check = 1;
	}

	/*
	 * On all recent Intel platforms, ARB_DISABLE is a nop.
	 * So, set bm_control to zero to indicate that ARB_DISABLE
	 * is not required while entering a C3-type state on
	 * Core 2 (family 6, model 0x0f) and later CPUs.
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f)))
		flags->bm_control = 0;
}
EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
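
/*
 * Illustrative caller sketch (hypothetical; in practice the ACPI
 * processor idle driver performs this, after all CPUs are online):
 *
 *	struct acpi_processor_flags flags = {};
 *
 *	acpi_processor_power_init_bm_check(&flags, cpu);
 *	// flags.bm_check == 0: flush caches before entering C3
 *	// flags.bm_check == 1: rely on bus-master status checks instead
 *	// flags.bm_control == 0: no ARB_DISABLE needed when entering C3
 */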

/*
 * The code below handles C-state entry with the MONITOR/MWAIT pair on
 * Intel (and, per ffh_cstate_init(), AMD) CPUs.
 */

struct cstate_entry {
	struct {
		unsigned int eax;
		unsigned int ecx;
	} states[ACPI_PROCESSOR_MAX_POWER];
};
static struct cstate_entry __percpu *cpu_cstate_entry;	/* per CPU ptr */

static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];

#define NATIVE_CSTATE_BEYOND_HALT	(2)
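
/*
 * Per the "Intel Processor Vendor-Specific ACPI Interface Specification",
 * an MWAIT-entered C-state is described by a _CST entry whose Functional
 * Fixed Hardware GAS carries bit_offset == 2 ("native C-state beyond
 * HALT"); the MWAIT hint itself arrives in the GAS address field.
 * Illustrative (made-up) values for a C2 entry, as seen by
 * acpi_processor_ffh_cstate_probe():
 *
 *	reg->space_id   == ACPI_ADR_SPACE_FIXED_HARDWARE
 *	reg->bit_offset == NATIVE_CSTATE_BEYOND_HALT
 *	cx->address     == 0x10	 // MWAIT hint: C-state field 1 -> C2
 */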

static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
{
	struct acpi_processor_cx *cx = _cx;
	long retval;
	unsigned int eax, ebx, ecx, edx;
	unsigned int edx_part;
	unsigned int cstate_type; /* C-state type, not ACPI C-state type */
	unsigned int num_cstate_subtype;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	/* Check whether this particular cx_type (in _CST) is supported */
	cstate_type = ((cx->address >> MWAIT_SUBSTATE_SIZE) &
			MWAIT_CSTATE_MASK) + 1;
	edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
	num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;
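
	/*
	 * CPUID.05H:EDX packs the number of MWAIT sub-states into 4-bit
	 * fields, one per C-state: bits [3:0] for C0, [7:4] for C1,
	 * [11:8] for C2, and so on (valid when the extensions bit in
	 * ECX is set). Worked example with an illustrative edx of
	 * 0x00002220 and a _CST hint of 0x10 (cstate_type == 2):
	 *
	 *	edx_part           = 0x00002220 >> (2 * 4) = 0x22
	 *	num_cstate_subtype = 0x22 & 0xf = 2	// two C2 sub-states
	 */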

	retval = 0;
	/* Bail out if the HW does not support any sub-states for this C-state. */
	if (num_cstate_subtype == 0) {
		pr_warn(FW_BUG "ACPI MWAIT C-state 0x%x not supported by HW (0x%x)\n",
				cx->address, edx_part);
		retval = -1;
		goto out;
	}

	/* The MWAIT ECX extension INTERRUPT_BREAK must be supported for C2/C3. */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
		retval = -1;
		goto out;
	}

	if (!mwait_supported[cstate_type]) {
		mwait_supported[cstate_type] = 1;
		printk(KERN_DEBUG
			"Monitor-Mwait will be used to enter C-%d state\n",
			cx->type);
	}
	snprintf(cx->desc,
			ACPI_CX_DESC_LEN, "ACPI FFH MWAIT 0x%x",
			cx->address);
out:
	return retval;
}

int acpi_processor_ffh_cstate_probe(unsigned int cpu,
		struct acpi_processor_cx *cx, struct acpi_power_register *reg)
{
	struct cstate_entry *percpu_entry;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	long retval;

	if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF)
		return -1;

	if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT)
		return -1;

	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
	percpu_entry->states[cx->index].eax = 0;
	percpu_entry->states[cx->index].ecx = 0;

	/* Make sure we are running on the right CPU. */
	retval = work_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx);
	if (retval == 0) {
		/* Use the hint from _CST */
		percpu_entry->states[cx->index].eax = cx->address;
		percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
	}

	/*
	 * For _CST FFH on Intel, if GAS.access_size bit 1 is cleared,
	 * then we should skip checking BM_STS for this C-state.
	 * ref: "Intel Processor Vendor-Specific ACPI Interface Specification"
	 */
	if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2))
		cx->bm_sts_skip = 1;

	return retval;
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
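
/*
 * Hypothetical flow sketch (the real sequencing lives in the ACPI
 * processor idle driver): each _CST entry is probed once, and only on
 * success does the idle path later use the cached per-CPU hints:
 *
 *	if (acpi_processor_ffh_cstate_probe(cpu, cx, reg) == 0)
 *		cx->entry_method = ACPI_CSTATE_FFH;
 *	...
 *	// later, from the idle loop on that CPU:
 *	acpi_processor_ffh_cstate_enter(cx);
 */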

void __cpuidle acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
	unsigned int cpu = smp_processor_id();
	struct cstate_entry *percpu_entry;

	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
	mwait_idle_with_hints(percpu_entry->states[cx->index].eax,
			      percpu_entry->states[cx->index].ecx);
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter);
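
/*
 * mwait_idle_with_hints() (asm/mwait.h) is, in simplified form and
 * ignoring errata handling, the canonical MONITOR/MWAIT idle sequence:
 *
 *	__monitor(&current_thread_info()->flags, 0, 0);	// arm the monitor
 *	if (!need_resched())
 *		__mwait(eax, ecx);	// sleep until a write or an interrupt
 *
 * With MWAIT_ECX_INTERRUPT_BREAK set in ecx, an interrupt wakes the CPU
 * even with interrupts masked, which makes this safe for the idle path.
 */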

static int __init ffh_cstate_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_INTEL &&
	    c->x86_vendor != X86_VENDOR_AMD)
		return -1;

	cpu_cstate_entry = alloc_percpu(struct cstate_entry);
	return 0;
}

static void __exit ffh_cstate_exit(void)
{
	free_percpu(cpu_cstate_entry);
	cpu_cstate_entry = NULL;
}

arch_initcall(ffh_cstate_init);
__exitcall(ffh_cstate_exit);