/*
 * Copyright (C) 2005 Intel Corporation
 * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 * - Added _PDC for SMP C-states on Intel CPUs
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <acpi/processor.h>
#include <asm/mwait.h>
#include <asm/special_insns.h>

/*
 * Initialize bm_flags based on the CPU cache properties.
 * On SMP it depends on the cache configuration:
 * - When the cache is not shared among all CPUs, we flush the cache
 *   before entering C3.
 * - When the cache is shared among all CPUs, we use the bm_check
 *   mechanism as in the UP case.
 *
 * This routine is called only after all the CPUs are online.
 */
void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
                                        unsigned int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        flags->bm_check = 0;
        if (num_online_cpus() == 1)
                flags->bm_check = 1;
        else if (c->x86_vendor == X86_VENDOR_INTEL) {
                /*
                 * Today all MP CPUs that support C3 share cache,
                 * and caches should not be flushed by software while
                 * entering a C3 type state.
                 */
                flags->bm_check = 1;
        }

        /*
         * On all recent Intel platforms, ARB_DISABLE is a nop.
         * So, set bm_control to zero to indicate that ARB_DISABLE
         * is not required while entering a C3 type state on
         * P4, Core and beyond CPUs.
         */
        if (c->x86_vendor == X86_VENDOR_INTEL &&
            (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f)))
                flags->bm_control = 0;

        /*
         * For all recent Centaur CPUs, the ucode will make sure that each
         * core can keep cache coherence with the others while entering a C3
         * type state. So, set bm_check to 1 to indicate that the kernel
         * doesn't need to execute a cache flush operation (WBINVD) when
         * entering a C3 type state.
         */
        if (c->x86_vendor == X86_VENDOR_CENTAUR) {
                if (c->x86 > 6 || (c->x86 == 6 && c->x86_model == 0x0f &&
                                   c->x86_stepping >= 0x0e))
                        flags->bm_check = 1;
        }
}
EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
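
/*
 * Note on the MWAIT hint layout used below (illustrative, derived from
 * the MWAIT_* constants in <asm/mwait.h> and CPUID leaf 5): for an FFH
 * _CST entry, cx->address is the raw MWAIT hint, with the C-state code
 * in bits [7:4] and the sub-state in bits [3:0]. CPUID.05H:EDX packs
 * the number of supported sub-states per C-state, 4 bits each. Worked
 * example, assuming a hint of 0x10 (C-state code 1, i.e. MWAIT C2):
 *
 *      cstate_type = ((0x10 >> 4) & 0xf) + 1;    -- 2
 *      edx_part    = edx >> (2 * 4);             -- EDX[11:8] shifted to bit 0
 *      num_cstate_subtype = edx_part & 0xf;      -- number of C2 sub-states
 *
 * A zero count means the firmware asked for a C-state this CPU's MWAIT
 * implementation does not provide, hence the FW_BUG warning below.
 */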

/* The code below handles C-state entry with the MONITOR-MWAIT pair on Intel. */

struct cstate_entry {
        struct {
                unsigned int eax;
                unsigned int ecx;
        } states[ACPI_PROCESSOR_MAX_POWER];
};
static struct cstate_entry __percpu *cpu_cstate_entry;  /* per CPU ptr */

static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];

#define NATIVE_CSTATE_BEYOND_HALT       (2)

static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
{
        struct acpi_processor_cx *cx = _cx;
        long retval;
        unsigned int eax, ebx, ecx, edx;
        unsigned int edx_part;
        unsigned int cstate_type; /* C-state type and not ACPI C-state type */
        unsigned int num_cstate_subtype;

        cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

        /* Check whether this particular cx_type (in CST) is supported or not */
        cstate_type = ((cx->address >> MWAIT_SUBSTATE_SIZE) &
                       MWAIT_CSTATE_MASK) + 1;
        edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
        num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;

        retval = 0;
        /* If the HW does not support any sub-states in this C-state */
        if (num_cstate_subtype == 0) {
                pr_warn(FW_BUG "ACPI MWAIT C-state 0x%x not supported by HW (0x%x)\n",
                        cx->address, edx_part);
                retval = -1;
                goto out;
        }

        /* The MWAIT ECX extension INTERRUPT_BREAK must be supported for C2/C3 */
        if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
            !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
                retval = -1;
                goto out;
        }

        if (!mwait_supported[cstate_type]) {
                mwait_supported[cstate_type] = 1;
                printk(KERN_DEBUG
                       "Monitor-Mwait will be used to enter C-%d state\n",
                       cx->type);
        }
        snprintf(cx->desc,
                 ACPI_CX_DESC_LEN, "ACPI FFH MWAIT 0x%x",
                 cx->address);
out:
        return retval;
}

int acpi_processor_ffh_cstate_probe(unsigned int cpu,
                struct acpi_processor_cx *cx, struct acpi_power_register *reg)
{
        struct cstate_entry *percpu_entry;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        long retval;

        if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF)
                return -1;

        if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT)
                return -1;

        percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
        percpu_entry->states[cx->index].eax = 0;
        percpu_entry->states[cx->index].ecx = 0;

        /* Make sure we are running on the right CPU */
        retval = work_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx);
        if (retval == 0) {
                /* Use the hint in _CST */
                percpu_entry->states[cx->index].eax = cx->address;
                percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
        }

        /*
         * For _CST FFH on Intel, if GAS.access_size bit 1 is cleared,
         * then we should skip checking BM_STS for this C-state.
         * ref: "Intel Processor Vendor-Specific ACPI Interface Specification"
         */
        if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2))
                cx->bm_sts_skip = 1;

        return retval;
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
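
/*
 * Caller-side sketch, for orientation (a simplified view, not a copy of
 * the actual code): drivers/acpi/processor_idle.c probes each FFH _CST
 * entry once with acpi_processor_ffh_cstate_probe() while parsing _CST
 * and, on success, marks the state ACPI_CSTATE_FFH so that the idle
 * entry path lands in acpi_processor_ffh_cstate_enter() below, roughly:
 *
 *      if (cx->entry_method == ACPI_CSTATE_FFH)
 *              acpi_processor_ffh_cstate_enter(cx);    -- MWAIT path
 *      else if (cx->entry_method == ACPI_CSTATE_HALT)
 *              acpi_safe_halt();                       -- plain HLT
 *      else
 *              inb(cx->address);                       -- SYSTEM_IO read
 */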
158 * ref: "Intel Processor Vendor-Specific ACPI Interface Specification" 159 */ 160 if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2)) 161 cx->bm_sts_skip = 1; 162 163 return retval; 164 } 165 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); 166 167 void __cpuidle acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx) 168 { 169 unsigned int cpu = smp_processor_id(); 170 struct cstate_entry *percpu_entry; 171 172 percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); 173 mwait_idle_with_hints(percpu_entry->states[cx->index].eax, 174 percpu_entry->states[cx->index].ecx); 175 } 176 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter); 177 178 static int __init ffh_cstate_init(void) 179 { 180 struct cpuinfo_x86 *c = &boot_cpu_data; 181 182 if (c->x86_vendor != X86_VENDOR_INTEL && 183 c->x86_vendor != X86_VENDOR_AMD) 184 return -1; 185 186 cpu_cstate_entry = alloc_percpu(struct cstate_entry); 187 return 0; 188 } 189 190 static void __exit ffh_cstate_exit(void) 191 { 192 free_percpu(cpu_cstate_entry); 193 cpu_cstate_entry = NULL; 194 } 195 196 arch_initcall(ffh_cstate_init); 197 __exitcall(ffh_cstate_exit); 198