1 /* 2 * Pentium 4/Xeon CPU on demand clock modulation/speed scaling 3 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> 4 * (C) 2002 Zwane Mwaikambo <zwane@commfireservices.com> 5 * (C) 2002 Arjan van de Ven <arjanv@redhat.com> 6 * (C) 2002 Tora T. Engstad 7 * All Rights Reserved 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License 11 * as published by the Free Software Foundation; either version 12 * 2 of the License, or (at your option) any later version. 13 * 14 * The author(s) of this software shall not be held liable for damages 15 * of any nature resulting due to the use of this software. This 16 * software is provided AS-IS with no warranties. 17 * 18 * Date Errata Description 19 * 20020525 N44, O17 12.5% or 25% DC causes lockup 20 * 21 */ 22 23 #include <linux/kernel.h> 24 #include <linux/module.h> 25 #include <linux/init.h> 26 #include <linux/smp.h> 27 #include <linux/cpufreq.h> 28 #include <linux/cpumask.h> 29 #include <linux/timex.h> 30 31 #include <asm/processor.h> 32 #include <asm/msr.h> 33 #include <asm/timer.h> 34 #include <asm/cpu_device_id.h> 35 36 #include "speedstep-lib.h" 37 38 #define PFX "p4-clockmod: " 39 40 /* 41 * Duty Cycle (3bits), note DC_DISABLE is not specified in 42 * intel docs i just use it to mean disable 43 */ 44 enum { 45 DC_RESV, DC_DFLT, DC_25PT, DC_38PT, DC_50PT, 46 DC_64PT, DC_75PT, DC_88PT, DC_DISABLE 47 }; 48 49 #define DC_ENTRIES 8 50 51 52 static int has_N44_O17_errata[NR_CPUS]; 53 static unsigned int stock_freq; 54 static struct cpufreq_driver p4clockmod_driver; 55 static unsigned int cpufreq_p4_get(unsigned int cpu); 56 57 static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) 58 { 59 u32 l, h; 60 61 if (!cpu_online(cpu) || 62 (newstate > DC_DISABLE) || (newstate == DC_RESV)) 63 return -EINVAL; 64 65 rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h); 66 67 if (l & 0x01) 68 pr_debug("CPU#%d currently thermal throttled\n", cpu); 69 70 if (has_N44_O17_errata[cpu] && 71 (newstate == DC_25PT || newstate == DC_DFLT)) 72 newstate = DC_38PT; 73 74 rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h); 75 if (newstate == DC_DISABLE) { 76 pr_debug("CPU#%d disabling modulation\n", cpu); 77 wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h); 78 } else { 79 pr_debug("CPU#%d setting duty cycle to %d%%\n", 80 cpu, ((125 * newstate) / 10)); 81 /* bits 63 - 5 : reserved 82 * bit 4 : enable/disable 83 * bits 3-1 : duty cycle 84 * bit 0 : reserved 85 */ 86 l = (l & ~14); 87 l = l | (1<<4) | ((newstate & 0x7)<<1); 88 wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h); 89 } 90 91 return 0; 92 } 93 94 95 static struct cpufreq_frequency_table p4clockmod_table[] = { 96 {DC_RESV, CPUFREQ_ENTRY_INVALID}, 97 {DC_DFLT, 0}, 98 {DC_25PT, 0}, 99 {DC_38PT, 0}, 100 {DC_50PT, 0}, 101 {DC_64PT, 0}, 102 {DC_75PT, 0}, 103 {DC_88PT, 0}, 104 {DC_DISABLE, 0}, 105 {DC_RESV, CPUFREQ_TABLE_END}, 106 }; 107 108 109 static int cpufreq_p4_target(struct cpufreq_policy *policy, 110 unsigned int target_freq, 111 unsigned int relation) 112 { 113 unsigned int newstate = DC_RESV; 114 struct cpufreq_freqs freqs; 115 int i; 116 117 if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], 118 target_freq, relation, &newstate)) 119 return -EINVAL; 120 121 freqs.old = cpufreq_p4_get(policy->cpu); 122 freqs.new = stock_freq * p4clockmod_table[newstate].index / 8; 123 124 if (freqs.new == freqs.old) 125 return 0; 126 127 /* notifiers */ 128 for_each_cpu(i, policy->cpus) { 129 freqs.cpu = i; 130 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 131 } 132 133 /* run on each logical CPU, 134 * see section 13.15.3 of IA32 Intel Architecture Software 135 * Developer's Manual, Volume 3 136 */ 137 for_each_cpu(i, policy->cpus) 138 cpufreq_p4_setdc(i, p4clockmod_table[newstate].index); 139 140 /* notifiers */ 141 for_each_cpu(i, policy->cpus) { 142 freqs.cpu = i; 143 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 144 } 145 146 return 0; 147 } 148 149 150 static int cpufreq_p4_verify(struct cpufreq_policy *policy) 151 { 152 return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]); 153 } 154 155 156 static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) 157 { 158 if (c->x86 == 0x06) { 159 if (cpu_has(c, X86_FEATURE_EST)) 160 printk_once(KERN_WARNING PFX "Warning: EST-capable " 161 "CPU detected. The acpi-cpufreq module offers " 162 "voltage scaling in addition to frequency " 163 "scaling. You should use that instead of " 164 "p4-clockmod, if possible.\n"); 165 switch (c->x86_model) { 166 case 0x0E: /* Core */ 167 case 0x0F: /* Core Duo */ 168 case 0x16: /* Celeron Core */ 169 case 0x1C: /* Atom */ 170 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; 171 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE); 172 case 0x0D: /* Pentium M (Dothan) */ 173 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; 174 /* fall through */ 175 case 0x09: /* Pentium M (Banias) */ 176 return speedstep_get_frequency(SPEEDSTEP_CPU_PM); 177 } 178 } 179 180 if (c->x86 != 0xF) 181 return 0; 182 183 /* on P-4s, the TSC runs with constant frequency independent whether 184 * throttling is active or not. */ 185 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; 186 187 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) { 188 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. " 189 "The speedstep-ich or acpi cpufreq modules offer " 190 "voltage scaling in addition of frequency scaling. " 191 "You should use either one instead of p4-clockmod, " 192 "if possible.\n"); 193 return speedstep_get_frequency(SPEEDSTEP_CPU_P4M); 194 } 195 196 return speedstep_get_frequency(SPEEDSTEP_CPU_P4D); 197 } 198 199 200 201 static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) 202 { 203 struct cpuinfo_x86 *c = &cpu_data(policy->cpu); 204 int cpuid = 0; 205 unsigned int i; 206 207 #ifdef CONFIG_SMP 208 cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); 209 #endif 210 211 /* Errata workaround */ 212 cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask; 213 switch (cpuid) { 214 case 0x0f07: 215 case 0x0f0a: 216 case 0x0f11: 217 case 0x0f12: 218 has_N44_O17_errata[policy->cpu] = 1; 219 pr_debug("has errata -- disabling low frequencies\n"); 220 } 221 222 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4D && 223 c->x86_model < 2) { 224 /* switch to maximum frequency and measure result */ 225 cpufreq_p4_setdc(policy->cpu, DC_DISABLE); 226 recalibrate_cpu_khz(); 227 } 228 /* get max frequency */ 229 stock_freq = cpufreq_p4_get_frequency(c); 230 if (!stock_freq) 231 return -EINVAL; 232 233 /* table init */ 234 for (i = 1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) { 235 if ((i < 2) && (has_N44_O17_errata[policy->cpu])) 236 p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID; 237 else 238 p4clockmod_table[i].frequency = (stock_freq * i)/8; 239 } 240 cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu); 241 242 /* cpuinfo and default policy values */ 243 244 /* the transition latency is set to be 1 higher than the maximum 245 * transition latency of the ondemand governor */ 246 policy->cpuinfo.transition_latency = 10000001; 247 policy->cur = stock_freq; 248 249 return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]); 250 } 251 252 253 static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy) 254 { 255 cpufreq_frequency_table_put_attr(policy->cpu); 256 return 0; 257 } 258 259 static unsigned int cpufreq_p4_get(unsigned int cpu) 260 { 261 u32 l, h; 262 263 rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h); 264 265 if (l & 0x10) { 266 l = l >> 1; 267 l &= 0x7; 268 } else 269 l = DC_DISABLE; 270 271 if (l != DC_DISABLE) 272 return stock_freq * l / 8; 273 274 return stock_freq; 275 } 276 277 static struct freq_attr *p4clockmod_attr[] = { 278 &cpufreq_freq_attr_scaling_available_freqs, 279 NULL, 280 }; 281 282 static struct cpufreq_driver p4clockmod_driver = { 283 .verify = cpufreq_p4_verify, 284 .target = cpufreq_p4_target, 285 .init = cpufreq_p4_cpu_init, 286 .exit = cpufreq_p4_cpu_exit, 287 .get = cpufreq_p4_get, 288 .name = "p4-clockmod", 289 .owner = THIS_MODULE, 290 .attr = p4clockmod_attr, 291 }; 292 293 static const struct x86_cpu_id cpufreq_p4_id[] = { 294 { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ACC }, 295 {} 296 }; 297 298 /* 299 * Intentionally no MODULE_DEVICE_TABLE here: this driver should not 300 * be auto loaded. Please don't add one. 301 */ 302 303 static int __init cpufreq_p4_init(void) 304 { 305 int ret; 306 307 /* 308 * THERM_CONTROL is architectural for IA32 now, so 309 * we can rely on the capability checks 310 */ 311 if (!x86_match_cpu(cpufreq_p4_id) || !boot_cpu_has(X86_FEATURE_ACPI)) 312 return -ENODEV; 313 314 ret = cpufreq_register_driver(&p4clockmod_driver); 315 if (!ret) 316 printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock " 317 "Modulation available\n"); 318 319 return ret; 320 } 321 322 323 static void __exit cpufreq_p4_exit(void) 324 { 325 cpufreq_unregister_driver(&p4clockmod_driver); 326 } 327 328 329 MODULE_AUTHOR("Zwane Mwaikambo <zwane@commfireservices.com>"); 330 MODULE_DESCRIPTION("cpufreq driver for Pentium(TM) 4/Xeon(TM)"); 331 MODULE_LICENSE("GPL"); 332 333 late_initcall(cpufreq_p4_init); 334 module_exit(cpufreq_p4_exit); 335