/*
 * This file provides the ACPI based P-state support. This
 * module works with generic cpufreq infrastructure. Most of
 * the code is based on i386 version
 * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c)
 *
 * Copyright (C) 2005 Intel Corp
 *      Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/pal.h>

#include <linux/acpi.h>
#include <acpi/processor.h>

MODULE_AUTHOR("Venkatesh Pallipadi");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");


struct cpufreq_acpi_io {
	struct acpi_processor_performance	acpi_data;
	unsigned int				resume;
};

static struct cpufreq_acpi_io	*acpi_io_data[NR_CPUS];

static struct cpufreq_driver acpi_cpufreq_driver;


static int
processor_set_pstate (
	u32	value)
{
	s64 retval;

	pr_debug("processor_set_pstate\n");

	retval = ia64_pal_set_pstate((u64)value);

	if (retval) {
		pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n",
			value, retval);
		return -ENODEV;
	}
	return (int)retval;
}


static int
processor_get_pstate (
	u32	*value)
{
	u64	pstate_index = 0;
	s64	retval;

	pr_debug("processor_get_pstate\n");

	retval = ia64_pal_get_pstate(&pstate_index,
			PAL_GET_PSTATE_TYPE_INSTANT);
	*value = (u32) pstate_index;

	if (retval)
		pr_debug("Failed to get current freq with "
			 "error 0x%lx, idx 0x%x\n", retval, *value);

	return (int)retval;
}


/* To be used only after data->acpi_data is initialized */
static unsigned
extract_clock (
	struct cpufreq_acpi_io *data,
	unsigned value,
	unsigned int cpu)
{
	unsigned long i;

	pr_debug("extract_clock\n");

	for (i = 0; i < data->acpi_data.state_count; i++) {
		if (value == data->acpi_data.states[i].status)
			return data->acpi_data.states[i].core_frequency;
	}
	return data->acpi_data.states[i-1].core_frequency;
}


static unsigned int
processor_get_freq (
	struct cpufreq_acpi_io	*data,
	unsigned int		cpu)
{
	int			ret = 0;
	u32			value = 0;
	cpumask_t		saved_mask;
	unsigned long		clock_freq;

	pr_debug("processor_get_freq\n");

	saved_mask = current->cpus_allowed;
	set_cpus_allowed_ptr(current, cpumask_of(cpu));
	if (smp_processor_id() != cpu)
		goto migrate_end;

	/* processor_get_pstate gets the instantaneous frequency */
	ret = processor_get_pstate(&value);

	if (ret) {
		set_cpus_allowed_ptr(current, &saved_mask);
		pr_warn("get performance failed with error %d\n", ret);
		ret = 0;
		goto migrate_end;
	}
	clock_freq = extract_clock(data, value, cpu);
	ret = (clock_freq*1000);

migrate_end:
	set_cpus_allowed_ptr(current, &saved_mask);
	return ret;
}


static int
processor_set_freq (
	struct cpufreq_acpi_io	*data,
	struct cpufreq_policy	*policy,
	int			state)
{
	int			ret = 0;
	u32			value = 0;
	cpumask_t		saved_mask;
	int			retval;

	pr_debug("processor_set_freq\n");

	saved_mask = current->cpus_allowed;
	set_cpus_allowed_ptr(current, cpumask_of(policy->cpu));
	if (smp_processor_id() != policy->cpu) {
		retval = -EAGAIN;
		goto migrate_end;
	}

	if (state == data->acpi_data.state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n", state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n", state);
			retval = 0;
			goto migrate_end;
		}
	}

	pr_debug("Transitioning from P%d to P%d\n",
		data->acpi_data.state, state);

	/*
	 * First we write the target state's 'control' value to the
	 * control_register.
	 */

	value = (u32) data->acpi_data.states[state].control;

	pr_debug("Transitioning to state: 0x%08x\n", value);

	ret = processor_set_pstate(value);
	if (ret) {
		pr_warn("Transition failed with error %d\n", ret);
		retval = -ENODEV;
		goto migrate_end;
	}

	data->acpi_data.state = state;

	retval = 0;

migrate_end:
	set_cpus_allowed_ptr(current, &saved_mask);
	return (retval);
}


static unsigned int
acpi_cpufreq_get (
	unsigned int		cpu)
{
	struct cpufreq_acpi_io *data = acpi_io_data[cpu];

	pr_debug("acpi_cpufreq_get\n");

	return processor_get_freq(data, cpu);
}


static int
acpi_cpufreq_target (
	struct cpufreq_policy	*policy,
	unsigned int		index)
{
	return processor_set_freq(acpi_io_data[policy->cpu], policy, index);
}

static int
acpi_cpufreq_cpu_init (
	struct cpufreq_policy	*policy)
{
	unsigned int		i;
	unsigned int		cpu = policy->cpu;
	struct cpufreq_acpi_io	*data;
	unsigned int		result = 0;
	struct cpufreq_frequency_table *freq_table;

	pr_debug("acpi_cpufreq_cpu_init\n");

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return (-ENOMEM);

	acpi_io_data[cpu] = data;

	result = acpi_processor_register_performance(&data->acpi_data, cpu);

	if (result)
		goto err_free;

	/* capability check */
	if (data->acpi_data.state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if ((data->acpi_data.control_register.space_id !=
					ACPI_ADR_SPACE_FIXED_HARDWARE) ||
	    (data->acpi_data.status_register.space_id !=
					ACPI_ADR_SPACE_FIXED_HARDWARE)) {
		pr_debug("Unsupported address space [%d, %d]\n",
			(u32) (data->acpi_data.control_register.space_id),
			(u32) (data->acpi_data.status_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	/* alloc freq_table */
	freq_table = kzalloc(sizeof(*freq_table) *
			     (data->acpi_data.state_count + 1),
			     GFP_KERNEL);
	if (!freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i=0; i<data->acpi_data.state_count; i++) {
		if ((data->acpi_data.states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency) {
			policy->cpuinfo.transition_latency =
			    data->acpi_data.states[i].transition_latency * 1000;
		}
	}

	/* table init */
	for (i = 0; i <= data->acpi_data.state_count; i++)
	{
		if (i < data->acpi_data.state_count) {
			freq_table[i].frequency =
			      data->acpi_data.states[i].core_frequency * 1000;
		} else {
			freq_table[i].frequency = CPUFREQ_TABLE_END;
		}
	}

	result = cpufreq_table_validate_and_show(policy, freq_table);
	if (result) {
		goto err_freqfree;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	pr_info("CPU%u - ACPI performance management activated\n", cpu);

	for (i = 0; i < data->acpi_data.state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
			(i == data->acpi_data.state?'*':' '), i,
			(u32) data->acpi_data.states[i].core_frequency,
			(u32) data->acpi_data.states[i].power,
			(u32) data->acpi_data.states[i].transition_latency,
			(u32) data->acpi_data.states[i].bus_master_latency,
			(u32) data->acpi_data.states[i].status,
			(u32) data->acpi_data.states[i].control);

	/* the first call to ->target() should result in us actually
	 * writing something to the appropriate registers. */
	data->resume = 1;

	return (result);

 err_freqfree:
	kfree(freq_table);
 err_unreg:
	acpi_processor_unregister_performance(cpu);
 err_free:
	kfree(data);
	acpi_io_data[cpu] = NULL;

	return (result);
}


static int
acpi_cpufreq_cpu_exit (
	struct cpufreq_policy	*policy)
{
	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];

	pr_debug("acpi_cpufreq_cpu_exit\n");

	if (data) {
		acpi_io_data[policy->cpu] = NULL;
		acpi_processor_unregister_performance(policy->cpu);
		kfree(policy->freq_table);
		kfree(data);
	}

	return (0);
}


static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= acpi_cpufreq_target,
	.get		= acpi_cpufreq_get,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.name		= "acpi-cpufreq",
	.attr		= cpufreq_generic_attr,
};


static int __init
acpi_cpufreq_init (void)
{
	pr_debug("acpi_cpufreq_init\n");

	return cpufreq_register_driver(&acpi_cpufreq_driver);
}


static void __exit
acpi_cpufreq_exit (void)
{
	pr_debug("acpi_cpufreq_exit\n");

	cpufreq_unregister_driver(&acpi_cpufreq_driver);
	return;
}


late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);