/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include "mperf.h"

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_AMD_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};

#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)

struct acpi_cpufreq_data {
	struct acpi_processor_performance *acpi_data;
	struct cpufreq_frequency_table *freq_table;
	unsigned int resume;
	unsigned int cpu_feature;
};

static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);

/* acpi_perf_data is a pointer to percpu data. */
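/*
 * Allocated with alloc_percpu() in acpi_cpufreq_early_init() and released
 * in free_acpi_perf_data(); each CPU's entry is handed to the ACPI core via
 * acpi_processor_register_performance() during ->init().
 */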
static struct acpi_processor_performance __percpu *acpi_perf_data;

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;

static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}

static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}

static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
{
	struct acpi_processor_performance *perf;
	int i;

	perf = data->acpi_data;

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return data->freq_table[i].frequency;
	}
	return 0;
}

static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
	int i;
	struct acpi_processor_performance *perf;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

	perf = data->acpi_data;

	for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		if (msr == perf->states[data->freq_table[i].index].status)
			return data->freq_table[i].frequency;
	}
	return data->freq_table[0].frequency;
}

static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		return extract_msr(val, data);
	case SYSTEM_IO_CAPABLE:
		return extract_io(val, data);
	default:
		return 0;
	}
}

struct msr_addr {
	u32 reg;
};

struct io_addr {
	u16 port;
	u8 bit_width;
};

struct drv_cmd {
	unsigned int type;
	const struct cpumask *mask;
	union {
		struct msr_addr msr;
		struct io_addr io;
	} addr;
	u32 val;
};

/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 h;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, cmd->val, h);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
				&cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 lo, hi;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, lo, hi);
		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
		wrmsr(cmd->addr.msr.reg, lo, hi);
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		wrmsr(cmd->addr.msr.reg, cmd->val, 0);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
				cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}

static void drv_read(struct drv_cmd *cmd)
{
	int err;
	cmd->val = 0;

	err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
}
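
/*
 * drv_write() runs do_drv_write() on every CPU in cmd->mask: directly if
 * the current CPU is in the mask, and via IPI for the rest.  The
 * get_cpu()/put_cpu() pair disables preemption so the "is this CPU in the
 * mask?" answer cannot change underneath us.
 */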
static void drv_write(struct drv_cmd *cmd)
{
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, cmd->mask))
		do_drv_write(cmd);
	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
	put_cpu();
}

static u32 get_cur_val(const struct cpumask *mask)
{
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		break;
	default:
		return 0;
	}

	cmd.mask = mask;
	drv_read(&cmd);

	pr_debug("get_cur_val = %u\n", cmd.val);

	return cmd.val;
}

static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);

	if (unlikely(data == NULL ||
		     data->acpi_data == NULL || data->freq_table == NULL)) {
		return 0;
	}

	cached_freq = data->freq_table[data->acpi_data->state].frequency;
	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
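		/*
		 * (Setting ->resume makes the next acpi_cpufreq_target()
		 * call write the control register even if the requested
		 * P-state matches the cached one.)
		 */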
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}

static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
				struct acpi_cpufreq_data *data)
{
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(get_cur_val(mask), data);
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}

static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int target_freq, unsigned int relation)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
	struct acpi_processor_performance *perf;
	struct cpufreq_freqs freqs;
	struct drv_cmd cmd;
	unsigned int next_state = 0; /* Index into freq_table */
	unsigned int next_perf_state = 0; /* Index into perf table */
	unsigned int i;
	int result = 0;

	pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);

	if (unlikely(data == NULL ||
		     data->acpi_data == NULL || data->freq_table == NULL)) {
		return -ENODEV;
	}

	perf = data->acpi_data;
	result = cpufreq_frequency_table_target(policy,
						data->freq_table,
						target_freq,
						relation, &next_state);
	if (unlikely(result)) {
		result = -ENODEV;
		goto out;
	}

	next_perf_state = data->freq_table[next_state].index;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				next_perf_state);
			goto out;
		}
	}

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	default:
		result = -ENODEV;
		goto out;
	}

	/* cpufreq holds the hotplug lock, so we are safe from here on */
	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
		cmd.mask = policy->cpus;
	else
		cmd.mask = cpumask_of(policy->cpu);

	freqs.old = perf->states[perf->state].core_frequency * 1000;
	freqs.new = data->freq_table[next_state].frequency;
	for_each_cpu(i, policy->cpus) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	}

	drv_write(&cmd);

	if (acpi_pstate_strict) {
		if (!check_freqs(cmd.mask, freqs.new, data)) {
			pr_debug("acpi_cpufreq_target failed (%d)\n",
				policy->cpu);
			result = -EAGAIN;
			goto out;
		}
	}

	for_each_cpu(i, policy->cpus) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}
	perf->state = next_perf_state;

out:
	return result;
}

static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_verify\n");

	return cpufreq_frequency_table_verify(policy, data->freq_table);
}
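
/*
 * With SystemIO control there is no way to ask the hardware for its current
 * P-state, so guess: walk the (descending) _PSS table and pick the state
 * whose frequency is closest to the measured cpu_khz, using the midpoint
 * between neighbouring states as the cut-off.
 */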
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf = data->acpi_data;

	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}

static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}

/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;
	pr_debug("acpi_cpufreq_early_init\n");

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}

#ifdef CONFIG_SMP
/*
 * Some BIOSes do SW_ANY coordination internally, either in hardware or in
 * firmware, without informing the OS. Left undetected, this makes the CPU
 * run at a speed different from the one the OS requested. Detect it and
 * handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};

static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
	/* Intel Xeon Processor 7100 Series Specification Update
	 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
	 * Both Processor Cores to Lock Up. */
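	/* The erratum is specific to family 0xf, model 6, stepping 8 parts. */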
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
		    (c->x86_mask == 8)) {
			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
			    "Xeon(R) 7100 Errata AL30, processors may "
			    "lock up on frequency changes: disabling "
			    "acpi-cpufreq.\n");
			return -ENODEV;
		}
	}
	return 0;
}
#endif

static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	unsigned int valid_states = 0;
	unsigned int cpu = policy->cpu;
	struct acpi_cpufreq_data *data;
	int result = 0;
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	struct acpi_processor_performance *perf;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("acpi_cpufreq_cpu_init\n");

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
	per_cpu(acfreq_data, cpu) = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(data->acpi_data, cpu);
	if (result)
		goto err_free;

	perf = data->acpi_data;
	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	cpumask_copy(policy->related_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
	}
#endif

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			(u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
		    (perf->state_count+1), GFP_KERNEL);
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}
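
	/*
	 * (_PSS transition latencies are in microseconds; cpufreq expects
	 * nanoseconds, hence the multiplication by 1000 above.)
	 */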
	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		printk_once(KERN_INFO
			    "P-state transition latency capped at 20 uS\n");
	}

	/* table init */
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    data->freq_table[valid_states-1].frequency / 1000)
			continue;

		data->freq_table[valid_states].index = i;
		data->freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
	perf->state = 0;

	result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
	if (result)
		goto err_freqfree;

	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/* Current speed is unknown and not detectable by IO port */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		policy->cur = get_cur_freq_on_cpu(cpu);
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	/* Check for APERF/MPERF support in hardware */
	if (boot_cpu_has(X86_FEATURE_APERFMPERF))
		acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);

	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
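	/* (acpi_cpufreq_resume() raises the same flag after a suspend cycle.) */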
	data->resume = 1;

	return result;

err_freqfree:
	kfree(data->freq_table);
err_unreg:
	acpi_processor_unregister_performance(perf, cpu);
err_free:
	kfree(data);
	per_cpu(acfreq_data, cpu) = NULL;

	return result;
}

static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_cpu_exit\n");

	if (data) {
		cpufreq_frequency_table_put_attr(policy->cpu);
		per_cpu(acfreq_data, policy->cpu) = NULL;
		acpi_processor_unregister_performance(data->acpi_data,
						      policy->cpu);
		kfree(data->freq_table);
		kfree(data);
	}

	return 0;
}

static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_resume\n");

	data->resume = 1;

	return 0;
}

static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= acpi_cpufreq_verify,
	.target		= acpi_cpufreq_target,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.owner		= THIS_MODULE,
	.attr		= acpi_cpufreq_attr,
};

static int __init acpi_cpufreq_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	pr_debug("acpi_cpufreq_init\n");

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret)
		free_acpi_perf_data();

	return ret;
}

static void __exit acpi_cpufreq_exit(void)
{
	pr_debug("acpi_cpufreq_exit\n");

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}

module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

MODULE_ALIAS("acpi");