1 /* 2 * acpi-cpufreq.c - ACPI Processor P-States Driver 3 * 4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 6 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de> 7 * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com> 8 * 9 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 10 * 11 * This program is free software; you can redistribute it and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation; either version 2 of the License, or (at 14 * your option) any later version. 15 * 16 * This program is distributed in the hope that it will be useful, but 17 * WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 * General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License along 22 * with this program; if not, write to the Free Software Foundation, Inc., 23 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 24 * 25 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 26 */ 27 28 #include <linux/kernel.h> 29 #include <linux/module.h> 30 #include <linux/init.h> 31 #include <linux/smp.h> 32 #include <linux/sched.h> 33 #include <linux/cpufreq.h> 34 #include <linux/compiler.h> 35 #include <linux/dmi.h> 36 #include <linux/slab.h> 37 38 #include <linux/acpi.h> 39 #include <linux/io.h> 40 #include <linux/delay.h> 41 #include <linux/uaccess.h> 42 43 #include <acpi/processor.h> 44 45 #include <asm/msr.h> 46 #include <asm/processor.h> 47 #include <asm/cpufeature.h> 48 #include "mperf.h" 49 50 MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); 51 MODULE_DESCRIPTION("ACPI Processor P-States Driver"); 52 MODULE_LICENSE("GPL"); 53 54 #define PFX "acpi-cpufreq: " 55 56 enum { 57 UNDEFINED_CAPABLE = 0, 58 SYSTEM_INTEL_MSR_CAPABLE, 59 SYSTEM_AMD_MSR_CAPABLE, 60 SYSTEM_IO_CAPABLE, 61 }; 62 63 #define INTEL_MSR_RANGE (0xffff) 64 #define AMD_MSR_RANGE (0x7) 65 66 #define MSR_K7_HWCR_CPB_DIS (1ULL << 25) 67 68 struct acpi_cpufreq_data { 69 struct acpi_processor_performance *acpi_data; 70 struct cpufreq_frequency_table *freq_table; 71 unsigned int resume; 72 unsigned int cpu_feature; 73 }; 74 75 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data); 76 77 /* acpi_perf_data is a pointer to percpu data. */ 78 static struct acpi_processor_performance __percpu *acpi_perf_data; 79 80 static struct cpufreq_driver acpi_cpufreq_driver; 81 82 static unsigned int acpi_pstate_strict; 83 static bool boost_enabled, boost_supported; 84 static struct msr __percpu *msrs; 85 86 static bool boost_state(unsigned int cpu) 87 { 88 u32 lo, hi; 89 u64 msr; 90 91 switch (boot_cpu_data.x86_vendor) { 92 case X86_VENDOR_INTEL: 93 rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi); 94 msr = lo | ((u64)hi << 32); 95 return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); 96 case X86_VENDOR_AMD: 97 rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi); 98 msr = lo | ((u64)hi << 32); 99 return !(msr & MSR_K7_HWCR_CPB_DIS); 100 } 101 return false; 102 } 103 104 static void boost_set_msrs(bool enable, const struct cpumask *cpumask) 105 { 106 u32 cpu; 107 u32 msr_addr; 108 u64 msr_mask; 109 110 switch (boot_cpu_data.x86_vendor) { 111 case X86_VENDOR_INTEL: 112 msr_addr = MSR_IA32_MISC_ENABLE; 113 msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE; 114 break; 115 case X86_VENDOR_AMD: 116 msr_addr = MSR_K7_HWCR; 117 msr_mask = MSR_K7_HWCR_CPB_DIS; 118 break; 119 default: 120 return; 121 } 122 123 rdmsr_on_cpus(cpumask, msr_addr, msrs); 124 125 for_each_cpu(cpu, cpumask) { 126 struct msr *reg = per_cpu_ptr(msrs, cpu); 127 if (enable) 128 reg->q &= ~msr_mask; 129 else 130 reg->q |= msr_mask; 131 } 132 133 wrmsr_on_cpus(cpumask, msr_addr, msrs); 134 } 135 136 static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr, 137 const char *buf, size_t count) 138 { 139 int ret; 140 unsigned long val = 0; 141 142 if (!boost_supported) 143 return -EINVAL; 144 145 ret = kstrtoul(buf, 10, &val); 146 if (ret || (val > 1)) 147 return -EINVAL; 148 149 if ((val && boost_enabled) || (!val && !boost_enabled)) 150 return count; 151 152 get_online_cpus(); 153 154 boost_set_msrs(val, cpu_online_mask); 155 156 put_online_cpus(); 157 158 boost_enabled = val; 159 pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis"); 160 161 return count; 162 } 163 164 static ssize_t show_global_boost(struct kobject *kobj, 165 struct attribute *attr, char *buf) 166 { 167 return sprintf(buf, "%u\n", boost_enabled); 168 } 169 170 static struct global_attr global_boost = __ATTR(boost, 0644, 171 show_global_boost, 172 store_global_boost); 173 174 static int check_est_cpu(unsigned int cpuid) 175 { 176 struct cpuinfo_x86 *cpu = &cpu_data(cpuid); 177 178 return cpu_has(cpu, X86_FEATURE_EST); 179 } 180 181 static int check_amd_hwpstate_cpu(unsigned int cpuid) 182 { 183 struct cpuinfo_x86 *cpu = &cpu_data(cpuid); 184 185 return cpu_has(cpu, X86_FEATURE_HW_PSTATE); 186 } 187 188 static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data) 189 { 190 struct acpi_processor_performance *perf; 191 int i; 192 193 perf = data->acpi_data; 194 195 for (i = 0; i < perf->state_count; i++) { 196 if (value == perf->states[i].status) 197 return data->freq_table[i].frequency; 198 } 199 return 0; 200 } 201 202 static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) 203 { 204 int i; 205 struct acpi_processor_performance *perf; 206 207 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) 208 msr &= AMD_MSR_RANGE; 209 else 210 msr &= INTEL_MSR_RANGE; 211 212 perf = data->acpi_data; 213 214 for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { 215 if (msr == perf->states[data->freq_table[i].index].status) 216 return data->freq_table[i].frequency; 217 } 218 return data->freq_table[0].frequency; 219 } 220 221 static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data) 222 { 223 switch (data->cpu_feature) { 224 case SYSTEM_INTEL_MSR_CAPABLE: 225 case SYSTEM_AMD_MSR_CAPABLE: 226 return extract_msr(val, data); 227 case SYSTEM_IO_CAPABLE: 228 return extract_io(val, data); 229 default: 230 return 0; 231 } 232 } 233 234 struct msr_addr { 235 u32 reg; 236 }; 237 238 struct io_addr { 239 u16 port; 240 u8 bit_width; 241 }; 242 243 struct drv_cmd { 244 unsigned int type; 245 const struct cpumask *mask; 246 union { 247 struct msr_addr msr; 248 struct io_addr io; 249 } addr; 250 u32 val; 251 }; 252 253 /* Called via smp_call_function_single(), on the target CPU */ 254 static void do_drv_read(void *_cmd) 255 { 256 struct drv_cmd *cmd = _cmd; 257 u32 h; 258 259 switch (cmd->type) { 260 case SYSTEM_INTEL_MSR_CAPABLE: 261 case SYSTEM_AMD_MSR_CAPABLE: 262 rdmsr(cmd->addr.msr.reg, cmd->val, h); 263 break; 264 case SYSTEM_IO_CAPABLE: 265 acpi_os_read_port((acpi_io_address)cmd->addr.io.port, 266 &cmd->val, 267 (u32)cmd->addr.io.bit_width); 268 break; 269 default: 270 break; 271 } 272 } 273 274 /* Called via smp_call_function_many(), on the target CPUs */ 275 static void do_drv_write(void *_cmd) 276 { 277 struct drv_cmd *cmd = _cmd; 278 u32 lo, hi; 279 280 switch (cmd->type) { 281 case SYSTEM_INTEL_MSR_CAPABLE: 282 rdmsr(cmd->addr.msr.reg, lo, hi); 283 lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE); 284 wrmsr(cmd->addr.msr.reg, lo, hi); 285 break; 286 case SYSTEM_AMD_MSR_CAPABLE: 287 wrmsr(cmd->addr.msr.reg, cmd->val, 0); 288 break; 289 case SYSTEM_IO_CAPABLE: 290 acpi_os_write_port((acpi_io_address)cmd->addr.io.port, 291 cmd->val, 292 (u32)cmd->addr.io.bit_width); 293 break; 294 default: 295 break; 296 } 297 } 298 299 static void drv_read(struct drv_cmd *cmd) 300 { 301 int err; 302 cmd->val = 0; 303 304 err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1); 305 WARN_ON_ONCE(err); /* smp_call_function_any() was buggy? */ 306 } 307 308 static void drv_write(struct drv_cmd *cmd) 309 { 310 int this_cpu; 311 312 this_cpu = get_cpu(); 313 if (cpumask_test_cpu(this_cpu, cmd->mask)) 314 do_drv_write(cmd); 315 smp_call_function_many(cmd->mask, do_drv_write, cmd, 1); 316 put_cpu(); 317 } 318 319 static u32 get_cur_val(const struct cpumask *mask) 320 { 321 struct acpi_processor_performance *perf; 322 struct drv_cmd cmd; 323 324 if (unlikely(cpumask_empty(mask))) 325 return 0; 326 327 switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) { 328 case SYSTEM_INTEL_MSR_CAPABLE: 329 cmd.type = SYSTEM_INTEL_MSR_CAPABLE; 330 cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; 331 break; 332 case SYSTEM_AMD_MSR_CAPABLE: 333 cmd.type = SYSTEM_AMD_MSR_CAPABLE; 334 cmd.addr.msr.reg = MSR_AMD_PERF_STATUS; 335 break; 336 case SYSTEM_IO_CAPABLE: 337 cmd.type = SYSTEM_IO_CAPABLE; 338 perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data; 339 cmd.addr.io.port = perf->control_register.address; 340 cmd.addr.io.bit_width = perf->control_register.bit_width; 341 break; 342 default: 343 return 0; 344 } 345 346 cmd.mask = mask; 347 drv_read(&cmd); 348 349 pr_debug("get_cur_val = %u\n", cmd.val); 350 351 return cmd.val; 352 } 353 354 static unsigned int get_cur_freq_on_cpu(unsigned int cpu) 355 { 356 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu); 357 unsigned int freq; 358 unsigned int cached_freq; 359 360 pr_debug("get_cur_freq_on_cpu (%d)\n", cpu); 361 362 if (unlikely(data == NULL || 363 data->acpi_data == NULL || data->freq_table == NULL)) { 364 return 0; 365 } 366 367 cached_freq = data->freq_table[data->acpi_data->state].frequency; 368 freq = extract_freq(get_cur_val(cpumask_of(cpu)), data); 369 if (freq != cached_freq) { 370 /* 371 * The dreaded BIOS frequency change behind our back. 372 * Force set the frequency on next target call. 373 */ 374 data->resume = 1; 375 } 376 377 pr_debug("cur freq = %u\n", freq); 378 379 return freq; 380 } 381 382 static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, 383 struct acpi_cpufreq_data *data) 384 { 385 unsigned int cur_freq; 386 unsigned int i; 387 388 for (i = 0; i < 100; i++) { 389 cur_freq = extract_freq(get_cur_val(mask), data); 390 if (cur_freq == freq) 391 return 1; 392 udelay(10); 393 } 394 return 0; 395 } 396 397 static int acpi_cpufreq_target(struct cpufreq_policy *policy, 398 unsigned int target_freq, unsigned int relation) 399 { 400 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); 401 struct acpi_processor_performance *perf; 402 struct cpufreq_freqs freqs; 403 struct drv_cmd cmd; 404 unsigned int next_state = 0; /* Index into freq_table */ 405 unsigned int next_perf_state = 0; /* Index into perf table */ 406 unsigned int i; 407 int result = 0; 408 409 pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); 410 411 if (unlikely(data == NULL || 412 data->acpi_data == NULL || data->freq_table == NULL)) { 413 return -ENODEV; 414 } 415 416 perf = data->acpi_data; 417 result = cpufreq_frequency_table_target(policy, 418 data->freq_table, 419 target_freq, 420 relation, &next_state); 421 if (unlikely(result)) { 422 result = -ENODEV; 423 goto out; 424 } 425 426 next_perf_state = data->freq_table[next_state].index; 427 if (perf->state == next_perf_state) { 428 if (unlikely(data->resume)) { 429 pr_debug("Called after resume, resetting to P%d\n", 430 next_perf_state); 431 data->resume = 0; 432 } else { 433 pr_debug("Already at target state (P%d)\n", 434 next_perf_state); 435 goto out; 436 } 437 } 438 439 switch (data->cpu_feature) { 440 case SYSTEM_INTEL_MSR_CAPABLE: 441 cmd.type = SYSTEM_INTEL_MSR_CAPABLE; 442 cmd.addr.msr.reg = MSR_IA32_PERF_CTL; 443 cmd.val = (u32) perf->states[next_perf_state].control; 444 break; 445 case SYSTEM_AMD_MSR_CAPABLE: 446 cmd.type = SYSTEM_AMD_MSR_CAPABLE; 447 cmd.addr.msr.reg = MSR_AMD_PERF_CTL; 448 cmd.val = (u32) perf->states[next_perf_state].control; 449 break; 450 case SYSTEM_IO_CAPABLE: 451 cmd.type = SYSTEM_IO_CAPABLE; 452 cmd.addr.io.port = perf->control_register.address; 453 cmd.addr.io.bit_width = perf->control_register.bit_width; 454 cmd.val = (u32) perf->states[next_perf_state].control; 455 break; 456 default: 457 result = -ENODEV; 458 goto out; 459 } 460 461 /* cpufreq holds the hotplug lock, so we are safe from here on */ 462 if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY) 463 cmd.mask = policy->cpus; 464 else 465 cmd.mask = cpumask_of(policy->cpu); 466 467 freqs.old = perf->states[perf->state].core_frequency * 1000; 468 freqs.new = data->freq_table[next_state].frequency; 469 for_each_cpu(i, policy->cpus) { 470 freqs.cpu = i; 471 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 472 } 473 474 drv_write(&cmd); 475 476 if (acpi_pstate_strict) { 477 if (!check_freqs(cmd.mask, freqs.new, data)) { 478 pr_debug("acpi_cpufreq_target failed (%d)\n", 479 policy->cpu); 480 result = -EAGAIN; 481 goto out; 482 } 483 } 484 485 for_each_cpu(i, policy->cpus) { 486 freqs.cpu = i; 487 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 488 } 489 perf->state = next_perf_state; 490 491 out: 492 return result; 493 } 494 495 static int acpi_cpufreq_verify(struct cpufreq_policy *policy) 496 { 497 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); 498 499 pr_debug("acpi_cpufreq_verify\n"); 500 501 return cpufreq_frequency_table_verify(policy, data->freq_table); 502 } 503 504 static unsigned long 505 acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) 506 { 507 struct acpi_processor_performance *perf = data->acpi_data; 508 509 if (cpu_khz) { 510 /* search the closest match to cpu_khz */ 511 unsigned int i; 512 unsigned long freq; 513 unsigned long freqn = perf->states[0].core_frequency * 1000; 514 515 for (i = 0; i < (perf->state_count-1); i++) { 516 freq = freqn; 517 freqn = perf->states[i+1].core_frequency * 1000; 518 if ((2 * cpu_khz) > (freqn + freq)) { 519 perf->state = i; 520 return freq; 521 } 522 } 523 perf->state = perf->state_count-1; 524 return freqn; 525 } else { 526 /* assume CPU is at P0... */ 527 perf->state = 0; 528 return perf->states[0].core_frequency * 1000; 529 } 530 } 531 532 static void free_acpi_perf_data(void) 533 { 534 unsigned int i; 535 536 /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */ 537 for_each_possible_cpu(i) 538 free_cpumask_var(per_cpu_ptr(acpi_perf_data, i) 539 ->shared_cpu_map); 540 free_percpu(acpi_perf_data); 541 } 542 543 static int boost_notify(struct notifier_block *nb, unsigned long action, 544 void *hcpu) 545 { 546 unsigned cpu = (long)hcpu; 547 const struct cpumask *cpumask; 548 549 cpumask = get_cpu_mask(cpu); 550 551 /* 552 * Clear the boost-disable bit on the CPU_DOWN path so that 553 * this cpu cannot block the remaining ones from boosting. On 554 * the CPU_UP path we simply keep the boost-disable flag in 555 * sync with the current global state. 556 */ 557 558 switch (action) { 559 case CPU_UP_PREPARE: 560 case CPU_UP_PREPARE_FROZEN: 561 boost_set_msrs(boost_enabled, cpumask); 562 break; 563 564 case CPU_DOWN_PREPARE: 565 case CPU_DOWN_PREPARE_FROZEN: 566 boost_set_msrs(1, cpumask); 567 break; 568 569 default: 570 break; 571 } 572 573 return NOTIFY_OK; 574 } 575 576 577 static struct notifier_block boost_nb = { 578 .notifier_call = boost_notify, 579 }; 580 581 /* 582 * acpi_cpufreq_early_init - initialize ACPI P-States library 583 * 584 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c) 585 * in order to determine correct frequency and voltage pairings. We can 586 * do _PDC and _PSD and find out the processor dependency for the 587 * actual init that will happen later... 588 */ 589 static int __init acpi_cpufreq_early_init(void) 590 { 591 unsigned int i; 592 pr_debug("acpi_cpufreq_early_init\n"); 593 594 acpi_perf_data = alloc_percpu(struct acpi_processor_performance); 595 if (!acpi_perf_data) { 596 pr_debug("Memory allocation error for acpi_perf_data.\n"); 597 return -ENOMEM; 598 } 599 for_each_possible_cpu(i) { 600 if (!zalloc_cpumask_var_node( 601 &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, 602 GFP_KERNEL, cpu_to_node(i))) { 603 604 /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */ 605 free_acpi_perf_data(); 606 return -ENOMEM; 607 } 608 } 609 610 /* Do initialization in ACPI core */ 611 acpi_processor_preregister_performance(acpi_perf_data); 612 return 0; 613 } 614 615 #ifdef CONFIG_SMP 616 /* 617 * Some BIOSes do SW_ANY coordination internally, either set it up in hw 618 * or do it in BIOS firmware and won't inform about it to OS. If not 619 * detected, this has a side effect of making CPU run at a different speed 620 * than OS intended it to run at. Detect it and handle it cleanly. 621 */ 622 static int bios_with_sw_any_bug; 623 624 static int sw_any_bug_found(const struct dmi_system_id *d) 625 { 626 bios_with_sw_any_bug = 1; 627 return 0; 628 } 629 630 static const struct dmi_system_id sw_any_bug_dmi_table[] = { 631 { 632 .callback = sw_any_bug_found, 633 .ident = "Supermicro Server X6DLP", 634 .matches = { 635 DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), 636 DMI_MATCH(DMI_BIOS_VERSION, "080010"), 637 DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"), 638 }, 639 }, 640 { } 641 }; 642 643 static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) 644 { 645 /* Intel Xeon Processor 7100 Series Specification Update 646 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf 647 * AL30: A Machine Check Exception (MCE) Occurring during an 648 * Enhanced Intel SpeedStep Technology Ratio Change May Cause 649 * Both Processor Cores to Lock Up. */ 650 if (c->x86_vendor == X86_VENDOR_INTEL) { 651 if ((c->x86 == 15) && 652 (c->x86_model == 6) && 653 (c->x86_mask == 8)) { 654 printk(KERN_INFO "acpi-cpufreq: Intel(R) " 655 "Xeon(R) 7100 Errata AL30, processors may " 656 "lock up on frequency changes: disabling " 657 "acpi-cpufreq.\n"); 658 return -ENODEV; 659 } 660 } 661 return 0; 662 } 663 #endif 664 665 static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) 666 { 667 unsigned int i; 668 unsigned int valid_states = 0; 669 unsigned int cpu = policy->cpu; 670 struct acpi_cpufreq_data *data; 671 unsigned int result = 0; 672 struct cpuinfo_x86 *c = &cpu_data(policy->cpu); 673 struct acpi_processor_performance *perf; 674 #ifdef CONFIG_SMP 675 static int blacklisted; 676 #endif 677 678 pr_debug("acpi_cpufreq_cpu_init\n"); 679 680 #ifdef CONFIG_SMP 681 if (blacklisted) 682 return blacklisted; 683 blacklisted = acpi_cpufreq_blacklist(c); 684 if (blacklisted) 685 return blacklisted; 686 #endif 687 688 data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL); 689 if (!data) 690 return -ENOMEM; 691 692 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu); 693 per_cpu(acfreq_data, cpu) = data; 694 695 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) 696 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; 697 698 result = acpi_processor_register_performance(data->acpi_data, cpu); 699 if (result) 700 goto err_free; 701 702 perf = data->acpi_data; 703 policy->shared_type = perf->shared_type; 704 705 /* 706 * Will let policy->cpus know about dependency only when software 707 * coordination is required. 708 */ 709 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || 710 policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { 711 cpumask_copy(policy->cpus, perf->shared_cpu_map); 712 } 713 cpumask_copy(policy->related_cpus, perf->shared_cpu_map); 714 715 #ifdef CONFIG_SMP 716 dmi_check_system(sw_any_bug_dmi_table); 717 if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) { 718 policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; 719 cpumask_copy(policy->cpus, cpu_core_mask(cpu)); 720 } 721 722 if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) { 723 cpumask_clear(policy->cpus); 724 cpumask_set_cpu(cpu, policy->cpus); 725 cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu)); 726 policy->shared_type = CPUFREQ_SHARED_TYPE_HW; 727 pr_info_once(PFX "overriding BIOS provided _PSD data\n"); 728 } 729 #endif 730 731 /* capability check */ 732 if (perf->state_count <= 1) { 733 pr_debug("No P-States\n"); 734 result = -ENODEV; 735 goto err_unreg; 736 } 737 738 if (perf->control_register.space_id != perf->status_register.space_id) { 739 result = -ENODEV; 740 goto err_unreg; 741 } 742 743 switch (perf->control_register.space_id) { 744 case ACPI_ADR_SPACE_SYSTEM_IO: 745 pr_debug("SYSTEM IO addr space\n"); 746 data->cpu_feature = SYSTEM_IO_CAPABLE; 747 break; 748 case ACPI_ADR_SPACE_FIXED_HARDWARE: 749 pr_debug("HARDWARE addr space\n"); 750 if (check_est_cpu(cpu)) { 751 data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE; 752 break; 753 } 754 if (check_amd_hwpstate_cpu(cpu)) { 755 data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE; 756 break; 757 } 758 result = -ENODEV; 759 goto err_unreg; 760 default: 761 pr_debug("Unknown addr space %d\n", 762 (u32) (perf->control_register.space_id)); 763 result = -ENODEV; 764 goto err_unreg; 765 } 766 767 data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * 768 (perf->state_count+1), GFP_KERNEL); 769 if (!data->freq_table) { 770 result = -ENOMEM; 771 goto err_unreg; 772 } 773 774 /* detect transition latency */ 775 policy->cpuinfo.transition_latency = 0; 776 for (i = 0; i < perf->state_count; i++) { 777 if ((perf->states[i].transition_latency * 1000) > 778 policy->cpuinfo.transition_latency) 779 policy->cpuinfo.transition_latency = 780 perf->states[i].transition_latency * 1000; 781 } 782 783 /* Check for high latency (>20uS) from buggy BIOSes, like on T42 */ 784 if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE && 785 policy->cpuinfo.transition_latency > 20 * 1000) { 786 policy->cpuinfo.transition_latency = 20 * 1000; 787 printk_once(KERN_INFO 788 "P-state transition latency capped at 20 uS\n"); 789 } 790 791 /* table init */ 792 for (i = 0; i < perf->state_count; i++) { 793 if (i > 0 && perf->states[i].core_frequency >= 794 data->freq_table[valid_states-1].frequency / 1000) 795 continue; 796 797 data->freq_table[valid_states].index = i; 798 data->freq_table[valid_states].frequency = 799 perf->states[i].core_frequency * 1000; 800 valid_states++; 801 } 802 data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END; 803 perf->state = 0; 804 805 result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); 806 if (result) 807 goto err_freqfree; 808 809 if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq) 810 printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n"); 811 812 switch (perf->control_register.space_id) { 813 case ACPI_ADR_SPACE_SYSTEM_IO: 814 /* Current speed is unknown and not detectable by IO port */ 815 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu); 816 break; 817 case ACPI_ADR_SPACE_FIXED_HARDWARE: 818 acpi_cpufreq_driver.get = get_cur_freq_on_cpu; 819 policy->cur = get_cur_freq_on_cpu(cpu); 820 break; 821 default: 822 break; 823 } 824 825 /* notify BIOS that we exist */ 826 acpi_processor_notify_smm(THIS_MODULE); 827 828 /* Check for APERF/MPERF support in hardware */ 829 if (boot_cpu_has(X86_FEATURE_APERFMPERF)) 830 acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf; 831 832 pr_debug("CPU%u - ACPI performance management activated.\n", cpu); 833 for (i = 0; i < perf->state_count; i++) 834 pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n", 835 (i == perf->state ? '*' : ' '), i, 836 (u32) perf->states[i].core_frequency, 837 (u32) perf->states[i].power, 838 (u32) perf->states[i].transition_latency); 839 840 cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); 841 842 /* 843 * the first call to ->target() should result in us actually 844 * writing something to the appropriate registers. 845 */ 846 data->resume = 1; 847 848 return result; 849 850 err_freqfree: 851 kfree(data->freq_table); 852 err_unreg: 853 acpi_processor_unregister_performance(perf, cpu); 854 err_free: 855 kfree(data); 856 per_cpu(acfreq_data, cpu) = NULL; 857 858 return result; 859 } 860 861 static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) 862 { 863 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); 864 865 pr_debug("acpi_cpufreq_cpu_exit\n"); 866 867 if (data) { 868 cpufreq_frequency_table_put_attr(policy->cpu); 869 per_cpu(acfreq_data, policy->cpu) = NULL; 870 acpi_processor_unregister_performance(data->acpi_data, 871 policy->cpu); 872 kfree(data->freq_table); 873 kfree(data); 874 } 875 876 return 0; 877 } 878 879 static int acpi_cpufreq_resume(struct cpufreq_policy *policy) 880 { 881 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); 882 883 pr_debug("acpi_cpufreq_resume\n"); 884 885 data->resume = 1; 886 887 return 0; 888 } 889 890 static struct freq_attr *acpi_cpufreq_attr[] = { 891 &cpufreq_freq_attr_scaling_available_freqs, 892 NULL, 893 }; 894 895 static struct cpufreq_driver acpi_cpufreq_driver = { 896 .verify = acpi_cpufreq_verify, 897 .target = acpi_cpufreq_target, 898 .bios_limit = acpi_processor_get_bios_limit, 899 .init = acpi_cpufreq_cpu_init, 900 .exit = acpi_cpufreq_cpu_exit, 901 .resume = acpi_cpufreq_resume, 902 .name = "acpi-cpufreq", 903 .owner = THIS_MODULE, 904 .attr = acpi_cpufreq_attr, 905 }; 906 907 static void __init acpi_cpufreq_boost_init(void) 908 { 909 if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) { 910 msrs = msrs_alloc(); 911 912 if (!msrs) 913 return; 914 915 boost_supported = true; 916 boost_enabled = boost_state(0); 917 918 get_online_cpus(); 919 920 /* Force all MSRs to the same value */ 921 boost_set_msrs(boost_enabled, cpu_online_mask); 922 923 register_cpu_notifier(&boost_nb); 924 925 put_online_cpus(); 926 } else 927 global_boost.attr.mode = 0444; 928 929 /* We create the boost file in any case, though for systems without 930 * hardware support it will be read-only and hardwired to return 0. 931 */ 932 if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr))) 933 pr_warn(PFX "could not register global boost sysfs file\n"); 934 else 935 pr_debug("registered global boost sysfs file\n"); 936 } 937 938 static void __exit acpi_cpufreq_boost_exit(void) 939 { 940 sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr)); 941 942 if (msrs) { 943 unregister_cpu_notifier(&boost_nb); 944 945 msrs_free(msrs); 946 msrs = NULL; 947 } 948 } 949 950 static int __init acpi_cpufreq_init(void) 951 { 952 int ret; 953 954 if (acpi_disabled) 955 return 0; 956 957 pr_debug("acpi_cpufreq_init\n"); 958 959 ret = acpi_cpufreq_early_init(); 960 if (ret) 961 return ret; 962 963 ret = cpufreq_register_driver(&acpi_cpufreq_driver); 964 if (ret) 965 free_acpi_perf_data(); 966 else 967 acpi_cpufreq_boost_init(); 968 969 return ret; 970 } 971 972 static void __exit acpi_cpufreq_exit(void) 973 { 974 pr_debug("acpi_cpufreq_exit\n"); 975 976 acpi_cpufreq_boost_exit(); 977 978 cpufreq_unregister_driver(&acpi_cpufreq_driver); 979 980 free_acpi_perf_data(); 981 } 982 983 module_param(acpi_pstate_strict, uint, 0644); 984 MODULE_PARM_DESC(acpi_pstate_strict, 985 "value 0 or non-zero. non-zero -> strict ACPI checks are " 986 "performed during frequency changes."); 987 988 late_initcall(acpi_cpufreq_init); 989 module_exit(acpi_cpufreq_exit); 990 991 MODULE_ALIAS("acpi"); 992