1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * intel_pstate.c: Native P state management for Intel processors 4 * 5 * (C) Copyright 2012 Intel Corporation 6 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com> 7 */ 8 9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 11 #include <linux/kernel.h> 12 #include <linux/kernel_stat.h> 13 #include <linux/module.h> 14 #include <linux/ktime.h> 15 #include <linux/hrtimer.h> 16 #include <linux/tick.h> 17 #include <linux/slab.h> 18 #include <linux/sched/cpufreq.h> 19 #include <linux/list.h> 20 #include <linux/cpu.h> 21 #include <linux/cpufreq.h> 22 #include <linux/sysfs.h> 23 #include <linux/types.h> 24 #include <linux/fs.h> 25 #include <linux/acpi.h> 26 #include <linux/vmalloc.h> 27 #include <linux/pm_qos.h> 28 #include <trace/events/power.h> 29 30 #include <asm/div64.h> 31 #include <asm/msr.h> 32 #include <asm/cpu_device_id.h> 33 #include <asm/cpufeature.h> 34 #include <asm/intel-family.h> 35 36 #define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC) 37 38 #define INTEL_CPUFREQ_TRANSITION_LATENCY 20000 39 #define INTEL_CPUFREQ_TRANSITION_DELAY 500 40 41 #ifdef CONFIG_ACPI 42 #include <acpi/processor.h> 43 #include <acpi/cppc_acpi.h> 44 #endif 45 46 #define FRAC_BITS 8 47 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) 48 #define fp_toint(X) ((X) >> FRAC_BITS) 49 50 #define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3)) 51 52 #define EXT_BITS 6 53 #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS) 54 #define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS) 55 #define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS) 56 57 static inline int32_t mul_fp(int32_t x, int32_t y) 58 { 59 return ((int64_t)x * (int64_t)y) >> FRAC_BITS; 60 } 61 62 static inline int32_t div_fp(s64 x, s64 y) 63 { 64 return div64_s64((int64_t)x << FRAC_BITS, y); 65 } 66 67 static inline int ceiling_fp(int32_t x) 68 { 69 int mask, ret; 70 71 ret = fp_toint(x); 72 mask = (1 << FRAC_BITS) - 1; 73 if (x & mask) 74 ret += 1; 75 return ret; 76 } 77 78 static inline int32_t percent_fp(int percent) 79 { 80 return div_fp(percent, 100); 81 } 82 83 static inline u64 mul_ext_fp(u64 x, u64 y) 84 { 85 return (x * y) >> EXT_FRAC_BITS; 86 } 87 88 static inline u64 div_ext_fp(u64 x, u64 y) 89 { 90 return div64_u64(x << EXT_FRAC_BITS, y); 91 } 92 93 static inline int32_t percent_ext_fp(int percent) 94 { 95 return div_ext_fp(percent, 100); 96 } 97 98 /** 99 * struct sample - Store performance sample 100 * @core_avg_perf: Ratio of APERF/MPERF which is the actual average 101 * performance during last sample period 102 * @busy_scaled: Scaled busy value which is used to calculate next 103 * P state. This can be different than core_avg_perf 104 * to account for cpu idle period 105 * @aperf: Difference of actual performance frequency clock count 106 * read from APERF MSR between last and current sample 107 * @mperf: Difference of maximum performance frequency clock count 108 * read from MPERF MSR between last and current sample 109 * @tsc: Difference of time stamp counter between last and 110 * current sample 111 * @time: Current time from scheduler 112 * 113 * This structure is used in the cpudata structure to store performance sample 114 * data for choosing next P State. 
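 * Note: @core_avg_perf is kept in EXT_FRAC_BITS fixed point (FRAC_BITS +
 * EXT_BITS = 14 fractional bits, so 1.0 is represented as 1 << 14); it is
 * produced by div_ext_fp(aperf, mperf) and scaled back with mul_ext_fp(),
 * e.g. in get_avg_frequency().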
 */
struct sample {
	int32_t core_avg_perf;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	u64 time;
};

/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical: This is the physical Max P state for a processor.
 *			This can be higher than the max_pstate which can
 *			be limited by platform thermal design power limits
 * @scaling:		Scaling factor to convert frequency to cpufreq
 *			frequency units
 * @turbo_pstate:	Max Turbo P state possible for this platform
 * @max_freq:		@max_pstate frequency in cpufreq units
 * @turbo_freq:		@turbo_pstate frequency in cpufreq units
 *
 * Stores the per cpu model P state limits and current P state.
 */
struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	max_pstate_physical;
	int	scaling;
	int	turbo_pstate;
	unsigned int max_freq;
	unsigned int turbo_freq;
};

/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P State.
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - Min P State)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
 * This data is used in Atom platforms, where in addition to target P state,
 * the voltage data needs to be specified to select next P State.
 */
struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

/**
 * struct global_params - Global parameters, mostly tunable via sysfs.
 * @no_turbo:		Whether or not to use turbo P-states.
 * @turbo_disabled:	Whether or not turbo P-states are available at all,
 *			based on the MSR_IA32_MISC_ENABLE value and whether or
 *			not the maximum reported turbo P-state is different from
 *			the maximum reported non-turbo one.
 * @turbo_disabled_mf:	The @turbo_disabled value reflected by cpuinfo.max_freq.
 * @min_perf_pct:	Minimum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 * @max_perf_pct:	Maximum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 */
struct global_params {
	bool no_turbo;
	bool turbo_disabled;
	bool turbo_disabled_mf;
	int max_perf_pct;
	int min_perf_pct;
};

/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @policy:		CPUFreq policy value
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @iowait_boost:	iowait-related boost fraction
 * @last_update:	Time of the last update.
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @last_sample_time:	Last Sample time
 * @aperf_mperf_shift:	Number of clock cycles after aperf, mperf is
 *			incremented. This shift is a multiplier to mperf delta
 *			to calculate CPU busy.
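 *			(For example, Knights Landing uses a shift of 10,
 *			returned by knl_get_aperf_mperf_shift(); see its use
 *			on the mperf delta in get_target_pstate().)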
207 * @prev_aperf: Last APERF value read from APERF MSR 208 * @prev_mperf: Last MPERF value read from MPERF MSR 209 * @prev_tsc: Last timestamp counter (TSC) value 210 * @prev_cummulative_iowait: IO Wait time difference from last and 211 * current sample 212 * @sample: Storage for storing last Sample data 213 * @min_perf_ratio: Minimum capacity in terms of PERF or HWP ratios 214 * @max_perf_ratio: Maximum capacity in terms of PERF or HWP ratios 215 * @acpi_perf_data: Stores ACPI perf information read from _PSS 216 * @valid_pss_table: Set to true for valid ACPI _PSS entries found 217 * @epp_powersave: Last saved HWP energy performance preference 218 * (EPP) or energy performance bias (EPB), 219 * when policy switched to performance 220 * @epp_policy: Last saved policy used to set EPP/EPB 221 * @epp_default: Power on default HWP energy performance 222 * preference/bias 223 * @epp_saved: Saved EPP/EPB during system suspend or CPU offline 224 * operation 225 * @hwp_req_cached: Cached value of the last HWP Request MSR 226 * @hwp_cap_cached: Cached value of the last HWP Capabilities MSR 227 * @last_io_update: Last time when IO wake flag was set 228 * @sched_flags: Store scheduler flags for possible cross CPU update 229 * @hwp_boost_min: Last HWP boosted min performance 230 * 231 * This structure stores per CPU instance data for all CPUs. 232 */ 233 struct cpudata { 234 int cpu; 235 236 unsigned int policy; 237 struct update_util_data update_util; 238 bool update_util_set; 239 240 struct pstate_data pstate; 241 struct vid_data vid; 242 243 u64 last_update; 244 u64 last_sample_time; 245 u64 aperf_mperf_shift; 246 u64 prev_aperf; 247 u64 prev_mperf; 248 u64 prev_tsc; 249 u64 prev_cummulative_iowait; 250 struct sample sample; 251 int32_t min_perf_ratio; 252 int32_t max_perf_ratio; 253 #ifdef CONFIG_ACPI 254 struct acpi_processor_performance acpi_perf_data; 255 bool valid_pss_table; 256 #endif 257 unsigned int iowait_boost; 258 s16 epp_powersave; 259 s16 epp_policy; 260 s16 epp_default; 261 s16 epp_saved; 262 u64 hwp_req_cached; 263 u64 hwp_cap_cached; 264 u64 last_io_update; 265 unsigned int sched_flags; 266 u32 hwp_boost_min; 267 }; 268 269 static struct cpudata **all_cpu_data; 270 271 /** 272 * struct pstate_funcs - Per CPU model specific callbacks 273 * @get_max: Callback to get maximum non turbo effective P state 274 * @get_max_physical: Callback to get maximum non turbo physical P state 275 * @get_min: Callback to get minimum P state 276 * @get_turbo: Callback to get turbo P state 277 * @get_scaling: Callback to get frequency scaling factor 278 * @get_val: Callback to convert P state to actual MSR write value 279 * @get_vid: Callback to get VID data for Atom platforms 280 * 281 * Core and Atom CPU models have different way to get P State limits. This 282 * structure is used to store those callbacks. 
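 * Which set of callbacks applies to a given processor is determined by
 * matching the CPU model against the intel_pstate_cpu_ids[] table below
 * (core_funcs, silvermont_funcs, airmont_funcs or knl_funcs).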
283 */ 284 struct pstate_funcs { 285 int (*get_max)(void); 286 int (*get_max_physical)(void); 287 int (*get_min)(void); 288 int (*get_turbo)(void); 289 int (*get_scaling)(void); 290 int (*get_aperf_mperf_shift)(void); 291 u64 (*get_val)(struct cpudata*, int pstate); 292 void (*get_vid)(struct cpudata *); 293 }; 294 295 static struct pstate_funcs pstate_funcs __read_mostly; 296 297 static int hwp_active __read_mostly; 298 static int hwp_mode_bdw __read_mostly; 299 static bool per_cpu_limits __read_mostly; 300 static bool hwp_boost __read_mostly; 301 302 static struct cpufreq_driver *intel_pstate_driver __read_mostly; 303 304 #ifdef CONFIG_ACPI 305 static bool acpi_ppc; 306 #endif 307 308 static struct global_params global; 309 310 static DEFINE_MUTEX(intel_pstate_driver_lock); 311 static DEFINE_MUTEX(intel_pstate_limits_lock); 312 313 #ifdef CONFIG_ACPI 314 315 static bool intel_pstate_acpi_pm_profile_server(void) 316 { 317 if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER || 318 acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER) 319 return true; 320 321 return false; 322 } 323 324 static bool intel_pstate_get_ppc_enable_status(void) 325 { 326 if (intel_pstate_acpi_pm_profile_server()) 327 return true; 328 329 return acpi_ppc; 330 } 331 332 #ifdef CONFIG_ACPI_CPPC_LIB 333 334 /* The work item is needed to avoid CPU hotplug locking issues */ 335 static void intel_pstste_sched_itmt_work_fn(struct work_struct *work) 336 { 337 sched_set_itmt_support(); 338 } 339 340 static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn); 341 342 static void intel_pstate_set_itmt_prio(int cpu) 343 { 344 struct cppc_perf_caps cppc_perf; 345 static u32 max_highest_perf = 0, min_highest_perf = U32_MAX; 346 int ret; 347 348 ret = cppc_get_perf_caps(cpu, &cppc_perf); 349 if (ret) 350 return; 351 352 /* 353 * The priorities can be set regardless of whether or not 354 * sched_set_itmt_support(true) has been called and it is valid to 355 * update them at any time after it has been called. 356 */ 357 sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu); 358 359 if (max_highest_perf <= min_highest_perf) { 360 if (cppc_perf.highest_perf > max_highest_perf) 361 max_highest_perf = cppc_perf.highest_perf; 362 363 if (cppc_perf.highest_perf < min_highest_perf) 364 min_highest_perf = cppc_perf.highest_perf; 365 366 if (max_highest_perf > min_highest_perf) { 367 /* 368 * This code can be run during CPU online under the 369 * CPU hotplug locks, so sched_set_itmt_support() 370 * cannot be called from here. Queue up a work item 371 * to invoke it. 
372 */ 373 schedule_work(&sched_itmt_work); 374 } 375 } 376 } 377 378 static int intel_pstate_get_cppc_guranteed(int cpu) 379 { 380 struct cppc_perf_caps cppc_perf; 381 int ret; 382 383 ret = cppc_get_perf_caps(cpu, &cppc_perf); 384 if (ret) 385 return ret; 386 387 if (cppc_perf.guaranteed_perf) 388 return cppc_perf.guaranteed_perf; 389 390 return cppc_perf.nominal_perf; 391 } 392 393 #else /* CONFIG_ACPI_CPPC_LIB */ 394 static void intel_pstate_set_itmt_prio(int cpu) 395 { 396 } 397 #endif /* CONFIG_ACPI_CPPC_LIB */ 398 399 static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 400 { 401 struct cpudata *cpu; 402 int ret; 403 int i; 404 405 if (hwp_active) { 406 intel_pstate_set_itmt_prio(policy->cpu); 407 return; 408 } 409 410 if (!intel_pstate_get_ppc_enable_status()) 411 return; 412 413 cpu = all_cpu_data[policy->cpu]; 414 415 ret = acpi_processor_register_performance(&cpu->acpi_perf_data, 416 policy->cpu); 417 if (ret) 418 return; 419 420 /* 421 * Check if the control value in _PSS is for PERF_CTL MSR, which should 422 * guarantee that the states returned by it map to the states in our 423 * list directly. 424 */ 425 if (cpu->acpi_perf_data.control_register.space_id != 426 ACPI_ADR_SPACE_FIXED_HARDWARE) 427 goto err; 428 429 /* 430 * If there is only one entry _PSS, simply ignore _PSS and continue as 431 * usual without taking _PSS into account 432 */ 433 if (cpu->acpi_perf_data.state_count < 2) 434 goto err; 435 436 pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu); 437 for (i = 0; i < cpu->acpi_perf_data.state_count; i++) { 438 pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n", 439 (i == cpu->acpi_perf_data.state ? '*' : ' '), i, 440 (u32) cpu->acpi_perf_data.states[i].core_frequency, 441 (u32) cpu->acpi_perf_data.states[i].power, 442 (u32) cpu->acpi_perf_data.states[i].control); 443 } 444 445 /* 446 * The _PSS table doesn't contain whole turbo frequency range. 447 * This just contains +1 MHZ above the max non turbo frequency, 448 * with control value corresponding to max turbo ratio. But 449 * when cpufreq set policy is called, it will call with this 450 * max frequency, which will cause a reduced performance as 451 * this driver uses real max turbo frequency as the max 452 * frequency. So correct this frequency in _PSS table to 453 * correct max turbo frequency based on the turbo state. 454 * Also need to convert to MHz as _PSS freq is in MHz. 
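	 * (Illustration: with a 3.5 GHz maximum turbo frequency,
	 * cpuinfo.max_freq is 3500000 kHz, so states[0].core_frequency is
	 * rewritten below to 3500000 / 1000 == 3500 MHz.)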
455 */ 456 if (!global.turbo_disabled) 457 cpu->acpi_perf_data.states[0].core_frequency = 458 policy->cpuinfo.max_freq / 1000; 459 cpu->valid_pss_table = true; 460 pr_debug("_PPC limits will be enforced\n"); 461 462 return; 463 464 err: 465 cpu->valid_pss_table = false; 466 acpi_processor_unregister_performance(policy->cpu); 467 } 468 469 static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 470 { 471 struct cpudata *cpu; 472 473 cpu = all_cpu_data[policy->cpu]; 474 if (!cpu->valid_pss_table) 475 return; 476 477 acpi_processor_unregister_performance(policy->cpu); 478 } 479 #else /* CONFIG_ACPI */ 480 static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 481 { 482 } 483 484 static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 485 { 486 } 487 488 static inline bool intel_pstate_acpi_pm_profile_server(void) 489 { 490 return false; 491 } 492 #endif /* CONFIG_ACPI */ 493 494 #ifndef CONFIG_ACPI_CPPC_LIB 495 static int intel_pstate_get_cppc_guranteed(int cpu) 496 { 497 return -ENOTSUPP; 498 } 499 #endif /* CONFIG_ACPI_CPPC_LIB */ 500 501 static inline void update_turbo_state(void) 502 { 503 u64 misc_en; 504 struct cpudata *cpu; 505 506 cpu = all_cpu_data[0]; 507 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); 508 global.turbo_disabled = 509 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || 510 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); 511 } 512 513 static int min_perf_pct_min(void) 514 { 515 struct cpudata *cpu = all_cpu_data[0]; 516 int turbo_pstate = cpu->pstate.turbo_pstate; 517 518 return turbo_pstate ? 519 (cpu->pstate.min_pstate * 100 / turbo_pstate) : 0; 520 } 521 522 static s16 intel_pstate_get_epb(struct cpudata *cpu_data) 523 { 524 u64 epb; 525 int ret; 526 527 if (!boot_cpu_has(X86_FEATURE_EPB)) 528 return -ENXIO; 529 530 ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); 531 if (ret) 532 return (s16)ret; 533 534 return (s16)(epb & 0x0f); 535 } 536 537 static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data) 538 { 539 s16 epp; 540 541 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { 542 /* 543 * When hwp_req_data is 0, means that caller didn't read 544 * MSR_HWP_REQUEST, so need to read and get EPP. 
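		 * (The EPP value lives in bits 31:24 of MSR_HWP_REQUEST,
		 * which is why the code below extracts (hwp_req_data >> 24)
		 * & 0xff and the update paths clear GENMASK_ULL(31, 24)
		 * before writing a new EPP.)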
545 */ 546 if (!hwp_req_data) { 547 epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, 548 &hwp_req_data); 549 if (epp) 550 return epp; 551 } 552 epp = (hwp_req_data >> 24) & 0xff; 553 } else { 554 /* When there is no EPP present, HWP uses EPB settings */ 555 epp = intel_pstate_get_epb(cpu_data); 556 } 557 558 return epp; 559 } 560 561 static int intel_pstate_set_epb(int cpu, s16 pref) 562 { 563 u64 epb; 564 int ret; 565 566 if (!boot_cpu_has(X86_FEATURE_EPB)) 567 return -ENXIO; 568 569 ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); 570 if (ret) 571 return ret; 572 573 epb = (epb & ~0x0f) | pref; 574 wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb); 575 576 return 0; 577 } 578 579 /* 580 * EPP/EPB display strings corresponding to EPP index in the 581 * energy_perf_strings[] 582 * index String 583 *------------------------------------- 584 * 0 default 585 * 1 performance 586 * 2 balance_performance 587 * 3 balance_power 588 * 4 power 589 */ 590 static const char * const energy_perf_strings[] = { 591 "default", 592 "performance", 593 "balance_performance", 594 "balance_power", 595 "power", 596 NULL 597 }; 598 static const unsigned int epp_values[] = { 599 HWP_EPP_PERFORMANCE, 600 HWP_EPP_BALANCE_PERFORMANCE, 601 HWP_EPP_BALANCE_POWERSAVE, 602 HWP_EPP_POWERSAVE 603 }; 604 605 static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp) 606 { 607 s16 epp; 608 int index = -EINVAL; 609 610 *raw_epp = 0; 611 epp = intel_pstate_get_epp(cpu_data, 0); 612 if (epp < 0) 613 return epp; 614 615 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { 616 if (epp == HWP_EPP_PERFORMANCE) 617 return 1; 618 if (epp == HWP_EPP_BALANCE_PERFORMANCE) 619 return 2; 620 if (epp == HWP_EPP_BALANCE_POWERSAVE) 621 return 3; 622 if (epp == HWP_EPP_POWERSAVE) 623 return 4; 624 *raw_epp = epp; 625 return 0; 626 } else if (boot_cpu_has(X86_FEATURE_EPB)) { 627 /* 628 * Range: 629 * 0x00-0x03 : Performance 630 * 0x04-0x07 : Balance performance 631 * 0x08-0x0B : Balance power 632 * 0x0C-0x0F : Power 633 * The EPB is a 4 bit value, but our ranges restrict the 634 * value which can be set. Here only using top two bits 635 * effectively. 
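		 * For example, an EPB value of 0x06 falls in the balance
		 * performance range and yields index (0x06 >> 2) + 1 == 2,
		 * which maps to "balance_performance" in
		 * energy_perf_strings[].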
636 */ 637 index = (epp >> 2) + 1; 638 } 639 640 return index; 641 } 642 643 static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data, 644 int pref_index, bool use_raw, 645 u32 raw_epp) 646 { 647 int epp = -EINVAL; 648 int ret; 649 650 if (!pref_index) 651 epp = cpu_data->epp_default; 652 653 mutex_lock(&intel_pstate_limits_lock); 654 655 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { 656 u64 value; 657 658 ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value); 659 if (ret) 660 goto return_pref; 661 662 value &= ~GENMASK_ULL(31, 24); 663 664 if (use_raw) { 665 if (raw_epp > 255) { 666 ret = -EINVAL; 667 goto return_pref; 668 } 669 value |= (u64)raw_epp << 24; 670 ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value); 671 goto return_pref; 672 } 673 674 if (epp == -EINVAL) 675 epp = epp_values[pref_index - 1]; 676 677 value |= (u64)epp << 24; 678 ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value); 679 } else { 680 if (epp == -EINVAL) 681 epp = (pref_index - 1) << 2; 682 ret = intel_pstate_set_epb(cpu_data->cpu, epp); 683 } 684 return_pref: 685 mutex_unlock(&intel_pstate_limits_lock); 686 687 return ret; 688 } 689 690 static ssize_t show_energy_performance_available_preferences( 691 struct cpufreq_policy *policy, char *buf) 692 { 693 int i = 0; 694 int ret = 0; 695 696 while (energy_perf_strings[i] != NULL) 697 ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]); 698 699 ret += sprintf(&buf[ret], "\n"); 700 701 return ret; 702 } 703 704 cpufreq_freq_attr_ro(energy_performance_available_preferences); 705 706 static ssize_t store_energy_performance_preference( 707 struct cpufreq_policy *policy, const char *buf, size_t count) 708 { 709 struct cpudata *cpu_data = all_cpu_data[policy->cpu]; 710 char str_preference[21]; 711 bool raw = false; 712 u32 epp; 713 int ret; 714 715 ret = sscanf(buf, "%20s", str_preference); 716 if (ret != 1) 717 return -EINVAL; 718 719 ret = match_string(energy_perf_strings, -1, str_preference); 720 if (ret < 0) { 721 if (!boot_cpu_has(X86_FEATURE_HWP_EPP)) 722 return ret; 723 724 ret = kstrtouint(buf, 10, &epp); 725 if (ret) 726 return ret; 727 728 raw = true; 729 } 730 731 ret = intel_pstate_set_energy_pref_index(cpu_data, ret, raw, epp); 732 if (ret) 733 return ret; 734 735 return count; 736 } 737 738 static ssize_t show_energy_performance_preference( 739 struct cpufreq_policy *policy, char *buf) 740 { 741 struct cpudata *cpu_data = all_cpu_data[policy->cpu]; 742 int preference, raw_epp; 743 744 preference = intel_pstate_get_energy_pref_index(cpu_data, &raw_epp); 745 if (preference < 0) 746 return preference; 747 748 if (raw_epp) 749 return sprintf(buf, "%d\n", raw_epp); 750 else 751 return sprintf(buf, "%s\n", energy_perf_strings[preference]); 752 } 753 754 cpufreq_freq_attr_rw(energy_performance_preference); 755 756 static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf) 757 { 758 struct cpudata *cpu; 759 u64 cap; 760 int ratio; 761 762 ratio = intel_pstate_get_cppc_guranteed(policy->cpu); 763 if (ratio <= 0) { 764 rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap); 765 ratio = HWP_GUARANTEED_PERF(cap); 766 } 767 768 cpu = all_cpu_data[policy->cpu]; 769 770 return sprintf(buf, "%d\n", ratio * cpu->pstate.scaling); 771 } 772 773 cpufreq_freq_attr_ro(base_frequency); 774 775 static struct freq_attr *hwp_cpufreq_attrs[] = { 776 &energy_performance_preference, 777 &energy_performance_available_preferences, 778 &base_frequency, 779 NULL, 780 }; 781 782 static void intel_pstate_get_hwp_max(unsigned int cpu, int 
*phy_max, 783 int *current_max) 784 { 785 u64 cap; 786 787 rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); 788 WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap); 789 if (global.no_turbo) 790 *current_max = HWP_GUARANTEED_PERF(cap); 791 else 792 *current_max = HWP_HIGHEST_PERF(cap); 793 794 *phy_max = HWP_HIGHEST_PERF(cap); 795 } 796 797 static void intel_pstate_hwp_set(unsigned int cpu) 798 { 799 struct cpudata *cpu_data = all_cpu_data[cpu]; 800 int max, min; 801 u64 value; 802 s16 epp; 803 804 max = cpu_data->max_perf_ratio; 805 min = cpu_data->min_perf_ratio; 806 807 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) 808 min = max; 809 810 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); 811 812 value &= ~HWP_MIN_PERF(~0L); 813 value |= HWP_MIN_PERF(min); 814 815 value &= ~HWP_MAX_PERF(~0L); 816 value |= HWP_MAX_PERF(max); 817 818 if (cpu_data->epp_policy == cpu_data->policy) 819 goto skip_epp; 820 821 cpu_data->epp_policy = cpu_data->policy; 822 823 if (cpu_data->epp_saved >= 0) { 824 epp = cpu_data->epp_saved; 825 cpu_data->epp_saved = -EINVAL; 826 goto update_epp; 827 } 828 829 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) { 830 epp = intel_pstate_get_epp(cpu_data, value); 831 cpu_data->epp_powersave = epp; 832 /* If EPP read was failed, then don't try to write */ 833 if (epp < 0) 834 goto skip_epp; 835 836 epp = 0; 837 } else { 838 /* skip setting EPP, when saved value is invalid */ 839 if (cpu_data->epp_powersave < 0) 840 goto skip_epp; 841 842 /* 843 * No need to restore EPP when it is not zero. This 844 * means: 845 * - Policy is not changed 846 * - user has manually changed 847 * - Error reading EPB 848 */ 849 epp = intel_pstate_get_epp(cpu_data, value); 850 if (epp) 851 goto skip_epp; 852 853 epp = cpu_data->epp_powersave; 854 } 855 update_epp: 856 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { 857 value &= ~GENMASK_ULL(31, 24); 858 value |= (u64)epp << 24; 859 } else { 860 intel_pstate_set_epb(cpu, epp); 861 } 862 skip_epp: 863 WRITE_ONCE(cpu_data->hwp_req_cached, value); 864 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); 865 } 866 867 static void intel_pstate_hwp_force_min_perf(int cpu) 868 { 869 u64 value; 870 int min_perf; 871 872 value = all_cpu_data[cpu]->hwp_req_cached; 873 value &= ~GENMASK_ULL(31, 0); 874 min_perf = HWP_LOWEST_PERF(all_cpu_data[cpu]->hwp_cap_cached); 875 876 /* Set hwp_max = hwp_min */ 877 value |= HWP_MAX_PERF(min_perf); 878 value |= HWP_MIN_PERF(min_perf); 879 880 /* Set EPP to min */ 881 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) 882 value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE); 883 884 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); 885 } 886 887 static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy) 888 { 889 struct cpudata *cpu_data = all_cpu_data[policy->cpu]; 890 891 if (!hwp_active) 892 return 0; 893 894 cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0); 895 896 return 0; 897 } 898 899 #define POWER_CTL_EE_ENABLE 1 900 #define POWER_CTL_EE_DISABLE 2 901 902 static int power_ctl_ee_state; 903 904 static void set_power_ctl_ee_state(bool input) 905 { 906 u64 power_ctl; 907 908 mutex_lock(&intel_pstate_driver_lock); 909 rdmsrl(MSR_IA32_POWER_CTL, power_ctl); 910 if (input) { 911 power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE); 912 power_ctl_ee_state = POWER_CTL_EE_ENABLE; 913 } else { 914 power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE); 915 power_ctl_ee_state = POWER_CTL_EE_DISABLE; 916 } 917 wrmsrl(MSR_IA32_POWER_CTL, power_ctl); 918 mutex_unlock(&intel_pstate_driver_lock); 919 } 920 921 static void intel_pstate_hwp_enable(struct 
cpudata *cpudata); 922 923 static int intel_pstate_resume(struct cpufreq_policy *policy) 924 { 925 926 /* Only restore if the system default is changed */ 927 if (power_ctl_ee_state == POWER_CTL_EE_ENABLE) 928 set_power_ctl_ee_state(true); 929 else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE) 930 set_power_ctl_ee_state(false); 931 932 if (!hwp_active) 933 return 0; 934 935 mutex_lock(&intel_pstate_limits_lock); 936 937 if (policy->cpu == 0) 938 intel_pstate_hwp_enable(all_cpu_data[policy->cpu]); 939 940 all_cpu_data[policy->cpu]->epp_policy = 0; 941 intel_pstate_hwp_set(policy->cpu); 942 943 mutex_unlock(&intel_pstate_limits_lock); 944 945 return 0; 946 } 947 948 static void intel_pstate_update_policies(void) 949 { 950 int cpu; 951 952 for_each_possible_cpu(cpu) 953 cpufreq_update_policy(cpu); 954 } 955 956 static void intel_pstate_update_max_freq(unsigned int cpu) 957 { 958 struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu); 959 struct cpudata *cpudata; 960 961 if (!policy) 962 return; 963 964 cpudata = all_cpu_data[cpu]; 965 policy->cpuinfo.max_freq = global.turbo_disabled_mf ? 966 cpudata->pstate.max_freq : cpudata->pstate.turbo_freq; 967 968 refresh_frequency_limits(policy); 969 970 cpufreq_cpu_release(policy); 971 } 972 973 static void intel_pstate_update_limits(unsigned int cpu) 974 { 975 mutex_lock(&intel_pstate_driver_lock); 976 977 update_turbo_state(); 978 /* 979 * If turbo has been turned on or off globally, policy limits for 980 * all CPUs need to be updated to reflect that. 981 */ 982 if (global.turbo_disabled_mf != global.turbo_disabled) { 983 global.turbo_disabled_mf = global.turbo_disabled; 984 arch_set_max_freq_ratio(global.turbo_disabled); 985 for_each_possible_cpu(cpu) 986 intel_pstate_update_max_freq(cpu); 987 } else { 988 cpufreq_update_policy(cpu); 989 } 990 991 mutex_unlock(&intel_pstate_driver_lock); 992 } 993 994 /************************** sysfs begin ************************/ 995 #define show_one(file_name, object) \ 996 static ssize_t show_##file_name \ 997 (struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ 998 { \ 999 return sprintf(buf, "%u\n", global.object); \ 1000 } 1001 1002 static ssize_t intel_pstate_show_status(char *buf); 1003 static int intel_pstate_update_status(const char *buf, size_t size); 1004 1005 static ssize_t show_status(struct kobject *kobj, 1006 struct kobj_attribute *attr, char *buf) 1007 { 1008 ssize_t ret; 1009 1010 mutex_lock(&intel_pstate_driver_lock); 1011 ret = intel_pstate_show_status(buf); 1012 mutex_unlock(&intel_pstate_driver_lock); 1013 1014 return ret; 1015 } 1016 1017 static ssize_t store_status(struct kobject *a, struct kobj_attribute *b, 1018 const char *buf, size_t count) 1019 { 1020 char *p = memchr(buf, '\n', count); 1021 int ret; 1022 1023 mutex_lock(&intel_pstate_driver_lock); 1024 ret = intel_pstate_update_status(buf, p ? p - buf : count); 1025 mutex_unlock(&intel_pstate_driver_lock); 1026 1027 return ret < 0 ? 
ret : count; 1028 } 1029 1030 static ssize_t show_turbo_pct(struct kobject *kobj, 1031 struct kobj_attribute *attr, char *buf) 1032 { 1033 struct cpudata *cpu; 1034 int total, no_turbo, turbo_pct; 1035 uint32_t turbo_fp; 1036 1037 mutex_lock(&intel_pstate_driver_lock); 1038 1039 if (!intel_pstate_driver) { 1040 mutex_unlock(&intel_pstate_driver_lock); 1041 return -EAGAIN; 1042 } 1043 1044 cpu = all_cpu_data[0]; 1045 1046 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; 1047 no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1; 1048 turbo_fp = div_fp(no_turbo, total); 1049 turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100))); 1050 1051 mutex_unlock(&intel_pstate_driver_lock); 1052 1053 return sprintf(buf, "%u\n", turbo_pct); 1054 } 1055 1056 static ssize_t show_num_pstates(struct kobject *kobj, 1057 struct kobj_attribute *attr, char *buf) 1058 { 1059 struct cpudata *cpu; 1060 int total; 1061 1062 mutex_lock(&intel_pstate_driver_lock); 1063 1064 if (!intel_pstate_driver) { 1065 mutex_unlock(&intel_pstate_driver_lock); 1066 return -EAGAIN; 1067 } 1068 1069 cpu = all_cpu_data[0]; 1070 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; 1071 1072 mutex_unlock(&intel_pstate_driver_lock); 1073 1074 return sprintf(buf, "%u\n", total); 1075 } 1076 1077 static ssize_t show_no_turbo(struct kobject *kobj, 1078 struct kobj_attribute *attr, char *buf) 1079 { 1080 ssize_t ret; 1081 1082 mutex_lock(&intel_pstate_driver_lock); 1083 1084 if (!intel_pstate_driver) { 1085 mutex_unlock(&intel_pstate_driver_lock); 1086 return -EAGAIN; 1087 } 1088 1089 update_turbo_state(); 1090 if (global.turbo_disabled) 1091 ret = sprintf(buf, "%u\n", global.turbo_disabled); 1092 else 1093 ret = sprintf(buf, "%u\n", global.no_turbo); 1094 1095 mutex_unlock(&intel_pstate_driver_lock); 1096 1097 return ret; 1098 } 1099 1100 static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, 1101 const char *buf, size_t count) 1102 { 1103 unsigned int input; 1104 int ret; 1105 1106 ret = sscanf(buf, "%u", &input); 1107 if (ret != 1) 1108 return -EINVAL; 1109 1110 mutex_lock(&intel_pstate_driver_lock); 1111 1112 if (!intel_pstate_driver) { 1113 mutex_unlock(&intel_pstate_driver_lock); 1114 return -EAGAIN; 1115 } 1116 1117 mutex_lock(&intel_pstate_limits_lock); 1118 1119 update_turbo_state(); 1120 if (global.turbo_disabled) { 1121 pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n"); 1122 mutex_unlock(&intel_pstate_limits_lock); 1123 mutex_unlock(&intel_pstate_driver_lock); 1124 return -EPERM; 1125 } 1126 1127 global.no_turbo = clamp_t(int, input, 0, 1); 1128 1129 if (global.no_turbo) { 1130 struct cpudata *cpu = all_cpu_data[0]; 1131 int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate; 1132 1133 /* Squash the global minimum into the permitted range. 
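		 * For example, with max_pstate == 24 and turbo_pstate == 32,
		 * pct is 75, so a min_perf_pct previously set to 90 is
		 * squashed down to 75 while turbo remains off.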
*/ 1134 if (global.min_perf_pct > pct) 1135 global.min_perf_pct = pct; 1136 } 1137 1138 mutex_unlock(&intel_pstate_limits_lock); 1139 1140 intel_pstate_update_policies(); 1141 1142 mutex_unlock(&intel_pstate_driver_lock); 1143 1144 return count; 1145 } 1146 1147 static struct cpufreq_driver intel_pstate; 1148 1149 static void update_qos_request(enum freq_qos_req_type type) 1150 { 1151 int max_state, turbo_max, freq, i, perf_pct; 1152 struct freq_qos_request *req; 1153 struct cpufreq_policy *policy; 1154 1155 for_each_possible_cpu(i) { 1156 struct cpudata *cpu = all_cpu_data[i]; 1157 1158 policy = cpufreq_cpu_get(i); 1159 if (!policy) 1160 continue; 1161 1162 req = policy->driver_data; 1163 cpufreq_cpu_put(policy); 1164 1165 if (!req) 1166 continue; 1167 1168 if (hwp_active) 1169 intel_pstate_get_hwp_max(i, &turbo_max, &max_state); 1170 else 1171 turbo_max = cpu->pstate.turbo_pstate; 1172 1173 if (type == FREQ_QOS_MIN) { 1174 perf_pct = global.min_perf_pct; 1175 } else { 1176 req++; 1177 perf_pct = global.max_perf_pct; 1178 } 1179 1180 freq = DIV_ROUND_UP(turbo_max * perf_pct, 100); 1181 freq *= cpu->pstate.scaling; 1182 1183 if (freq_qos_update_request(req, freq) < 0) 1184 pr_warn("Failed to update freq constraint: CPU%d\n", i); 1185 } 1186 } 1187 1188 static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b, 1189 const char *buf, size_t count) 1190 { 1191 unsigned int input; 1192 int ret; 1193 1194 ret = sscanf(buf, "%u", &input); 1195 if (ret != 1) 1196 return -EINVAL; 1197 1198 mutex_lock(&intel_pstate_driver_lock); 1199 1200 if (!intel_pstate_driver) { 1201 mutex_unlock(&intel_pstate_driver_lock); 1202 return -EAGAIN; 1203 } 1204 1205 mutex_lock(&intel_pstate_limits_lock); 1206 1207 global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100); 1208 1209 mutex_unlock(&intel_pstate_limits_lock); 1210 1211 if (intel_pstate_driver == &intel_pstate) 1212 intel_pstate_update_policies(); 1213 else 1214 update_qos_request(FREQ_QOS_MAX); 1215 1216 mutex_unlock(&intel_pstate_driver_lock); 1217 1218 return count; 1219 } 1220 1221 static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b, 1222 const char *buf, size_t count) 1223 { 1224 unsigned int input; 1225 int ret; 1226 1227 ret = sscanf(buf, "%u", &input); 1228 if (ret != 1) 1229 return -EINVAL; 1230 1231 mutex_lock(&intel_pstate_driver_lock); 1232 1233 if (!intel_pstate_driver) { 1234 mutex_unlock(&intel_pstate_driver_lock); 1235 return -EAGAIN; 1236 } 1237 1238 mutex_lock(&intel_pstate_limits_lock); 1239 1240 global.min_perf_pct = clamp_t(int, input, 1241 min_perf_pct_min(), global.max_perf_pct); 1242 1243 mutex_unlock(&intel_pstate_limits_lock); 1244 1245 if (intel_pstate_driver == &intel_pstate) 1246 intel_pstate_update_policies(); 1247 else 1248 update_qos_request(FREQ_QOS_MIN); 1249 1250 mutex_unlock(&intel_pstate_driver_lock); 1251 1252 return count; 1253 } 1254 1255 static ssize_t show_hwp_dynamic_boost(struct kobject *kobj, 1256 struct kobj_attribute *attr, char *buf) 1257 { 1258 return sprintf(buf, "%u\n", hwp_boost); 1259 } 1260 1261 static ssize_t store_hwp_dynamic_boost(struct kobject *a, 1262 struct kobj_attribute *b, 1263 const char *buf, size_t count) 1264 { 1265 unsigned int input; 1266 int ret; 1267 1268 ret = kstrtouint(buf, 10, &input); 1269 if (ret) 1270 return ret; 1271 1272 mutex_lock(&intel_pstate_driver_lock); 1273 hwp_boost = !!input; 1274 intel_pstate_update_policies(); 1275 mutex_unlock(&intel_pstate_driver_lock); 1276 1277 return count; 1278 } 1279 1280 static 
ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr, 1281 char *buf) 1282 { 1283 u64 power_ctl; 1284 int enable; 1285 1286 rdmsrl(MSR_IA32_POWER_CTL, power_ctl); 1287 enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE)); 1288 return sprintf(buf, "%d\n", !enable); 1289 } 1290 1291 static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b, 1292 const char *buf, size_t count) 1293 { 1294 bool input; 1295 int ret; 1296 1297 ret = kstrtobool(buf, &input); 1298 if (ret) 1299 return ret; 1300 1301 set_power_ctl_ee_state(input); 1302 1303 return count; 1304 } 1305 1306 show_one(max_perf_pct, max_perf_pct); 1307 show_one(min_perf_pct, min_perf_pct); 1308 1309 define_one_global_rw(status); 1310 define_one_global_rw(no_turbo); 1311 define_one_global_rw(max_perf_pct); 1312 define_one_global_rw(min_perf_pct); 1313 define_one_global_ro(turbo_pct); 1314 define_one_global_ro(num_pstates); 1315 define_one_global_rw(hwp_dynamic_boost); 1316 define_one_global_rw(energy_efficiency); 1317 1318 static struct attribute *intel_pstate_attributes[] = { 1319 &status.attr, 1320 &no_turbo.attr, 1321 &turbo_pct.attr, 1322 &num_pstates.attr, 1323 NULL 1324 }; 1325 1326 static const struct attribute_group intel_pstate_attr_group = { 1327 .attrs = intel_pstate_attributes, 1328 }; 1329 1330 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[]; 1331 1332 static void __init intel_pstate_sysfs_expose_params(void) 1333 { 1334 struct kobject *intel_pstate_kobject; 1335 int rc; 1336 1337 intel_pstate_kobject = kobject_create_and_add("intel_pstate", 1338 &cpu_subsys.dev_root->kobj); 1339 if (WARN_ON(!intel_pstate_kobject)) 1340 return; 1341 1342 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group); 1343 if (WARN_ON(rc)) 1344 return; 1345 1346 /* 1347 * If per cpu limits are enforced there are no global limits, so 1348 * return without creating max/min_perf_pct attributes 1349 */ 1350 if (per_cpu_limits) 1351 return; 1352 1353 rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr); 1354 WARN_ON(rc); 1355 1356 rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr); 1357 WARN_ON(rc); 1358 1359 if (hwp_active) { 1360 rc = sysfs_create_file(intel_pstate_kobject, 1361 &hwp_dynamic_boost.attr); 1362 WARN_ON(rc); 1363 } 1364 1365 if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) { 1366 rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr); 1367 WARN_ON(rc); 1368 } 1369 } 1370 /************************** sysfs end ************************/ 1371 1372 static void intel_pstate_hwp_enable(struct cpudata *cpudata) 1373 { 1374 /* First disable HWP notification interrupt as we don't process them */ 1375 if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) 1376 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); 1377 1378 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); 1379 cpudata->epp_policy = 0; 1380 if (cpudata->epp_default == -EINVAL) 1381 cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); 1382 } 1383 1384 static int atom_get_min_pstate(void) 1385 { 1386 u64 value; 1387 1388 rdmsrl(MSR_ATOM_CORE_RATIOS, value); 1389 return (value >> 8) & 0x7F; 1390 } 1391 1392 static int atom_get_max_pstate(void) 1393 { 1394 u64 value; 1395 1396 rdmsrl(MSR_ATOM_CORE_RATIOS, value); 1397 return (value >> 16) & 0x7F; 1398 } 1399 1400 static int atom_get_turbo_pstate(void) 1401 { 1402 u64 value; 1403 1404 rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value); 1405 return value & 0x7F; 1406 } 1407 1408 static u64 atom_get_val(struct cpudata *cpudata, 
int pstate) 1409 { 1410 u64 val; 1411 int32_t vid_fp; 1412 u32 vid; 1413 1414 val = (u64)pstate << 8; 1415 if (global.no_turbo && !global.turbo_disabled) 1416 val |= (u64)1 << 32; 1417 1418 vid_fp = cpudata->vid.min + mul_fp( 1419 int_tofp(pstate - cpudata->pstate.min_pstate), 1420 cpudata->vid.ratio); 1421 1422 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); 1423 vid = ceiling_fp(vid_fp); 1424 1425 if (pstate > cpudata->pstate.max_pstate) 1426 vid = cpudata->vid.turbo; 1427 1428 return val | vid; 1429 } 1430 1431 static int silvermont_get_scaling(void) 1432 { 1433 u64 value; 1434 int i; 1435 /* Defined in Table 35-6 from SDM (Sept 2015) */ 1436 static int silvermont_freq_table[] = { 1437 83300, 100000, 133300, 116700, 80000}; 1438 1439 rdmsrl(MSR_FSB_FREQ, value); 1440 i = value & 0x7; 1441 WARN_ON(i > 4); 1442 1443 return silvermont_freq_table[i]; 1444 } 1445 1446 static int airmont_get_scaling(void) 1447 { 1448 u64 value; 1449 int i; 1450 /* Defined in Table 35-10 from SDM (Sept 2015) */ 1451 static int airmont_freq_table[] = { 1452 83300, 100000, 133300, 116700, 80000, 1453 93300, 90000, 88900, 87500}; 1454 1455 rdmsrl(MSR_FSB_FREQ, value); 1456 i = value & 0xF; 1457 WARN_ON(i > 8); 1458 1459 return airmont_freq_table[i]; 1460 } 1461 1462 static void atom_get_vid(struct cpudata *cpudata) 1463 { 1464 u64 value; 1465 1466 rdmsrl(MSR_ATOM_CORE_VIDS, value); 1467 cpudata->vid.min = int_tofp((value >> 8) & 0x7f); 1468 cpudata->vid.max = int_tofp((value >> 16) & 0x7f); 1469 cpudata->vid.ratio = div_fp( 1470 cpudata->vid.max - cpudata->vid.min, 1471 int_tofp(cpudata->pstate.max_pstate - 1472 cpudata->pstate.min_pstate)); 1473 1474 rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value); 1475 cpudata->vid.turbo = value & 0x7f; 1476 } 1477 1478 static int core_get_min_pstate(void) 1479 { 1480 u64 value; 1481 1482 rdmsrl(MSR_PLATFORM_INFO, value); 1483 return (value >> 40) & 0xFF; 1484 } 1485 1486 static int core_get_max_pstate_physical(void) 1487 { 1488 u64 value; 1489 1490 rdmsrl(MSR_PLATFORM_INFO, value); 1491 return (value >> 8) & 0xFF; 1492 } 1493 1494 static int core_get_tdp_ratio(u64 plat_info) 1495 { 1496 /* Check how many TDP levels present */ 1497 if (plat_info & 0x600000000) { 1498 u64 tdp_ctrl; 1499 u64 tdp_ratio; 1500 int tdp_msr; 1501 int err; 1502 1503 /* Get the TDP level (0, 1, 2) to get ratios */ 1504 err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); 1505 if (err) 1506 return err; 1507 1508 /* TDP MSR are continuous starting at 0x648 */ 1509 tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03); 1510 err = rdmsrl_safe(tdp_msr, &tdp_ratio); 1511 if (err) 1512 return err; 1513 1514 /* For level 1 and 2, bits[23:16] contain the ratio */ 1515 if (tdp_ctrl & 0x03) 1516 tdp_ratio >>= 16; 1517 1518 tdp_ratio &= 0xff; /* ratios are only 8 bits long */ 1519 pr_debug("tdp_ratio %x\n", (int)tdp_ratio); 1520 1521 return (int)tdp_ratio; 1522 } 1523 1524 return -ENXIO; 1525 } 1526 1527 static int core_get_max_pstate(void) 1528 { 1529 u64 tar; 1530 u64 plat_info; 1531 int max_pstate; 1532 int tdp_ratio; 1533 int err; 1534 1535 rdmsrl(MSR_PLATFORM_INFO, plat_info); 1536 max_pstate = (plat_info >> 8) & 0xFF; 1537 1538 tdp_ratio = core_get_tdp_ratio(plat_info); 1539 if (tdp_ratio <= 0) 1540 return max_pstate; 1541 1542 if (hwp_active) { 1543 /* Turbo activation ratio is not used on HWP platforms */ 1544 return tdp_ratio; 1545 } 1546 1547 err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar); 1548 if (!err) { 1549 int tar_levels; 1550 1551 /* Do some sanity checking for safety */ 1552 
		tar_levels = tar & 0xff;
		if (tdp_ratio - 1 == tar_levels) {
			max_pstate = tar_levels;
			pr_debug("max_pstate=TAC %x\n", max_pstate);
		}
	}

	return max_pstate;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value) & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}

static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (global.no_turbo && !global.turbo_disabled)
		val |= (u64)1 << 32;

	return val;
}

static int knl_get_aperf_mperf_shift(void)
{
	return 10;
}

static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	cpu->pstate.current_pstate = pstate;
	/*
	 * Generally, there is no guarantee that this code will always run on
	 * the CPU being updated, so force the register update to run on the
	 * right CPU.
	 */
	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
		      pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static void intel_pstate_max_within_limits(struct cpudata *cpu)
{
	int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);

	update_turbo_state();
	intel_pstate_set_pstate(cpu, pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();
	cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;

	if (hwp_active && !hwp_mode_bdw) {
		unsigned int phy_max, current_max;

		intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
		cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
	} else {
		cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
	}

	if (pstate_funcs.get_aperf_mperf_shift)
		cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	intel_pstate_set_min_pstate(cpu);
}

/*
 * A long hold time will keep the high perf limits in place for a long time,
 * which negatively impacts perf/watt for some workloads, like specpower.
 * 3ms is based on experiments on some workloads.
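 * With the 3 ms hold time, intel_pstate_hwp_boost_down() only restores the
 * cached HWP request once no update has been seen on the CPU for at least
 * that long.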
 */
static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;

static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
{
	u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
	u32 max_limit = (hwp_req & 0xff00) >> 8;
	u32 min_limit = (hwp_req & 0xff);
	u32 boost_level1;

	/*
	 * Cases to consider (User changes via sysfs or boot time):
	 * If, P0 (Turbo max) = P1 (Guaranteed max) = min:
	 *	No boost, return.
	 * If, P0 (Turbo max) > P1 (Guaranteed max) = min:
	 *	Should result in one level boost only for P0.
	 * If, P0 (Turbo max) = P1 (Guaranteed max) > min:
	 *	Should result in two level boost:
	 *		(min + p1)/2 and P1.
	 * If, P0 (Turbo max) > P1 (Guaranteed max) > min:
	 *	Should result in three level boost:
	 *		(min + p1)/2, P1 and P0.
	 */

	/* If max and min are equal or already at max, nothing to boost */
	if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit)
		return;

	if (!cpu->hwp_boost_min)
		cpu->hwp_boost_min = min_limit;

	/* level at the halfway mark between min and guaranteed */
	boost_level1 = (HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) + min_limit) >> 1;

	if (cpu->hwp_boost_min < boost_level1)
		cpu->hwp_boost_min = boost_level1;
	else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
		cpu->hwp_boost_min = HWP_GUARANTEED_PERF(cpu->hwp_cap_cached);
	else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) &&
		 max_limit != HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
		cpu->hwp_boost_min = max_limit;
	else
		return;

	hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
	wrmsrl(MSR_HWP_REQUEST, hwp_req);
	cpu->last_update = cpu->sample.time;
}

static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
{
	if (cpu->hwp_boost_min) {
		bool expired;

		/* Check if we are idle for hold time to boost down */
		expired = time_after64(cpu->sample.time, cpu->last_update +
				       hwp_boost_hold_time_ns);
		if (expired) {
			wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
			cpu->hwp_boost_min = 0;
		}
	}
	cpu->last_update = cpu->sample.time;
}

static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu,
						      u64 time)
{
	cpu->sample.time = time;

	if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) {
		bool do_io = false;

		cpu->sched_flags = 0;
		/*
		 * Set the iowait_boost flag and update the time. Since the
		 * IO WAIT flag is set all the time, we can't conclude from a
		 * single occurrence that some IO-bound activity is scheduled
		 * on this CPU. If we receive at least two in two consecutive
		 * ticks, then we treat it as a boost candidate.
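		 * (That is, the boost is only triggered when the current IO
		 * wake arrives within 2 * TICK_NSEC of the previous one; see
		 * the time_before64() check below.)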
1747 */ 1748 if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC)) 1749 do_io = true; 1750 1751 cpu->last_io_update = time; 1752 1753 if (do_io) 1754 intel_pstate_hwp_boost_up(cpu); 1755 1756 } else { 1757 intel_pstate_hwp_boost_down(cpu); 1758 } 1759 } 1760 1761 static inline void intel_pstate_update_util_hwp(struct update_util_data *data, 1762 u64 time, unsigned int flags) 1763 { 1764 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 1765 1766 cpu->sched_flags |= flags; 1767 1768 if (smp_processor_id() == cpu->cpu) 1769 intel_pstate_update_util_hwp_local(cpu, time); 1770 } 1771 1772 static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu) 1773 { 1774 struct sample *sample = &cpu->sample; 1775 1776 sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf); 1777 } 1778 1779 static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) 1780 { 1781 u64 aperf, mperf; 1782 unsigned long flags; 1783 u64 tsc; 1784 1785 local_irq_save(flags); 1786 rdmsrl(MSR_IA32_APERF, aperf); 1787 rdmsrl(MSR_IA32_MPERF, mperf); 1788 tsc = rdtsc(); 1789 if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) { 1790 local_irq_restore(flags); 1791 return false; 1792 } 1793 local_irq_restore(flags); 1794 1795 cpu->last_sample_time = cpu->sample.time; 1796 cpu->sample.time = time; 1797 cpu->sample.aperf = aperf; 1798 cpu->sample.mperf = mperf; 1799 cpu->sample.tsc = tsc; 1800 cpu->sample.aperf -= cpu->prev_aperf; 1801 cpu->sample.mperf -= cpu->prev_mperf; 1802 cpu->sample.tsc -= cpu->prev_tsc; 1803 1804 cpu->prev_aperf = aperf; 1805 cpu->prev_mperf = mperf; 1806 cpu->prev_tsc = tsc; 1807 /* 1808 * First time this function is invoked in a given cycle, all of the 1809 * previous sample data fields are equal to zero or stale and they must 1810 * be populated with meaningful numbers for things to work, so assume 1811 * that sample.time will always be reset before setting the utilization 1812 * update hook and make the caller skip the sample then. 1813 */ 1814 if (cpu->last_sample_time) { 1815 intel_pstate_calc_avg_perf(cpu); 1816 return true; 1817 } 1818 return false; 1819 } 1820 1821 static inline int32_t get_avg_frequency(struct cpudata *cpu) 1822 { 1823 return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz); 1824 } 1825 1826 static inline int32_t get_avg_pstate(struct cpudata *cpu) 1827 { 1828 return mul_ext_fp(cpu->pstate.max_pstate_physical, 1829 cpu->sample.core_avg_perf); 1830 } 1831 1832 static inline int32_t get_target_pstate(struct cpudata *cpu) 1833 { 1834 struct sample *sample = &cpu->sample; 1835 int32_t busy_frac; 1836 int target, avg_pstate; 1837 1838 busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift, 1839 sample->tsc); 1840 1841 if (busy_frac < cpu->iowait_boost) 1842 busy_frac = cpu->iowait_boost; 1843 1844 sample->busy_scaled = busy_frac * 100; 1845 1846 target = global.no_turbo || global.turbo_disabled ? 1847 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; 1848 target += target >> 2; 1849 target = mul_fp(target, busy_frac); 1850 if (target < cpu->pstate.min_pstate) 1851 target = cpu->pstate.min_pstate; 1852 1853 /* 1854 * If the average P-state during the previous cycle was higher than the 1855 * current target, add 50% of the difference to the target to reduce 1856 * possible performance oscillations and offset possible performance 1857 * loss related to moving the workload from one CPU to another within 1858 * a package/module. 
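	 * For example, with a target of 20 and an average P-state of 28 over
	 * the previous cycle, the final target becomes
	 * 20 + ((28 - 20) >> 1) == 24.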
1859 */ 1860 avg_pstate = get_avg_pstate(cpu); 1861 if (avg_pstate > target) 1862 target += (avg_pstate - target) >> 1; 1863 1864 return target; 1865 } 1866 1867 static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) 1868 { 1869 int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio); 1870 int max_pstate = max(min_pstate, cpu->max_perf_ratio); 1871 1872 return clamp_t(int, pstate, min_pstate, max_pstate); 1873 } 1874 1875 static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) 1876 { 1877 if (pstate == cpu->pstate.current_pstate) 1878 return; 1879 1880 cpu->pstate.current_pstate = pstate; 1881 wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); 1882 } 1883 1884 static void intel_pstate_adjust_pstate(struct cpudata *cpu) 1885 { 1886 int from = cpu->pstate.current_pstate; 1887 struct sample *sample; 1888 int target_pstate; 1889 1890 update_turbo_state(); 1891 1892 target_pstate = get_target_pstate(cpu); 1893 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); 1894 trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); 1895 intel_pstate_update_pstate(cpu, target_pstate); 1896 1897 sample = &cpu->sample; 1898 trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf), 1899 fp_toint(sample->busy_scaled), 1900 from, 1901 cpu->pstate.current_pstate, 1902 sample->mperf, 1903 sample->aperf, 1904 sample->tsc, 1905 get_avg_frequency(cpu), 1906 fp_toint(cpu->iowait_boost * 100)); 1907 } 1908 1909 static void intel_pstate_update_util(struct update_util_data *data, u64 time, 1910 unsigned int flags) 1911 { 1912 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 1913 u64 delta_ns; 1914 1915 /* Don't allow remote callbacks */ 1916 if (smp_processor_id() != cpu->cpu) 1917 return; 1918 1919 delta_ns = time - cpu->last_update; 1920 if (flags & SCHED_CPUFREQ_IOWAIT) { 1921 /* Start over if the CPU may have been idle. */ 1922 if (delta_ns > TICK_NSEC) { 1923 cpu->iowait_boost = ONE_EIGHTH_FP; 1924 } else if (cpu->iowait_boost >= ONE_EIGHTH_FP) { 1925 cpu->iowait_boost <<= 1; 1926 if (cpu->iowait_boost > int_tofp(1)) 1927 cpu->iowait_boost = int_tofp(1); 1928 } else { 1929 cpu->iowait_boost = ONE_EIGHTH_FP; 1930 } 1931 } else if (cpu->iowait_boost) { 1932 /* Clear iowait_boost if the CPU may have been idle. 
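		 * Otherwise halve it, so a full boost of int_tofp(1) decays
		 * below ONE_EIGHTH_FP within a few updates.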
*/ 1933 if (delta_ns > TICK_NSEC) 1934 cpu->iowait_boost = 0; 1935 else 1936 cpu->iowait_boost >>= 1; 1937 } 1938 cpu->last_update = time; 1939 delta_ns = time - cpu->sample.time; 1940 if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL) 1941 return; 1942 1943 if (intel_pstate_sample(cpu, time)) 1944 intel_pstate_adjust_pstate(cpu); 1945 } 1946 1947 static struct pstate_funcs core_funcs = { 1948 .get_max = core_get_max_pstate, 1949 .get_max_physical = core_get_max_pstate_physical, 1950 .get_min = core_get_min_pstate, 1951 .get_turbo = core_get_turbo_pstate, 1952 .get_scaling = core_get_scaling, 1953 .get_val = core_get_val, 1954 }; 1955 1956 static const struct pstate_funcs silvermont_funcs = { 1957 .get_max = atom_get_max_pstate, 1958 .get_max_physical = atom_get_max_pstate, 1959 .get_min = atom_get_min_pstate, 1960 .get_turbo = atom_get_turbo_pstate, 1961 .get_val = atom_get_val, 1962 .get_scaling = silvermont_get_scaling, 1963 .get_vid = atom_get_vid, 1964 }; 1965 1966 static const struct pstate_funcs airmont_funcs = { 1967 .get_max = atom_get_max_pstate, 1968 .get_max_physical = atom_get_max_pstate, 1969 .get_min = atom_get_min_pstate, 1970 .get_turbo = atom_get_turbo_pstate, 1971 .get_val = atom_get_val, 1972 .get_scaling = airmont_get_scaling, 1973 .get_vid = atom_get_vid, 1974 }; 1975 1976 static const struct pstate_funcs knl_funcs = { 1977 .get_max = core_get_max_pstate, 1978 .get_max_physical = core_get_max_pstate_physical, 1979 .get_min = core_get_min_pstate, 1980 .get_turbo = knl_get_turbo_pstate, 1981 .get_aperf_mperf_shift = knl_get_aperf_mperf_shift, 1982 .get_scaling = core_get_scaling, 1983 .get_val = core_get_val, 1984 }; 1985 1986 #define X86_MATCH(model, policy) \ 1987 X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ 1988 X86_FEATURE_APERFMPERF, &policy) 1989 1990 static const struct x86_cpu_id intel_pstate_cpu_ids[] = { 1991 X86_MATCH(SANDYBRIDGE, core_funcs), 1992 X86_MATCH(SANDYBRIDGE_X, core_funcs), 1993 X86_MATCH(ATOM_SILVERMONT, silvermont_funcs), 1994 X86_MATCH(IVYBRIDGE, core_funcs), 1995 X86_MATCH(HASWELL, core_funcs), 1996 X86_MATCH(BROADWELL, core_funcs), 1997 X86_MATCH(IVYBRIDGE_X, core_funcs), 1998 X86_MATCH(HASWELL_X, core_funcs), 1999 X86_MATCH(HASWELL_L, core_funcs), 2000 X86_MATCH(HASWELL_G, core_funcs), 2001 X86_MATCH(BROADWELL_G, core_funcs), 2002 X86_MATCH(ATOM_AIRMONT, airmont_funcs), 2003 X86_MATCH(SKYLAKE_L, core_funcs), 2004 X86_MATCH(BROADWELL_X, core_funcs), 2005 X86_MATCH(SKYLAKE, core_funcs), 2006 X86_MATCH(BROADWELL_D, core_funcs), 2007 X86_MATCH(XEON_PHI_KNL, knl_funcs), 2008 X86_MATCH(XEON_PHI_KNM, knl_funcs), 2009 X86_MATCH(ATOM_GOLDMONT, core_funcs), 2010 X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs), 2011 X86_MATCH(SKYLAKE_X, core_funcs), 2012 {} 2013 }; 2014 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); 2015 2016 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { 2017 X86_MATCH(BROADWELL_D, core_funcs), 2018 X86_MATCH(BROADWELL_X, core_funcs), 2019 X86_MATCH(SKYLAKE_X, core_funcs), 2020 {} 2021 }; 2022 2023 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { 2024 X86_MATCH(KABYLAKE, core_funcs), 2025 {} 2026 }; 2027 2028 static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = { 2029 X86_MATCH(SKYLAKE_X, core_funcs), 2030 X86_MATCH(SKYLAKE, core_funcs), 2031 {} 2032 }; 2033 2034 static int intel_pstate_init_cpu(unsigned int cpunum) 2035 { 2036 struct cpudata *cpu; 2037 2038 cpu = all_cpu_data[cpunum]; 2039 2040 if (!cpu) { 2041 cpu = kzalloc(sizeof(*cpu), GFP_KERNEL); 2042 
static void intel_pstate_update_perf_limits(struct cpudata *cpu,
					    unsigned int policy_min,
					    unsigned int policy_max)
{
	int max_freq = intel_pstate_get_max_freq(cpu);
	int32_t max_policy_perf, min_policy_perf;
	int max_state, turbo_max;

	/*
	 * HWP needs some special consideration, because on BDX the
	 * HWP_REQUEST uses abstract values to represent performance
	 * rather than pure ratios.
	 */
	if (hwp_active) {
		intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
	} else {
		max_state = global.no_turbo || global.turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
		turbo_max = cpu->pstate.turbo_pstate;
	}

	max_policy_perf = max_state * policy_max / max_freq;
	if (policy_max == policy_min) {
		min_policy_perf = max_policy_perf;
	} else {
		min_policy_perf = max_state * policy_min / max_freq;
		min_policy_perf = clamp_t(int32_t, min_policy_perf,
					  0, max_policy_perf);
	}

	pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n",
		 cpu->cpu, max_state, min_policy_perf, max_policy_perf);

	/* Normalize user input to [min_perf, max_perf] */
	if (per_cpu_limits) {
		cpu->min_perf_ratio = min_policy_perf;
		cpu->max_perf_ratio = max_policy_perf;
	} else {
		int32_t global_min, global_max;

		/* Global limits are in percent of the maximum turbo P-state. */
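		/*
		 * Illustration with hypothetical numbers: for turbo_max = 36,
		 * max_perf_pct = 50 and min_perf_pct = 25 this yields
		 * global_max = DIV_ROUND_UP(36 * 50, 100) = 18 and
		 * global_min = DIV_ROUND_UP(36 * 25, 100) = 9.
		 */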
		global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
		global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
		global_min = clamp_t(int32_t, global_min, 0, global_max);

		pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu,
			 global_min, global_max);

		cpu->min_perf_ratio = max(min_policy_perf, global_min);
		cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
		cpu->max_perf_ratio = min(max_policy_perf, global_max);
		cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);

		/* Make sure min_perf <= max_perf */
		cpu->min_perf_ratio = min(cpu->min_perf_ratio,
					  cpu->max_perf_ratio);

	}
	pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu,
		 cpu->max_perf_ratio,
		 cpu->min_perf_ratio);
}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
		 policy->cpuinfo.max_freq, policy->max);

	cpu = all_cpu_data[policy->cpu];
	cpu->policy = policy->policy;

	mutex_lock(&intel_pstate_limits_lock);

	intel_pstate_update_perf_limits(cpu, policy->min, policy->max);

	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
		/*
		 * NOHZ_FULL CPUs need this as the governor callback may not
		 * be invoked on them.
		 */
		intel_pstate_clear_update_util_hook(policy->cpu);
		intel_pstate_max_within_limits(cpu);
	} else {
		intel_pstate_set_update_util_hook(policy->cpu);
	}

	if (hwp_active) {
		/*
		 * If hwp_boost was active before and has been disabled
		 * dynamically, the update util hook needs to be cleared.
		 */
		if (!hwp_boost)
			intel_pstate_clear_update_util_hook(policy->cpu);
		intel_pstate_hwp_set(policy->cpu);
	}

	mutex_unlock(&intel_pstate_limits_lock);

	return 0;
}

static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
					   struct cpufreq_policy_data *policy)
{
	if (!hwp_active &&
	    cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
	    policy->max < policy->cpuinfo.max_freq &&
	    policy->max > cpu->pstate.max_freq) {
		pr_debug("policy->max > max non turbo frequency\n");
		policy->max = policy->cpuinfo.max_freq;
	}
}

static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
					   struct cpufreq_policy_data *policy)
{
	update_turbo_state();
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     intel_pstate_get_max_freq(cpu));

	intel_pstate_adjust_policy_max(cpu, policy);
}

static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
{
	intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy);

	return 0;
}

static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
	intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	pr_debug("CPU %d exiting\n", policy->cpu);

	intel_pstate_clear_update_util_hook(policy->cpu);
	if (hwp_active) {
		intel_pstate_hwp_save_state(policy);
		intel_pstate_hwp_force_min_perf(policy->cpu);
	} else {
		intel_cpufreq_stop_cpu(policy);
	}
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	intel_pstate_exit_perf_limits(policy);

	policy->fast_switch_possible = false;

	return 0;
}

static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	cpu->max_perf_ratio = 0xFF;
	cpu->min_perf_ratio = 0;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	update_turbo_state();
	global.turbo_disabled_mf = global.turbo_disabled;
	policy->cpuinfo.max_freq = global.turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	policy->cpuinfo.max_freq *= cpu->pstate.scaling;

	if (hwp_active) {
		unsigned int max_freq;

		max_freq = global.turbo_disabled ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
		if (max_freq < policy->cpuinfo.max_freq)
			policy->cpuinfo.max_freq = max_freq;
	}

	intel_pstate_init_acpi_perf_limits(policy);

	policy->fast_switch_possible = true;

	return 0;
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	/*
	 * Set the policy to powersave to provide a valid fallback value in case
	 * the default cpufreq governor is neither powersave nor performance.
	 */
	policy->policy = CPUFREQ_POLICY_POWERSAVE;

	return 0;
}

static struct cpufreq_driver intel_pstate = {
	.flags = CPUFREQ_CONST_LOOPS,
	.verify = intel_pstate_verify_policy,
	.setpolicy = intel_pstate_set_policy,
	.suspend = intel_pstate_hwp_save_state,
	.resume = intel_pstate_resume,
	.init = intel_pstate_cpu_init,
	.exit = intel_pstate_cpu_exit,
	.stop_cpu = intel_pstate_stop_cpu,
	.update_limits = intel_pstate_update_limits,
	.name = "intel_pstate",
};

static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	intel_pstate_verify_cpu_policy(cpu, policy);
	intel_pstate_update_perf_limits(cpu, policy->min, policy->max);

	return 0;
}

/* Use of trace in passive mode:
 *
 * In passive mode the trace core_busy field (also known as the
 * performance field, and labelled as such on the graphs; also known as
 * core_avg_perf) is not needed and so is re-assigned to indicate if the
 * driver call was via the normal or fast switch path. Various graphs
 * output from the intel_pstate_tracer.py utility that include core_busy
 * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%,
 * so we use 10 to indicate the normal path through the driver, and
 * 90 to indicate the fast switch path through the driver.
 * The scaled_busy field is not used, and is set to 0.
 */

#define INTEL_PSTATE_TRACE_TARGET 10
#define INTEL_PSTATE_TRACE_FAST_SWITCH 90

static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate)
{
	struct sample *sample;

	if (!trace_pstate_sample_enabled())
		return;

	if (!intel_pstate_sample(cpu, ktime_get()))
		return;

	sample = &cpu->sample;
	trace_pstate_sample(trace_type,
		0,
		old_pstate,
		cpu->pstate.current_pstate,
		sample->mperf,
		sample->aperf,
		sample->tsc,
		get_avg_frequency(cpu),
		fp_toint(cpu->iowait_boost * 100));
}

static int intel_cpufreq_target(struct cpufreq_policy *policy,
				unsigned int target_freq,
				unsigned int relation)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	struct cpufreq_freqs freqs;
	int target_pstate, old_pstate;

	update_turbo_state();

	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	switch (relation) {
	case CPUFREQ_RELATION_L:
		target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
		break;
	case CPUFREQ_RELATION_H:
		target_pstate = freqs.new / cpu->pstate.scaling;
		break;
	default:
		target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
		break;
	}
	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	old_pstate = cpu->pstate.current_pstate;
	if (target_pstate != cpu->pstate.current_pstate) {
		cpu->pstate.current_pstate = target_pstate;
		wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
			      pstate_funcs.get_val(cpu, target_pstate));
	}
	freqs.new = target_pstate * cpu->pstate.scaling;
	intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_TARGET, old_pstate);
	cpufreq_freq_transition_end(policy, &freqs, false);

	return 0;
}

static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int target_pstate, old_pstate;

	update_turbo_state();

	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	old_pstate = cpu->pstate.current_pstate;
	intel_pstate_update_pstate(cpu, target_pstate);
	intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
	return target_pstate * cpu->pstate.scaling;
}

static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	int max_state, turbo_max, min_freq, max_freq, ret;
	struct freq_qos_request *req;
	struct cpudata *cpu;
	struct device *dev;

	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	ret = __intel_pstate_cpu_init(policy);
	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
	policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
	/* This reflects the intel_pstate_get_cpu_pstates() setting. */
	policy->cur = policy->cpuinfo.min_freq;

	req = kcalloc(2, sizeof(*req), GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto pstate_exit;
	}

	cpu = all_cpu_data[policy->cpu];

	if (hwp_active)
		intel_pstate_get_hwp_max(policy->cpu, &turbo_max, &max_state);
	else
		turbo_max = cpu->pstate.turbo_pstate;

	min_freq = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
	min_freq *= cpu->pstate.scaling;
	max_freq = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
	max_freq *= cpu->pstate.scaling;

	ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN,
				   min_freq);
	if (ret < 0) {
		dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
		goto free_req;
	}

	ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX,
				   max_freq);
	if (ret < 0) {
		dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
		goto remove_min_req;
	}

	policy->driver_data = req;

	return 0;

remove_min_req:
	freq_qos_remove_request(req);
free_req:
	kfree(req);
pstate_exit:
	intel_pstate_exit_perf_limits(policy);

	return ret;
}

static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct freq_qos_request *req;

	req = policy->driver_data;

	freq_qos_remove_request(req + 1);
	freq_qos_remove_request(req);
	kfree(req);

	return intel_pstate_cpu_exit(policy);
}

static struct cpufreq_driver intel_cpufreq = {
	.flags = CPUFREQ_CONST_LOOPS,
	.verify = intel_cpufreq_verify_policy,
	.target = intel_cpufreq_target,
	.fast_switch = intel_cpufreq_fast_switch,
	.init = intel_cpufreq_cpu_init,
	.exit = intel_cpufreq_cpu_exit,
	.stop_cpu = intel_cpufreq_stop_cpu,
	.update_limits = intel_pstate_update_limits,
	.name = "intel_cpufreq",
};

static struct cpufreq_driver *default_driver = &intel_pstate;

static void intel_pstate_driver_cleanup(void)
{
	unsigned int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			if (intel_pstate_driver == &intel_pstate)
				intel_pstate_clear_update_util_hook(cpu);

			kfree(all_cpu_data[cpu]);
			all_cpu_data[cpu] = NULL;
		}
	}
	put_online_cpus();
	intel_pstate_driver = NULL;
}

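/*
 * Driver registration helpers: registering resets the global limits to their
 * defaults (max_perf_pct = 100, min_perf_pct from min_perf_pct_min()), and
 * the register/unregister pair is what intel_pstate_update_status() below
 * uses to switch between the active (intel_pstate) and passive
 * (intel_cpufreq) modes of operation.
 */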
static int intel_pstate_register_driver(struct cpufreq_driver *driver)
{
	int ret;

	memset(&global, 0, sizeof(global));
	global.max_perf_pct = 100;

	intel_pstate_driver = driver;
	ret = cpufreq_register_driver(intel_pstate_driver);
	if (ret) {
		intel_pstate_driver_cleanup();
		return ret;
	}

	global.min_perf_pct = min_perf_pct_min();

	return 0;
}

static int intel_pstate_unregister_driver(void)
{
	if (hwp_active)
		return -EBUSY;

	cpufreq_unregister_driver(intel_pstate_driver);
	intel_pstate_driver_cleanup();

	return 0;
}

static ssize_t intel_pstate_show_status(char *buf)
{
	if (!intel_pstate_driver)
		return sprintf(buf, "off\n");

	return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
					"active" : "passive");
}

static int intel_pstate_update_status(const char *buf, size_t size)
{
	int ret;

	if (size == 3 && !strncmp(buf, "off", size))
		return intel_pstate_driver ?
			intel_pstate_unregister_driver() : -EINVAL;

	if (size == 6 && !strncmp(buf, "active", size)) {
		if (intel_pstate_driver) {
			if (intel_pstate_driver == &intel_pstate)
				return 0;

			ret = intel_pstate_unregister_driver();
			if (ret)
				return ret;
		}

		return intel_pstate_register_driver(&intel_pstate);
	}

	if (size == 7 && !strncmp(buf, "passive", size)) {
		if (intel_pstate_driver) {
			if (intel_pstate_driver == &intel_cpufreq)
				return 0;

			ret = intel_pstate_unregister_driver();
			if (ret)
				return ret;
		}

		return intel_pstate_register_driver(&intel_cpufreq);
	}

	return -EINVAL;
}

static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
static unsigned int force_load __initdata;

static int __init intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.get_val = funcs->get_val;
	pstate_funcs.get_vid = funcs->get_vid;
	pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
}

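/*
 * The ACPI helpers below detect whether the platform firmware already manages
 * P-states (vendor _PSS/PCCH tables, or _PPC on the server platforms listed
 * in plat_info); intel_pstate_platform_pwr_mgmt_exists() uses them to decide
 * whether this driver should back off at init time.
 */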
#ifdef CONFIG_ACPI

static bool __init intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	pr_debug("ACPI _PSS not found\n");
	return true;
}

static bool __init intel_pstate_no_acpi_pcch(void)
{
	acpi_status status;
	acpi_handle handle;

	status = acpi_get_handle(NULL, "\\_SB", &handle);
	if (ACPI_FAILURE(status))
		goto not_found;

	if (acpi_has_method(handle, "PCCH"))
		return false;

not_found:
	pr_debug("ACPI PCCH not found\n");
	return true;
}

static bool __init intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	pr_debug("ACPI _PPC not found\n");
	return false;
}

enum {
	PSS,
	PPC,
};

/* Hardware vendor-specific info that has its own power management modes */
static struct acpi_platform_list plat_info[] __initdata = {
	{"HP    ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS},
	{"ORACLE", "X4-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4-2L   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4-2B   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X3-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X3-2L   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X3-2B   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "X6-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
	{ } /* End */
};

#define BITMASK_OOB	(BIT(8) | BIT(18))

static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
	const struct x86_cpu_id *id;
	u64 misc_pwr;
	int idx;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & BITMASK_OOB) {
			pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n");
			pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n");
			return true;
		}
	}

	idx = acpi_match_platform_list(plat_info);
	if (idx < 0)
		return false;

	switch (plat_info[idx].data) {
	case PSS:
		if (!intel_pstate_no_acpi_pss())
			return false;

		return intel_pstate_no_acpi_pcch();
	case PPC:
		return intel_pstate_has_acpi_ppc() && !force_load;
	}

	return false;
}

static void intel_pstate_request_control_from_smm(void)
{
	/*
	 * It may be unsafe to request P-states control from SMM if _PPC support
	 * has not been enabled.
	 */
	if (acpi_ppc)
		acpi_processor_pstate_control();
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */

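/*
 * Any CPU advertising X86_FEATURE_HWP matches the table below and makes the
 * driver prefer HWP; the non-zero driver_data marks Broadwell server parts,
 * which apparently need dedicated handling (the value ends up in hwp_mode_bdw
 * in intel_pstate_init() below).
 */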
2790 */ 2791 if (acpi_ppc) 2792 acpi_processor_pstate_control(); 2793 } 2794 #else /* CONFIG_ACPI not enabled */ 2795 static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; } 2796 static inline bool intel_pstate_has_acpi_ppc(void) { return false; } 2797 static inline void intel_pstate_request_control_from_smm(void) {} 2798 #endif /* CONFIG_ACPI */ 2799 2800 #define INTEL_PSTATE_HWP_BROADWELL 0x01 2801 2802 #define X86_MATCH_HWP(model, hwp_mode) \ 2803 X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ 2804 X86_FEATURE_HWP, hwp_mode) 2805 2806 static const struct x86_cpu_id hwp_support_ids[] __initconst = { 2807 X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), 2808 X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL), 2809 X86_MATCH_HWP(ANY, 0), 2810 {} 2811 }; 2812 2813 static int __init intel_pstate_init(void) 2814 { 2815 const struct x86_cpu_id *id; 2816 int rc; 2817 2818 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) 2819 return -ENODEV; 2820 2821 if (no_load) 2822 return -ENODEV; 2823 2824 id = x86_match_cpu(hwp_support_ids); 2825 if (id) { 2826 copy_cpu_funcs(&core_funcs); 2827 if (!no_hwp) { 2828 hwp_active++; 2829 hwp_mode_bdw = id->driver_data; 2830 intel_pstate.attr = hwp_cpufreq_attrs; 2831 goto hwp_cpu_matched; 2832 } 2833 } else { 2834 id = x86_match_cpu(intel_pstate_cpu_ids); 2835 if (!id) { 2836 pr_info("CPU model not supported\n"); 2837 return -ENODEV; 2838 } 2839 2840 copy_cpu_funcs((struct pstate_funcs *)id->driver_data); 2841 } 2842 2843 if (intel_pstate_msrs_not_valid()) { 2844 pr_info("Invalid MSRs\n"); 2845 return -ENODEV; 2846 } 2847 /* Without HWP start in the passive mode. */ 2848 default_driver = &intel_cpufreq; 2849 2850 hwp_cpu_matched: 2851 /* 2852 * The Intel pstate driver will be ignored if the platform 2853 * firmware has its own power management modes. 
2854 */ 2855 if (intel_pstate_platform_pwr_mgmt_exists()) { 2856 pr_info("P-states controlled by the platform\n"); 2857 return -ENODEV; 2858 } 2859 2860 if (!hwp_active && hwp_only) 2861 return -ENOTSUPP; 2862 2863 pr_info("Intel P-state driver initializing\n"); 2864 2865 all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus())); 2866 if (!all_cpu_data) 2867 return -ENOMEM; 2868 2869 intel_pstate_request_control_from_smm(); 2870 2871 intel_pstate_sysfs_expose_params(); 2872 2873 mutex_lock(&intel_pstate_driver_lock); 2874 rc = intel_pstate_register_driver(default_driver); 2875 mutex_unlock(&intel_pstate_driver_lock); 2876 if (rc) 2877 return rc; 2878 2879 if (hwp_active) { 2880 const struct x86_cpu_id *id; 2881 2882 id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids); 2883 if (id) { 2884 set_power_ctl_ee_state(false); 2885 pr_info("Disabling energy efficiency optimization\n"); 2886 } 2887 2888 pr_info("HWP enabled\n"); 2889 } 2890 2891 return 0; 2892 } 2893 device_initcall(intel_pstate_init); 2894 2895 static int __init intel_pstate_setup(char *str) 2896 { 2897 if (!str) 2898 return -EINVAL; 2899 2900 if (!strcmp(str, "disable")) { 2901 no_load = 1; 2902 } else if (!strcmp(str, "passive")) { 2903 default_driver = &intel_cpufreq; 2904 no_hwp = 1; 2905 } 2906 if (!strcmp(str, "no_hwp")) { 2907 pr_info("HWP disabled\n"); 2908 no_hwp = 1; 2909 } 2910 if (!strcmp(str, "force")) 2911 force_load = 1; 2912 if (!strcmp(str, "hwp_only")) 2913 hwp_only = 1; 2914 if (!strcmp(str, "per_cpu_perf_limits")) 2915 per_cpu_limits = true; 2916 2917 #ifdef CONFIG_ACPI 2918 if (!strcmp(str, "support_acpi_ppc")) 2919 acpi_ppc = true; 2920 #endif 2921 2922 return 0; 2923 } 2924 early_param("intel_pstate", intel_pstate_setup); 2925 2926 MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>"); 2927 MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors"); 2928 MODULE_LICENSE("GPL"); 2929