// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched/cpufreq.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <linux/pm_qos.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
#include "../drivers/thermal/intel/thermal_interrupt.h"

#define INTEL_PSTATE_SAMPLING_INTERVAL	(10 * NSEC_PER_MSEC)

#define INTEL_CPUFREQ_TRANSITION_LATENCY	20000
#define INTEL_CPUFREQ_TRANSITION_DELAY_HWP	5000
#define INTEL_CPUFREQ_TRANSITION_DELAY		500

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
#endif

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))

#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

static inline u64 mul_ext_fp(u64 x, u64 y)
{
	return (x * y) >> EXT_FRAC_BITS;
}

static inline u64 div_ext_fp(u64 x, u64 y)
{
	return div64_u64(x << EXT_FRAC_BITS, y);
}

/*
 * Illustrative sketch of the 8.8 fixed-point helpers above (values chosen
 * purely for illustration): int_tofp(3) == 0x300,
 * mul_fp(int_tofp(3), int_tofp(2)) == int_tofp(6), and
 * div_fp(int_tofp(1), int_tofp(8)) == ONE_EIGHTH_FP.
 */

/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate next
 *			P state. This can be different from core_avg_perf
 *			to account for cpu idle period
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing next P State.
107 */ 108 struct sample { 109 int32_t core_avg_perf; 110 int32_t busy_scaled; 111 u64 aperf; 112 u64 mperf; 113 u64 tsc; 114 u64 time; 115 }; 116 117 /** 118 * struct pstate_data - Store P state data 119 * @current_pstate: Current requested P state 120 * @min_pstate: Min P state possible for this platform 121 * @max_pstate: Max P state possible for this platform 122 * @max_pstate_physical:This is physical Max P state for a processor 123 * This can be higher than the max_pstate which can 124 * be limited by platform thermal design power limits 125 * @perf_ctl_scaling: PERF_CTL P-state to frequency scaling factor 126 * @scaling: Scaling factor between performance and frequency 127 * @turbo_pstate: Max Turbo P state possible for this platform 128 * @min_freq: @min_pstate frequency in cpufreq units 129 * @max_freq: @max_pstate frequency in cpufreq units 130 * @turbo_freq: @turbo_pstate frequency in cpufreq units 131 * 132 * Stores the per cpu model P state limits and current P state. 133 */ 134 struct pstate_data { 135 int current_pstate; 136 int min_pstate; 137 int max_pstate; 138 int max_pstate_physical; 139 int perf_ctl_scaling; 140 int scaling; 141 int turbo_pstate; 142 unsigned int min_freq; 143 unsigned int max_freq; 144 unsigned int turbo_freq; 145 }; 146 147 /** 148 * struct vid_data - Stores voltage information data 149 * @min: VID data for this platform corresponding to 150 * the lowest P state 151 * @max: VID data corresponding to the highest P State. 152 * @turbo: VID data for turbo P state 153 * @ratio: Ratio of (vid max - vid min) / 154 * (max P state - Min P State) 155 * 156 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling) 157 * This data is used in Atom platforms, where in addition to target P state, 158 * the voltage data needs to be specified to select next P State. 159 */ 160 struct vid_data { 161 int min; 162 int max; 163 int turbo; 164 int32_t ratio; 165 }; 166 167 /** 168 * struct global_params - Global parameters, mostly tunable via sysfs. 169 * @no_turbo: Whether or not to use turbo P-states. 170 * @turbo_disabled: Whether or not turbo P-states are available at all, 171 * based on the MSR_IA32_MISC_ENABLE value and whether or 172 * not the maximum reported turbo P-state is different from 173 * the maximum reported non-turbo one. 174 * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq. 175 * @min_perf_pct: Minimum capacity limit in percent of the maximum turbo 176 * P-state capacity. 177 * @max_perf_pct: Maximum capacity limit in percent of the maximum turbo 178 * P-state capacity. 179 */ 180 struct global_params { 181 bool no_turbo; 182 bool turbo_disabled; 183 bool turbo_disabled_mf; 184 int max_perf_pct; 185 int min_perf_pct; 186 }; 187 188 /** 189 * struct cpudata - Per CPU instance data storage 190 * @cpu: CPU number for this instance data 191 * @policy: CPUFreq policy value 192 * @update_util: CPUFreq utility callback information 193 * @update_util_set: CPUFreq utility callback is set 194 * @iowait_boost: iowait-related boost fraction 195 * @last_update: Time of the last update. 
196 * @pstate: Stores P state limits for this CPU 197 * @vid: Stores VID limits for this CPU 198 * @last_sample_time: Last Sample time 199 * @aperf_mperf_shift: APERF vs MPERF counting frequency difference 200 * @prev_aperf: Last APERF value read from APERF MSR 201 * @prev_mperf: Last MPERF value read from MPERF MSR 202 * @prev_tsc: Last timestamp counter (TSC) value 203 * @prev_cummulative_iowait: IO Wait time difference from last and 204 * current sample 205 * @sample: Storage for storing last Sample data 206 * @min_perf_ratio: Minimum capacity in terms of PERF or HWP ratios 207 * @max_perf_ratio: Maximum capacity in terms of PERF or HWP ratios 208 * @acpi_perf_data: Stores ACPI perf information read from _PSS 209 * @valid_pss_table: Set to true for valid ACPI _PSS entries found 210 * @epp_powersave: Last saved HWP energy performance preference 211 * (EPP) or energy performance bias (EPB), 212 * when policy switched to performance 213 * @epp_policy: Last saved policy used to set EPP/EPB 214 * @epp_default: Power on default HWP energy performance 215 * preference/bias 216 * @epp_cached Cached HWP energy-performance preference value 217 * @hwp_req_cached: Cached value of the last HWP Request MSR 218 * @hwp_cap_cached: Cached value of the last HWP Capabilities MSR 219 * @last_io_update: Last time when IO wake flag was set 220 * @sched_flags: Store scheduler flags for possible cross CPU update 221 * @hwp_boost_min: Last HWP boosted min performance 222 * @suspended: Whether or not the driver has been suspended. 223 * @hwp_notify_work: workqueue for HWP notifications. 224 * 225 * This structure stores per CPU instance data for all CPUs. 226 */ 227 struct cpudata { 228 int cpu; 229 230 unsigned int policy; 231 struct update_util_data update_util; 232 bool update_util_set; 233 234 struct pstate_data pstate; 235 struct vid_data vid; 236 237 u64 last_update; 238 u64 last_sample_time; 239 u64 aperf_mperf_shift; 240 u64 prev_aperf; 241 u64 prev_mperf; 242 u64 prev_tsc; 243 u64 prev_cummulative_iowait; 244 struct sample sample; 245 int32_t min_perf_ratio; 246 int32_t max_perf_ratio; 247 #ifdef CONFIG_ACPI 248 struct acpi_processor_performance acpi_perf_data; 249 bool valid_pss_table; 250 #endif 251 unsigned int iowait_boost; 252 s16 epp_powersave; 253 s16 epp_policy; 254 s16 epp_default; 255 s16 epp_cached; 256 u64 hwp_req_cached; 257 u64 hwp_cap_cached; 258 u64 last_io_update; 259 unsigned int sched_flags; 260 u32 hwp_boost_min; 261 bool suspended; 262 struct delayed_work hwp_notify_work; 263 }; 264 265 static struct cpudata **all_cpu_data; 266 267 /** 268 * struct pstate_funcs - Per CPU model specific callbacks 269 * @get_max: Callback to get maximum non turbo effective P state 270 * @get_max_physical: Callback to get maximum non turbo physical P state 271 * @get_min: Callback to get minimum P state 272 * @get_turbo: Callback to get turbo P state 273 * @get_scaling: Callback to get frequency scaling factor 274 * @get_cpu_scaling: Get frequency scaling factor for a given cpu 275 * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference 276 * @get_val: Callback to convert P state to actual MSR write value 277 * @get_vid: Callback to get VID data for Atom platforms 278 * 279 * Core and Atom CPU models have different way to get P State limits. This 280 * structure is used to store those callbacks. 
281 */ 282 struct pstate_funcs { 283 int (*get_max)(void); 284 int (*get_max_physical)(void); 285 int (*get_min)(void); 286 int (*get_turbo)(void); 287 int (*get_scaling)(void); 288 int (*get_cpu_scaling)(int cpu); 289 int (*get_aperf_mperf_shift)(void); 290 u64 (*get_val)(struct cpudata*, int pstate); 291 void (*get_vid)(struct cpudata *); 292 }; 293 294 static struct pstate_funcs pstate_funcs __read_mostly; 295 296 static int hwp_active __read_mostly; 297 static int hwp_mode_bdw __read_mostly; 298 static bool per_cpu_limits __read_mostly; 299 static bool hwp_boost __read_mostly; 300 301 static struct cpufreq_driver *intel_pstate_driver __read_mostly; 302 303 #ifdef CONFIG_ACPI 304 static bool acpi_ppc; 305 #endif 306 307 static struct global_params global; 308 309 static DEFINE_MUTEX(intel_pstate_driver_lock); 310 static DEFINE_MUTEX(intel_pstate_limits_lock); 311 312 #ifdef CONFIG_ACPI 313 314 static bool intel_pstate_acpi_pm_profile_server(void) 315 { 316 if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER || 317 acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER) 318 return true; 319 320 return false; 321 } 322 323 static bool intel_pstate_get_ppc_enable_status(void) 324 { 325 if (intel_pstate_acpi_pm_profile_server()) 326 return true; 327 328 return acpi_ppc; 329 } 330 331 #ifdef CONFIG_ACPI_CPPC_LIB 332 333 /* The work item is needed to avoid CPU hotplug locking issues */ 334 static void intel_pstste_sched_itmt_work_fn(struct work_struct *work) 335 { 336 sched_set_itmt_support(); 337 } 338 339 static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn); 340 341 static void intel_pstate_set_itmt_prio(int cpu) 342 { 343 struct cppc_perf_caps cppc_perf; 344 static u32 max_highest_perf = 0, min_highest_perf = U32_MAX; 345 int ret; 346 347 ret = cppc_get_perf_caps(cpu, &cppc_perf); 348 if (ret) 349 return; 350 351 /* 352 * The priorities can be set regardless of whether or not 353 * sched_set_itmt_support(true) has been called and it is valid to 354 * update them at any time after it has been called. 355 */ 356 sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu); 357 358 if (max_highest_perf <= min_highest_perf) { 359 if (cppc_perf.highest_perf > max_highest_perf) 360 max_highest_perf = cppc_perf.highest_perf; 361 362 if (cppc_perf.highest_perf < min_highest_perf) 363 min_highest_perf = cppc_perf.highest_perf; 364 365 if (max_highest_perf > min_highest_perf) { 366 /* 367 * This code can be run during CPU online under the 368 * CPU hotplug locks, so sched_set_itmt_support() 369 * cannot be called from here. Queue up a work item 370 * to invoke it. 
371 */ 372 schedule_work(&sched_itmt_work); 373 } 374 } 375 } 376 377 static int intel_pstate_get_cppc_guaranteed(int cpu) 378 { 379 struct cppc_perf_caps cppc_perf; 380 int ret; 381 382 ret = cppc_get_perf_caps(cpu, &cppc_perf); 383 if (ret) 384 return ret; 385 386 if (cppc_perf.guaranteed_perf) 387 return cppc_perf.guaranteed_perf; 388 389 return cppc_perf.nominal_perf; 390 } 391 392 static u32 intel_pstate_cppc_nominal(int cpu) 393 { 394 u64 nominal_perf; 395 396 if (cppc_get_nominal_perf(cpu, &nominal_perf)) 397 return 0; 398 399 return nominal_perf; 400 } 401 #else /* CONFIG_ACPI_CPPC_LIB */ 402 static inline void intel_pstate_set_itmt_prio(int cpu) 403 { 404 } 405 #endif /* CONFIG_ACPI_CPPC_LIB */ 406 407 static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 408 { 409 struct cpudata *cpu; 410 int ret; 411 int i; 412 413 if (hwp_active) { 414 intel_pstate_set_itmt_prio(policy->cpu); 415 return; 416 } 417 418 if (!intel_pstate_get_ppc_enable_status()) 419 return; 420 421 cpu = all_cpu_data[policy->cpu]; 422 423 ret = acpi_processor_register_performance(&cpu->acpi_perf_data, 424 policy->cpu); 425 if (ret) 426 return; 427 428 /* 429 * Check if the control value in _PSS is for PERF_CTL MSR, which should 430 * guarantee that the states returned by it map to the states in our 431 * list directly. 432 */ 433 if (cpu->acpi_perf_data.control_register.space_id != 434 ACPI_ADR_SPACE_FIXED_HARDWARE) 435 goto err; 436 437 /* 438 * If there is only one entry _PSS, simply ignore _PSS and continue as 439 * usual without taking _PSS into account 440 */ 441 if (cpu->acpi_perf_data.state_count < 2) 442 goto err; 443 444 pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu); 445 for (i = 0; i < cpu->acpi_perf_data.state_count; i++) { 446 pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n", 447 (i == cpu->acpi_perf_data.state ? '*' : ' '), i, 448 (u32) cpu->acpi_perf_data.states[i].core_frequency, 449 (u32) cpu->acpi_perf_data.states[i].power, 450 (u32) cpu->acpi_perf_data.states[i].control); 451 } 452 453 /* 454 * The _PSS table doesn't contain whole turbo frequency range. 455 * This just contains +1 MHZ above the max non turbo frequency, 456 * with control value corresponding to max turbo ratio. But 457 * when cpufreq set policy is called, it will call with this 458 * max frequency, which will cause a reduced performance as 459 * this driver uses real max turbo frequency as the max 460 * frequency. So correct this frequency in _PSS table to 461 * correct max turbo frequency based on the turbo state. 462 * Also need to convert to MHz as _PSS freq is in MHz. 
463 */ 464 if (!global.turbo_disabled) 465 cpu->acpi_perf_data.states[0].core_frequency = 466 policy->cpuinfo.max_freq / 1000; 467 cpu->valid_pss_table = true; 468 pr_debug("_PPC limits will be enforced\n"); 469 470 return; 471 472 err: 473 cpu->valid_pss_table = false; 474 acpi_processor_unregister_performance(policy->cpu); 475 } 476 477 static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 478 { 479 struct cpudata *cpu; 480 481 cpu = all_cpu_data[policy->cpu]; 482 if (!cpu->valid_pss_table) 483 return; 484 485 acpi_processor_unregister_performance(policy->cpu); 486 } 487 #else /* CONFIG_ACPI */ 488 static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 489 { 490 } 491 492 static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 493 { 494 } 495 496 static inline bool intel_pstate_acpi_pm_profile_server(void) 497 { 498 return false; 499 } 500 #endif /* CONFIG_ACPI */ 501 502 #ifndef CONFIG_ACPI_CPPC_LIB 503 static inline int intel_pstate_get_cppc_guaranteed(int cpu) 504 { 505 return -ENOTSUPP; 506 } 507 #endif /* CONFIG_ACPI_CPPC_LIB */ 508 509 /** 510 * intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels. 511 * @cpu: Target CPU. 512 * 513 * On hybrid processors, HWP may expose more performance levels than there are 514 * P-states accessible through the PERF_CTL interface. If that happens, the 515 * scaling factor between HWP performance levels and CPU frequency will be less 516 * than the scaling factor between P-state values and CPU frequency. 517 * 518 * In that case, adjust the CPU parameters used in computations accordingly. 519 */ 520 static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu) 521 { 522 int perf_ctl_max_phys = cpu->pstate.max_pstate_physical; 523 int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; 524 int perf_ctl_turbo = pstate_funcs.get_turbo(); 525 int turbo_freq = perf_ctl_turbo * perf_ctl_scaling; 526 int scaling = cpu->pstate.scaling; 527 528 pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys); 529 pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, pstate_funcs.get_max()); 530 pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo); 531 pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling); 532 pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate); 533 pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate); 534 pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling); 535 536 /* 537 * If the product of the HWP performance scaling factor and the HWP_CAP 538 * highest performance is greater than the maximum turbo frequency 539 * corresponding to the pstate_funcs.get_turbo() return value, the 540 * scaling factor is too high, so recompute it to make the HWP_CAP 541 * highest performance correspond to the maximum turbo frequency. 
542 */ 543 cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling; 544 if (turbo_freq < cpu->pstate.turbo_freq) { 545 cpu->pstate.turbo_freq = turbo_freq; 546 scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate); 547 cpu->pstate.scaling = scaling; 548 549 pr_debug("CPU%d: refined HWP-to-frequency scaling factor: %d\n", 550 cpu->cpu, scaling); 551 } 552 553 cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling, 554 perf_ctl_scaling); 555 556 cpu->pstate.max_pstate_physical = 557 DIV_ROUND_UP(perf_ctl_max_phys * perf_ctl_scaling, 558 scaling); 559 560 cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling; 561 /* 562 * Cast the min P-state value retrieved via pstate_funcs.get_min() to 563 * the effective range of HWP performance levels. 564 */ 565 cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling); 566 } 567 568 static inline void update_turbo_state(void) 569 { 570 u64 misc_en; 571 struct cpudata *cpu; 572 573 cpu = all_cpu_data[0]; 574 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); 575 global.turbo_disabled = 576 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || 577 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); 578 } 579 580 static int min_perf_pct_min(void) 581 { 582 struct cpudata *cpu = all_cpu_data[0]; 583 int turbo_pstate = cpu->pstate.turbo_pstate; 584 585 return turbo_pstate ? 586 (cpu->pstate.min_pstate * 100 / turbo_pstate) : 0; 587 } 588 589 static s16 intel_pstate_get_epb(struct cpudata *cpu_data) 590 { 591 u64 epb; 592 int ret; 593 594 if (!boot_cpu_has(X86_FEATURE_EPB)) 595 return -ENXIO; 596 597 ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); 598 if (ret) 599 return (s16)ret; 600 601 return (s16)(epb & 0x0f); 602 } 603 604 static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data) 605 { 606 s16 epp; 607 608 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { 609 /* 610 * When hwp_req_data is 0, means that caller didn't read 611 * MSR_HWP_REQUEST, so need to read and get EPP. 
612 */ 613 if (!hwp_req_data) { 614 epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, 615 &hwp_req_data); 616 if (epp) 617 return epp; 618 } 619 epp = (hwp_req_data >> 24) & 0xff; 620 } else { 621 /* When there is no EPP present, HWP uses EPB settings */ 622 epp = intel_pstate_get_epb(cpu_data); 623 } 624 625 return epp; 626 } 627 628 static int intel_pstate_set_epb(int cpu, s16 pref) 629 { 630 u64 epb; 631 int ret; 632 633 if (!boot_cpu_has(X86_FEATURE_EPB)) 634 return -ENXIO; 635 636 ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); 637 if (ret) 638 return ret; 639 640 epb = (epb & ~0x0f) | pref; 641 wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb); 642 643 return 0; 644 } 645 646 /* 647 * EPP/EPB display strings corresponding to EPP index in the 648 * energy_perf_strings[] 649 * index String 650 *------------------------------------- 651 * 0 default 652 * 1 performance 653 * 2 balance_performance 654 * 3 balance_power 655 * 4 power 656 */ 657 static const char * const energy_perf_strings[] = { 658 "default", 659 "performance", 660 "balance_performance", 661 "balance_power", 662 "power", 663 NULL 664 }; 665 static const unsigned int epp_values[] = { 666 HWP_EPP_PERFORMANCE, 667 HWP_EPP_BALANCE_PERFORMANCE, 668 HWP_EPP_BALANCE_POWERSAVE, 669 HWP_EPP_POWERSAVE 670 }; 671 672 static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp) 673 { 674 s16 epp; 675 int index = -EINVAL; 676 677 *raw_epp = 0; 678 epp = intel_pstate_get_epp(cpu_data, 0); 679 if (epp < 0) 680 return epp; 681 682 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { 683 if (epp == HWP_EPP_PERFORMANCE) 684 return 1; 685 if (epp == HWP_EPP_BALANCE_PERFORMANCE) 686 return 2; 687 if (epp == HWP_EPP_BALANCE_POWERSAVE) 688 return 3; 689 if (epp == HWP_EPP_POWERSAVE) 690 return 4; 691 *raw_epp = epp; 692 return 0; 693 } else if (boot_cpu_has(X86_FEATURE_EPB)) { 694 /* 695 * Range: 696 * 0x00-0x03 : Performance 697 * 0x04-0x07 : Balance performance 698 * 0x08-0x0B : Balance power 699 * 0x0C-0x0F : Power 700 * The EPB is a 4 bit value, but our ranges restrict the 701 * value which can be set. Here only using top two bits 702 * effectively. 703 */ 704 index = (epp >> 2) + 1; 705 } 706 707 return index; 708 } 709 710 static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp) 711 { 712 int ret; 713 714 /* 715 * Use the cached HWP Request MSR value, because in the active mode the 716 * register itself may be updated by intel_pstate_hwp_boost_up() or 717 * intel_pstate_hwp_boost_down() at any time. 718 */ 719 u64 value = READ_ONCE(cpu->hwp_req_cached); 720 721 value &= ~GENMASK_ULL(31, 24); 722 value |= (u64)epp << 24; 723 /* 724 * The only other updater of hwp_req_cached in the active mode, 725 * intel_pstate_hwp_set(), is called under the same lock as this 726 * function, so it cannot run in parallel with the update below. 
727 */ 728 WRITE_ONCE(cpu->hwp_req_cached, value); 729 ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); 730 if (!ret) 731 cpu->epp_cached = epp; 732 733 return ret; 734 } 735 736 static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data, 737 int pref_index, bool use_raw, 738 u32 raw_epp) 739 { 740 int epp = -EINVAL; 741 int ret; 742 743 if (!pref_index) 744 epp = cpu_data->epp_default; 745 746 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { 747 if (use_raw) 748 epp = raw_epp; 749 else if (epp == -EINVAL) 750 epp = epp_values[pref_index - 1]; 751 752 /* 753 * To avoid confusion, refuse to set EPP to any values different 754 * from 0 (performance) if the current policy is "performance", 755 * because those values would be overridden. 756 */ 757 if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) 758 return -EBUSY; 759 760 ret = intel_pstate_set_epp(cpu_data, epp); 761 } else { 762 if (epp == -EINVAL) 763 epp = (pref_index - 1) << 2; 764 ret = intel_pstate_set_epb(cpu_data->cpu, epp); 765 } 766 767 return ret; 768 } 769 770 static ssize_t show_energy_performance_available_preferences( 771 struct cpufreq_policy *policy, char *buf) 772 { 773 int i = 0; 774 int ret = 0; 775 776 while (energy_perf_strings[i] != NULL) 777 ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]); 778 779 ret += sprintf(&buf[ret], "\n"); 780 781 return ret; 782 } 783 784 cpufreq_freq_attr_ro(energy_performance_available_preferences); 785 786 static struct cpufreq_driver intel_pstate; 787 788 static ssize_t store_energy_performance_preference( 789 struct cpufreq_policy *policy, const char *buf, size_t count) 790 { 791 struct cpudata *cpu = all_cpu_data[policy->cpu]; 792 char str_preference[21]; 793 bool raw = false; 794 ssize_t ret; 795 u32 epp = 0; 796 797 ret = sscanf(buf, "%20s", str_preference); 798 if (ret != 1) 799 return -EINVAL; 800 801 ret = match_string(energy_perf_strings, -1, str_preference); 802 if (ret < 0) { 803 if (!boot_cpu_has(X86_FEATURE_HWP_EPP)) 804 return ret; 805 806 ret = kstrtouint(buf, 10, &epp); 807 if (ret) 808 return ret; 809 810 if (epp > 255) 811 return -EINVAL; 812 813 raw = true; 814 } 815 816 /* 817 * This function runs with the policy R/W semaphore held, which 818 * guarantees that the driver pointer will not change while it is 819 * running. 820 */ 821 if (!intel_pstate_driver) 822 return -EAGAIN; 823 824 mutex_lock(&intel_pstate_limits_lock); 825 826 if (intel_pstate_driver == &intel_pstate) { 827 ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp); 828 } else { 829 /* 830 * In the passive mode the governor needs to be stopped on the 831 * target CPU before the EPP update and restarted after it, 832 * which is super-heavy-weight, so make sure it is worth doing 833 * upfront. 834 */ 835 if (!raw) 836 epp = ret ? 
epp_values[ret - 1] : cpu->epp_default; 837 838 if (cpu->epp_cached != epp) { 839 int err; 840 841 cpufreq_stop_governor(policy); 842 ret = intel_pstate_set_epp(cpu, epp); 843 err = cpufreq_start_governor(policy); 844 if (!ret) 845 ret = err; 846 } 847 } 848 849 mutex_unlock(&intel_pstate_limits_lock); 850 851 return ret ?: count; 852 } 853 854 static ssize_t show_energy_performance_preference( 855 struct cpufreq_policy *policy, char *buf) 856 { 857 struct cpudata *cpu_data = all_cpu_data[policy->cpu]; 858 int preference, raw_epp; 859 860 preference = intel_pstate_get_energy_pref_index(cpu_data, &raw_epp); 861 if (preference < 0) 862 return preference; 863 864 if (raw_epp) 865 return sprintf(buf, "%d\n", raw_epp); 866 else 867 return sprintf(buf, "%s\n", energy_perf_strings[preference]); 868 } 869 870 cpufreq_freq_attr_rw(energy_performance_preference); 871 872 static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf) 873 { 874 struct cpudata *cpu = all_cpu_data[policy->cpu]; 875 int ratio, freq; 876 877 ratio = intel_pstate_get_cppc_guaranteed(policy->cpu); 878 if (ratio <= 0) { 879 u64 cap; 880 881 rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap); 882 ratio = HWP_GUARANTEED_PERF(cap); 883 } 884 885 freq = ratio * cpu->pstate.scaling; 886 if (cpu->pstate.scaling != cpu->pstate.perf_ctl_scaling) 887 freq = rounddown(freq, cpu->pstate.perf_ctl_scaling); 888 889 return sprintf(buf, "%d\n", freq); 890 } 891 892 cpufreq_freq_attr_ro(base_frequency); 893 894 static struct freq_attr *hwp_cpufreq_attrs[] = { 895 &energy_performance_preference, 896 &energy_performance_available_preferences, 897 &base_frequency, 898 NULL, 899 }; 900 901 static void __intel_pstate_get_hwp_cap(struct cpudata *cpu) 902 { 903 u64 cap; 904 905 rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap); 906 WRITE_ONCE(cpu->hwp_cap_cached, cap); 907 cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap); 908 cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap); 909 } 910 911 static void intel_pstate_get_hwp_cap(struct cpudata *cpu) 912 { 913 int scaling = cpu->pstate.scaling; 914 915 __intel_pstate_get_hwp_cap(cpu); 916 917 cpu->pstate.max_freq = cpu->pstate.max_pstate * scaling; 918 cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling; 919 if (scaling != cpu->pstate.perf_ctl_scaling) { 920 int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; 921 922 cpu->pstate.max_freq = rounddown(cpu->pstate.max_freq, 923 perf_ctl_scaling); 924 cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_freq, 925 perf_ctl_scaling); 926 } 927 } 928 929 static void intel_pstate_hwp_set(unsigned int cpu) 930 { 931 struct cpudata *cpu_data = all_cpu_data[cpu]; 932 int max, min; 933 u64 value; 934 s16 epp; 935 936 max = cpu_data->max_perf_ratio; 937 min = cpu_data->min_perf_ratio; 938 939 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) 940 min = max; 941 942 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); 943 944 value &= ~HWP_MIN_PERF(~0L); 945 value |= HWP_MIN_PERF(min); 946 947 value &= ~HWP_MAX_PERF(~0L); 948 value |= HWP_MAX_PERF(max); 949 950 if (cpu_data->epp_policy == cpu_data->policy) 951 goto skip_epp; 952 953 cpu_data->epp_policy = cpu_data->policy; 954 955 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) { 956 epp = intel_pstate_get_epp(cpu_data, value); 957 cpu_data->epp_powersave = epp; 958 /* If EPP read was failed, then don't try to write */ 959 if (epp < 0) 960 goto skip_epp; 961 962 epp = 0; 963 } else { 964 /* skip setting EPP, when saved value is invalid */ 965 if (cpu_data->epp_powersave < 0) 966 goto 
skip_epp; 967 968 /* 969 * No need to restore EPP when it is not zero. This 970 * means: 971 * - Policy is not changed 972 * - user has manually changed 973 * - Error reading EPB 974 */ 975 epp = intel_pstate_get_epp(cpu_data, value); 976 if (epp) 977 goto skip_epp; 978 979 epp = cpu_data->epp_powersave; 980 } 981 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { 982 value &= ~GENMASK_ULL(31, 24); 983 value |= (u64)epp << 24; 984 } else { 985 intel_pstate_set_epb(cpu, epp); 986 } 987 skip_epp: 988 WRITE_ONCE(cpu_data->hwp_req_cached, value); 989 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); 990 } 991 992 static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata); 993 994 static void intel_pstate_hwp_offline(struct cpudata *cpu) 995 { 996 u64 value = READ_ONCE(cpu->hwp_req_cached); 997 int min_perf; 998 999 intel_pstate_disable_hwp_interrupt(cpu); 1000 1001 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { 1002 /* 1003 * In case the EPP has been set to "performance" by the 1004 * active mode "performance" scaling algorithm, replace that 1005 * temporary value with the cached EPP one. 1006 */ 1007 value &= ~GENMASK_ULL(31, 24); 1008 value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached); 1009 /* 1010 * However, make sure that EPP will be set to "performance" when 1011 * the CPU is brought back online again and the "performance" 1012 * scaling algorithm is still in effect. 1013 */ 1014 cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN; 1015 } 1016 1017 /* 1018 * Clear the desired perf field in the cached HWP request value to 1019 * prevent nonzero desired values from being leaked into the active 1020 * mode. 1021 */ 1022 value &= ~HWP_DESIRED_PERF(~0L); 1023 WRITE_ONCE(cpu->hwp_req_cached, value); 1024 1025 value &= ~GENMASK_ULL(31, 0); 1026 min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached)); 1027 1028 /* Set hwp_max = hwp_min */ 1029 value |= HWP_MAX_PERF(min_perf); 1030 value |= HWP_MIN_PERF(min_perf); 1031 1032 /* Set EPP to min */ 1033 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) 1034 value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE); 1035 1036 wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); 1037 } 1038 1039 #define POWER_CTL_EE_ENABLE 1 1040 #define POWER_CTL_EE_DISABLE 2 1041 1042 static int power_ctl_ee_state; 1043 1044 static void set_power_ctl_ee_state(bool input) 1045 { 1046 u64 power_ctl; 1047 1048 mutex_lock(&intel_pstate_driver_lock); 1049 rdmsrl(MSR_IA32_POWER_CTL, power_ctl); 1050 if (input) { 1051 power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE); 1052 power_ctl_ee_state = POWER_CTL_EE_ENABLE; 1053 } else { 1054 power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE); 1055 power_ctl_ee_state = POWER_CTL_EE_DISABLE; 1056 } 1057 wrmsrl(MSR_IA32_POWER_CTL, power_ctl); 1058 mutex_unlock(&intel_pstate_driver_lock); 1059 } 1060 1061 static void intel_pstate_hwp_enable(struct cpudata *cpudata); 1062 1063 static void intel_pstate_hwp_reenable(struct cpudata *cpu) 1064 { 1065 intel_pstate_hwp_enable(cpu); 1066 wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached)); 1067 } 1068 1069 static int intel_pstate_suspend(struct cpufreq_policy *policy) 1070 { 1071 struct cpudata *cpu = all_cpu_data[policy->cpu]; 1072 1073 pr_debug("CPU %d suspending\n", cpu->cpu); 1074 1075 cpu->suspended = true; 1076 1077 /* disable HWP interrupt and cancel any pending work */ 1078 intel_pstate_disable_hwp_interrupt(cpu); 1079 1080 return 0; 1081 } 1082 1083 static int intel_pstate_resume(struct cpufreq_policy *policy) 1084 { 1085 struct cpudata *cpu = all_cpu_data[policy->cpu]; 1086 1087 pr_debug("CPU %d resuming\n", 
cpu->cpu); 1088 1089 /* Only restore if the system default is changed */ 1090 if (power_ctl_ee_state == POWER_CTL_EE_ENABLE) 1091 set_power_ctl_ee_state(true); 1092 else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE) 1093 set_power_ctl_ee_state(false); 1094 1095 if (cpu->suspended && hwp_active) { 1096 mutex_lock(&intel_pstate_limits_lock); 1097 1098 /* Re-enable HWP, because "online" has not done that. */ 1099 intel_pstate_hwp_reenable(cpu); 1100 1101 mutex_unlock(&intel_pstate_limits_lock); 1102 } 1103 1104 cpu->suspended = false; 1105 1106 return 0; 1107 } 1108 1109 static void intel_pstate_update_policies(void) 1110 { 1111 int cpu; 1112 1113 for_each_possible_cpu(cpu) 1114 cpufreq_update_policy(cpu); 1115 } 1116 1117 static void intel_pstate_update_max_freq(unsigned int cpu) 1118 { 1119 struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu); 1120 struct cpudata *cpudata; 1121 1122 if (!policy) 1123 return; 1124 1125 cpudata = all_cpu_data[cpu]; 1126 policy->cpuinfo.max_freq = global.turbo_disabled_mf ? 1127 cpudata->pstate.max_freq : cpudata->pstate.turbo_freq; 1128 1129 refresh_frequency_limits(policy); 1130 1131 cpufreq_cpu_release(policy); 1132 } 1133 1134 static void intel_pstate_update_limits(unsigned int cpu) 1135 { 1136 mutex_lock(&intel_pstate_driver_lock); 1137 1138 update_turbo_state(); 1139 /* 1140 * If turbo has been turned on or off globally, policy limits for 1141 * all CPUs need to be updated to reflect that. 1142 */ 1143 if (global.turbo_disabled_mf != global.turbo_disabled) { 1144 global.turbo_disabled_mf = global.turbo_disabled; 1145 arch_set_max_freq_ratio(global.turbo_disabled); 1146 for_each_possible_cpu(cpu) 1147 intel_pstate_update_max_freq(cpu); 1148 } else { 1149 cpufreq_update_policy(cpu); 1150 } 1151 1152 mutex_unlock(&intel_pstate_driver_lock); 1153 } 1154 1155 /************************** sysfs begin ************************/ 1156 #define show_one(file_name, object) \ 1157 static ssize_t show_##file_name \ 1158 (struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ 1159 { \ 1160 return sprintf(buf, "%u\n", global.object); \ 1161 } 1162 1163 static ssize_t intel_pstate_show_status(char *buf); 1164 static int intel_pstate_update_status(const char *buf, size_t size); 1165 1166 static ssize_t show_status(struct kobject *kobj, 1167 struct kobj_attribute *attr, char *buf) 1168 { 1169 ssize_t ret; 1170 1171 mutex_lock(&intel_pstate_driver_lock); 1172 ret = intel_pstate_show_status(buf); 1173 mutex_unlock(&intel_pstate_driver_lock); 1174 1175 return ret; 1176 } 1177 1178 static ssize_t store_status(struct kobject *a, struct kobj_attribute *b, 1179 const char *buf, size_t count) 1180 { 1181 char *p = memchr(buf, '\n', count); 1182 int ret; 1183 1184 mutex_lock(&intel_pstate_driver_lock); 1185 ret = intel_pstate_update_status(buf, p ? p - buf : count); 1186 mutex_unlock(&intel_pstate_driver_lock); 1187 1188 return ret < 0 ? 
ret : count; 1189 } 1190 1191 static ssize_t show_turbo_pct(struct kobject *kobj, 1192 struct kobj_attribute *attr, char *buf) 1193 { 1194 struct cpudata *cpu; 1195 int total, no_turbo, turbo_pct; 1196 uint32_t turbo_fp; 1197 1198 mutex_lock(&intel_pstate_driver_lock); 1199 1200 if (!intel_pstate_driver) { 1201 mutex_unlock(&intel_pstate_driver_lock); 1202 return -EAGAIN; 1203 } 1204 1205 cpu = all_cpu_data[0]; 1206 1207 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; 1208 no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1; 1209 turbo_fp = div_fp(no_turbo, total); 1210 turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100))); 1211 1212 mutex_unlock(&intel_pstate_driver_lock); 1213 1214 return sprintf(buf, "%u\n", turbo_pct); 1215 } 1216 1217 static ssize_t show_num_pstates(struct kobject *kobj, 1218 struct kobj_attribute *attr, char *buf) 1219 { 1220 struct cpudata *cpu; 1221 int total; 1222 1223 mutex_lock(&intel_pstate_driver_lock); 1224 1225 if (!intel_pstate_driver) { 1226 mutex_unlock(&intel_pstate_driver_lock); 1227 return -EAGAIN; 1228 } 1229 1230 cpu = all_cpu_data[0]; 1231 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; 1232 1233 mutex_unlock(&intel_pstate_driver_lock); 1234 1235 return sprintf(buf, "%u\n", total); 1236 } 1237 1238 static ssize_t show_no_turbo(struct kobject *kobj, 1239 struct kobj_attribute *attr, char *buf) 1240 { 1241 ssize_t ret; 1242 1243 mutex_lock(&intel_pstate_driver_lock); 1244 1245 if (!intel_pstate_driver) { 1246 mutex_unlock(&intel_pstate_driver_lock); 1247 return -EAGAIN; 1248 } 1249 1250 update_turbo_state(); 1251 if (global.turbo_disabled) 1252 ret = sprintf(buf, "%u\n", global.turbo_disabled); 1253 else 1254 ret = sprintf(buf, "%u\n", global.no_turbo); 1255 1256 mutex_unlock(&intel_pstate_driver_lock); 1257 1258 return ret; 1259 } 1260 1261 static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, 1262 const char *buf, size_t count) 1263 { 1264 unsigned int input; 1265 int ret; 1266 1267 ret = sscanf(buf, "%u", &input); 1268 if (ret != 1) 1269 return -EINVAL; 1270 1271 mutex_lock(&intel_pstate_driver_lock); 1272 1273 if (!intel_pstate_driver) { 1274 mutex_unlock(&intel_pstate_driver_lock); 1275 return -EAGAIN; 1276 } 1277 1278 mutex_lock(&intel_pstate_limits_lock); 1279 1280 update_turbo_state(); 1281 if (global.turbo_disabled) { 1282 pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n"); 1283 mutex_unlock(&intel_pstate_limits_lock); 1284 mutex_unlock(&intel_pstate_driver_lock); 1285 return -EPERM; 1286 } 1287 1288 global.no_turbo = clamp_t(int, input, 0, 1); 1289 1290 if (global.no_turbo) { 1291 struct cpudata *cpu = all_cpu_data[0]; 1292 int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate; 1293 1294 /* Squash the global minimum into the permitted range. 
*/ 1295 if (global.min_perf_pct > pct) 1296 global.min_perf_pct = pct; 1297 } 1298 1299 mutex_unlock(&intel_pstate_limits_lock); 1300 1301 intel_pstate_update_policies(); 1302 1303 mutex_unlock(&intel_pstate_driver_lock); 1304 1305 return count; 1306 } 1307 1308 static void update_qos_request(enum freq_qos_req_type type) 1309 { 1310 struct freq_qos_request *req; 1311 struct cpufreq_policy *policy; 1312 int i; 1313 1314 for_each_possible_cpu(i) { 1315 struct cpudata *cpu = all_cpu_data[i]; 1316 unsigned int freq, perf_pct; 1317 1318 policy = cpufreq_cpu_get(i); 1319 if (!policy) 1320 continue; 1321 1322 req = policy->driver_data; 1323 cpufreq_cpu_put(policy); 1324 1325 if (!req) 1326 continue; 1327 1328 if (hwp_active) 1329 intel_pstate_get_hwp_cap(cpu); 1330 1331 if (type == FREQ_QOS_MIN) { 1332 perf_pct = global.min_perf_pct; 1333 } else { 1334 req++; 1335 perf_pct = global.max_perf_pct; 1336 } 1337 1338 freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100); 1339 1340 if (freq_qos_update_request(req, freq) < 0) 1341 pr_warn("Failed to update freq constraint: CPU%d\n", i); 1342 } 1343 } 1344 1345 static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b, 1346 const char *buf, size_t count) 1347 { 1348 unsigned int input; 1349 int ret; 1350 1351 ret = sscanf(buf, "%u", &input); 1352 if (ret != 1) 1353 return -EINVAL; 1354 1355 mutex_lock(&intel_pstate_driver_lock); 1356 1357 if (!intel_pstate_driver) { 1358 mutex_unlock(&intel_pstate_driver_lock); 1359 return -EAGAIN; 1360 } 1361 1362 mutex_lock(&intel_pstate_limits_lock); 1363 1364 global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100); 1365 1366 mutex_unlock(&intel_pstate_limits_lock); 1367 1368 if (intel_pstate_driver == &intel_pstate) 1369 intel_pstate_update_policies(); 1370 else 1371 update_qos_request(FREQ_QOS_MAX); 1372 1373 mutex_unlock(&intel_pstate_driver_lock); 1374 1375 return count; 1376 } 1377 1378 static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b, 1379 const char *buf, size_t count) 1380 { 1381 unsigned int input; 1382 int ret; 1383 1384 ret = sscanf(buf, "%u", &input); 1385 if (ret != 1) 1386 return -EINVAL; 1387 1388 mutex_lock(&intel_pstate_driver_lock); 1389 1390 if (!intel_pstate_driver) { 1391 mutex_unlock(&intel_pstate_driver_lock); 1392 return -EAGAIN; 1393 } 1394 1395 mutex_lock(&intel_pstate_limits_lock); 1396 1397 global.min_perf_pct = clamp_t(int, input, 1398 min_perf_pct_min(), global.max_perf_pct); 1399 1400 mutex_unlock(&intel_pstate_limits_lock); 1401 1402 if (intel_pstate_driver == &intel_pstate) 1403 intel_pstate_update_policies(); 1404 else 1405 update_qos_request(FREQ_QOS_MIN); 1406 1407 mutex_unlock(&intel_pstate_driver_lock); 1408 1409 return count; 1410 } 1411 1412 static ssize_t show_hwp_dynamic_boost(struct kobject *kobj, 1413 struct kobj_attribute *attr, char *buf) 1414 { 1415 return sprintf(buf, "%u\n", hwp_boost); 1416 } 1417 1418 static ssize_t store_hwp_dynamic_boost(struct kobject *a, 1419 struct kobj_attribute *b, 1420 const char *buf, size_t count) 1421 { 1422 unsigned int input; 1423 int ret; 1424 1425 ret = kstrtouint(buf, 10, &input); 1426 if (ret) 1427 return ret; 1428 1429 mutex_lock(&intel_pstate_driver_lock); 1430 hwp_boost = !!input; 1431 intel_pstate_update_policies(); 1432 mutex_unlock(&intel_pstate_driver_lock); 1433 1434 return count; 1435 } 1436 1437 static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr, 1438 char *buf) 1439 { 1440 u64 power_ctl; 1441 int enable; 1442 1443 
rdmsrl(MSR_IA32_POWER_CTL, power_ctl); 1444 enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE)); 1445 return sprintf(buf, "%d\n", !enable); 1446 } 1447 1448 static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b, 1449 const char *buf, size_t count) 1450 { 1451 bool input; 1452 int ret; 1453 1454 ret = kstrtobool(buf, &input); 1455 if (ret) 1456 return ret; 1457 1458 set_power_ctl_ee_state(input); 1459 1460 return count; 1461 } 1462 1463 show_one(max_perf_pct, max_perf_pct); 1464 show_one(min_perf_pct, min_perf_pct); 1465 1466 define_one_global_rw(status); 1467 define_one_global_rw(no_turbo); 1468 define_one_global_rw(max_perf_pct); 1469 define_one_global_rw(min_perf_pct); 1470 define_one_global_ro(turbo_pct); 1471 define_one_global_ro(num_pstates); 1472 define_one_global_rw(hwp_dynamic_boost); 1473 define_one_global_rw(energy_efficiency); 1474 1475 static struct attribute *intel_pstate_attributes[] = { 1476 &status.attr, 1477 &no_turbo.attr, 1478 NULL 1479 }; 1480 1481 static const struct attribute_group intel_pstate_attr_group = { 1482 .attrs = intel_pstate_attributes, 1483 }; 1484 1485 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[]; 1486 1487 static struct kobject *intel_pstate_kobject; 1488 1489 static void __init intel_pstate_sysfs_expose_params(void) 1490 { 1491 int rc; 1492 1493 intel_pstate_kobject = kobject_create_and_add("intel_pstate", 1494 &cpu_subsys.dev_root->kobj); 1495 if (WARN_ON(!intel_pstate_kobject)) 1496 return; 1497 1498 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group); 1499 if (WARN_ON(rc)) 1500 return; 1501 1502 if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) { 1503 rc = sysfs_create_file(intel_pstate_kobject, &turbo_pct.attr); 1504 WARN_ON(rc); 1505 1506 rc = sysfs_create_file(intel_pstate_kobject, &num_pstates.attr); 1507 WARN_ON(rc); 1508 } 1509 1510 /* 1511 * If per cpu limits are enforced there are no global limits, so 1512 * return without creating max/min_perf_pct attributes 1513 */ 1514 if (per_cpu_limits) 1515 return; 1516 1517 rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr); 1518 WARN_ON(rc); 1519 1520 rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr); 1521 WARN_ON(rc); 1522 1523 if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) { 1524 rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr); 1525 WARN_ON(rc); 1526 } 1527 } 1528 1529 static void __init intel_pstate_sysfs_remove(void) 1530 { 1531 if (!intel_pstate_kobject) 1532 return; 1533 1534 sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group); 1535 1536 if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) { 1537 sysfs_remove_file(intel_pstate_kobject, &num_pstates.attr); 1538 sysfs_remove_file(intel_pstate_kobject, &turbo_pct.attr); 1539 } 1540 1541 if (!per_cpu_limits) { 1542 sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr); 1543 sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr); 1544 1545 if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) 1546 sysfs_remove_file(intel_pstate_kobject, &energy_efficiency.attr); 1547 } 1548 1549 kobject_put(intel_pstate_kobject); 1550 } 1551 1552 static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void) 1553 { 1554 int rc; 1555 1556 if (!hwp_active) 1557 return; 1558 1559 rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr); 1560 WARN_ON_ONCE(rc); 1561 } 1562 1563 static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void) 1564 { 1565 if (!hwp_active) 1566 return; 1567 1568 sysfs_remove_file(intel_pstate_kobject, 
			  &hwp_dynamic_boost.attr);
}

/************************** sysfs end ************************/

static void intel_pstate_notify_work(struct work_struct *work)
{
	struct cpudata *cpudata =
		container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);

	cpufreq_update_policy(cpudata->cpu);
	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}

static DEFINE_SPINLOCK(hwp_notify_lock);
static cpumask_t hwp_intr_enable_mask;

void notify_hwp_interrupt(void)
{
	unsigned int this_cpu = smp_processor_id();
	struct cpudata *cpudata;
	unsigned long flags;
	u64 value;

	if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
		return;

	rdmsrl_safe(MSR_HWP_STATUS, &value);
	if (!(value & 0x01))
		return;

	spin_lock_irqsave(&hwp_notify_lock, flags);

	if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
		goto ack_intr;

	/*
	 * all_cpu_data is never freed at present, and we cannot get here
	 * before it has been allocated, but check it for safety against
	 * future changes.
	 */
	if (unlikely(!READ_ONCE(all_cpu_data)))
		goto ack_intr;

	/*
	 * The per-CPU data is only freed during cleanup when cpufreq driver
	 * registration fails, so we cannot get here after a failure on init
	 * or on a driver status switch, but check it for safety against
	 * future changes.
	 */
	cpudata = READ_ONCE(all_cpu_data[this_cpu]);
	if (unlikely(!cpudata))
		goto ack_intr;

	schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10));

	spin_unlock_irqrestore(&hwp_notify_lock, flags);

	return;

ack_intr:
	wrmsrl_safe(MSR_HWP_STATUS, 0);
	spin_unlock_irqrestore(&hwp_notify_lock, flags);
}

static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
{
	unsigned long flags;

	if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
		return;

	/* wrmsrl_on_cpu() has to be outside the spinlock, as it may need a cross-CPU call (IPI) */
	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	spin_lock_irqsave(&hwp_notify_lock, flags);
	if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask))
		cancel_delayed_work(&cpudata->hwp_notify_work);
	spin_unlock_irqrestore(&hwp_notify_lock, flags);
}

static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
{
	/* Enable HWP notification interrupt for guaranteed performance change */
	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
		unsigned long flags;

		spin_lock_irqsave(&hwp_notify_lock, flags);
		INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
		cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
		spin_unlock_irqrestore(&hwp_notify_lock, flags);

		/* wrmsrl_on_cpu() has to be outside the spinlock, as it may need a cross-CPU call (IPI) */
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
	}
}

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable the HWP notification interrupt until we re-enable it */
	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
	if (cpudata->epp_default == -EINVAL)
		cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);

	intel_pstate_enable_hwp_interrupt(cpudata);
}

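/*
 * The Atom-specific helpers below read the platform P-state limits from
 * MSR_ATOM_CORE_RATIOS and MSR_ATOM_CORE_TURBO_RATIOS. Illustrative sketch
 * only (the register value here is made up; the bit positions are the ones
 * used by the accessors below): if MSR_ATOM_CORE_RATIOS read 0x00181006,
 * atom_get_min_pstate() would return (0x00181006 >> 8) & 0x7F, i.e. ratio 16,
 * and atom_get_max_pstate() would return (0x00181006 >> 16) & 0x7F, i.e.
 * ratio 24.
 */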
1678 static int atom_get_min_pstate(void) 1679 { 1680 u64 value; 1681 1682 rdmsrl(MSR_ATOM_CORE_RATIOS, value); 1683 return (value >> 8) & 0x7F; 1684 } 1685 1686 static int atom_get_max_pstate(void) 1687 { 1688 u64 value; 1689 1690 rdmsrl(MSR_ATOM_CORE_RATIOS, value); 1691 return (value >> 16) & 0x7F; 1692 } 1693 1694 static int atom_get_turbo_pstate(void) 1695 { 1696 u64 value; 1697 1698 rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value); 1699 return value & 0x7F; 1700 } 1701 1702 static u64 atom_get_val(struct cpudata *cpudata, int pstate) 1703 { 1704 u64 val; 1705 int32_t vid_fp; 1706 u32 vid; 1707 1708 val = (u64)pstate << 8; 1709 if (global.no_turbo && !global.turbo_disabled) 1710 val |= (u64)1 << 32; 1711 1712 vid_fp = cpudata->vid.min + mul_fp( 1713 int_tofp(pstate - cpudata->pstate.min_pstate), 1714 cpudata->vid.ratio); 1715 1716 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); 1717 vid = ceiling_fp(vid_fp); 1718 1719 if (pstate > cpudata->pstate.max_pstate) 1720 vid = cpudata->vid.turbo; 1721 1722 return val | vid; 1723 } 1724 1725 static int silvermont_get_scaling(void) 1726 { 1727 u64 value; 1728 int i; 1729 /* Defined in Table 35-6 from SDM (Sept 2015) */ 1730 static int silvermont_freq_table[] = { 1731 83300, 100000, 133300, 116700, 80000}; 1732 1733 rdmsrl(MSR_FSB_FREQ, value); 1734 i = value & 0x7; 1735 WARN_ON(i > 4); 1736 1737 return silvermont_freq_table[i]; 1738 } 1739 1740 static int airmont_get_scaling(void) 1741 { 1742 u64 value; 1743 int i; 1744 /* Defined in Table 35-10 from SDM (Sept 2015) */ 1745 static int airmont_freq_table[] = { 1746 83300, 100000, 133300, 116700, 80000, 1747 93300, 90000, 88900, 87500}; 1748 1749 rdmsrl(MSR_FSB_FREQ, value); 1750 i = value & 0xF; 1751 WARN_ON(i > 8); 1752 1753 return airmont_freq_table[i]; 1754 } 1755 1756 static void atom_get_vid(struct cpudata *cpudata) 1757 { 1758 u64 value; 1759 1760 rdmsrl(MSR_ATOM_CORE_VIDS, value); 1761 cpudata->vid.min = int_tofp((value >> 8) & 0x7f); 1762 cpudata->vid.max = int_tofp((value >> 16) & 0x7f); 1763 cpudata->vid.ratio = div_fp( 1764 cpudata->vid.max - cpudata->vid.min, 1765 int_tofp(cpudata->pstate.max_pstate - 1766 cpudata->pstate.min_pstate)); 1767 1768 rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value); 1769 cpudata->vid.turbo = value & 0x7f; 1770 } 1771 1772 static int core_get_min_pstate(void) 1773 { 1774 u64 value; 1775 1776 rdmsrl(MSR_PLATFORM_INFO, value); 1777 return (value >> 40) & 0xFF; 1778 } 1779 1780 static int core_get_max_pstate_physical(void) 1781 { 1782 u64 value; 1783 1784 rdmsrl(MSR_PLATFORM_INFO, value); 1785 return (value >> 8) & 0xFF; 1786 } 1787 1788 static int core_get_tdp_ratio(u64 plat_info) 1789 { 1790 /* Check how many TDP levels present */ 1791 if (plat_info & 0x600000000) { 1792 u64 tdp_ctrl; 1793 u64 tdp_ratio; 1794 int tdp_msr; 1795 int err; 1796 1797 /* Get the TDP level (0, 1, 2) to get ratios */ 1798 err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); 1799 if (err) 1800 return err; 1801 1802 /* TDP MSR are continuous starting at 0x648 */ 1803 tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03); 1804 err = rdmsrl_safe(tdp_msr, &tdp_ratio); 1805 if (err) 1806 return err; 1807 1808 /* For level 1 and 2, bits[23:16] contain the ratio */ 1809 if (tdp_ctrl & 0x03) 1810 tdp_ratio >>= 16; 1811 1812 tdp_ratio &= 0xff; /* ratios are only 8 bits long */ 1813 pr_debug("tdp_ratio %x\n", (int)tdp_ratio); 1814 1815 return (int)tdp_ratio; 1816 } 1817 1818 return -ENXIO; 1819 } 1820 1821 static int core_get_max_pstate(void) 1822 { 1823 u64 tar; 1824 u64 plat_info; 
1825 int max_pstate; 1826 int tdp_ratio; 1827 int err; 1828 1829 rdmsrl(MSR_PLATFORM_INFO, plat_info); 1830 max_pstate = (plat_info >> 8) & 0xFF; 1831 1832 tdp_ratio = core_get_tdp_ratio(plat_info); 1833 if (tdp_ratio <= 0) 1834 return max_pstate; 1835 1836 if (hwp_active) { 1837 /* Turbo activation ratio is not used on HWP platforms */ 1838 return tdp_ratio; 1839 } 1840 1841 err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar); 1842 if (!err) { 1843 int tar_levels; 1844 1845 /* Do some sanity checking for safety */ 1846 tar_levels = tar & 0xff; 1847 if (tdp_ratio - 1 == tar_levels) { 1848 max_pstate = tar_levels; 1849 pr_debug("max_pstate=TAC %x\n", max_pstate); 1850 } 1851 } 1852 1853 return max_pstate; 1854 } 1855 1856 static int core_get_turbo_pstate(void) 1857 { 1858 u64 value; 1859 int nont, ret; 1860 1861 rdmsrl(MSR_TURBO_RATIO_LIMIT, value); 1862 nont = core_get_max_pstate(); 1863 ret = (value) & 255; 1864 if (ret <= nont) 1865 ret = nont; 1866 return ret; 1867 } 1868 1869 static inline int core_get_scaling(void) 1870 { 1871 return 100000; 1872 } 1873 1874 static u64 core_get_val(struct cpudata *cpudata, int pstate) 1875 { 1876 u64 val; 1877 1878 val = (u64)pstate << 8; 1879 if (global.no_turbo && !global.turbo_disabled) 1880 val |= (u64)1 << 32; 1881 1882 return val; 1883 } 1884 1885 static int knl_get_aperf_mperf_shift(void) 1886 { 1887 return 10; 1888 } 1889 1890 static int knl_get_turbo_pstate(void) 1891 { 1892 u64 value; 1893 int nont, ret; 1894 1895 rdmsrl(MSR_TURBO_RATIO_LIMIT, value); 1896 nont = core_get_max_pstate(); 1897 ret = (((value) >> 8) & 0xFF); 1898 if (ret <= nont) 1899 ret = nont; 1900 return ret; 1901 } 1902 1903 #ifdef CONFIG_ACPI_CPPC_LIB 1904 static u32 hybrid_ref_perf; 1905 1906 static int hybrid_get_cpu_scaling(int cpu) 1907 { 1908 return DIV_ROUND_UP(core_get_scaling() * hybrid_ref_perf, 1909 intel_pstate_cppc_nominal(cpu)); 1910 } 1911 1912 static void intel_pstate_cppc_set_cpu_scaling(void) 1913 { 1914 u32 min_nominal_perf = U32_MAX; 1915 int cpu; 1916 1917 for_each_present_cpu(cpu) { 1918 u32 nominal_perf = intel_pstate_cppc_nominal(cpu); 1919 1920 if (nominal_perf && nominal_perf < min_nominal_perf) 1921 min_nominal_perf = nominal_perf; 1922 } 1923 1924 if (min_nominal_perf < U32_MAX) { 1925 hybrid_ref_perf = min_nominal_perf; 1926 pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling; 1927 } 1928 } 1929 #else 1930 static inline void intel_pstate_cppc_set_cpu_scaling(void) 1931 { 1932 } 1933 #endif /* CONFIG_ACPI_CPPC_LIB */ 1934 1935 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) 1936 { 1937 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); 1938 cpu->pstate.current_pstate = pstate; 1939 /* 1940 * Generally, there is no guarantee that this code will always run on 1941 * the CPU being updated, so force the register update to run on the 1942 * right CPU. 
 */
	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
		      pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static void intel_pstate_max_within_limits(struct cpudata *cpu)
{
	int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);

	update_turbo_state();
	intel_pstate_set_pstate(cpu, pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	int perf_ctl_max_phys = pstate_funcs.get_max_physical();
	int perf_ctl_scaling = pstate_funcs.get_scaling();

	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
	cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;

	if (hwp_active && !hwp_mode_bdw) {
		__intel_pstate_get_hwp_cap(cpu);

		if (pstate_funcs.get_cpu_scaling) {
			cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu);
			if (cpu->pstate.scaling != perf_ctl_scaling)
				intel_pstate_hybrid_hwp_adjust(cpu);
		} else {
			cpu->pstate.scaling = perf_ctl_scaling;
		}
	} else {
		cpu->pstate.scaling = perf_ctl_scaling;
		cpu->pstate.max_pstate = pstate_funcs.get_max();
		cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	}

	if (cpu->pstate.scaling == perf_ctl_scaling) {
		cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
		cpu->pstate.max_freq = cpu->pstate.max_pstate * perf_ctl_scaling;
		cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * perf_ctl_scaling;
	}

	if (pstate_funcs.get_aperf_mperf_shift)
		cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	intel_pstate_set_min_pstate(cpu);
}

/*
 * Keeping the high performance limits in place for a long time negatively
 * impacts performance per watt for some workloads, like SPECpower. The 3 ms
 * hold time is based on experiments with such workloads.
 */
static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;

static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
{
	u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
	u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
	u32 max_limit = (hwp_req & 0xff00) >> 8;
	u32 min_limit = (hwp_req & 0xff);
	u32 boost_level1;

	/*
	 * Cases to consider (User changes via sysfs or boot time):
	 * If, P0 (Turbo max) = P1 (Guaranteed max) = min:
	 *	No boost, return.
	 * If, P0 (Turbo max) > P1 (Guaranteed max) = min:
	 *	Should result in one level boost only for P0.
	 * If, P0 (Turbo max) = P1 (Guaranteed max) > min:
	 *	Should result in two level boost:
	 *		(min + p1)/2 and P1.
	 * If, P0 (Turbo max) > P1 (Guaranteed max) > min:
	 *	Should result in three level boost:
	 *		(min + p1)/2, P1 and P0.
2029 */ 2030 2031 /* If max and min are equal or already at max, nothing to boost */ 2032 if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit) 2033 return; 2034 2035 if (!cpu->hwp_boost_min) 2036 cpu->hwp_boost_min = min_limit; 2037 2038 /* level at the halfway mark between min and guaranteed */ 2039 boost_level1 = (HWP_GUARANTEED_PERF(hwp_cap) + min_limit) >> 1; 2040 2041 if (cpu->hwp_boost_min < boost_level1) 2042 cpu->hwp_boost_min = boost_level1; 2043 else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(hwp_cap)) 2044 cpu->hwp_boost_min = HWP_GUARANTEED_PERF(hwp_cap); 2045 else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(hwp_cap) && 2046 max_limit != HWP_GUARANTEED_PERF(hwp_cap)) 2047 cpu->hwp_boost_min = max_limit; 2048 else 2049 return; 2050 2051 hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min; 2052 wrmsrl(MSR_HWP_REQUEST, hwp_req); 2053 cpu->last_update = cpu->sample.time; 2054 } 2055 2056 static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu) 2057 { 2058 if (cpu->hwp_boost_min) { 2059 bool expired; 2060 2061 /* Check if we have been idle for the hold time needed to boost down */ 2062 expired = time_after64(cpu->sample.time, cpu->last_update + 2063 hwp_boost_hold_time_ns); 2064 if (expired) { 2065 wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached); 2066 cpu->hwp_boost_min = 0; 2067 } 2068 } 2069 cpu->last_update = cpu->sample.time; 2070 } 2071 2072 static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu, 2073 u64 time) 2074 { 2075 cpu->sample.time = time; 2076 2077 if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) { 2078 bool do_io = false; 2079 2080 cpu->sched_flags = 0; 2081 /* 2082 * Set iowait_boost flag and update time. Since the IO WAIT flag 2083 * is set all the time, we can't conclude from just one occurrence 2084 * that some IO bound activity is scheduled on this CPU. If we 2085 * receive at least two in two consecutive ticks, we treat it as 2086 * a boost candidate.
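 * (Concretely: a second SCHED_CPUFREQ_IOWAIT update arriving within 2 * TICK_NSEC of the previous one triggers intel_pstate_hwp_boost_up().)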
2087 */ 2088 if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC)) 2089 do_io = true; 2090 2091 cpu->last_io_update = time; 2092 2093 if (do_io) 2094 intel_pstate_hwp_boost_up(cpu); 2095 2096 } else { 2097 intel_pstate_hwp_boost_down(cpu); 2098 } 2099 } 2100 2101 static inline void intel_pstate_update_util_hwp(struct update_util_data *data, 2102 u64 time, unsigned int flags) 2103 { 2104 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 2105 2106 cpu->sched_flags |= flags; 2107 2108 if (smp_processor_id() == cpu->cpu) 2109 intel_pstate_update_util_hwp_local(cpu, time); 2110 } 2111 2112 static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu) 2113 { 2114 struct sample *sample = &cpu->sample; 2115 2116 sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf); 2117 } 2118 2119 static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) 2120 { 2121 u64 aperf, mperf; 2122 unsigned long flags; 2123 u64 tsc; 2124 2125 local_irq_save(flags); 2126 rdmsrl(MSR_IA32_APERF, aperf); 2127 rdmsrl(MSR_IA32_MPERF, mperf); 2128 tsc = rdtsc(); 2129 if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) { 2130 local_irq_restore(flags); 2131 return false; 2132 } 2133 local_irq_restore(flags); 2134 2135 cpu->last_sample_time = cpu->sample.time; 2136 cpu->sample.time = time; 2137 cpu->sample.aperf = aperf; 2138 cpu->sample.mperf = mperf; 2139 cpu->sample.tsc = tsc; 2140 cpu->sample.aperf -= cpu->prev_aperf; 2141 cpu->sample.mperf -= cpu->prev_mperf; 2142 cpu->sample.tsc -= cpu->prev_tsc; 2143 2144 cpu->prev_aperf = aperf; 2145 cpu->prev_mperf = mperf; 2146 cpu->prev_tsc = tsc; 2147 /* 2148 * First time this function is invoked in a given cycle, all of the 2149 * previous sample data fields are equal to zero or stale and they must 2150 * be populated with meaningful numbers for things to work, so assume 2151 * that sample.time will always be reset before setting the utilization 2152 * update hook and make the caller skip the sample then. 2153 */ 2154 if (cpu->last_sample_time) { 2155 intel_pstate_calc_avg_perf(cpu); 2156 return true; 2157 } 2158 return false; 2159 } 2160 2161 static inline int32_t get_avg_frequency(struct cpudata *cpu) 2162 { 2163 return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz); 2164 } 2165 2166 static inline int32_t get_avg_pstate(struct cpudata *cpu) 2167 { 2168 return mul_ext_fp(cpu->pstate.max_pstate_physical, 2169 cpu->sample.core_avg_perf); 2170 } 2171 2172 static inline int32_t get_target_pstate(struct cpudata *cpu) 2173 { 2174 struct sample *sample = &cpu->sample; 2175 int32_t busy_frac; 2176 int target, avg_pstate; 2177 2178 busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift, 2179 sample->tsc); 2180 2181 if (busy_frac < cpu->iowait_boost) 2182 busy_frac = cpu->iowait_boost; 2183 2184 sample->busy_scaled = busy_frac * 100; 2185 2186 target = global.no_turbo || global.turbo_disabled ? 2187 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; 2188 target += target >> 2; 2189 target = mul_fp(target, busy_frac); 2190 if (target < cpu->pstate.min_pstate) 2191 target = cpu->pstate.min_pstate; 2192 2193 /* 2194 * If the average P-state during the previous cycle was higher than the 2195 * current target, add 50% of the difference to the target to reduce 2196 * possible performance oscillations and offset possible performance 2197 * loss related to moving the workload from one CPU to another within 2198 * a package/module. 
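 * For instance (illustrative numbers only), a computed target of 20 with an average P-state of 28 over the previous cycle results in a request for 24.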
2199 */ 2200 avg_pstate = get_avg_pstate(cpu); 2201 if (avg_pstate > target) 2202 target += (avg_pstate - target) >> 1; 2203 2204 return target; 2205 } 2206 2207 static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) 2208 { 2209 int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio); 2210 int max_pstate = max(min_pstate, cpu->max_perf_ratio); 2211 2212 return clamp_t(int, pstate, min_pstate, max_pstate); 2213 } 2214 2215 static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) 2216 { 2217 if (pstate == cpu->pstate.current_pstate) 2218 return; 2219 2220 cpu->pstate.current_pstate = pstate; 2221 wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); 2222 } 2223 2224 static void intel_pstate_adjust_pstate(struct cpudata *cpu) 2225 { 2226 int from = cpu->pstate.current_pstate; 2227 struct sample *sample; 2228 int target_pstate; 2229 2230 update_turbo_state(); 2231 2232 target_pstate = get_target_pstate(cpu); 2233 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); 2234 trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); 2235 intel_pstate_update_pstate(cpu, target_pstate); 2236 2237 sample = &cpu->sample; 2238 trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf), 2239 fp_toint(sample->busy_scaled), 2240 from, 2241 cpu->pstate.current_pstate, 2242 sample->mperf, 2243 sample->aperf, 2244 sample->tsc, 2245 get_avg_frequency(cpu), 2246 fp_toint(cpu->iowait_boost * 100)); 2247 } 2248 2249 static void intel_pstate_update_util(struct update_util_data *data, u64 time, 2250 unsigned int flags) 2251 { 2252 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 2253 u64 delta_ns; 2254 2255 /* Don't allow remote callbacks */ 2256 if (smp_processor_id() != cpu->cpu) 2257 return; 2258 2259 delta_ns = time - cpu->last_update; 2260 if (flags & SCHED_CPUFREQ_IOWAIT) { 2261 /* Start over if the CPU may have been idle. */ 2262 if (delta_ns > TICK_NSEC) { 2263 cpu->iowait_boost = ONE_EIGHTH_FP; 2264 } else if (cpu->iowait_boost >= ONE_EIGHTH_FP) { 2265 cpu->iowait_boost <<= 1; 2266 if (cpu->iowait_boost > int_tofp(1)) 2267 cpu->iowait_boost = int_tofp(1); 2268 } else { 2269 cpu->iowait_boost = ONE_EIGHTH_FP; 2270 } 2271 } else if (cpu->iowait_boost) { 2272 /* Clear iowait_boost if the CPU may have been idle. 
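 * Otherwise halve it so that the boost decays over successive updates.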
*/ 2273 if (delta_ns > TICK_NSEC) 2274 cpu->iowait_boost = 0; 2275 else 2276 cpu->iowait_boost >>= 1; 2277 } 2278 cpu->last_update = time; 2279 delta_ns = time - cpu->sample.time; 2280 if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL) 2281 return; 2282 2283 if (intel_pstate_sample(cpu, time)) 2284 intel_pstate_adjust_pstate(cpu); 2285 } 2286 2287 static struct pstate_funcs core_funcs = { 2288 .get_max = core_get_max_pstate, 2289 .get_max_physical = core_get_max_pstate_physical, 2290 .get_min = core_get_min_pstate, 2291 .get_turbo = core_get_turbo_pstate, 2292 .get_scaling = core_get_scaling, 2293 .get_val = core_get_val, 2294 }; 2295 2296 static const struct pstate_funcs silvermont_funcs = { 2297 .get_max = atom_get_max_pstate, 2298 .get_max_physical = atom_get_max_pstate, 2299 .get_min = atom_get_min_pstate, 2300 .get_turbo = atom_get_turbo_pstate, 2301 .get_val = atom_get_val, 2302 .get_scaling = silvermont_get_scaling, 2303 .get_vid = atom_get_vid, 2304 }; 2305 2306 static const struct pstate_funcs airmont_funcs = { 2307 .get_max = atom_get_max_pstate, 2308 .get_max_physical = atom_get_max_pstate, 2309 .get_min = atom_get_min_pstate, 2310 .get_turbo = atom_get_turbo_pstate, 2311 .get_val = atom_get_val, 2312 .get_scaling = airmont_get_scaling, 2313 .get_vid = atom_get_vid, 2314 }; 2315 2316 static const struct pstate_funcs knl_funcs = { 2317 .get_max = core_get_max_pstate, 2318 .get_max_physical = core_get_max_pstate_physical, 2319 .get_min = core_get_min_pstate, 2320 .get_turbo = knl_get_turbo_pstate, 2321 .get_aperf_mperf_shift = knl_get_aperf_mperf_shift, 2322 .get_scaling = core_get_scaling, 2323 .get_val = core_get_val, 2324 }; 2325 2326 #define X86_MATCH(model, policy) \ 2327 X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ 2328 X86_FEATURE_APERFMPERF, &policy) 2329 2330 static const struct x86_cpu_id intel_pstate_cpu_ids[] = { 2331 X86_MATCH(SANDYBRIDGE, core_funcs), 2332 X86_MATCH(SANDYBRIDGE_X, core_funcs), 2333 X86_MATCH(ATOM_SILVERMONT, silvermont_funcs), 2334 X86_MATCH(IVYBRIDGE, core_funcs), 2335 X86_MATCH(HASWELL, core_funcs), 2336 X86_MATCH(BROADWELL, core_funcs), 2337 X86_MATCH(IVYBRIDGE_X, core_funcs), 2338 X86_MATCH(HASWELL_X, core_funcs), 2339 X86_MATCH(HASWELL_L, core_funcs), 2340 X86_MATCH(HASWELL_G, core_funcs), 2341 X86_MATCH(BROADWELL_G, core_funcs), 2342 X86_MATCH(ATOM_AIRMONT, airmont_funcs), 2343 X86_MATCH(SKYLAKE_L, core_funcs), 2344 X86_MATCH(BROADWELL_X, core_funcs), 2345 X86_MATCH(SKYLAKE, core_funcs), 2346 X86_MATCH(BROADWELL_D, core_funcs), 2347 X86_MATCH(XEON_PHI_KNL, knl_funcs), 2348 X86_MATCH(XEON_PHI_KNM, knl_funcs), 2349 X86_MATCH(ATOM_GOLDMONT, core_funcs), 2350 X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs), 2351 X86_MATCH(SKYLAKE_X, core_funcs), 2352 X86_MATCH(COMETLAKE, core_funcs), 2353 X86_MATCH(ICELAKE_X, core_funcs), 2354 {} 2355 }; 2356 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); 2357 2358 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { 2359 X86_MATCH(BROADWELL_D, core_funcs), 2360 X86_MATCH(BROADWELL_X, core_funcs), 2361 X86_MATCH(SKYLAKE_X, core_funcs), 2362 X86_MATCH(ICELAKE_X, core_funcs), 2363 {} 2364 }; 2365 2366 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { 2367 X86_MATCH(KABYLAKE, core_funcs), 2368 {} 2369 }; 2370 2371 static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = { 2372 X86_MATCH(SKYLAKE_X, core_funcs), 2373 X86_MATCH(SKYLAKE, core_funcs), 2374 {} 2375 }; 2376 2377 static int intel_pstate_init_cpu(unsigned int cpunum) 2378 { 2379 struct cpudata 
*cpu; 2380 2381 cpu = all_cpu_data[cpunum]; 2382 2383 if (!cpu) { 2384 cpu = kzalloc(sizeof(*cpu), GFP_KERNEL); 2385 if (!cpu) 2386 return -ENOMEM; 2387 2388 WRITE_ONCE(all_cpu_data[cpunum], cpu); 2389 2390 cpu->cpu = cpunum; 2391 2392 cpu->epp_default = -EINVAL; 2393 2394 if (hwp_active) { 2395 const struct x86_cpu_id *id; 2396 2397 intel_pstate_hwp_enable(cpu); 2398 2399 id = x86_match_cpu(intel_pstate_hwp_boost_ids); 2400 if (id && intel_pstate_acpi_pm_profile_server()) 2401 hwp_boost = true; 2402 } 2403 } else if (hwp_active) { 2404 /* 2405 * Re-enable HWP in case this happens after a resume from ACPI 2406 * S3 if the CPU was offline during the whole system/resume 2407 * cycle. 2408 */ 2409 intel_pstate_hwp_reenable(cpu); 2410 } 2411 2412 cpu->epp_powersave = -EINVAL; 2413 cpu->epp_policy = 0; 2414 2415 intel_pstate_get_cpu_pstates(cpu); 2416 2417 pr_debug("controlling: cpu %d\n", cpunum); 2418 2419 return 0; 2420 } 2421 2422 static void intel_pstate_set_update_util_hook(unsigned int cpu_num) 2423 { 2424 struct cpudata *cpu = all_cpu_data[cpu_num]; 2425 2426 if (hwp_active && !hwp_boost) 2427 return; 2428 2429 if (cpu->update_util_set) 2430 return; 2431 2432 /* Prevent intel_pstate_update_util() from using stale data. */ 2433 cpu->sample.time = 0; 2434 cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, 2435 (hwp_active ? 2436 intel_pstate_update_util_hwp : 2437 intel_pstate_update_util)); 2438 cpu->update_util_set = true; 2439 } 2440 2441 static void intel_pstate_clear_update_util_hook(unsigned int cpu) 2442 { 2443 struct cpudata *cpu_data = all_cpu_data[cpu]; 2444 2445 if (!cpu_data->update_util_set) 2446 return; 2447 2448 cpufreq_remove_update_util_hook(cpu); 2449 cpu_data->update_util_set = false; 2450 synchronize_rcu(); 2451 } 2452 2453 static int intel_pstate_get_max_freq(struct cpudata *cpu) 2454 { 2455 return global.turbo_disabled || global.no_turbo ? 2456 cpu->pstate.max_freq : cpu->pstate.turbo_freq; 2457 } 2458 2459 static void intel_pstate_update_perf_limits(struct cpudata *cpu, 2460 unsigned int policy_min, 2461 unsigned int policy_max) 2462 { 2463 int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; 2464 int32_t max_policy_perf, min_policy_perf; 2465 2466 max_policy_perf = policy_max / perf_ctl_scaling; 2467 if (policy_max == policy_min) { 2468 min_policy_perf = max_policy_perf; 2469 } else { 2470 min_policy_perf = policy_min / perf_ctl_scaling; 2471 min_policy_perf = clamp_t(int32_t, min_policy_perf, 2472 0, max_policy_perf); 2473 } 2474 2475 /* 2476 * HWP needs some special consideration, because HWP_REQUEST uses 2477 * abstract values to represent performance rather than pure ratios. 2478 */ 2479 if (hwp_active) { 2480 intel_pstate_get_hwp_cap(cpu); 2481 2482 if (cpu->pstate.scaling != perf_ctl_scaling) { 2483 int scaling = cpu->pstate.scaling; 2484 int freq; 2485 2486 freq = max_policy_perf * perf_ctl_scaling; 2487 max_policy_perf = DIV_ROUND_UP(freq, scaling); 2488 freq = min_policy_perf * perf_ctl_scaling; 2489 min_policy_perf = DIV_ROUND_UP(freq, scaling); 2490 } 2491 } 2492 2493 pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n", 2494 cpu->cpu, min_policy_perf, max_policy_perf); 2495 2496 /* Normalize user input to [min_perf, max_perf] */ 2497 if (per_cpu_limits) { 2498 cpu->min_perf_ratio = min_policy_perf; 2499 cpu->max_perf_ratio = max_policy_perf; 2500 } else { 2501 int turbo_max = cpu->pstate.turbo_pstate; 2502 int32_t global_min, global_max; 2503 2504 /* Global limits are in percent of the maximum turbo P-state. 
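 * For example (illustrative numbers only), turbo_pstate = 40 with max_perf_pct = 75 yields global_max = DIV_ROUND_UP(40 * 75, 100) = 30.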
*/ 2505 global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100); 2506 global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100); 2507 global_min = clamp_t(int32_t, global_min, 0, global_max); 2508 2509 pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu, 2510 global_min, global_max); 2511 2512 cpu->min_perf_ratio = max(min_policy_perf, global_min); 2513 cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf); 2514 cpu->max_perf_ratio = min(max_policy_perf, global_max); 2515 cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio); 2516 2517 /* Make sure min_perf <= max_perf */ 2518 cpu->min_perf_ratio = min(cpu->min_perf_ratio, 2519 cpu->max_perf_ratio); 2520 2521 } 2522 pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu, 2523 cpu->max_perf_ratio, 2524 cpu->min_perf_ratio); 2525 } 2526 2527 static int intel_pstate_set_policy(struct cpufreq_policy *policy) 2528 { 2529 struct cpudata *cpu; 2530 2531 if (!policy->cpuinfo.max_freq) 2532 return -ENODEV; 2533 2534 pr_debug("set_policy cpuinfo.max %u policy->max %u\n", 2535 policy->cpuinfo.max_freq, policy->max); 2536 2537 cpu = all_cpu_data[policy->cpu]; 2538 cpu->policy = policy->policy; 2539 2540 mutex_lock(&intel_pstate_limits_lock); 2541 2542 intel_pstate_update_perf_limits(cpu, policy->min, policy->max); 2543 2544 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { 2545 /* 2546 * NOHZ_FULL CPUs need this as the governor callback may not 2547 * be invoked on them. 2548 */ 2549 intel_pstate_clear_update_util_hook(policy->cpu); 2550 intel_pstate_max_within_limits(cpu); 2551 } else { 2552 intel_pstate_set_update_util_hook(policy->cpu); 2553 } 2554 2555 if (hwp_active) { 2556 /* 2557 * If hwp_boost was active before and has since been turned 2558 * off dynamically, the update util hook needs to be 2559 * cleared. 2560 */ 2561 if (!hwp_boost) 2562 intel_pstate_clear_update_util_hook(policy->cpu); 2563 intel_pstate_hwp_set(policy->cpu); 2564 } 2565 2566 mutex_unlock(&intel_pstate_limits_lock); 2567 2568 return 0; 2569 } 2570 2571 static void intel_pstate_adjust_policy_max(struct cpudata *cpu, 2572 struct cpufreq_policy_data *policy) 2573 { 2574 if (!hwp_active && 2575 cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && 2576 policy->max < policy->cpuinfo.max_freq && 2577 policy->max > cpu->pstate.max_freq) { 2578 pr_debug("policy->max > max non turbo frequency\n"); 2579 policy->max = policy->cpuinfo.max_freq; 2580 } 2581 } 2582 2583 static void intel_pstate_verify_cpu_policy(struct cpudata *cpu, 2584 struct cpufreq_policy_data *policy) 2585 { 2586 int max_freq; 2587 2588 update_turbo_state(); 2589 if (hwp_active) { 2590 intel_pstate_get_hwp_cap(cpu); 2591 max_freq = global.no_turbo || global.turbo_disabled ?
2592 cpu->pstate.max_freq : cpu->pstate.turbo_freq; 2593 } else { 2594 max_freq = intel_pstate_get_max_freq(cpu); 2595 } 2596 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq); 2597 2598 intel_pstate_adjust_policy_max(cpu, policy); 2599 } 2600 2601 static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy) 2602 { 2603 intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy); 2604 2605 return 0; 2606 } 2607 2608 static int intel_cpufreq_cpu_offline(struct cpufreq_policy *policy) 2609 { 2610 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2611 2612 pr_debug("CPU %d going offline\n", cpu->cpu); 2613 2614 if (cpu->suspended) 2615 return 0; 2616 2617 /* 2618 * If the CPU is an SMT thread and it goes offline with the performance 2619 * settings different from the minimum, it will prevent its sibling 2620 * from getting to lower performance levels, so force the minimum 2621 * performance on CPU offline to prevent that from happening. 2622 */ 2623 if (hwp_active) 2624 intel_pstate_hwp_offline(cpu); 2625 else 2626 intel_pstate_set_min_pstate(cpu); 2627 2628 intel_pstate_exit_perf_limits(policy); 2629 2630 return 0; 2631 } 2632 2633 static int intel_pstate_cpu_online(struct cpufreq_policy *policy) 2634 { 2635 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2636 2637 pr_debug("CPU %d going online\n", cpu->cpu); 2638 2639 intel_pstate_init_acpi_perf_limits(policy); 2640 2641 if (hwp_active) { 2642 /* 2643 * Re-enable HWP and clear the "suspended" flag to let "resume" 2644 * know that it need not do that. 2645 */ 2646 intel_pstate_hwp_reenable(cpu); 2647 cpu->suspended = false; 2648 } 2649 2650 return 0; 2651 } 2652 2653 static int intel_pstate_cpu_offline(struct cpufreq_policy *policy) 2654 { 2655 intel_pstate_clear_update_util_hook(policy->cpu); 2656 2657 return intel_cpufreq_cpu_offline(policy); 2658 } 2659 2660 static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) 2661 { 2662 pr_debug("CPU %d exiting\n", policy->cpu); 2663 2664 policy->fast_switch_possible = false; 2665 2666 return 0; 2667 } 2668 2669 static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) 2670 { 2671 struct cpudata *cpu; 2672 int rc; 2673 2674 rc = intel_pstate_init_cpu(policy->cpu); 2675 if (rc) 2676 return rc; 2677 2678 cpu = all_cpu_data[policy->cpu]; 2679 2680 cpu->max_perf_ratio = 0xFF; 2681 cpu->min_perf_ratio = 0; 2682 2683 /* cpuinfo and default policy values */ 2684 policy->cpuinfo.min_freq = cpu->pstate.min_freq; 2685 update_turbo_state(); 2686 global.turbo_disabled_mf = global.turbo_disabled; 2687 policy->cpuinfo.max_freq = global.turbo_disabled ? 2688 cpu->pstate.max_freq : cpu->pstate.turbo_freq; 2689 2690 policy->min = policy->cpuinfo.min_freq; 2691 policy->max = policy->cpuinfo.max_freq; 2692 2693 intel_pstate_init_acpi_perf_limits(policy); 2694 2695 policy->fast_switch_possible = true; 2696 2697 return 0; 2698 } 2699 2700 static int intel_pstate_cpu_init(struct cpufreq_policy *policy) 2701 { 2702 int ret = __intel_pstate_cpu_init(policy); 2703 2704 if (ret) 2705 return ret; 2706 2707 /* 2708 * Set the policy to powersave to provide a valid fallback value in case 2709 * the default cpufreq governor is neither powersave nor performance. 
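 * (intel_pstate_set_policy() only special-cases CPUFREQ_POLICY_PERFORMANCE, so powersave is a safe default for any other governor.)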
2710 */ 2711 policy->policy = CPUFREQ_POLICY_POWERSAVE; 2712 2713 if (hwp_active) { 2714 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2715 2716 cpu->epp_cached = intel_pstate_get_epp(cpu, 0); 2717 } 2718 2719 return 0; 2720 } 2721 2722 static struct cpufreq_driver intel_pstate = { 2723 .flags = CPUFREQ_CONST_LOOPS, 2724 .verify = intel_pstate_verify_policy, 2725 .setpolicy = intel_pstate_set_policy, 2726 .suspend = intel_pstate_suspend, 2727 .resume = intel_pstate_resume, 2728 .init = intel_pstate_cpu_init, 2729 .exit = intel_pstate_cpu_exit, 2730 .offline = intel_pstate_cpu_offline, 2731 .online = intel_pstate_cpu_online, 2732 .update_limits = intel_pstate_update_limits, 2733 .name = "intel_pstate", 2734 }; 2735 2736 static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy) 2737 { 2738 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2739 2740 intel_pstate_verify_cpu_policy(cpu, policy); 2741 intel_pstate_update_perf_limits(cpu, policy->min, policy->max); 2742 2743 return 0; 2744 } 2745 2746 /* Use of trace in passive mode: 2747 * 2748 * In passive mode the trace core_busy field (also known as the 2749 * performance field, and labelled as such on the graphs; also known as 2750 * core_avg_perf) is not needed and so is re-assigned to indicate whether the 2751 * driver call was via the normal or fast switch path. Various graphs 2752 * output from the intel_pstate_tracer.py utility that include core_busy 2753 * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%, 2754 * so we use 10 to indicate the normal path through the driver, and 2755 * 90 to indicate the fast switch path through the driver. 2756 * The scaled_busy field is not used, and is set to 0. 2757 */ 2758 2759 #define INTEL_PSTATE_TRACE_TARGET 10 2760 #define INTEL_PSTATE_TRACE_FAST_SWITCH 90 2761 2762 static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate) 2763 { 2764 struct sample *sample; 2765 2766 if (!trace_pstate_sample_enabled()) 2767 return; 2768 2769 if (!intel_pstate_sample(cpu, ktime_get())) 2770 return; 2771 2772 sample = &cpu->sample; 2773 trace_pstate_sample(trace_type, 2774 0, 2775 old_pstate, 2776 cpu->pstate.current_pstate, 2777 sample->mperf, 2778 sample->aperf, 2779 sample->tsc, 2780 get_avg_frequency(cpu), 2781 fp_toint(cpu->iowait_boost * 100)); 2782 } 2783 2784 static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max, 2785 u32 desired, bool fast_switch) 2786 { 2787 u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev; 2788 2789 value &= ~HWP_MIN_PERF(~0L); 2790 value |= HWP_MIN_PERF(min); 2791 2792 value &= ~HWP_MAX_PERF(~0L); 2793 value |= HWP_MAX_PERF(max); 2794 2795 value &= ~HWP_DESIRED_PERF(~0L); 2796 value |= HWP_DESIRED_PERF(desired); 2797 2798 if (value == prev) 2799 return; 2800 2801 WRITE_ONCE(cpu->hwp_req_cached, value); 2802 if (fast_switch) 2803 wrmsrl(MSR_HWP_REQUEST, value); 2804 else 2805 wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); 2806 } 2807 2808 static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu, 2809 u32 target_pstate, bool fast_switch) 2810 { 2811 if (fast_switch) 2812 wrmsrl(MSR_IA32_PERF_CTL, 2813 pstate_funcs.get_val(cpu, target_pstate)); 2814 else 2815 wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, 2816 pstate_funcs.get_val(cpu, target_pstate)); 2817 } 2818 2819 static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy, 2820 int target_pstate, bool fast_switch) 2821 { 2822 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2823 int old_pstate = cpu->pstate.current_pstate;
2824 2825 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); 2826 if (hwp_active) { 2827 int max_pstate = policy->strict_target ? 2828 target_pstate : cpu->max_perf_ratio; 2829 2830 intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0, 2831 fast_switch); 2832 } else if (target_pstate != old_pstate) { 2833 intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch); 2834 } 2835 2836 cpu->pstate.current_pstate = target_pstate; 2837 2838 intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH : 2839 INTEL_PSTATE_TRACE_TARGET, old_pstate); 2840 2841 return target_pstate; 2842 } 2843 2844 static int intel_cpufreq_target(struct cpufreq_policy *policy, 2845 unsigned int target_freq, 2846 unsigned int relation) 2847 { 2848 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2849 struct cpufreq_freqs freqs; 2850 int target_pstate; 2851 2852 update_turbo_state(); 2853 2854 freqs.old = policy->cur; 2855 freqs.new = target_freq; 2856 2857 cpufreq_freq_transition_begin(policy, &freqs); 2858 2859 switch (relation) { 2860 case CPUFREQ_RELATION_L: 2861 target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling); 2862 break; 2863 case CPUFREQ_RELATION_H: 2864 target_pstate = freqs.new / cpu->pstate.scaling; 2865 break; 2866 default: 2867 target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling); 2868 break; 2869 } 2870 2871 target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false); 2872 2873 freqs.new = target_pstate * cpu->pstate.scaling; 2874 2875 cpufreq_freq_transition_end(policy, &freqs, false); 2876 2877 return 0; 2878 } 2879 2880 static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, 2881 unsigned int target_freq) 2882 { 2883 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2884 int target_pstate; 2885 2886 update_turbo_state(); 2887 2888 target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); 2889 2890 target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true); 2891 2892 return target_pstate * cpu->pstate.scaling; 2893 } 2894 2895 static void intel_cpufreq_adjust_perf(unsigned int cpunum, 2896 unsigned long min_perf, 2897 unsigned long target_perf, 2898 unsigned long capacity) 2899 { 2900 struct cpudata *cpu = all_cpu_data[cpunum]; 2901 u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached); 2902 int old_pstate = cpu->pstate.current_pstate; 2903 int cap_pstate, min_pstate, max_pstate, target_pstate; 2904 2905 update_turbo_state(); 2906 cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) : 2907 HWP_HIGHEST_PERF(hwp_cap); 2908 2909 /* Optimization: Avoid unnecessary divisions. 
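 * When target_perf (or min_perf) is at or above capacity, the capped P-state is used as-is and the corresponding division is skipped.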
*/ 2910 2911 target_pstate = cap_pstate; 2912 if (target_perf < capacity) 2913 target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity); 2914 2915 min_pstate = cap_pstate; 2916 if (min_perf < capacity) 2917 min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity); 2918 2919 if (min_pstate < cpu->pstate.min_pstate) 2920 min_pstate = cpu->pstate.min_pstate; 2921 2922 if (min_pstate < cpu->min_perf_ratio) 2923 min_pstate = cpu->min_perf_ratio; 2924 2925 max_pstate = min(cap_pstate, cpu->max_perf_ratio); 2926 if (max_pstate < min_pstate) 2927 max_pstate = min_pstate; 2928 2929 target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate); 2930 2931 intel_cpufreq_hwp_update(cpu, min_pstate, max_pstate, target_pstate, true); 2932 2933 cpu->pstate.current_pstate = target_pstate; 2934 intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate); 2935 } 2936 2937 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) 2938 { 2939 struct freq_qos_request *req; 2940 struct cpudata *cpu; 2941 struct device *dev; 2942 int ret, freq; 2943 2944 dev = get_cpu_device(policy->cpu); 2945 if (!dev) 2946 return -ENODEV; 2947 2948 ret = __intel_pstate_cpu_init(policy); 2949 if (ret) 2950 return ret; 2951 2952 policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY; 2953 /* This reflects the intel_pstate_get_cpu_pstates() setting. */ 2954 policy->cur = policy->cpuinfo.min_freq; 2955 2956 req = kcalloc(2, sizeof(*req), GFP_KERNEL); 2957 if (!req) { 2958 ret = -ENOMEM; 2959 goto pstate_exit; 2960 } 2961 2962 cpu = all_cpu_data[policy->cpu]; 2963 2964 if (hwp_active) { 2965 u64 value; 2966 2967 policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP; 2968 2969 intel_pstate_get_hwp_cap(cpu); 2970 2971 rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value); 2972 WRITE_ONCE(cpu->hwp_req_cached, value); 2973 2974 cpu->epp_cached = intel_pstate_get_epp(cpu, value); 2975 } else { 2976 policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY; 2977 } 2978 2979 freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.min_perf_pct, 100); 2980 2981 ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN, 2982 freq); 2983 if (ret < 0) { 2984 dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret); 2985 goto free_req; 2986 } 2987 2988 freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.max_perf_pct, 100); 2989 2990 ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX, 2991 freq); 2992 if (ret < 0) { 2993 dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret); 2994 goto remove_min_req; 2995 } 2996 2997 policy->driver_data = req; 2998 2999 return 0; 3000 3001 remove_min_req: 3002 freq_qos_remove_request(req); 3003 free_req: 3004 kfree(req); 3005 pstate_exit: 3006 intel_pstate_exit_perf_limits(policy); 3007 3008 return ret; 3009 } 3010 3011 static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy) 3012 { 3013 struct freq_qos_request *req; 3014 3015 req = policy->driver_data; 3016 3017 freq_qos_remove_request(req + 1); 3018 freq_qos_remove_request(req); 3019 kfree(req); 3020 3021 return intel_pstate_cpu_exit(policy); 3022 } 3023 3024 static int intel_cpufreq_suspend(struct cpufreq_policy *policy) 3025 { 3026 intel_pstate_suspend(policy); 3027 3028 if (hwp_active) { 3029 struct cpudata *cpu = all_cpu_data[policy->cpu]; 3030 u64 value = READ_ONCE(cpu->hwp_req_cached); 3031 3032 /* 3033 * Clear the desired perf field in MSR_HWP_REQUEST in case 3034 * intel_cpufreq_adjust_perf() is in use and the last value 3035 * written by it may not be 
suitable. 3036 */ 3037 value &= ~HWP_DESIRED_PERF(~0L); 3038 wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); 3039 WRITE_ONCE(cpu->hwp_req_cached, value); 3040 } 3041 3042 return 0; 3043 } 3044 3045 static struct cpufreq_driver intel_cpufreq = { 3046 .flags = CPUFREQ_CONST_LOOPS, 3047 .verify = intel_cpufreq_verify_policy, 3048 .target = intel_cpufreq_target, 3049 .fast_switch = intel_cpufreq_fast_switch, 3050 .init = intel_cpufreq_cpu_init, 3051 .exit = intel_cpufreq_cpu_exit, 3052 .offline = intel_cpufreq_cpu_offline, 3053 .online = intel_pstate_cpu_online, 3054 .suspend = intel_cpufreq_suspend, 3055 .resume = intel_pstate_resume, 3056 .update_limits = intel_pstate_update_limits, 3057 .name = "intel_cpufreq", 3058 }; 3059 3060 static struct cpufreq_driver *default_driver; 3061 3062 static void intel_pstate_driver_cleanup(void) 3063 { 3064 unsigned int cpu; 3065 3066 cpus_read_lock(); 3067 for_each_online_cpu(cpu) { 3068 if (all_cpu_data[cpu]) { 3069 if (intel_pstate_driver == &intel_pstate) 3070 intel_pstate_clear_update_util_hook(cpu); 3071 3072 spin_lock(&hwp_notify_lock); 3073 kfree(all_cpu_data[cpu]); 3074 WRITE_ONCE(all_cpu_data[cpu], NULL); 3075 spin_unlock(&hwp_notify_lock); 3076 } 3077 } 3078 cpus_read_unlock(); 3079 3080 intel_pstate_driver = NULL; 3081 } 3082 3083 static int intel_pstate_register_driver(struct cpufreq_driver *driver) 3084 { 3085 int ret; 3086 3087 if (driver == &intel_pstate) 3088 intel_pstate_sysfs_expose_hwp_dynamic_boost(); 3089 3090 memset(&global, 0, sizeof(global)); 3091 global.max_perf_pct = 100; 3092 3093 intel_pstate_driver = driver; 3094 ret = cpufreq_register_driver(intel_pstate_driver); 3095 if (ret) { 3096 intel_pstate_driver_cleanup(); 3097 return ret; 3098 } 3099 3100 global.min_perf_pct = min_perf_pct_min(); 3101 3102 return 0; 3103 } 3104 3105 static ssize_t intel_pstate_show_status(char *buf) 3106 { 3107 if (!intel_pstate_driver) 3108 return sprintf(buf, "off\n"); 3109 3110 return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ? 
3111 "active" : "passive"); 3112 } 3113 3114 static int intel_pstate_update_status(const char *buf, size_t size) 3115 { 3116 if (size == 3 && !strncmp(buf, "off", size)) { 3117 if (!intel_pstate_driver) 3118 return -EINVAL; 3119 3120 if (hwp_active) 3121 return -EBUSY; 3122 3123 cpufreq_unregister_driver(intel_pstate_driver); 3124 intel_pstate_driver_cleanup(); 3125 return 0; 3126 } 3127 3128 if (size == 6 && !strncmp(buf, "active", size)) { 3129 if (intel_pstate_driver) { 3130 if (intel_pstate_driver == &intel_pstate) 3131 return 0; 3132 3133 cpufreq_unregister_driver(intel_pstate_driver); 3134 } 3135 3136 return intel_pstate_register_driver(&intel_pstate); 3137 } 3138 3139 if (size == 7 && !strncmp(buf, "passive", size)) { 3140 if (intel_pstate_driver) { 3141 if (intel_pstate_driver == &intel_cpufreq) 3142 return 0; 3143 3144 cpufreq_unregister_driver(intel_pstate_driver); 3145 intel_pstate_sysfs_hide_hwp_dynamic_boost(); 3146 } 3147 3148 return intel_pstate_register_driver(&intel_cpufreq); 3149 } 3150 3151 return -EINVAL; 3152 } 3153 3154 static int no_load __initdata; 3155 static int no_hwp __initdata; 3156 static int hwp_only __initdata; 3157 static unsigned int force_load __initdata; 3158 3159 static int __init intel_pstate_msrs_not_valid(void) 3160 { 3161 if (!pstate_funcs.get_max() || 3162 !pstate_funcs.get_min() || 3163 !pstate_funcs.get_turbo()) 3164 return -ENODEV; 3165 3166 return 0; 3167 } 3168 3169 static void __init copy_cpu_funcs(struct pstate_funcs *funcs) 3170 { 3171 pstate_funcs.get_max = funcs->get_max; 3172 pstate_funcs.get_max_physical = funcs->get_max_physical; 3173 pstate_funcs.get_min = funcs->get_min; 3174 pstate_funcs.get_turbo = funcs->get_turbo; 3175 pstate_funcs.get_scaling = funcs->get_scaling; 3176 pstate_funcs.get_val = funcs->get_val; 3177 pstate_funcs.get_vid = funcs->get_vid; 3178 pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift; 3179 } 3180 3181 #ifdef CONFIG_ACPI 3182 3183 static bool __init intel_pstate_no_acpi_pss(void) 3184 { 3185 int i; 3186 3187 for_each_possible_cpu(i) { 3188 acpi_status status; 3189 union acpi_object *pss; 3190 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 3191 struct acpi_processor *pr = per_cpu(processors, i); 3192 3193 if (!pr) 3194 continue; 3195 3196 status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); 3197 if (ACPI_FAILURE(status)) 3198 continue; 3199 3200 pss = buffer.pointer; 3201 if (pss && pss->type == ACPI_TYPE_PACKAGE) { 3202 kfree(pss); 3203 return false; 3204 } 3205 3206 kfree(pss); 3207 } 3208 3209 pr_debug("ACPI _PSS not found\n"); 3210 return true; 3211 } 3212 3213 static bool __init intel_pstate_no_acpi_pcch(void) 3214 { 3215 acpi_status status; 3216 acpi_handle handle; 3217 3218 status = acpi_get_handle(NULL, "\\_SB", &handle); 3219 if (ACPI_FAILURE(status)) 3220 goto not_found; 3221 3222 if (acpi_has_method(handle, "PCCH")) 3223 return false; 3224 3225 not_found: 3226 pr_debug("ACPI PCCH not found\n"); 3227 return true; 3228 } 3229 3230 static bool __init intel_pstate_has_acpi_ppc(void) 3231 { 3232 int i; 3233 3234 for_each_possible_cpu(i) { 3235 struct acpi_processor *pr = per_cpu(processors, i); 3236 3237 if (!pr) 3238 continue; 3239 if (acpi_has_method(pr->handle, "_PPC")) 3240 return true; 3241 } 3242 pr_debug("ACPI _PPC not found\n"); 3243 return false; 3244 } 3245 3246 enum { 3247 PSS, 3248 PPC, 3249 }; 3250 3251 /* Hardware vendor-specific info that has its own power management modes */ 3252 static struct acpi_platform_list plat_info[] __initdata = { 3253 
{"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS}, 3254 {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3255 {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3256 {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3257 {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3258 {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3259 {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3260 {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3261 {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3262 {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3263 {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3264 {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3265 {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3266 {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3267 {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3268 { } /* End */ 3269 }; 3270 3271 #define BITMASK_OOB (BIT(8) | BIT(18)) 3272 3273 static bool __init intel_pstate_platform_pwr_mgmt_exists(void) 3274 { 3275 const struct x86_cpu_id *id; 3276 u64 misc_pwr; 3277 int idx; 3278 3279 id = x86_match_cpu(intel_pstate_cpu_oob_ids); 3280 if (id) { 3281 rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); 3282 if (misc_pwr & BITMASK_OOB) { 3283 pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n"); 3284 pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n"); 3285 return true; 3286 } 3287 } 3288 3289 idx = acpi_match_platform_list(plat_info); 3290 if (idx < 0) 3291 return false; 3292 3293 switch (plat_info[idx].data) { 3294 case PSS: 3295 if (!intel_pstate_no_acpi_pss()) 3296 return false; 3297 3298 return intel_pstate_no_acpi_pcch(); 3299 case PPC: 3300 return intel_pstate_has_acpi_ppc() && !force_load; 3301 } 3302 3303 return false; 3304 } 3305 3306 static void intel_pstate_request_control_from_smm(void) 3307 { 3308 /* 3309 * It may be unsafe to request P-states control from SMM if _PPC support 3310 * has not been enabled. 
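 * (acpi_ppc is only set via the "intel_pstate=support_acpi_ppc" command-line option; see intel_pstate_setup() below.)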
3311 */ 3312 if (acpi_ppc) 3313 acpi_processor_pstate_control(); 3314 } 3315 #else /* CONFIG_ACPI not enabled */ 3316 static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; } 3317 static inline bool intel_pstate_has_acpi_ppc(void) { return false; } 3318 static inline void intel_pstate_request_control_from_smm(void) {} 3319 #endif /* CONFIG_ACPI */ 3320 3321 #define INTEL_PSTATE_HWP_BROADWELL 0x01 3322 3323 #define X86_MATCH_HWP(model, hwp_mode) \ 3324 X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ 3325 X86_FEATURE_HWP, hwp_mode) 3326 3327 static const struct x86_cpu_id hwp_support_ids[] __initconst = { 3328 X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), 3329 X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL), 3330 X86_MATCH_HWP(ANY, 0), 3331 {} 3332 }; 3333 3334 static bool intel_pstate_hwp_is_enabled(void) 3335 { 3336 u64 value; 3337 3338 rdmsrl(MSR_PM_ENABLE, value); 3339 return !!(value & 0x1); 3340 } 3341 3342 static int __init intel_pstate_init(void) 3343 { 3344 static struct cpudata **_all_cpu_data; 3345 const struct x86_cpu_id *id; 3346 int rc; 3347 3348 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) 3349 return -ENODEV; 3350 3351 id = x86_match_cpu(hwp_support_ids); 3352 if (id) { 3353 bool hwp_forced = intel_pstate_hwp_is_enabled(); 3354 3355 if (hwp_forced) 3356 pr_info("HWP enabled by BIOS\n"); 3357 else if (no_load) 3358 return -ENODEV; 3359 3360 copy_cpu_funcs(&core_funcs); 3361 /* 3362 * Avoid enabling HWP for processors without EPP support, 3363 * because that means incomplete HWP implementation which is a 3364 * corner case and supporting it is generally problematic. 3365 * 3366 * If HWP is enabled already, though, there is no choice but to 3367 * deal with it. 3368 */ 3369 if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) { 3370 WRITE_ONCE(hwp_active, 1); 3371 hwp_mode_bdw = id->driver_data; 3372 intel_pstate.attr = hwp_cpufreq_attrs; 3373 intel_cpufreq.attr = hwp_cpufreq_attrs; 3374 intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS; 3375 intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf; 3376 if (!default_driver) 3377 default_driver = &intel_pstate; 3378 3379 if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) 3380 intel_pstate_cppc_set_cpu_scaling(); 3381 3382 goto hwp_cpu_matched; 3383 } 3384 pr_info("HWP not enabled\n"); 3385 } else { 3386 if (no_load) 3387 return -ENODEV; 3388 3389 id = x86_match_cpu(intel_pstate_cpu_ids); 3390 if (!id) { 3391 pr_info("CPU model not supported\n"); 3392 return -ENODEV; 3393 } 3394 3395 copy_cpu_funcs((struct pstate_funcs *)id->driver_data); 3396 } 3397 3398 if (intel_pstate_msrs_not_valid()) { 3399 pr_info("Invalid MSRs\n"); 3400 return -ENODEV; 3401 } 3402 /* Without HWP start in the passive mode. */ 3403 if (!default_driver) 3404 default_driver = &intel_cpufreq; 3405 3406 hwp_cpu_matched: 3407 /* 3408 * The Intel pstate driver will be ignored if the platform 3409 * firmware has its own power management modes. 
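 * (That is, when the OOB bits are set in MSR_MISC_PWR_MGMT or the platform matches an entry in plat_info[]; see intel_pstate_platform_pwr_mgmt_exists() above.)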
3410 */ 3411 if (intel_pstate_platform_pwr_mgmt_exists()) { 3412 pr_info("P-states controlled by the platform\n"); 3413 return -ENODEV; 3414 } 3415 3416 if (!hwp_active && hwp_only) 3417 return -ENOTSUPP; 3418 3419 pr_info("Intel P-state driver initializing\n"); 3420 3421 _all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus())); 3422 if (!_all_cpu_data) 3423 return -ENOMEM; 3424 3425 WRITE_ONCE(all_cpu_data, _all_cpu_data); 3426 3427 intel_pstate_request_control_from_smm(); 3428 3429 intel_pstate_sysfs_expose_params(); 3430 3431 mutex_lock(&intel_pstate_driver_lock); 3432 rc = intel_pstate_register_driver(default_driver); 3433 mutex_unlock(&intel_pstate_driver_lock); 3434 if (rc) { 3435 intel_pstate_sysfs_remove(); 3436 return rc; 3437 } 3438 3439 if (hwp_active) { 3440 const struct x86_cpu_id *id; 3441 3442 id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids); 3443 if (id) { 3444 set_power_ctl_ee_state(false); 3445 pr_info("Disabling energy efficiency optimization\n"); 3446 } 3447 3448 pr_info("HWP enabled\n"); 3449 } else if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) { 3450 pr_warn("Problematic setup: Hybrid processor with disabled HWP\n"); 3451 } 3452 3453 return 0; 3454 } 3455 device_initcall(intel_pstate_init); 3456 3457 static int __init intel_pstate_setup(char *str) 3458 { 3459 if (!str) 3460 return -EINVAL; 3461 3462 if (!strcmp(str, "disable")) 3463 no_load = 1; 3464 else if (!strcmp(str, "active")) 3465 default_driver = &intel_pstate; 3466 else if (!strcmp(str, "passive")) 3467 default_driver = &intel_cpufreq; 3468 3469 if (!strcmp(str, "no_hwp")) 3470 no_hwp = 1; 3471 3472 if (!strcmp(str, "force")) 3473 force_load = 1; 3474 if (!strcmp(str, "hwp_only")) 3475 hwp_only = 1; 3476 if (!strcmp(str, "per_cpu_perf_limits")) 3477 per_cpu_limits = true; 3478 3479 #ifdef CONFIG_ACPI 3480 if (!strcmp(str, "support_acpi_ppc")) 3481 acpi_ppc = true; 3482 #endif 3483 3484 return 0; 3485 } 3486 early_param("intel_pstate", intel_pstate_setup); 3487 3488 MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>"); 3489 MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors"); 3490 MODULE_LICENSE("GPL"); 3491
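/*
 * Illustrative boot-time usage of the options parsed by intel_pstate_setup()
 * above (a sketch, not an exhaustive list):
 *
 *   intel_pstate=disable
 *   intel_pstate=passive
 *   intel_pstate=per_cpu_perf_limits
 *
 * The handler compares the whole string, so combined values such as
 * "passive,no_hwp" do not match any of the options above.
 */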