1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * intel_pstate.c: Native P state management for Intel processors 4 * 5 * (C) Copyright 2012 Intel Corporation 6 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com> 7 */ 8 9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 11 #include <linux/kernel.h> 12 #include <linux/kernel_stat.h> 13 #include <linux/module.h> 14 #include <linux/ktime.h> 15 #include <linux/hrtimer.h> 16 #include <linux/tick.h> 17 #include <linux/slab.h> 18 #include <linux/sched/cpufreq.h> 19 #include <linux/list.h> 20 #include <linux/cpu.h> 21 #include <linux/cpufreq.h> 22 #include <linux/sysfs.h> 23 #include <linux/types.h> 24 #include <linux/fs.h> 25 #include <linux/acpi.h> 26 #include <linux/vmalloc.h> 27 #include <linux/pm_qos.h> 28 #include <trace/events/power.h> 29 30 #include <asm/div64.h> 31 #include <asm/msr.h> 32 #include <asm/cpu_device_id.h> 33 #include <asm/cpufeature.h> 34 #include <asm/intel-family.h> 35 #include "../drivers/thermal/intel/thermal_interrupt.h" 36 37 #define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC) 38 39 #define INTEL_CPUFREQ_TRANSITION_LATENCY 20000 40 #define INTEL_CPUFREQ_TRANSITION_DELAY_HWP 5000 41 #define INTEL_CPUFREQ_TRANSITION_DELAY 500 42 43 #ifdef CONFIG_ACPI 44 #include <acpi/processor.h> 45 #include <acpi/cppc_acpi.h> 46 #endif 47 48 #define FRAC_BITS 8 49 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) 50 #define fp_toint(X) ((X) >> FRAC_BITS) 51 52 #define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3)) 53 54 #define EXT_BITS 6 55 #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS) 56 #define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS) 57 #define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS) 58 59 static inline int32_t mul_fp(int32_t x, int32_t y) 60 { 61 return ((int64_t)x * (int64_t)y) >> FRAC_BITS; 62 } 63 64 static inline int32_t div_fp(s64 x, s64 y) 65 { 66 return div64_s64((int64_t)x << FRAC_BITS, y); 67 } 68 69 static inline int ceiling_fp(int32_t x) 70 { 71 int mask, ret; 72 73 ret = fp_toint(x); 74 mask = (1 << FRAC_BITS) - 1; 75 if (x & mask) 76 ret += 1; 77 return ret; 78 } 79 80 static inline u64 mul_ext_fp(u64 x, u64 y) 81 { 82 return (x * y) >> EXT_FRAC_BITS; 83 } 84 85 static inline u64 div_ext_fp(u64 x, u64 y) 86 { 87 return div64_u64(x << EXT_FRAC_BITS, y); 88 } 89 90 /** 91 * struct sample - Store performance sample 92 * @core_avg_perf: Ratio of APERF/MPERF which is the actual average 93 * performance during last sample period 94 * @busy_scaled: Scaled busy value which is used to calculate next 95 * P state. This can be different than core_avg_perf 96 * to account for cpu idle period 97 * @aperf: Difference of actual performance frequency clock count 98 * read from APERF MSR between last and current sample 99 * @mperf: Difference of maximum performance frequency clock count 100 * read from MPERF MSR between last and current sample 101 * @tsc: Difference of time stamp counter between last and 102 * current sample 103 * @time: Current time from scheduler 104 * 105 * This structure is used in the cpudata structure to store performance sample 106 * data for choosing next P State. 107 */ 108 struct sample { 109 int32_t core_avg_perf; 110 int32_t busy_scaled; 111 u64 aperf; 112 u64 mperf; 113 u64 tsc; 114 u64 time; 115 }; 116 117 /** 118 * struct pstate_data - Store P state data 119 * @current_pstate: Current requested P state 120 * @min_pstate: Min P state possible for this platform 121 * @max_pstate: Max P state possible for this platform 122 * @max_pstate_physical:This is physical Max P state for a processor 123 * This can be higher than the max_pstate which can 124 * be limited by platform thermal design power limits 125 * @perf_ctl_scaling: PERF_CTL P-state to frequency scaling factor 126 * @scaling: Scaling factor between performance and frequency 127 * @turbo_pstate: Max Turbo P state possible for this platform 128 * @min_freq: @min_pstate frequency in cpufreq units 129 * @max_freq: @max_pstate frequency in cpufreq units 130 * @turbo_freq: @turbo_pstate frequency in cpufreq units 131 * 132 * Stores the per cpu model P state limits and current P state. 133 */ 134 struct pstate_data { 135 int current_pstate; 136 int min_pstate; 137 int max_pstate; 138 int max_pstate_physical; 139 int perf_ctl_scaling; 140 int scaling; 141 int turbo_pstate; 142 unsigned int min_freq; 143 unsigned int max_freq; 144 unsigned int turbo_freq; 145 }; 146 147 /** 148 * struct vid_data - Stores voltage information data 149 * @min: VID data for this platform corresponding to 150 * the lowest P state 151 * @max: VID data corresponding to the highest P State. 152 * @turbo: VID data for turbo P state 153 * @ratio: Ratio of (vid max - vid min) / 154 * (max P state - Min P State) 155 * 156 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling) 157 * This data is used in Atom platforms, where in addition to target P state, 158 * the voltage data needs to be specified to select next P State. 159 */ 160 struct vid_data { 161 int min; 162 int max; 163 int turbo; 164 int32_t ratio; 165 }; 166 167 /** 168 * struct global_params - Global parameters, mostly tunable via sysfs. 169 * @no_turbo: Whether or not to use turbo P-states. 170 * @turbo_disabled: Whether or not turbo P-states are available at all, 171 * based on the MSR_IA32_MISC_ENABLE value and whether or 172 * not the maximum reported turbo P-state is different from 173 * the maximum reported non-turbo one. 174 * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq. 175 * @min_perf_pct: Minimum capacity limit in percent of the maximum turbo 176 * P-state capacity. 177 * @max_perf_pct: Maximum capacity limit in percent of the maximum turbo 178 * P-state capacity. 179 */ 180 struct global_params { 181 bool no_turbo; 182 bool turbo_disabled; 183 bool turbo_disabled_mf; 184 int max_perf_pct; 185 int min_perf_pct; 186 }; 187 188 /** 189 * struct cpudata - Per CPU instance data storage 190 * @cpu: CPU number for this instance data 191 * @policy: CPUFreq policy value 192 * @update_util: CPUFreq utility callback information 193 * @update_util_set: CPUFreq utility callback is set 194 * @iowait_boost: iowait-related boost fraction 195 * @last_update: Time of the last update. 196 * @pstate: Stores P state limits for this CPU 197 * @vid: Stores VID limits for this CPU 198 * @last_sample_time: Last Sample time 199 * @aperf_mperf_shift: APERF vs MPERF counting frequency difference 200 * @prev_aperf: Last APERF value read from APERF MSR 201 * @prev_mperf: Last MPERF value read from MPERF MSR 202 * @prev_tsc: Last timestamp counter (TSC) value 203 * @prev_cummulative_iowait: IO Wait time difference from last and 204 * current sample 205 * @sample: Storage for storing last Sample data 206 * @min_perf_ratio: Minimum capacity in terms of PERF or HWP ratios 207 * @max_perf_ratio: Maximum capacity in terms of PERF or HWP ratios 208 * @acpi_perf_data: Stores ACPI perf information read from _PSS 209 * @valid_pss_table: Set to true for valid ACPI _PSS entries found 210 * @epp_powersave: Last saved HWP energy performance preference 211 * (EPP) or energy performance bias (EPB), 212 * when policy switched to performance 213 * @epp_policy: Last saved policy used to set EPP/EPB 214 * @epp_default: Power on default HWP energy performance 215 * preference/bias 216 * @epp_cached Cached HWP energy-performance preference value 217 * @hwp_req_cached: Cached value of the last HWP Request MSR 218 * @hwp_cap_cached: Cached value of the last HWP Capabilities MSR 219 * @last_io_update: Last time when IO wake flag was set 220 * @sched_flags: Store scheduler flags for possible cross CPU update 221 * @hwp_boost_min: Last HWP boosted min performance 222 * @suspended: Whether or not the driver has been suspended. 223 * @hwp_notify_work: workqueue for HWP notifications. 224 * 225 * This structure stores per CPU instance data for all CPUs. 226 */ 227 struct cpudata { 228 int cpu; 229 230 unsigned int policy; 231 struct update_util_data update_util; 232 bool update_util_set; 233 234 struct pstate_data pstate; 235 struct vid_data vid; 236 237 u64 last_update; 238 u64 last_sample_time; 239 u64 aperf_mperf_shift; 240 u64 prev_aperf; 241 u64 prev_mperf; 242 u64 prev_tsc; 243 u64 prev_cummulative_iowait; 244 struct sample sample; 245 int32_t min_perf_ratio; 246 int32_t max_perf_ratio; 247 #ifdef CONFIG_ACPI 248 struct acpi_processor_performance acpi_perf_data; 249 bool valid_pss_table; 250 #endif 251 unsigned int iowait_boost; 252 s16 epp_powersave; 253 s16 epp_policy; 254 s16 epp_default; 255 s16 epp_cached; 256 u64 hwp_req_cached; 257 u64 hwp_cap_cached; 258 u64 last_io_update; 259 unsigned int sched_flags; 260 u32 hwp_boost_min; 261 bool suspended; 262 struct delayed_work hwp_notify_work; 263 }; 264 265 static struct cpudata **all_cpu_data; 266 267 /** 268 * struct pstate_funcs - Per CPU model specific callbacks 269 * @get_max: Callback to get maximum non turbo effective P state 270 * @get_max_physical: Callback to get maximum non turbo physical P state 271 * @get_min: Callback to get minimum P state 272 * @get_turbo: Callback to get turbo P state 273 * @get_scaling: Callback to get frequency scaling factor 274 * @get_cpu_scaling: Get frequency scaling factor for a given cpu 275 * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference 276 * @get_val: Callback to convert P state to actual MSR write value 277 * @get_vid: Callback to get VID data for Atom platforms 278 * 279 * Core and Atom CPU models have different way to get P State limits. This 280 * structure is used to store those callbacks. 281 */ 282 struct pstate_funcs { 283 int (*get_max)(void); 284 int (*get_max_physical)(void); 285 int (*get_min)(void); 286 int (*get_turbo)(void); 287 int (*get_scaling)(void); 288 int (*get_cpu_scaling)(int cpu); 289 int (*get_aperf_mperf_shift)(void); 290 u64 (*get_val)(struct cpudata*, int pstate); 291 void (*get_vid)(struct cpudata *); 292 }; 293 294 static struct pstate_funcs pstate_funcs __read_mostly; 295 296 static int hwp_active __read_mostly; 297 static int hwp_mode_bdw __read_mostly; 298 static bool per_cpu_limits __read_mostly; 299 static bool hwp_boost __read_mostly; 300 301 static struct cpufreq_driver *intel_pstate_driver __read_mostly; 302 303 #ifdef CONFIG_ACPI 304 static bool acpi_ppc; 305 #endif 306 307 static struct global_params global; 308 309 static DEFINE_MUTEX(intel_pstate_driver_lock); 310 static DEFINE_MUTEX(intel_pstate_limits_lock); 311 312 #ifdef CONFIG_ACPI 313 314 static bool intel_pstate_acpi_pm_profile_server(void) 315 { 316 if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER || 317 acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER) 318 return true; 319 320 return false; 321 } 322 323 static bool intel_pstate_get_ppc_enable_status(void) 324 { 325 if (intel_pstate_acpi_pm_profile_server()) 326 return true; 327 328 return acpi_ppc; 329 } 330 331 #ifdef CONFIG_ACPI_CPPC_LIB 332 333 /* The work item is needed to avoid CPU hotplug locking issues */ 334 static void intel_pstste_sched_itmt_work_fn(struct work_struct *work) 335 { 336 sched_set_itmt_support(); 337 } 338 339 static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn); 340 341 #define CPPC_MAX_PERF U8_MAX 342 343 static void intel_pstate_set_itmt_prio(int cpu) 344 { 345 struct cppc_perf_caps cppc_perf; 346 static u32 max_highest_perf = 0, min_highest_perf = U32_MAX; 347 int ret; 348 349 ret = cppc_get_perf_caps(cpu, &cppc_perf); 350 if (ret) 351 return; 352 353 /* 354 * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff. 355 * In this case we can't use CPPC.highest_perf to enable ITMT. 356 * In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide. 357 */ 358 if (cppc_perf.highest_perf == CPPC_MAX_PERF) 359 cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached)); 360 361 /* 362 * The priorities can be set regardless of whether or not 363 * sched_set_itmt_support(true) has been called and it is valid to 364 * update them at any time after it has been called. 365 */ 366 sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu); 367 368 if (max_highest_perf <= min_highest_perf) { 369 if (cppc_perf.highest_perf > max_highest_perf) 370 max_highest_perf = cppc_perf.highest_perf; 371 372 if (cppc_perf.highest_perf < min_highest_perf) 373 min_highest_perf = cppc_perf.highest_perf; 374 375 if (max_highest_perf > min_highest_perf) { 376 /* 377 * This code can be run during CPU online under the 378 * CPU hotplug locks, so sched_set_itmt_support() 379 * cannot be called from here. Queue up a work item 380 * to invoke it. 381 */ 382 schedule_work(&sched_itmt_work); 383 } 384 } 385 } 386 387 static int intel_pstate_get_cppc_guaranteed(int cpu) 388 { 389 struct cppc_perf_caps cppc_perf; 390 int ret; 391 392 ret = cppc_get_perf_caps(cpu, &cppc_perf); 393 if (ret) 394 return ret; 395 396 if (cppc_perf.guaranteed_perf) 397 return cppc_perf.guaranteed_perf; 398 399 return cppc_perf.nominal_perf; 400 } 401 402 static u32 intel_pstate_cppc_nominal(int cpu) 403 { 404 u64 nominal_perf; 405 406 if (cppc_get_nominal_perf(cpu, &nominal_perf)) 407 return 0; 408 409 return nominal_perf; 410 } 411 #else /* CONFIG_ACPI_CPPC_LIB */ 412 static inline void intel_pstate_set_itmt_prio(int cpu) 413 { 414 } 415 #endif /* CONFIG_ACPI_CPPC_LIB */ 416 417 static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 418 { 419 struct cpudata *cpu; 420 int ret; 421 int i; 422 423 if (hwp_active) { 424 intel_pstate_set_itmt_prio(policy->cpu); 425 return; 426 } 427 428 if (!intel_pstate_get_ppc_enable_status()) 429 return; 430 431 cpu = all_cpu_data[policy->cpu]; 432 433 ret = acpi_processor_register_performance(&cpu->acpi_perf_data, 434 policy->cpu); 435 if (ret) 436 return; 437 438 /* 439 * Check if the control value in _PSS is for PERF_CTL MSR, which should 440 * guarantee that the states returned by it map to the states in our 441 * list directly. 442 */ 443 if (cpu->acpi_perf_data.control_register.space_id != 444 ACPI_ADR_SPACE_FIXED_HARDWARE) 445 goto err; 446 447 /* 448 * If there is only one entry _PSS, simply ignore _PSS and continue as 449 * usual without taking _PSS into account 450 */ 451 if (cpu->acpi_perf_data.state_count < 2) 452 goto err; 453 454 pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu); 455 for (i = 0; i < cpu->acpi_perf_data.state_count; i++) { 456 pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n", 457 (i == cpu->acpi_perf_data.state ? '*' : ' '), i, 458 (u32) cpu->acpi_perf_data.states[i].core_frequency, 459 (u32) cpu->acpi_perf_data.states[i].power, 460 (u32) cpu->acpi_perf_data.states[i].control); 461 } 462 463 /* 464 * The _PSS table doesn't contain whole turbo frequency range. 465 * This just contains +1 MHZ above the max non turbo frequency, 466 * with control value corresponding to max turbo ratio. But 467 * when cpufreq set policy is called, it will call with this 468 * max frequency, which will cause a reduced performance as 469 * this driver uses real max turbo frequency as the max 470 * frequency. So correct this frequency in _PSS table to 471 * correct max turbo frequency based on the turbo state. 472 * Also need to convert to MHz as _PSS freq is in MHz. 473 */ 474 if (!global.turbo_disabled) 475 cpu->acpi_perf_data.states[0].core_frequency = 476 policy->cpuinfo.max_freq / 1000; 477 cpu->valid_pss_table = true; 478 pr_debug("_PPC limits will be enforced\n"); 479 480 return; 481 482 err: 483 cpu->valid_pss_table = false; 484 acpi_processor_unregister_performance(policy->cpu); 485 } 486 487 static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 488 { 489 struct cpudata *cpu; 490 491 cpu = all_cpu_data[policy->cpu]; 492 if (!cpu->valid_pss_table) 493 return; 494 495 acpi_processor_unregister_performance(policy->cpu); 496 } 497 #else /* CONFIG_ACPI */ 498 static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 499 { 500 } 501 502 static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 503 { 504 } 505 506 static inline bool intel_pstate_acpi_pm_profile_server(void) 507 { 508 return false; 509 } 510 #endif /* CONFIG_ACPI */ 511 512 #ifndef CONFIG_ACPI_CPPC_LIB 513 static inline int intel_pstate_get_cppc_guaranteed(int cpu) 514 { 515 return -ENOTSUPP; 516 } 517 #endif /* CONFIG_ACPI_CPPC_LIB */ 518 519 /** 520 * intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels. 521 * @cpu: Target CPU. 522 * 523 * On hybrid processors, HWP may expose more performance levels than there are 524 * P-states accessible through the PERF_CTL interface. If that happens, the 525 * scaling factor between HWP performance levels and CPU frequency will be less 526 * than the scaling factor between P-state values and CPU frequency. 527 * 528 * In that case, adjust the CPU parameters used in computations accordingly. 529 */ 530 static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu) 531 { 532 int perf_ctl_max_phys = cpu->pstate.max_pstate_physical; 533 int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; 534 int perf_ctl_turbo = pstate_funcs.get_turbo(); 535 int turbo_freq = perf_ctl_turbo * perf_ctl_scaling; 536 int scaling = cpu->pstate.scaling; 537 538 pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys); 539 pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, pstate_funcs.get_max()); 540 pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo); 541 pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling); 542 pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate); 543 pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate); 544 pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling); 545 546 /* 547 * If the product of the HWP performance scaling factor and the HWP_CAP 548 * highest performance is greater than the maximum turbo frequency 549 * corresponding to the pstate_funcs.get_turbo() return value, the 550 * scaling factor is too high, so recompute it to make the HWP_CAP 551 * highest performance correspond to the maximum turbo frequency. 552 */ 553 cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling; 554 if (turbo_freq < cpu->pstate.turbo_freq) { 555 cpu->pstate.turbo_freq = turbo_freq; 556 scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate); 557 cpu->pstate.scaling = scaling; 558 559 pr_debug("CPU%d: refined HWP-to-frequency scaling factor: %d\n", 560 cpu->cpu, scaling); 561 } 562 563 cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling, 564 perf_ctl_scaling); 565 566 cpu->pstate.max_pstate_physical = 567 DIV_ROUND_UP(perf_ctl_max_phys * perf_ctl_scaling, 568 scaling); 569 570 cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling; 571 /* 572 * Cast the min P-state value retrieved via pstate_funcs.get_min() to 573 * the effective range of HWP performance levels. 574 */ 575 cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling); 576 } 577 578 static inline void update_turbo_state(void) 579 { 580 u64 misc_en; 581 struct cpudata *cpu; 582 583 cpu = all_cpu_data[0]; 584 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); 585 global.turbo_disabled = 586 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || 587 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); 588 } 589 590 static int min_perf_pct_min(void) 591 { 592 struct cpudata *cpu = all_cpu_data[0]; 593 int turbo_pstate = cpu->pstate.turbo_pstate; 594 595 return turbo_pstate ? 596 (cpu->pstate.min_pstate * 100 / turbo_pstate) : 0; 597 } 598 599 static s16 intel_pstate_get_epb(struct cpudata *cpu_data) 600 { 601 u64 epb; 602 int ret; 603 604 if (!boot_cpu_has(X86_FEATURE_EPB)) 605 return -ENXIO; 606 607 ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); 608 if (ret) 609 return (s16)ret; 610 611 return (s16)(epb & 0x0f); 612 } 613 614 static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data) 615 { 616 s16 epp; 617 618 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { 619 /* 620 * When hwp_req_data is 0, means that caller didn't read 621 * MSR_HWP_REQUEST, so need to read and get EPP. 622 */ 623 if (!hwp_req_data) { 624 epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, 625 &hwp_req_data); 626 if (epp) 627 return epp; 628 } 629 epp = (hwp_req_data >> 24) & 0xff; 630 } else { 631 /* When there is no EPP present, HWP uses EPB settings */ 632 epp = intel_pstate_get_epb(cpu_data); 633 } 634 635 return epp; 636 } 637 638 static int intel_pstate_set_epb(int cpu, s16 pref) 639 { 640 u64 epb; 641 int ret; 642 643 if (!boot_cpu_has(X86_FEATURE_EPB)) 644 return -ENXIO; 645 646 ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); 647 if (ret) 648 return ret; 649 650 epb = (epb & ~0x0f) | pref; 651 wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb); 652 653 return 0; 654 } 655 656 /* 657 * EPP/EPB display strings corresponding to EPP index in the 658 * energy_perf_strings[] 659 * index String 660 *------------------------------------- 661 * 0 default 662 * 1 performance 663 * 2 balance_performance 664 * 3 balance_power 665 * 4 power 666 */ 667 668 enum energy_perf_value_index { 669 EPP_INDEX_DEFAULT = 0, 670 EPP_INDEX_PERFORMANCE, 671 EPP_INDEX_BALANCE_PERFORMANCE, 672 EPP_INDEX_BALANCE_POWERSAVE, 673 EPP_INDEX_POWERSAVE, 674 }; 675 676 static const char * const energy_perf_strings[] = { 677 [EPP_INDEX_DEFAULT] = "default", 678 [EPP_INDEX_PERFORMANCE] = "performance", 679 [EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance", 680 [EPP_INDEX_BALANCE_POWERSAVE] = "balance_power", 681 [EPP_INDEX_POWERSAVE] = "power", 682 NULL 683 }; 684 static unsigned int epp_values[] = { 685 [EPP_INDEX_DEFAULT] = 0, /* Unused index */ 686 [EPP_INDEX_PERFORMANCE] = HWP_EPP_PERFORMANCE, 687 [EPP_INDEX_BALANCE_PERFORMANCE] = HWP_EPP_BALANCE_PERFORMANCE, 688 [EPP_INDEX_BALANCE_POWERSAVE] = HWP_EPP_BALANCE_POWERSAVE, 689 [EPP_INDEX_POWERSAVE] = HWP_EPP_POWERSAVE, 690 }; 691 692 static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp) 693 { 694 s16 epp; 695 int index = -EINVAL; 696 697 *raw_epp = 0; 698 epp = intel_pstate_get_epp(cpu_data, 0); 699 if (epp < 0) 700 return epp; 701 702 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { 703 if (epp == epp_values[EPP_INDEX_PERFORMANCE]) 704 return EPP_INDEX_PERFORMANCE; 705 if (epp == epp_values[EPP_INDEX_BALANCE_PERFORMANCE]) 706 return EPP_INDEX_BALANCE_PERFORMANCE; 707 if (epp == epp_values[EPP_INDEX_BALANCE_POWERSAVE]) 708 return EPP_INDEX_BALANCE_POWERSAVE; 709 if (epp == epp_values[EPP_INDEX_POWERSAVE]) 710 return EPP_INDEX_POWERSAVE; 711 *raw_epp = epp; 712 return 0; 713 } else if (boot_cpu_has(X86_FEATURE_EPB)) { 714 /* 715 * Range: 716 * 0x00-0x03 : Performance 717 * 0x04-0x07 : Balance performance 718 * 0x08-0x0B : Balance power 719 * 0x0C-0x0F : Power 720 * The EPB is a 4 bit value, but our ranges restrict the 721 * value which can be set. Here only using top two bits 722 * effectively. 723 */ 724 index = (epp >> 2) + 1; 725 } 726 727 return index; 728 } 729 730 static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp) 731 { 732 int ret; 733 734 /* 735 * Use the cached HWP Request MSR value, because in the active mode the 736 * register itself may be updated by intel_pstate_hwp_boost_up() or 737 * intel_pstate_hwp_boost_down() at any time. 738 */ 739 u64 value = READ_ONCE(cpu->hwp_req_cached); 740 741 value &= ~GENMASK_ULL(31, 24); 742 value |= (u64)epp << 24; 743 /* 744 * The only other updater of hwp_req_cached in the active mode, 745 * intel_pstate_hwp_set(), is called under the same lock as this 746 * function, so it cannot run in parallel with the update below. 747 */ 748 WRITE_ONCE(cpu->hwp_req_cached, value); 749 ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); 750 if (!ret) 751 cpu->epp_cached = epp; 752 753 return ret; 754 } 755 756 static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data, 757 int pref_index, bool use_raw, 758 u32 raw_epp) 759 { 760 int epp = -EINVAL; 761 int ret; 762 763 if (!pref_index) 764 epp = cpu_data->epp_default; 765 766 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { 767 if (use_raw) 768 epp = raw_epp; 769 else if (epp == -EINVAL) 770 epp = epp_values[pref_index]; 771 772 /* 773 * To avoid confusion, refuse to set EPP to any values different 774 * from 0 (performance) if the current policy is "performance", 775 * because those values would be overridden. 776 */ 777 if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) 778 return -EBUSY; 779 780 ret = intel_pstate_set_epp(cpu_data, epp); 781 } else { 782 if (epp == -EINVAL) 783 epp = (pref_index - 1) << 2; 784 ret = intel_pstate_set_epb(cpu_data->cpu, epp); 785 } 786 787 return ret; 788 } 789 790 static ssize_t show_energy_performance_available_preferences( 791 struct cpufreq_policy *policy, char *buf) 792 { 793 int i = 0; 794 int ret = 0; 795 796 while (energy_perf_strings[i] != NULL) 797 ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]); 798 799 ret += sprintf(&buf[ret], "\n"); 800 801 return ret; 802 } 803 804 cpufreq_freq_attr_ro(energy_performance_available_preferences); 805 806 static struct cpufreq_driver intel_pstate; 807 808 static ssize_t store_energy_performance_preference( 809 struct cpufreq_policy *policy, const char *buf, size_t count) 810 { 811 struct cpudata *cpu = all_cpu_data[policy->cpu]; 812 char str_preference[21]; 813 bool raw = false; 814 ssize_t ret; 815 u32 epp = 0; 816 817 ret = sscanf(buf, "%20s", str_preference); 818 if (ret != 1) 819 return -EINVAL; 820 821 ret = match_string(energy_perf_strings, -1, str_preference); 822 if (ret < 0) { 823 if (!boot_cpu_has(X86_FEATURE_HWP_EPP)) 824 return ret; 825 826 ret = kstrtouint(buf, 10, &epp); 827 if (ret) 828 return ret; 829 830 if (epp > 255) 831 return -EINVAL; 832 833 raw = true; 834 } 835 836 /* 837 * This function runs with the policy R/W semaphore held, which 838 * guarantees that the driver pointer will not change while it is 839 * running. 840 */ 841 if (!intel_pstate_driver) 842 return -EAGAIN; 843 844 mutex_lock(&intel_pstate_limits_lock); 845 846 if (intel_pstate_driver == &intel_pstate) { 847 ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp); 848 } else { 849 /* 850 * In the passive mode the governor needs to be stopped on the 851 * target CPU before the EPP update and restarted after it, 852 * which is super-heavy-weight, so make sure it is worth doing 853 * upfront. 854 */ 855 if (!raw) 856 epp = ret ? epp_values[ret] : cpu->epp_default; 857 858 if (cpu->epp_cached != epp) { 859 int err; 860 861 cpufreq_stop_governor(policy); 862 ret = intel_pstate_set_epp(cpu, epp); 863 err = cpufreq_start_governor(policy); 864 if (!ret) 865 ret = err; 866 } 867 } 868 869 mutex_unlock(&intel_pstate_limits_lock); 870 871 return ret ?: count; 872 } 873 874 static ssize_t show_energy_performance_preference( 875 struct cpufreq_policy *policy, char *buf) 876 { 877 struct cpudata *cpu_data = all_cpu_data[policy->cpu]; 878 int preference, raw_epp; 879 880 preference = intel_pstate_get_energy_pref_index(cpu_data, &raw_epp); 881 if (preference < 0) 882 return preference; 883 884 if (raw_epp) 885 return sprintf(buf, "%d\n", raw_epp); 886 else 887 return sprintf(buf, "%s\n", energy_perf_strings[preference]); 888 } 889 890 cpufreq_freq_attr_rw(energy_performance_preference); 891 892 static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf) 893 { 894 struct cpudata *cpu = all_cpu_data[policy->cpu]; 895 int ratio, freq; 896 897 ratio = intel_pstate_get_cppc_guaranteed(policy->cpu); 898 if (ratio <= 0) { 899 u64 cap; 900 901 rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap); 902 ratio = HWP_GUARANTEED_PERF(cap); 903 } 904 905 freq = ratio * cpu->pstate.scaling; 906 if (cpu->pstate.scaling != cpu->pstate.perf_ctl_scaling) 907 freq = rounddown(freq, cpu->pstate.perf_ctl_scaling); 908 909 return sprintf(buf, "%d\n", freq); 910 } 911 912 cpufreq_freq_attr_ro(base_frequency); 913 914 static struct freq_attr *hwp_cpufreq_attrs[] = { 915 &energy_performance_preference, 916 &energy_performance_available_preferences, 917 &base_frequency, 918 NULL, 919 }; 920 921 static void __intel_pstate_get_hwp_cap(struct cpudata *cpu) 922 { 923 u64 cap; 924 925 rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap); 926 WRITE_ONCE(cpu->hwp_cap_cached, cap); 927 cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap); 928 cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap); 929 } 930 931 static void intel_pstate_get_hwp_cap(struct cpudata *cpu) 932 { 933 int scaling = cpu->pstate.scaling; 934 935 __intel_pstate_get_hwp_cap(cpu); 936 937 cpu->pstate.max_freq = cpu->pstate.max_pstate * scaling; 938 cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling; 939 if (scaling != cpu->pstate.perf_ctl_scaling) { 940 int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; 941 942 cpu->pstate.max_freq = rounddown(cpu->pstate.max_freq, 943 perf_ctl_scaling); 944 cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_freq, 945 perf_ctl_scaling); 946 } 947 } 948 949 static void intel_pstate_hwp_set(unsigned int cpu) 950 { 951 struct cpudata *cpu_data = all_cpu_data[cpu]; 952 int max, min; 953 u64 value; 954 s16 epp; 955 956 max = cpu_data->max_perf_ratio; 957 min = cpu_data->min_perf_ratio; 958 959 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) 960 min = max; 961 962 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); 963 964 value &= ~HWP_MIN_PERF(~0L); 965 value |= HWP_MIN_PERF(min); 966 967 value &= ~HWP_MAX_PERF(~0L); 968 value |= HWP_MAX_PERF(max); 969 970 if (cpu_data->epp_policy == cpu_data->policy) 971 goto skip_epp; 972 973 cpu_data->epp_policy = cpu_data->policy; 974 975 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) { 976 epp = intel_pstate_get_epp(cpu_data, value); 977 cpu_data->epp_powersave = epp; 978 /* If EPP read was failed, then don't try to write */ 979 if (epp < 0) 980 goto skip_epp; 981 982 epp = 0; 983 } else { 984 /* skip setting EPP, when saved value is invalid */ 985 if (cpu_data->epp_powersave < 0) 986 goto skip_epp; 987 988 /* 989 * No need to restore EPP when it is not zero. This 990 * means: 991 * - Policy is not changed 992 * - user has manually changed 993 * - Error reading EPB 994 */ 995 epp = intel_pstate_get_epp(cpu_data, value); 996 if (epp) 997 goto skip_epp; 998 999 epp = cpu_data->epp_powersave; 1000 } 1001 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { 1002 value &= ~GENMASK_ULL(31, 24); 1003 value |= (u64)epp << 24; 1004 } else { 1005 intel_pstate_set_epb(cpu, epp); 1006 } 1007 skip_epp: 1008 WRITE_ONCE(cpu_data->hwp_req_cached, value); 1009 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); 1010 } 1011 1012 static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata); 1013 1014 static void intel_pstate_hwp_offline(struct cpudata *cpu) 1015 { 1016 u64 value = READ_ONCE(cpu->hwp_req_cached); 1017 int min_perf; 1018 1019 intel_pstate_disable_hwp_interrupt(cpu); 1020 1021 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { 1022 /* 1023 * In case the EPP has been set to "performance" by the 1024 * active mode "performance" scaling algorithm, replace that 1025 * temporary value with the cached EPP one. 1026 */ 1027 value &= ~GENMASK_ULL(31, 24); 1028 value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached); 1029 /* 1030 * However, make sure that EPP will be set to "performance" when 1031 * the CPU is brought back online again and the "performance" 1032 * scaling algorithm is still in effect. 1033 */ 1034 cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN; 1035 } 1036 1037 /* 1038 * Clear the desired perf field in the cached HWP request value to 1039 * prevent nonzero desired values from being leaked into the active 1040 * mode. 1041 */ 1042 value &= ~HWP_DESIRED_PERF(~0L); 1043 WRITE_ONCE(cpu->hwp_req_cached, value); 1044 1045 value &= ~GENMASK_ULL(31, 0); 1046 min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached)); 1047 1048 /* Set hwp_max = hwp_min */ 1049 value |= HWP_MAX_PERF(min_perf); 1050 value |= HWP_MIN_PERF(min_perf); 1051 1052 /* Set EPP to min */ 1053 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) 1054 value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE); 1055 1056 wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); 1057 } 1058 1059 #define POWER_CTL_EE_ENABLE 1 1060 #define POWER_CTL_EE_DISABLE 2 1061 1062 static int power_ctl_ee_state; 1063 1064 static void set_power_ctl_ee_state(bool input) 1065 { 1066 u64 power_ctl; 1067 1068 mutex_lock(&intel_pstate_driver_lock); 1069 rdmsrl(MSR_IA32_POWER_CTL, power_ctl); 1070 if (input) { 1071 power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE); 1072 power_ctl_ee_state = POWER_CTL_EE_ENABLE; 1073 } else { 1074 power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE); 1075 power_ctl_ee_state = POWER_CTL_EE_DISABLE; 1076 } 1077 wrmsrl(MSR_IA32_POWER_CTL, power_ctl); 1078 mutex_unlock(&intel_pstate_driver_lock); 1079 } 1080 1081 static void intel_pstate_hwp_enable(struct cpudata *cpudata); 1082 1083 static void intel_pstate_hwp_reenable(struct cpudata *cpu) 1084 { 1085 intel_pstate_hwp_enable(cpu); 1086 wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached)); 1087 } 1088 1089 static int intel_pstate_suspend(struct cpufreq_policy *policy) 1090 { 1091 struct cpudata *cpu = all_cpu_data[policy->cpu]; 1092 1093 pr_debug("CPU %d suspending\n", cpu->cpu); 1094 1095 cpu->suspended = true; 1096 1097 /* disable HWP interrupt and cancel any pending work */ 1098 intel_pstate_disable_hwp_interrupt(cpu); 1099 1100 return 0; 1101 } 1102 1103 static int intel_pstate_resume(struct cpufreq_policy *policy) 1104 { 1105 struct cpudata *cpu = all_cpu_data[policy->cpu]; 1106 1107 pr_debug("CPU %d resuming\n", cpu->cpu); 1108 1109 /* Only restore if the system default is changed */ 1110 if (power_ctl_ee_state == POWER_CTL_EE_ENABLE) 1111 set_power_ctl_ee_state(true); 1112 else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE) 1113 set_power_ctl_ee_state(false); 1114 1115 if (cpu->suspended && hwp_active) { 1116 mutex_lock(&intel_pstate_limits_lock); 1117 1118 /* Re-enable HWP, because "online" has not done that. */ 1119 intel_pstate_hwp_reenable(cpu); 1120 1121 mutex_unlock(&intel_pstate_limits_lock); 1122 } 1123 1124 cpu->suspended = false; 1125 1126 return 0; 1127 } 1128 1129 static void intel_pstate_update_policies(void) 1130 { 1131 int cpu; 1132 1133 for_each_possible_cpu(cpu) 1134 cpufreq_update_policy(cpu); 1135 } 1136 1137 static void intel_pstate_update_max_freq(unsigned int cpu) 1138 { 1139 struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu); 1140 struct cpudata *cpudata; 1141 1142 if (!policy) 1143 return; 1144 1145 cpudata = all_cpu_data[cpu]; 1146 policy->cpuinfo.max_freq = global.turbo_disabled_mf ? 1147 cpudata->pstate.max_freq : cpudata->pstate.turbo_freq; 1148 1149 refresh_frequency_limits(policy); 1150 1151 cpufreq_cpu_release(policy); 1152 } 1153 1154 static void intel_pstate_update_limits(unsigned int cpu) 1155 { 1156 mutex_lock(&intel_pstate_driver_lock); 1157 1158 update_turbo_state(); 1159 /* 1160 * If turbo has been turned on or off globally, policy limits for 1161 * all CPUs need to be updated to reflect that. 1162 */ 1163 if (global.turbo_disabled_mf != global.turbo_disabled) { 1164 global.turbo_disabled_mf = global.turbo_disabled; 1165 arch_set_max_freq_ratio(global.turbo_disabled); 1166 for_each_possible_cpu(cpu) 1167 intel_pstate_update_max_freq(cpu); 1168 } else { 1169 cpufreq_update_policy(cpu); 1170 } 1171 1172 mutex_unlock(&intel_pstate_driver_lock); 1173 } 1174 1175 /************************** sysfs begin ************************/ 1176 #define show_one(file_name, object) \ 1177 static ssize_t show_##file_name \ 1178 (struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ 1179 { \ 1180 return sprintf(buf, "%u\n", global.object); \ 1181 } 1182 1183 static ssize_t intel_pstate_show_status(char *buf); 1184 static int intel_pstate_update_status(const char *buf, size_t size); 1185 1186 static ssize_t show_status(struct kobject *kobj, 1187 struct kobj_attribute *attr, char *buf) 1188 { 1189 ssize_t ret; 1190 1191 mutex_lock(&intel_pstate_driver_lock); 1192 ret = intel_pstate_show_status(buf); 1193 mutex_unlock(&intel_pstate_driver_lock); 1194 1195 return ret; 1196 } 1197 1198 static ssize_t store_status(struct kobject *a, struct kobj_attribute *b, 1199 const char *buf, size_t count) 1200 { 1201 char *p = memchr(buf, '\n', count); 1202 int ret; 1203 1204 mutex_lock(&intel_pstate_driver_lock); 1205 ret = intel_pstate_update_status(buf, p ? p - buf : count); 1206 mutex_unlock(&intel_pstate_driver_lock); 1207 1208 return ret < 0 ? ret : count; 1209 } 1210 1211 static ssize_t show_turbo_pct(struct kobject *kobj, 1212 struct kobj_attribute *attr, char *buf) 1213 { 1214 struct cpudata *cpu; 1215 int total, no_turbo, turbo_pct; 1216 uint32_t turbo_fp; 1217 1218 mutex_lock(&intel_pstate_driver_lock); 1219 1220 if (!intel_pstate_driver) { 1221 mutex_unlock(&intel_pstate_driver_lock); 1222 return -EAGAIN; 1223 } 1224 1225 cpu = all_cpu_data[0]; 1226 1227 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; 1228 no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1; 1229 turbo_fp = div_fp(no_turbo, total); 1230 turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100))); 1231 1232 mutex_unlock(&intel_pstate_driver_lock); 1233 1234 return sprintf(buf, "%u\n", turbo_pct); 1235 } 1236 1237 static ssize_t show_num_pstates(struct kobject *kobj, 1238 struct kobj_attribute *attr, char *buf) 1239 { 1240 struct cpudata *cpu; 1241 int total; 1242 1243 mutex_lock(&intel_pstate_driver_lock); 1244 1245 if (!intel_pstate_driver) { 1246 mutex_unlock(&intel_pstate_driver_lock); 1247 return -EAGAIN; 1248 } 1249 1250 cpu = all_cpu_data[0]; 1251 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; 1252 1253 mutex_unlock(&intel_pstate_driver_lock); 1254 1255 return sprintf(buf, "%u\n", total); 1256 } 1257 1258 static ssize_t show_no_turbo(struct kobject *kobj, 1259 struct kobj_attribute *attr, char *buf) 1260 { 1261 ssize_t ret; 1262 1263 mutex_lock(&intel_pstate_driver_lock); 1264 1265 if (!intel_pstate_driver) { 1266 mutex_unlock(&intel_pstate_driver_lock); 1267 return -EAGAIN; 1268 } 1269 1270 update_turbo_state(); 1271 if (global.turbo_disabled) 1272 ret = sprintf(buf, "%u\n", global.turbo_disabled); 1273 else 1274 ret = sprintf(buf, "%u\n", global.no_turbo); 1275 1276 mutex_unlock(&intel_pstate_driver_lock); 1277 1278 return ret; 1279 } 1280 1281 static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, 1282 const char *buf, size_t count) 1283 { 1284 unsigned int input; 1285 int ret; 1286 1287 ret = sscanf(buf, "%u", &input); 1288 if (ret != 1) 1289 return -EINVAL; 1290 1291 mutex_lock(&intel_pstate_driver_lock); 1292 1293 if (!intel_pstate_driver) { 1294 mutex_unlock(&intel_pstate_driver_lock); 1295 return -EAGAIN; 1296 } 1297 1298 mutex_lock(&intel_pstate_limits_lock); 1299 1300 update_turbo_state(); 1301 if (global.turbo_disabled) { 1302 pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n"); 1303 mutex_unlock(&intel_pstate_limits_lock); 1304 mutex_unlock(&intel_pstate_driver_lock); 1305 return -EPERM; 1306 } 1307 1308 global.no_turbo = clamp_t(int, input, 0, 1); 1309 1310 if (global.no_turbo) { 1311 struct cpudata *cpu = all_cpu_data[0]; 1312 int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate; 1313 1314 /* Squash the global minimum into the permitted range. */ 1315 if (global.min_perf_pct > pct) 1316 global.min_perf_pct = pct; 1317 } 1318 1319 mutex_unlock(&intel_pstate_limits_lock); 1320 1321 intel_pstate_update_policies(); 1322 1323 mutex_unlock(&intel_pstate_driver_lock); 1324 1325 return count; 1326 } 1327 1328 static void update_qos_request(enum freq_qos_req_type type) 1329 { 1330 struct freq_qos_request *req; 1331 struct cpufreq_policy *policy; 1332 int i; 1333 1334 for_each_possible_cpu(i) { 1335 struct cpudata *cpu = all_cpu_data[i]; 1336 unsigned int freq, perf_pct; 1337 1338 policy = cpufreq_cpu_get(i); 1339 if (!policy) 1340 continue; 1341 1342 req = policy->driver_data; 1343 cpufreq_cpu_put(policy); 1344 1345 if (!req) 1346 continue; 1347 1348 if (hwp_active) 1349 intel_pstate_get_hwp_cap(cpu); 1350 1351 if (type == FREQ_QOS_MIN) { 1352 perf_pct = global.min_perf_pct; 1353 } else { 1354 req++; 1355 perf_pct = global.max_perf_pct; 1356 } 1357 1358 freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100); 1359 1360 if (freq_qos_update_request(req, freq) < 0) 1361 pr_warn("Failed to update freq constraint: CPU%d\n", i); 1362 } 1363 } 1364 1365 static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b, 1366 const char *buf, size_t count) 1367 { 1368 unsigned int input; 1369 int ret; 1370 1371 ret = sscanf(buf, "%u", &input); 1372 if (ret != 1) 1373 return -EINVAL; 1374 1375 mutex_lock(&intel_pstate_driver_lock); 1376 1377 if (!intel_pstate_driver) { 1378 mutex_unlock(&intel_pstate_driver_lock); 1379 return -EAGAIN; 1380 } 1381 1382 mutex_lock(&intel_pstate_limits_lock); 1383 1384 global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100); 1385 1386 mutex_unlock(&intel_pstate_limits_lock); 1387 1388 if (intel_pstate_driver == &intel_pstate) 1389 intel_pstate_update_policies(); 1390 else 1391 update_qos_request(FREQ_QOS_MAX); 1392 1393 mutex_unlock(&intel_pstate_driver_lock); 1394 1395 return count; 1396 } 1397 1398 static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b, 1399 const char *buf, size_t count) 1400 { 1401 unsigned int input; 1402 int ret; 1403 1404 ret = sscanf(buf, "%u", &input); 1405 if (ret != 1) 1406 return -EINVAL; 1407 1408 mutex_lock(&intel_pstate_driver_lock); 1409 1410 if (!intel_pstate_driver) { 1411 mutex_unlock(&intel_pstate_driver_lock); 1412 return -EAGAIN; 1413 } 1414 1415 mutex_lock(&intel_pstate_limits_lock); 1416 1417 global.min_perf_pct = clamp_t(int, input, 1418 min_perf_pct_min(), global.max_perf_pct); 1419 1420 mutex_unlock(&intel_pstate_limits_lock); 1421 1422 if (intel_pstate_driver == &intel_pstate) 1423 intel_pstate_update_policies(); 1424 else 1425 update_qos_request(FREQ_QOS_MIN); 1426 1427 mutex_unlock(&intel_pstate_driver_lock); 1428 1429 return count; 1430 } 1431 1432 static ssize_t show_hwp_dynamic_boost(struct kobject *kobj, 1433 struct kobj_attribute *attr, char *buf) 1434 { 1435 return sprintf(buf, "%u\n", hwp_boost); 1436 } 1437 1438 static ssize_t store_hwp_dynamic_boost(struct kobject *a, 1439 struct kobj_attribute *b, 1440 const char *buf, size_t count) 1441 { 1442 unsigned int input; 1443 int ret; 1444 1445 ret = kstrtouint(buf, 10, &input); 1446 if (ret) 1447 return ret; 1448 1449 mutex_lock(&intel_pstate_driver_lock); 1450 hwp_boost = !!input; 1451 intel_pstate_update_policies(); 1452 mutex_unlock(&intel_pstate_driver_lock); 1453 1454 return count; 1455 } 1456 1457 static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr, 1458 char *buf) 1459 { 1460 u64 power_ctl; 1461 int enable; 1462 1463 rdmsrl(MSR_IA32_POWER_CTL, power_ctl); 1464 enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE)); 1465 return sprintf(buf, "%d\n", !enable); 1466 } 1467 1468 static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b, 1469 const char *buf, size_t count) 1470 { 1471 bool input; 1472 int ret; 1473 1474 ret = kstrtobool(buf, &input); 1475 if (ret) 1476 return ret; 1477 1478 set_power_ctl_ee_state(input); 1479 1480 return count; 1481 } 1482 1483 show_one(max_perf_pct, max_perf_pct); 1484 show_one(min_perf_pct, min_perf_pct); 1485 1486 define_one_global_rw(status); 1487 define_one_global_rw(no_turbo); 1488 define_one_global_rw(max_perf_pct); 1489 define_one_global_rw(min_perf_pct); 1490 define_one_global_ro(turbo_pct); 1491 define_one_global_ro(num_pstates); 1492 define_one_global_rw(hwp_dynamic_boost); 1493 define_one_global_rw(energy_efficiency); 1494 1495 static struct attribute *intel_pstate_attributes[] = { 1496 &status.attr, 1497 &no_turbo.attr, 1498 NULL 1499 }; 1500 1501 static const struct attribute_group intel_pstate_attr_group = { 1502 .attrs = intel_pstate_attributes, 1503 }; 1504 1505 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[]; 1506 1507 static struct kobject *intel_pstate_kobject; 1508 1509 static void __init intel_pstate_sysfs_expose_params(void) 1510 { 1511 int rc; 1512 1513 intel_pstate_kobject = kobject_create_and_add("intel_pstate", 1514 &cpu_subsys.dev_root->kobj); 1515 if (WARN_ON(!intel_pstate_kobject)) 1516 return; 1517 1518 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group); 1519 if (WARN_ON(rc)) 1520 return; 1521 1522 if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) { 1523 rc = sysfs_create_file(intel_pstate_kobject, &turbo_pct.attr); 1524 WARN_ON(rc); 1525 1526 rc = sysfs_create_file(intel_pstate_kobject, &num_pstates.attr); 1527 WARN_ON(rc); 1528 } 1529 1530 /* 1531 * If per cpu limits are enforced there are no global limits, so 1532 * return without creating max/min_perf_pct attributes 1533 */ 1534 if (per_cpu_limits) 1535 return; 1536 1537 rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr); 1538 WARN_ON(rc); 1539 1540 rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr); 1541 WARN_ON(rc); 1542 1543 if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) { 1544 rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr); 1545 WARN_ON(rc); 1546 } 1547 } 1548 1549 static void __init intel_pstate_sysfs_remove(void) 1550 { 1551 if (!intel_pstate_kobject) 1552 return; 1553 1554 sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group); 1555 1556 if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) { 1557 sysfs_remove_file(intel_pstate_kobject, &num_pstates.attr); 1558 sysfs_remove_file(intel_pstate_kobject, &turbo_pct.attr); 1559 } 1560 1561 if (!per_cpu_limits) { 1562 sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr); 1563 sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr); 1564 1565 if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) 1566 sysfs_remove_file(intel_pstate_kobject, &energy_efficiency.attr); 1567 } 1568 1569 kobject_put(intel_pstate_kobject); 1570 } 1571 1572 static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void) 1573 { 1574 int rc; 1575 1576 if (!hwp_active) 1577 return; 1578 1579 rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr); 1580 WARN_ON_ONCE(rc); 1581 } 1582 1583 static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void) 1584 { 1585 if (!hwp_active) 1586 return; 1587 1588 sysfs_remove_file(intel_pstate_kobject, &hwp_dynamic_boost.attr); 1589 } 1590 1591 /************************** sysfs end ************************/ 1592 1593 static void intel_pstate_notify_work(struct work_struct *work) 1594 { 1595 struct cpudata *cpudata = 1596 container_of(to_delayed_work(work), struct cpudata, hwp_notify_work); 1597 1598 cpufreq_update_policy(cpudata->cpu); 1599 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); 1600 } 1601 1602 static DEFINE_SPINLOCK(hwp_notify_lock); 1603 static cpumask_t hwp_intr_enable_mask; 1604 1605 void notify_hwp_interrupt(void) 1606 { 1607 unsigned int this_cpu = smp_processor_id(); 1608 struct cpudata *cpudata; 1609 unsigned long flags; 1610 u64 value; 1611 1612 if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) 1613 return; 1614 1615 rdmsrl_safe(MSR_HWP_STATUS, &value); 1616 if (!(value & 0x01)) 1617 return; 1618 1619 spin_lock_irqsave(&hwp_notify_lock, flags); 1620 1621 if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask)) 1622 goto ack_intr; 1623 1624 /* 1625 * Currently we never free all_cpu_data. And we can't reach here 1626 * without this allocated. But for safety for future changes, added 1627 * check. 1628 */ 1629 if (unlikely(!READ_ONCE(all_cpu_data))) 1630 goto ack_intr; 1631 1632 /* 1633 * The free is done during cleanup, when cpufreq registry is failed. 1634 * We wouldn't be here if it fails on init or switch status. But for 1635 * future changes, added check. 1636 */ 1637 cpudata = READ_ONCE(all_cpu_data[this_cpu]); 1638 if (unlikely(!cpudata)) 1639 goto ack_intr; 1640 1641 schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10)); 1642 1643 spin_unlock_irqrestore(&hwp_notify_lock, flags); 1644 1645 return; 1646 1647 ack_intr: 1648 wrmsrl_safe(MSR_HWP_STATUS, 0); 1649 spin_unlock_irqrestore(&hwp_notify_lock, flags); 1650 } 1651 1652 static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata) 1653 { 1654 unsigned long flags; 1655 1656 if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) 1657 return; 1658 1659 /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ 1660 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); 1661 1662 spin_lock_irqsave(&hwp_notify_lock, flags); 1663 if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask)) 1664 cancel_delayed_work(&cpudata->hwp_notify_work); 1665 spin_unlock_irqrestore(&hwp_notify_lock, flags); 1666 } 1667 1668 static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata) 1669 { 1670 /* Enable HWP notification interrupt for guaranteed performance change */ 1671 if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) { 1672 unsigned long flags; 1673 1674 spin_lock_irqsave(&hwp_notify_lock, flags); 1675 INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work); 1676 cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask); 1677 spin_unlock_irqrestore(&hwp_notify_lock, flags); 1678 1679 /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ 1680 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01); 1681 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); 1682 } 1683 } 1684 1685 static void intel_pstate_hwp_enable(struct cpudata *cpudata) 1686 { 1687 /* First disable HWP notification interrupt till we activate again */ 1688 if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) 1689 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); 1690 1691 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); 1692 1693 intel_pstate_enable_hwp_interrupt(cpudata); 1694 1695 if (cpudata->epp_default >= 0) 1696 return; 1697 1698 if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) { 1699 cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); 1700 } else { 1701 cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE]; 1702 intel_pstate_set_epp(cpudata, cpudata->epp_default); 1703 } 1704 } 1705 1706 static int atom_get_min_pstate(void) 1707 { 1708 u64 value; 1709 1710 rdmsrl(MSR_ATOM_CORE_RATIOS, value); 1711 return (value >> 8) & 0x7F; 1712 } 1713 1714 static int atom_get_max_pstate(void) 1715 { 1716 u64 value; 1717 1718 rdmsrl(MSR_ATOM_CORE_RATIOS, value); 1719 return (value >> 16) & 0x7F; 1720 } 1721 1722 static int atom_get_turbo_pstate(void) 1723 { 1724 u64 value; 1725 1726 rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value); 1727 return value & 0x7F; 1728 } 1729 1730 static u64 atom_get_val(struct cpudata *cpudata, int pstate) 1731 { 1732 u64 val; 1733 int32_t vid_fp; 1734 u32 vid; 1735 1736 val = (u64)pstate << 8; 1737 if (global.no_turbo && !global.turbo_disabled) 1738 val |= (u64)1 << 32; 1739 1740 vid_fp = cpudata->vid.min + mul_fp( 1741 int_tofp(pstate - cpudata->pstate.min_pstate), 1742 cpudata->vid.ratio); 1743 1744 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); 1745 vid = ceiling_fp(vid_fp); 1746 1747 if (pstate > cpudata->pstate.max_pstate) 1748 vid = cpudata->vid.turbo; 1749 1750 return val | vid; 1751 } 1752 1753 static int silvermont_get_scaling(void) 1754 { 1755 u64 value; 1756 int i; 1757 /* Defined in Table 35-6 from SDM (Sept 2015) */ 1758 static int silvermont_freq_table[] = { 1759 83300, 100000, 133300, 116700, 80000}; 1760 1761 rdmsrl(MSR_FSB_FREQ, value); 1762 i = value & 0x7; 1763 WARN_ON(i > 4); 1764 1765 return silvermont_freq_table[i]; 1766 } 1767 1768 static int airmont_get_scaling(void) 1769 { 1770 u64 value; 1771 int i; 1772 /* Defined in Table 35-10 from SDM (Sept 2015) */ 1773 static int airmont_freq_table[] = { 1774 83300, 100000, 133300, 116700, 80000, 1775 93300, 90000, 88900, 87500}; 1776 1777 rdmsrl(MSR_FSB_FREQ, value); 1778 i = value & 0xF; 1779 WARN_ON(i > 8); 1780 1781 return airmont_freq_table[i]; 1782 } 1783 1784 static void atom_get_vid(struct cpudata *cpudata) 1785 { 1786 u64 value; 1787 1788 rdmsrl(MSR_ATOM_CORE_VIDS, value); 1789 cpudata->vid.min = int_tofp((value >> 8) & 0x7f); 1790 cpudata->vid.max = int_tofp((value >> 16) & 0x7f); 1791 cpudata->vid.ratio = div_fp( 1792 cpudata->vid.max - cpudata->vid.min, 1793 int_tofp(cpudata->pstate.max_pstate - 1794 cpudata->pstate.min_pstate)); 1795 1796 rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value); 1797 cpudata->vid.turbo = value & 0x7f; 1798 } 1799 1800 static int core_get_min_pstate(void) 1801 { 1802 u64 value; 1803 1804 rdmsrl(MSR_PLATFORM_INFO, value); 1805 return (value >> 40) & 0xFF; 1806 } 1807 1808 static int core_get_max_pstate_physical(void) 1809 { 1810 u64 value; 1811 1812 rdmsrl(MSR_PLATFORM_INFO, value); 1813 return (value >> 8) & 0xFF; 1814 } 1815 1816 static int core_get_tdp_ratio(u64 plat_info) 1817 { 1818 /* Check how many TDP levels present */ 1819 if (plat_info & 0x600000000) { 1820 u64 tdp_ctrl; 1821 u64 tdp_ratio; 1822 int tdp_msr; 1823 int err; 1824 1825 /* Get the TDP level (0, 1, 2) to get ratios */ 1826 err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); 1827 if (err) 1828 return err; 1829 1830 /* TDP MSR are continuous starting at 0x648 */ 1831 tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03); 1832 err = rdmsrl_safe(tdp_msr, &tdp_ratio); 1833 if (err) 1834 return err; 1835 1836 /* For level 1 and 2, bits[23:16] contain the ratio */ 1837 if (tdp_ctrl & 0x03) 1838 tdp_ratio >>= 16; 1839 1840 tdp_ratio &= 0xff; /* ratios are only 8 bits long */ 1841 pr_debug("tdp_ratio %x\n", (int)tdp_ratio); 1842 1843 return (int)tdp_ratio; 1844 } 1845 1846 return -ENXIO; 1847 } 1848 1849 static int core_get_max_pstate(void) 1850 { 1851 u64 tar; 1852 u64 plat_info; 1853 int max_pstate; 1854 int tdp_ratio; 1855 int err; 1856 1857 rdmsrl(MSR_PLATFORM_INFO, plat_info); 1858 max_pstate = (plat_info >> 8) & 0xFF; 1859 1860 tdp_ratio = core_get_tdp_ratio(plat_info); 1861 if (tdp_ratio <= 0) 1862 return max_pstate; 1863 1864 if (hwp_active) { 1865 /* Turbo activation ratio is not used on HWP platforms */ 1866 return tdp_ratio; 1867 } 1868 1869 err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar); 1870 if (!err) { 1871 int tar_levels; 1872 1873 /* Do some sanity checking for safety */ 1874 tar_levels = tar & 0xff; 1875 if (tdp_ratio - 1 == tar_levels) { 1876 max_pstate = tar_levels; 1877 pr_debug("max_pstate=TAC %x\n", max_pstate); 1878 } 1879 } 1880 1881 return max_pstate; 1882 } 1883 1884 static int core_get_turbo_pstate(void) 1885 { 1886 u64 value; 1887 int nont, ret; 1888 1889 rdmsrl(MSR_TURBO_RATIO_LIMIT, value); 1890 nont = core_get_max_pstate(); 1891 ret = (value) & 255; 1892 if (ret <= nont) 1893 ret = nont; 1894 return ret; 1895 } 1896 1897 static inline int core_get_scaling(void) 1898 { 1899 return 100000; 1900 } 1901 1902 static u64 core_get_val(struct cpudata *cpudata, int pstate) 1903 { 1904 u64 val; 1905 1906 val = (u64)pstate << 8; 1907 if (global.no_turbo && !global.turbo_disabled) 1908 val |= (u64)1 << 32; 1909 1910 return val; 1911 } 1912 1913 static int knl_get_aperf_mperf_shift(void) 1914 { 1915 return 10; 1916 } 1917 1918 static int knl_get_turbo_pstate(void) 1919 { 1920 u64 value; 1921 int nont, ret; 1922 1923 rdmsrl(MSR_TURBO_RATIO_LIMIT, value); 1924 nont = core_get_max_pstate(); 1925 ret = (((value) >> 8) & 0xFF); 1926 if (ret <= nont) 1927 ret = nont; 1928 return ret; 1929 } 1930 1931 #ifdef CONFIG_ACPI_CPPC_LIB 1932 static u32 hybrid_ref_perf; 1933 1934 static int hybrid_get_cpu_scaling(int cpu) 1935 { 1936 return DIV_ROUND_UP(core_get_scaling() * hybrid_ref_perf, 1937 intel_pstate_cppc_nominal(cpu)); 1938 } 1939 1940 static void intel_pstate_cppc_set_cpu_scaling(void) 1941 { 1942 u32 min_nominal_perf = U32_MAX; 1943 int cpu; 1944 1945 for_each_present_cpu(cpu) { 1946 u32 nominal_perf = intel_pstate_cppc_nominal(cpu); 1947 1948 if (nominal_perf && nominal_perf < min_nominal_perf) 1949 min_nominal_perf = nominal_perf; 1950 } 1951 1952 if (min_nominal_perf < U32_MAX) { 1953 hybrid_ref_perf = min_nominal_perf; 1954 pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling; 1955 } 1956 } 1957 #else 1958 static inline void intel_pstate_cppc_set_cpu_scaling(void) 1959 { 1960 } 1961 #endif /* CONFIG_ACPI_CPPC_LIB */ 1962 1963 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) 1964 { 1965 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); 1966 cpu->pstate.current_pstate = pstate; 1967 /* 1968 * Generally, there is no guarantee that this code will always run on 1969 * the CPU being updated, so force the register update to run on the 1970 * right CPU. 1971 */ 1972 wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, 1973 pstate_funcs.get_val(cpu, pstate)); 1974 } 1975 1976 static void intel_pstate_set_min_pstate(struct cpudata *cpu) 1977 { 1978 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); 1979 } 1980 1981 static void intel_pstate_max_within_limits(struct cpudata *cpu) 1982 { 1983 int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); 1984 1985 update_turbo_state(); 1986 intel_pstate_set_pstate(cpu, pstate); 1987 } 1988 1989 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) 1990 { 1991 int perf_ctl_max_phys = pstate_funcs.get_max_physical(); 1992 int perf_ctl_scaling = pstate_funcs.get_scaling(); 1993 1994 cpu->pstate.min_pstate = pstate_funcs.get_min(); 1995 cpu->pstate.max_pstate_physical = perf_ctl_max_phys; 1996 cpu->pstate.perf_ctl_scaling = perf_ctl_scaling; 1997 1998 if (hwp_active && !hwp_mode_bdw) { 1999 __intel_pstate_get_hwp_cap(cpu); 2000 2001 if (pstate_funcs.get_cpu_scaling) { 2002 cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu); 2003 if (cpu->pstate.scaling != perf_ctl_scaling) 2004 intel_pstate_hybrid_hwp_adjust(cpu); 2005 } else { 2006 cpu->pstate.scaling = perf_ctl_scaling; 2007 } 2008 } else { 2009 cpu->pstate.scaling = perf_ctl_scaling; 2010 cpu->pstate.max_pstate = pstate_funcs.get_max(); 2011 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); 2012 } 2013 2014 if (cpu->pstate.scaling == perf_ctl_scaling) { 2015 cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling; 2016 cpu->pstate.max_freq = cpu->pstate.max_pstate * perf_ctl_scaling; 2017 cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * perf_ctl_scaling; 2018 } 2019 2020 if (pstate_funcs.get_aperf_mperf_shift) 2021 cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift(); 2022 2023 if (pstate_funcs.get_vid) 2024 pstate_funcs.get_vid(cpu); 2025 2026 intel_pstate_set_min_pstate(cpu); 2027 } 2028 2029 /* 2030 * Long hold time will keep high perf limits for long time, 2031 * which negatively impacts perf/watt for some workloads, 2032 * like specpower. 3ms is based on experiements on some 2033 * workoads. 2034 */ 2035 static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC; 2036 2037 static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu) 2038 { 2039 u64 hwp_req = READ_ONCE(cpu->hwp_req_cached); 2040 u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached); 2041 u32 max_limit = (hwp_req & 0xff00) >> 8; 2042 u32 min_limit = (hwp_req & 0xff); 2043 u32 boost_level1; 2044 2045 /* 2046 * Cases to consider (User changes via sysfs or boot time): 2047 * If, P0 (Turbo max) = P1 (Guaranteed max) = min: 2048 * No boost, return. 2049 * If, P0 (Turbo max) > P1 (Guaranteed max) = min: 2050 * Should result in one level boost only for P0. 2051 * If, P0 (Turbo max) = P1 (Guaranteed max) > min: 2052 * Should result in two level boost: 2053 * (min + p1)/2 and P1. 2054 * If, P0 (Turbo max) > P1 (Guaranteed max) > min: 2055 * Should result in three level boost: 2056 * (min + p1)/2, P1 and P0. 2057 */ 2058 2059 /* If max and min are equal or already at max, nothing to boost */ 2060 if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit) 2061 return; 2062 2063 if (!cpu->hwp_boost_min) 2064 cpu->hwp_boost_min = min_limit; 2065 2066 /* level at half way mark between min and guranteed */ 2067 boost_level1 = (HWP_GUARANTEED_PERF(hwp_cap) + min_limit) >> 1; 2068 2069 if (cpu->hwp_boost_min < boost_level1) 2070 cpu->hwp_boost_min = boost_level1; 2071 else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(hwp_cap)) 2072 cpu->hwp_boost_min = HWP_GUARANTEED_PERF(hwp_cap); 2073 else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(hwp_cap) && 2074 max_limit != HWP_GUARANTEED_PERF(hwp_cap)) 2075 cpu->hwp_boost_min = max_limit; 2076 else 2077 return; 2078 2079 hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min; 2080 wrmsrl(MSR_HWP_REQUEST, hwp_req); 2081 cpu->last_update = cpu->sample.time; 2082 } 2083 2084 static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu) 2085 { 2086 if (cpu->hwp_boost_min) { 2087 bool expired; 2088 2089 /* Check if we are idle for hold time to boost down */ 2090 expired = time_after64(cpu->sample.time, cpu->last_update + 2091 hwp_boost_hold_time_ns); 2092 if (expired) { 2093 wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached); 2094 cpu->hwp_boost_min = 0; 2095 } 2096 } 2097 cpu->last_update = cpu->sample.time; 2098 } 2099 2100 static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu, 2101 u64 time) 2102 { 2103 cpu->sample.time = time; 2104 2105 if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) { 2106 bool do_io = false; 2107 2108 cpu->sched_flags = 0; 2109 /* 2110 * Set iowait_boost flag and update time. Since IO WAIT flag 2111 * is set all the time, we can't just conclude that there is 2112 * some IO bound activity is scheduled on this CPU with just 2113 * one occurrence. If we receive at least two in two 2114 * consecutive ticks, then we treat as boost candidate. 2115 */ 2116 if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC)) 2117 do_io = true; 2118 2119 cpu->last_io_update = time; 2120 2121 if (do_io) 2122 intel_pstate_hwp_boost_up(cpu); 2123 2124 } else { 2125 intel_pstate_hwp_boost_down(cpu); 2126 } 2127 } 2128 2129 static inline void intel_pstate_update_util_hwp(struct update_util_data *data, 2130 u64 time, unsigned int flags) 2131 { 2132 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 2133 2134 cpu->sched_flags |= flags; 2135 2136 if (smp_processor_id() == cpu->cpu) 2137 intel_pstate_update_util_hwp_local(cpu, time); 2138 } 2139 2140 static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu) 2141 { 2142 struct sample *sample = &cpu->sample; 2143 2144 sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf); 2145 } 2146 2147 static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) 2148 { 2149 u64 aperf, mperf; 2150 unsigned long flags; 2151 u64 tsc; 2152 2153 local_irq_save(flags); 2154 rdmsrl(MSR_IA32_APERF, aperf); 2155 rdmsrl(MSR_IA32_MPERF, mperf); 2156 tsc = rdtsc(); 2157 if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) { 2158 local_irq_restore(flags); 2159 return false; 2160 } 2161 local_irq_restore(flags); 2162 2163 cpu->last_sample_time = cpu->sample.time; 2164 cpu->sample.time = time; 2165 cpu->sample.aperf = aperf; 2166 cpu->sample.mperf = mperf; 2167 cpu->sample.tsc = tsc; 2168 cpu->sample.aperf -= cpu->prev_aperf; 2169 cpu->sample.mperf -= cpu->prev_mperf; 2170 cpu->sample.tsc -= cpu->prev_tsc; 2171 2172 cpu->prev_aperf = aperf; 2173 cpu->prev_mperf = mperf; 2174 cpu->prev_tsc = tsc; 2175 /* 2176 * First time this function is invoked in a given cycle, all of the 2177 * previous sample data fields are equal to zero or stale and they must 2178 * be populated with meaningful numbers for things to work, so assume 2179 * that sample.time will always be reset before setting the utilization 2180 * update hook and make the caller skip the sample then. 2181 */ 2182 if (cpu->last_sample_time) { 2183 intel_pstate_calc_avg_perf(cpu); 2184 return true; 2185 } 2186 return false; 2187 } 2188 2189 static inline int32_t get_avg_frequency(struct cpudata *cpu) 2190 { 2191 return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz); 2192 } 2193 2194 static inline int32_t get_avg_pstate(struct cpudata *cpu) 2195 { 2196 return mul_ext_fp(cpu->pstate.max_pstate_physical, 2197 cpu->sample.core_avg_perf); 2198 } 2199 2200 static inline int32_t get_target_pstate(struct cpudata *cpu) 2201 { 2202 struct sample *sample = &cpu->sample; 2203 int32_t busy_frac; 2204 int target, avg_pstate; 2205 2206 busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift, 2207 sample->tsc); 2208 2209 if (busy_frac < cpu->iowait_boost) 2210 busy_frac = cpu->iowait_boost; 2211 2212 sample->busy_scaled = busy_frac * 100; 2213 2214 target = global.no_turbo || global.turbo_disabled ? 2215 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; 2216 target += target >> 2; 2217 target = mul_fp(target, busy_frac); 2218 if (target < cpu->pstate.min_pstate) 2219 target = cpu->pstate.min_pstate; 2220 2221 /* 2222 * If the average P-state during the previous cycle was higher than the 2223 * current target, add 50% of the difference to the target to reduce 2224 * possible performance oscillations and offset possible performance 2225 * loss related to moving the workload from one CPU to another within 2226 * a package/module. 2227 */ 2228 avg_pstate = get_avg_pstate(cpu); 2229 if (avg_pstate > target) 2230 target += (avg_pstate - target) >> 1; 2231 2232 return target; 2233 } 2234 2235 static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) 2236 { 2237 int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio); 2238 int max_pstate = max(min_pstate, cpu->max_perf_ratio); 2239 2240 return clamp_t(int, pstate, min_pstate, max_pstate); 2241 } 2242 2243 static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) 2244 { 2245 if (pstate == cpu->pstate.current_pstate) 2246 return; 2247 2248 cpu->pstate.current_pstate = pstate; 2249 wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); 2250 } 2251 2252 static void intel_pstate_adjust_pstate(struct cpudata *cpu) 2253 { 2254 int from = cpu->pstate.current_pstate; 2255 struct sample *sample; 2256 int target_pstate; 2257 2258 update_turbo_state(); 2259 2260 target_pstate = get_target_pstate(cpu); 2261 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); 2262 trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); 2263 intel_pstate_update_pstate(cpu, target_pstate); 2264 2265 sample = &cpu->sample; 2266 trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf), 2267 fp_toint(sample->busy_scaled), 2268 from, 2269 cpu->pstate.current_pstate, 2270 sample->mperf, 2271 sample->aperf, 2272 sample->tsc, 2273 get_avg_frequency(cpu), 2274 fp_toint(cpu->iowait_boost * 100)); 2275 } 2276 2277 static void intel_pstate_update_util(struct update_util_data *data, u64 time, 2278 unsigned int flags) 2279 { 2280 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 2281 u64 delta_ns; 2282 2283 /* Don't allow remote callbacks */ 2284 if (smp_processor_id() != cpu->cpu) 2285 return; 2286 2287 delta_ns = time - cpu->last_update; 2288 if (flags & SCHED_CPUFREQ_IOWAIT) { 2289 /* Start over if the CPU may have been idle. */ 2290 if (delta_ns > TICK_NSEC) { 2291 cpu->iowait_boost = ONE_EIGHTH_FP; 2292 } else if (cpu->iowait_boost >= ONE_EIGHTH_FP) { 2293 cpu->iowait_boost <<= 1; 2294 if (cpu->iowait_boost > int_tofp(1)) 2295 cpu->iowait_boost = int_tofp(1); 2296 } else { 2297 cpu->iowait_boost = ONE_EIGHTH_FP; 2298 } 2299 } else if (cpu->iowait_boost) { 2300 /* Clear iowait_boost if the CPU may have been idle. */ 2301 if (delta_ns > TICK_NSEC) 2302 cpu->iowait_boost = 0; 2303 else 2304 cpu->iowait_boost >>= 1; 2305 } 2306 cpu->last_update = time; 2307 delta_ns = time - cpu->sample.time; 2308 if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL) 2309 return; 2310 2311 if (intel_pstate_sample(cpu, time)) 2312 intel_pstate_adjust_pstate(cpu); 2313 } 2314 2315 static struct pstate_funcs core_funcs = { 2316 .get_max = core_get_max_pstate, 2317 .get_max_physical = core_get_max_pstate_physical, 2318 .get_min = core_get_min_pstate, 2319 .get_turbo = core_get_turbo_pstate, 2320 .get_scaling = core_get_scaling, 2321 .get_val = core_get_val, 2322 }; 2323 2324 static const struct pstate_funcs silvermont_funcs = { 2325 .get_max = atom_get_max_pstate, 2326 .get_max_physical = atom_get_max_pstate, 2327 .get_min = atom_get_min_pstate, 2328 .get_turbo = atom_get_turbo_pstate, 2329 .get_val = atom_get_val, 2330 .get_scaling = silvermont_get_scaling, 2331 .get_vid = atom_get_vid, 2332 }; 2333 2334 static const struct pstate_funcs airmont_funcs = { 2335 .get_max = atom_get_max_pstate, 2336 .get_max_physical = atom_get_max_pstate, 2337 .get_min = atom_get_min_pstate, 2338 .get_turbo = atom_get_turbo_pstate, 2339 .get_val = atom_get_val, 2340 .get_scaling = airmont_get_scaling, 2341 .get_vid = atom_get_vid, 2342 }; 2343 2344 static const struct pstate_funcs knl_funcs = { 2345 .get_max = core_get_max_pstate, 2346 .get_max_physical = core_get_max_pstate_physical, 2347 .get_min = core_get_min_pstate, 2348 .get_turbo = knl_get_turbo_pstate, 2349 .get_aperf_mperf_shift = knl_get_aperf_mperf_shift, 2350 .get_scaling = core_get_scaling, 2351 .get_val = core_get_val, 2352 }; 2353 2354 #define X86_MATCH(model, policy) \ 2355 X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ 2356 X86_FEATURE_APERFMPERF, &policy) 2357 2358 static const struct x86_cpu_id intel_pstate_cpu_ids[] = { 2359 X86_MATCH(SANDYBRIDGE, core_funcs), 2360 X86_MATCH(SANDYBRIDGE_X, core_funcs), 2361 X86_MATCH(ATOM_SILVERMONT, silvermont_funcs), 2362 X86_MATCH(IVYBRIDGE, core_funcs), 2363 X86_MATCH(HASWELL, core_funcs), 2364 X86_MATCH(BROADWELL, core_funcs), 2365 X86_MATCH(IVYBRIDGE_X, core_funcs), 2366 X86_MATCH(HASWELL_X, core_funcs), 2367 X86_MATCH(HASWELL_L, core_funcs), 2368 X86_MATCH(HASWELL_G, core_funcs), 2369 X86_MATCH(BROADWELL_G, core_funcs), 2370 X86_MATCH(ATOM_AIRMONT, airmont_funcs), 2371 X86_MATCH(SKYLAKE_L, core_funcs), 2372 X86_MATCH(BROADWELL_X, core_funcs), 2373 X86_MATCH(SKYLAKE, core_funcs), 2374 X86_MATCH(BROADWELL_D, core_funcs), 2375 X86_MATCH(XEON_PHI_KNL, knl_funcs), 2376 X86_MATCH(XEON_PHI_KNM, knl_funcs), 2377 X86_MATCH(ATOM_GOLDMONT, core_funcs), 2378 X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs), 2379 X86_MATCH(SKYLAKE_X, core_funcs), 2380 X86_MATCH(COMETLAKE, core_funcs), 2381 X86_MATCH(ICELAKE_X, core_funcs), 2382 {} 2383 }; 2384 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); 2385 2386 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { 2387 X86_MATCH(BROADWELL_D, core_funcs), 2388 X86_MATCH(BROADWELL_X, core_funcs), 2389 X86_MATCH(SKYLAKE_X, core_funcs), 2390 X86_MATCH(ICELAKE_X, core_funcs), 2391 {} 2392 }; 2393 2394 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { 2395 X86_MATCH(KABYLAKE, core_funcs), 2396 {} 2397 }; 2398 2399 static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = { 2400 X86_MATCH(SKYLAKE_X, core_funcs), 2401 X86_MATCH(SKYLAKE, core_funcs), 2402 {} 2403 }; 2404 2405 static int intel_pstate_init_cpu(unsigned int cpunum) 2406 { 2407 struct cpudata *cpu; 2408 2409 cpu = all_cpu_data[cpunum]; 2410 2411 if (!cpu) { 2412 cpu = kzalloc(sizeof(*cpu), GFP_KERNEL); 2413 if (!cpu) 2414 return -ENOMEM; 2415 2416 WRITE_ONCE(all_cpu_data[cpunum], cpu); 2417 2418 cpu->cpu = cpunum; 2419 2420 cpu->epp_default = -EINVAL; 2421 2422 if (hwp_active) { 2423 const struct x86_cpu_id *id; 2424 2425 intel_pstate_hwp_enable(cpu); 2426 2427 id = x86_match_cpu(intel_pstate_hwp_boost_ids); 2428 if (id && intel_pstate_acpi_pm_profile_server()) 2429 hwp_boost = true; 2430 } 2431 } else if (hwp_active) { 2432 /* 2433 * Re-enable HWP in case this happens after a resume from ACPI 2434 * S3 if the CPU was offline during the whole system/resume 2435 * cycle. 2436 */ 2437 intel_pstate_hwp_reenable(cpu); 2438 } 2439 2440 cpu->epp_powersave = -EINVAL; 2441 cpu->epp_policy = 0; 2442 2443 intel_pstate_get_cpu_pstates(cpu); 2444 2445 pr_debug("controlling: cpu %d\n", cpunum); 2446 2447 return 0; 2448 } 2449 2450 static void intel_pstate_set_update_util_hook(unsigned int cpu_num) 2451 { 2452 struct cpudata *cpu = all_cpu_data[cpu_num]; 2453 2454 if (hwp_active && !hwp_boost) 2455 return; 2456 2457 if (cpu->update_util_set) 2458 return; 2459 2460 /* Prevent intel_pstate_update_util() from using stale data. */ 2461 cpu->sample.time = 0; 2462 cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, 2463 (hwp_active ? 2464 intel_pstate_update_util_hwp : 2465 intel_pstate_update_util)); 2466 cpu->update_util_set = true; 2467 } 2468 2469 static void intel_pstate_clear_update_util_hook(unsigned int cpu) 2470 { 2471 struct cpudata *cpu_data = all_cpu_data[cpu]; 2472 2473 if (!cpu_data->update_util_set) 2474 return; 2475 2476 cpufreq_remove_update_util_hook(cpu); 2477 cpu_data->update_util_set = false; 2478 synchronize_rcu(); 2479 } 2480 2481 static int intel_pstate_get_max_freq(struct cpudata *cpu) 2482 { 2483 return global.turbo_disabled || global.no_turbo ? 2484 cpu->pstate.max_freq : cpu->pstate.turbo_freq; 2485 } 2486 2487 static void intel_pstate_update_perf_limits(struct cpudata *cpu, 2488 unsigned int policy_min, 2489 unsigned int policy_max) 2490 { 2491 int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; 2492 int32_t max_policy_perf, min_policy_perf; 2493 2494 max_policy_perf = policy_max / perf_ctl_scaling; 2495 if (policy_max == policy_min) { 2496 min_policy_perf = max_policy_perf; 2497 } else { 2498 min_policy_perf = policy_min / perf_ctl_scaling; 2499 min_policy_perf = clamp_t(int32_t, min_policy_perf, 2500 0, max_policy_perf); 2501 } 2502 2503 /* 2504 * HWP needs some special consideration, because HWP_REQUEST uses 2505 * abstract values to represent performance rather than pure ratios. 2506 */ 2507 if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) { 2508 int scaling = cpu->pstate.scaling; 2509 int freq; 2510 2511 freq = max_policy_perf * perf_ctl_scaling; 2512 max_policy_perf = DIV_ROUND_UP(freq, scaling); 2513 freq = min_policy_perf * perf_ctl_scaling; 2514 min_policy_perf = DIV_ROUND_UP(freq, scaling); 2515 } 2516 2517 pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n", 2518 cpu->cpu, min_policy_perf, max_policy_perf); 2519 2520 /* Normalize user input to [min_perf, max_perf] */ 2521 if (per_cpu_limits) { 2522 cpu->min_perf_ratio = min_policy_perf; 2523 cpu->max_perf_ratio = max_policy_perf; 2524 } else { 2525 int turbo_max = cpu->pstate.turbo_pstate; 2526 int32_t global_min, global_max; 2527 2528 /* Global limits are in percent of the maximum turbo P-state. */ 2529 global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100); 2530 global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100); 2531 global_min = clamp_t(int32_t, global_min, 0, global_max); 2532 2533 pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu, 2534 global_min, global_max); 2535 2536 cpu->min_perf_ratio = max(min_policy_perf, global_min); 2537 cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf); 2538 cpu->max_perf_ratio = min(max_policy_perf, global_max); 2539 cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio); 2540 2541 /* Make sure min_perf <= max_perf */ 2542 cpu->min_perf_ratio = min(cpu->min_perf_ratio, 2543 cpu->max_perf_ratio); 2544 2545 } 2546 pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu, 2547 cpu->max_perf_ratio, 2548 cpu->min_perf_ratio); 2549 } 2550 2551 static int intel_pstate_set_policy(struct cpufreq_policy *policy) 2552 { 2553 struct cpudata *cpu; 2554 2555 if (!policy->cpuinfo.max_freq) 2556 return -ENODEV; 2557 2558 pr_debug("set_policy cpuinfo.max %u policy->max %u\n", 2559 policy->cpuinfo.max_freq, policy->max); 2560 2561 cpu = all_cpu_data[policy->cpu]; 2562 cpu->policy = policy->policy; 2563 2564 mutex_lock(&intel_pstate_limits_lock); 2565 2566 intel_pstate_update_perf_limits(cpu, policy->min, policy->max); 2567 2568 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { 2569 /* 2570 * NOHZ_FULL CPUs need this as the governor callback may not 2571 * be invoked on them. 2572 */ 2573 intel_pstate_clear_update_util_hook(policy->cpu); 2574 intel_pstate_max_within_limits(cpu); 2575 } else { 2576 intel_pstate_set_update_util_hook(policy->cpu); 2577 } 2578 2579 if (hwp_active) { 2580 /* 2581 * When hwp_boost was active before and dynamically it 2582 * was turned off, in that case we need to clear the 2583 * update util hook. 2584 */ 2585 if (!hwp_boost) 2586 intel_pstate_clear_update_util_hook(policy->cpu); 2587 intel_pstate_hwp_set(policy->cpu); 2588 } 2589 2590 mutex_unlock(&intel_pstate_limits_lock); 2591 2592 return 0; 2593 } 2594 2595 static void intel_pstate_adjust_policy_max(struct cpudata *cpu, 2596 struct cpufreq_policy_data *policy) 2597 { 2598 if (!hwp_active && 2599 cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && 2600 policy->max < policy->cpuinfo.max_freq && 2601 policy->max > cpu->pstate.max_freq) { 2602 pr_debug("policy->max > max non turbo frequency\n"); 2603 policy->max = policy->cpuinfo.max_freq; 2604 } 2605 } 2606 2607 static void intel_pstate_verify_cpu_policy(struct cpudata *cpu, 2608 struct cpufreq_policy_data *policy) 2609 { 2610 int max_freq; 2611 2612 update_turbo_state(); 2613 if (hwp_active) { 2614 intel_pstate_get_hwp_cap(cpu); 2615 max_freq = global.no_turbo || global.turbo_disabled ? 2616 cpu->pstate.max_freq : cpu->pstate.turbo_freq; 2617 } else { 2618 max_freq = intel_pstate_get_max_freq(cpu); 2619 } 2620 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq); 2621 2622 intel_pstate_adjust_policy_max(cpu, policy); 2623 } 2624 2625 static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy) 2626 { 2627 intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy); 2628 2629 return 0; 2630 } 2631 2632 static int intel_cpufreq_cpu_offline(struct cpufreq_policy *policy) 2633 { 2634 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2635 2636 pr_debug("CPU %d going offline\n", cpu->cpu); 2637 2638 if (cpu->suspended) 2639 return 0; 2640 2641 /* 2642 * If the CPU is an SMT thread and it goes offline with the performance 2643 * settings different from the minimum, it will prevent its sibling 2644 * from getting to lower performance levels, so force the minimum 2645 * performance on CPU offline to prevent that from happening. 2646 */ 2647 if (hwp_active) 2648 intel_pstate_hwp_offline(cpu); 2649 else 2650 intel_pstate_set_min_pstate(cpu); 2651 2652 intel_pstate_exit_perf_limits(policy); 2653 2654 return 0; 2655 } 2656 2657 static int intel_pstate_cpu_online(struct cpufreq_policy *policy) 2658 { 2659 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2660 2661 pr_debug("CPU %d going online\n", cpu->cpu); 2662 2663 intel_pstate_init_acpi_perf_limits(policy); 2664 2665 if (hwp_active) { 2666 /* 2667 * Re-enable HWP and clear the "suspended" flag to let "resume" 2668 * know that it need not do that. 2669 */ 2670 intel_pstate_hwp_reenable(cpu); 2671 cpu->suspended = false; 2672 } 2673 2674 return 0; 2675 } 2676 2677 static int intel_pstate_cpu_offline(struct cpufreq_policy *policy) 2678 { 2679 intel_pstate_clear_update_util_hook(policy->cpu); 2680 2681 return intel_cpufreq_cpu_offline(policy); 2682 } 2683 2684 static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) 2685 { 2686 pr_debug("CPU %d exiting\n", policy->cpu); 2687 2688 policy->fast_switch_possible = false; 2689 2690 return 0; 2691 } 2692 2693 static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) 2694 { 2695 struct cpudata *cpu; 2696 int rc; 2697 2698 rc = intel_pstate_init_cpu(policy->cpu); 2699 if (rc) 2700 return rc; 2701 2702 cpu = all_cpu_data[policy->cpu]; 2703 2704 cpu->max_perf_ratio = 0xFF; 2705 cpu->min_perf_ratio = 0; 2706 2707 /* cpuinfo and default policy values */ 2708 policy->cpuinfo.min_freq = cpu->pstate.min_freq; 2709 update_turbo_state(); 2710 global.turbo_disabled_mf = global.turbo_disabled; 2711 policy->cpuinfo.max_freq = global.turbo_disabled ? 2712 cpu->pstate.max_freq : cpu->pstate.turbo_freq; 2713 2714 policy->min = policy->cpuinfo.min_freq; 2715 policy->max = policy->cpuinfo.max_freq; 2716 2717 intel_pstate_init_acpi_perf_limits(policy); 2718 2719 policy->fast_switch_possible = true; 2720 2721 return 0; 2722 } 2723 2724 static int intel_pstate_cpu_init(struct cpufreq_policy *policy) 2725 { 2726 int ret = __intel_pstate_cpu_init(policy); 2727 2728 if (ret) 2729 return ret; 2730 2731 /* 2732 * Set the policy to powersave to provide a valid fallback value in case 2733 * the default cpufreq governor is neither powersave nor performance. 2734 */ 2735 policy->policy = CPUFREQ_POLICY_POWERSAVE; 2736 2737 if (hwp_active) { 2738 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2739 2740 cpu->epp_cached = intel_pstate_get_epp(cpu, 0); 2741 } 2742 2743 return 0; 2744 } 2745 2746 static struct cpufreq_driver intel_pstate = { 2747 .flags = CPUFREQ_CONST_LOOPS, 2748 .verify = intel_pstate_verify_policy, 2749 .setpolicy = intel_pstate_set_policy, 2750 .suspend = intel_pstate_suspend, 2751 .resume = intel_pstate_resume, 2752 .init = intel_pstate_cpu_init, 2753 .exit = intel_pstate_cpu_exit, 2754 .offline = intel_pstate_cpu_offline, 2755 .online = intel_pstate_cpu_online, 2756 .update_limits = intel_pstate_update_limits, 2757 .name = "intel_pstate", 2758 }; 2759 2760 static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy) 2761 { 2762 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2763 2764 intel_pstate_verify_cpu_policy(cpu, policy); 2765 intel_pstate_update_perf_limits(cpu, policy->min, policy->max); 2766 2767 return 0; 2768 } 2769 2770 /* Use of trace in passive mode: 2771 * 2772 * In passive mode the trace core_busy field (also known as the 2773 * performance field, and lablelled as such on the graphs; also known as 2774 * core_avg_perf) is not needed and so is re-assigned to indicate if the 2775 * driver call was via the normal or fast switch path. Various graphs 2776 * output from the intel_pstate_tracer.py utility that include core_busy 2777 * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%, 2778 * so we use 10 to indicate the normal path through the driver, and 2779 * 90 to indicate the fast switch path through the driver. 2780 * The scaled_busy field is not used, and is set to 0. 2781 */ 2782 2783 #define INTEL_PSTATE_TRACE_TARGET 10 2784 #define INTEL_PSTATE_TRACE_FAST_SWITCH 90 2785 2786 static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate) 2787 { 2788 struct sample *sample; 2789 2790 if (!trace_pstate_sample_enabled()) 2791 return; 2792 2793 if (!intel_pstate_sample(cpu, ktime_get())) 2794 return; 2795 2796 sample = &cpu->sample; 2797 trace_pstate_sample(trace_type, 2798 0, 2799 old_pstate, 2800 cpu->pstate.current_pstate, 2801 sample->mperf, 2802 sample->aperf, 2803 sample->tsc, 2804 get_avg_frequency(cpu), 2805 fp_toint(cpu->iowait_boost * 100)); 2806 } 2807 2808 static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max, 2809 u32 desired, bool fast_switch) 2810 { 2811 u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev; 2812 2813 value &= ~HWP_MIN_PERF(~0L); 2814 value |= HWP_MIN_PERF(min); 2815 2816 value &= ~HWP_MAX_PERF(~0L); 2817 value |= HWP_MAX_PERF(max); 2818 2819 value &= ~HWP_DESIRED_PERF(~0L); 2820 value |= HWP_DESIRED_PERF(desired); 2821 2822 if (value == prev) 2823 return; 2824 2825 WRITE_ONCE(cpu->hwp_req_cached, value); 2826 if (fast_switch) 2827 wrmsrl(MSR_HWP_REQUEST, value); 2828 else 2829 wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); 2830 } 2831 2832 static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu, 2833 u32 target_pstate, bool fast_switch) 2834 { 2835 if (fast_switch) 2836 wrmsrl(MSR_IA32_PERF_CTL, 2837 pstate_funcs.get_val(cpu, target_pstate)); 2838 else 2839 wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, 2840 pstate_funcs.get_val(cpu, target_pstate)); 2841 } 2842 2843 static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy, 2844 int target_pstate, bool fast_switch) 2845 { 2846 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2847 int old_pstate = cpu->pstate.current_pstate; 2848 2849 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); 2850 if (hwp_active) { 2851 int max_pstate = policy->strict_target ? 2852 target_pstate : cpu->max_perf_ratio; 2853 2854 intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0, 2855 fast_switch); 2856 } else if (target_pstate != old_pstate) { 2857 intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch); 2858 } 2859 2860 cpu->pstate.current_pstate = target_pstate; 2861 2862 intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH : 2863 INTEL_PSTATE_TRACE_TARGET, old_pstate); 2864 2865 return target_pstate; 2866 } 2867 2868 static int intel_cpufreq_target(struct cpufreq_policy *policy, 2869 unsigned int target_freq, 2870 unsigned int relation) 2871 { 2872 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2873 struct cpufreq_freqs freqs; 2874 int target_pstate; 2875 2876 update_turbo_state(); 2877 2878 freqs.old = policy->cur; 2879 freqs.new = target_freq; 2880 2881 cpufreq_freq_transition_begin(policy, &freqs); 2882 2883 switch (relation) { 2884 case CPUFREQ_RELATION_L: 2885 target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling); 2886 break; 2887 case CPUFREQ_RELATION_H: 2888 target_pstate = freqs.new / cpu->pstate.scaling; 2889 break; 2890 default: 2891 target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling); 2892 break; 2893 } 2894 2895 target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false); 2896 2897 freqs.new = target_pstate * cpu->pstate.scaling; 2898 2899 cpufreq_freq_transition_end(policy, &freqs, false); 2900 2901 return 0; 2902 } 2903 2904 static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, 2905 unsigned int target_freq) 2906 { 2907 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2908 int target_pstate; 2909 2910 update_turbo_state(); 2911 2912 target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); 2913 2914 target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true); 2915 2916 return target_pstate * cpu->pstate.scaling; 2917 } 2918 2919 static void intel_cpufreq_adjust_perf(unsigned int cpunum, 2920 unsigned long min_perf, 2921 unsigned long target_perf, 2922 unsigned long capacity) 2923 { 2924 struct cpudata *cpu = all_cpu_data[cpunum]; 2925 u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached); 2926 int old_pstate = cpu->pstate.current_pstate; 2927 int cap_pstate, min_pstate, max_pstate, target_pstate; 2928 2929 update_turbo_state(); 2930 cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) : 2931 HWP_HIGHEST_PERF(hwp_cap); 2932 2933 /* Optimization: Avoid unnecessary divisions. */ 2934 2935 target_pstate = cap_pstate; 2936 if (target_perf < capacity) 2937 target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity); 2938 2939 min_pstate = cap_pstate; 2940 if (min_perf < capacity) 2941 min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity); 2942 2943 if (min_pstate < cpu->pstate.min_pstate) 2944 min_pstate = cpu->pstate.min_pstate; 2945 2946 if (min_pstate < cpu->min_perf_ratio) 2947 min_pstate = cpu->min_perf_ratio; 2948 2949 max_pstate = min(cap_pstate, cpu->max_perf_ratio); 2950 if (max_pstate < min_pstate) 2951 max_pstate = min_pstate; 2952 2953 target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate); 2954 2955 intel_cpufreq_hwp_update(cpu, min_pstate, max_pstate, target_pstate, true); 2956 2957 cpu->pstate.current_pstate = target_pstate; 2958 intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate); 2959 } 2960 2961 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) 2962 { 2963 struct freq_qos_request *req; 2964 struct cpudata *cpu; 2965 struct device *dev; 2966 int ret, freq; 2967 2968 dev = get_cpu_device(policy->cpu); 2969 if (!dev) 2970 return -ENODEV; 2971 2972 ret = __intel_pstate_cpu_init(policy); 2973 if (ret) 2974 return ret; 2975 2976 policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY; 2977 /* This reflects the intel_pstate_get_cpu_pstates() setting. */ 2978 policy->cur = policy->cpuinfo.min_freq; 2979 2980 req = kcalloc(2, sizeof(*req), GFP_KERNEL); 2981 if (!req) { 2982 ret = -ENOMEM; 2983 goto pstate_exit; 2984 } 2985 2986 cpu = all_cpu_data[policy->cpu]; 2987 2988 if (hwp_active) { 2989 u64 value; 2990 2991 policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP; 2992 2993 intel_pstate_get_hwp_cap(cpu); 2994 2995 rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value); 2996 WRITE_ONCE(cpu->hwp_req_cached, value); 2997 2998 cpu->epp_cached = intel_pstate_get_epp(cpu, value); 2999 } else { 3000 policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY; 3001 } 3002 3003 freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.min_perf_pct, 100); 3004 3005 ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN, 3006 freq); 3007 if (ret < 0) { 3008 dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret); 3009 goto free_req; 3010 } 3011 3012 freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.max_perf_pct, 100); 3013 3014 ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX, 3015 freq); 3016 if (ret < 0) { 3017 dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret); 3018 goto remove_min_req; 3019 } 3020 3021 policy->driver_data = req; 3022 3023 return 0; 3024 3025 remove_min_req: 3026 freq_qos_remove_request(req); 3027 free_req: 3028 kfree(req); 3029 pstate_exit: 3030 intel_pstate_exit_perf_limits(policy); 3031 3032 return ret; 3033 } 3034 3035 static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy) 3036 { 3037 struct freq_qos_request *req; 3038 3039 req = policy->driver_data; 3040 3041 freq_qos_remove_request(req + 1); 3042 freq_qos_remove_request(req); 3043 kfree(req); 3044 3045 return intel_pstate_cpu_exit(policy); 3046 } 3047 3048 static int intel_cpufreq_suspend(struct cpufreq_policy *policy) 3049 { 3050 intel_pstate_suspend(policy); 3051 3052 if (hwp_active) { 3053 struct cpudata *cpu = all_cpu_data[policy->cpu]; 3054 u64 value = READ_ONCE(cpu->hwp_req_cached); 3055 3056 /* 3057 * Clear the desired perf field in MSR_HWP_REQUEST in case 3058 * intel_cpufreq_adjust_perf() is in use and the last value 3059 * written by it may not be suitable. 3060 */ 3061 value &= ~HWP_DESIRED_PERF(~0L); 3062 wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); 3063 WRITE_ONCE(cpu->hwp_req_cached, value); 3064 } 3065 3066 return 0; 3067 } 3068 3069 static struct cpufreq_driver intel_cpufreq = { 3070 .flags = CPUFREQ_CONST_LOOPS, 3071 .verify = intel_cpufreq_verify_policy, 3072 .target = intel_cpufreq_target, 3073 .fast_switch = intel_cpufreq_fast_switch, 3074 .init = intel_cpufreq_cpu_init, 3075 .exit = intel_cpufreq_cpu_exit, 3076 .offline = intel_cpufreq_cpu_offline, 3077 .online = intel_pstate_cpu_online, 3078 .suspend = intel_cpufreq_suspend, 3079 .resume = intel_pstate_resume, 3080 .update_limits = intel_pstate_update_limits, 3081 .name = "intel_cpufreq", 3082 }; 3083 3084 static struct cpufreq_driver *default_driver; 3085 3086 static void intel_pstate_driver_cleanup(void) 3087 { 3088 unsigned int cpu; 3089 3090 cpus_read_lock(); 3091 for_each_online_cpu(cpu) { 3092 if (all_cpu_data[cpu]) { 3093 if (intel_pstate_driver == &intel_pstate) 3094 intel_pstate_clear_update_util_hook(cpu); 3095 3096 spin_lock(&hwp_notify_lock); 3097 kfree(all_cpu_data[cpu]); 3098 WRITE_ONCE(all_cpu_data[cpu], NULL); 3099 spin_unlock(&hwp_notify_lock); 3100 } 3101 } 3102 cpus_read_unlock(); 3103 3104 intel_pstate_driver = NULL; 3105 } 3106 3107 static int intel_pstate_register_driver(struct cpufreq_driver *driver) 3108 { 3109 int ret; 3110 3111 if (driver == &intel_pstate) 3112 intel_pstate_sysfs_expose_hwp_dynamic_boost(); 3113 3114 memset(&global, 0, sizeof(global)); 3115 global.max_perf_pct = 100; 3116 3117 intel_pstate_driver = driver; 3118 ret = cpufreq_register_driver(intel_pstate_driver); 3119 if (ret) { 3120 intel_pstate_driver_cleanup(); 3121 return ret; 3122 } 3123 3124 global.min_perf_pct = min_perf_pct_min(); 3125 3126 return 0; 3127 } 3128 3129 static ssize_t intel_pstate_show_status(char *buf) 3130 { 3131 if (!intel_pstate_driver) 3132 return sprintf(buf, "off\n"); 3133 3134 return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ? 3135 "active" : "passive"); 3136 } 3137 3138 static int intel_pstate_update_status(const char *buf, size_t size) 3139 { 3140 if (size == 3 && !strncmp(buf, "off", size)) { 3141 if (!intel_pstate_driver) 3142 return -EINVAL; 3143 3144 if (hwp_active) 3145 return -EBUSY; 3146 3147 cpufreq_unregister_driver(intel_pstate_driver); 3148 intel_pstate_driver_cleanup(); 3149 return 0; 3150 } 3151 3152 if (size == 6 && !strncmp(buf, "active", size)) { 3153 if (intel_pstate_driver) { 3154 if (intel_pstate_driver == &intel_pstate) 3155 return 0; 3156 3157 cpufreq_unregister_driver(intel_pstate_driver); 3158 } 3159 3160 return intel_pstate_register_driver(&intel_pstate); 3161 } 3162 3163 if (size == 7 && !strncmp(buf, "passive", size)) { 3164 if (intel_pstate_driver) { 3165 if (intel_pstate_driver == &intel_cpufreq) 3166 return 0; 3167 3168 cpufreq_unregister_driver(intel_pstate_driver); 3169 intel_pstate_sysfs_hide_hwp_dynamic_boost(); 3170 } 3171 3172 return intel_pstate_register_driver(&intel_cpufreq); 3173 } 3174 3175 return -EINVAL; 3176 } 3177 3178 static int no_load __initdata; 3179 static int no_hwp __initdata; 3180 static int hwp_only __initdata; 3181 static unsigned int force_load __initdata; 3182 3183 static int __init intel_pstate_msrs_not_valid(void) 3184 { 3185 if (!pstate_funcs.get_max() || 3186 !pstate_funcs.get_min() || 3187 !pstate_funcs.get_turbo()) 3188 return -ENODEV; 3189 3190 return 0; 3191 } 3192 3193 static void __init copy_cpu_funcs(struct pstate_funcs *funcs) 3194 { 3195 pstate_funcs.get_max = funcs->get_max; 3196 pstate_funcs.get_max_physical = funcs->get_max_physical; 3197 pstate_funcs.get_min = funcs->get_min; 3198 pstate_funcs.get_turbo = funcs->get_turbo; 3199 pstate_funcs.get_scaling = funcs->get_scaling; 3200 pstate_funcs.get_val = funcs->get_val; 3201 pstate_funcs.get_vid = funcs->get_vid; 3202 pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift; 3203 } 3204 3205 #ifdef CONFIG_ACPI 3206 3207 static bool __init intel_pstate_no_acpi_pss(void) 3208 { 3209 int i; 3210 3211 for_each_possible_cpu(i) { 3212 acpi_status status; 3213 union acpi_object *pss; 3214 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 3215 struct acpi_processor *pr = per_cpu(processors, i); 3216 3217 if (!pr) 3218 continue; 3219 3220 status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); 3221 if (ACPI_FAILURE(status)) 3222 continue; 3223 3224 pss = buffer.pointer; 3225 if (pss && pss->type == ACPI_TYPE_PACKAGE) { 3226 kfree(pss); 3227 return false; 3228 } 3229 3230 kfree(pss); 3231 } 3232 3233 pr_debug("ACPI _PSS not found\n"); 3234 return true; 3235 } 3236 3237 static bool __init intel_pstate_no_acpi_pcch(void) 3238 { 3239 acpi_status status; 3240 acpi_handle handle; 3241 3242 status = acpi_get_handle(NULL, "\\_SB", &handle); 3243 if (ACPI_FAILURE(status)) 3244 goto not_found; 3245 3246 if (acpi_has_method(handle, "PCCH")) 3247 return false; 3248 3249 not_found: 3250 pr_debug("ACPI PCCH not found\n"); 3251 return true; 3252 } 3253 3254 static bool __init intel_pstate_has_acpi_ppc(void) 3255 { 3256 int i; 3257 3258 for_each_possible_cpu(i) { 3259 struct acpi_processor *pr = per_cpu(processors, i); 3260 3261 if (!pr) 3262 continue; 3263 if (acpi_has_method(pr->handle, "_PPC")) 3264 return true; 3265 } 3266 pr_debug("ACPI _PPC not found\n"); 3267 return false; 3268 } 3269 3270 enum { 3271 PSS, 3272 PPC, 3273 }; 3274 3275 /* Hardware vendor-specific info that has its own power management modes */ 3276 static struct acpi_platform_list plat_info[] __initdata = { 3277 {"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS}, 3278 {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3279 {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3280 {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3281 {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3282 {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3283 {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3284 {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3285 {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3286 {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3287 {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3288 {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3289 {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3290 {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3291 {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3292 { } /* End */ 3293 }; 3294 3295 #define BITMASK_OOB (BIT(8) | BIT(18)) 3296 3297 static bool __init intel_pstate_platform_pwr_mgmt_exists(void) 3298 { 3299 const struct x86_cpu_id *id; 3300 u64 misc_pwr; 3301 int idx; 3302 3303 id = x86_match_cpu(intel_pstate_cpu_oob_ids); 3304 if (id) { 3305 rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); 3306 if (misc_pwr & BITMASK_OOB) { 3307 pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n"); 3308 pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n"); 3309 return true; 3310 } 3311 } 3312 3313 idx = acpi_match_platform_list(plat_info); 3314 if (idx < 0) 3315 return false; 3316 3317 switch (plat_info[idx].data) { 3318 case PSS: 3319 if (!intel_pstate_no_acpi_pss()) 3320 return false; 3321 3322 return intel_pstate_no_acpi_pcch(); 3323 case PPC: 3324 return intel_pstate_has_acpi_ppc() && !force_load; 3325 } 3326 3327 return false; 3328 } 3329 3330 static void intel_pstate_request_control_from_smm(void) 3331 { 3332 /* 3333 * It may be unsafe to request P-states control from SMM if _PPC support 3334 * has not been enabled. 3335 */ 3336 if (acpi_ppc) 3337 acpi_processor_pstate_control(); 3338 } 3339 #else /* CONFIG_ACPI not enabled */ 3340 static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; } 3341 static inline bool intel_pstate_has_acpi_ppc(void) { return false; } 3342 static inline void intel_pstate_request_control_from_smm(void) {} 3343 #endif /* CONFIG_ACPI */ 3344 3345 #define INTEL_PSTATE_HWP_BROADWELL 0x01 3346 3347 #define X86_MATCH_HWP(model, hwp_mode) \ 3348 X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ 3349 X86_FEATURE_HWP, hwp_mode) 3350 3351 static const struct x86_cpu_id hwp_support_ids[] __initconst = { 3352 X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), 3353 X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL), 3354 X86_MATCH_HWP(ANY, 0), 3355 {} 3356 }; 3357 3358 static bool intel_pstate_hwp_is_enabled(void) 3359 { 3360 u64 value; 3361 3362 rdmsrl(MSR_PM_ENABLE, value); 3363 return !!(value & 0x1); 3364 } 3365 3366 static const struct x86_cpu_id intel_epp_balance_perf[] = { 3367 /* 3368 * Set EPP value as 102, this is the max suggested EPP 3369 * which can result in one core turbo frequency for 3370 * AlderLake Mobile CPUs. 3371 */ 3372 X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 102), 3373 {} 3374 }; 3375 3376 static int __init intel_pstate_init(void) 3377 { 3378 static struct cpudata **_all_cpu_data; 3379 const struct x86_cpu_id *id; 3380 int rc; 3381 3382 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) 3383 return -ENODEV; 3384 3385 id = x86_match_cpu(hwp_support_ids); 3386 if (id) { 3387 bool hwp_forced = intel_pstate_hwp_is_enabled(); 3388 3389 if (hwp_forced) 3390 pr_info("HWP enabled by BIOS\n"); 3391 else if (no_load) 3392 return -ENODEV; 3393 3394 copy_cpu_funcs(&core_funcs); 3395 /* 3396 * Avoid enabling HWP for processors without EPP support, 3397 * because that means incomplete HWP implementation which is a 3398 * corner case and supporting it is generally problematic. 3399 * 3400 * If HWP is enabled already, though, there is no choice but to 3401 * deal with it. 3402 */ 3403 if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) { 3404 WRITE_ONCE(hwp_active, 1); 3405 hwp_mode_bdw = id->driver_data; 3406 intel_pstate.attr = hwp_cpufreq_attrs; 3407 intel_cpufreq.attr = hwp_cpufreq_attrs; 3408 intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS; 3409 intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf; 3410 if (!default_driver) 3411 default_driver = &intel_pstate; 3412 3413 if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) 3414 intel_pstate_cppc_set_cpu_scaling(); 3415 3416 goto hwp_cpu_matched; 3417 } 3418 pr_info("HWP not enabled\n"); 3419 } else { 3420 if (no_load) 3421 return -ENODEV; 3422 3423 id = x86_match_cpu(intel_pstate_cpu_ids); 3424 if (!id) { 3425 pr_info("CPU model not supported\n"); 3426 return -ENODEV; 3427 } 3428 3429 copy_cpu_funcs((struct pstate_funcs *)id->driver_data); 3430 } 3431 3432 if (intel_pstate_msrs_not_valid()) { 3433 pr_info("Invalid MSRs\n"); 3434 return -ENODEV; 3435 } 3436 /* Without HWP start in the passive mode. */ 3437 if (!default_driver) 3438 default_driver = &intel_cpufreq; 3439 3440 hwp_cpu_matched: 3441 /* 3442 * The Intel pstate driver will be ignored if the platform 3443 * firmware has its own power management modes. 3444 */ 3445 if (intel_pstate_platform_pwr_mgmt_exists()) { 3446 pr_info("P-states controlled by the platform\n"); 3447 return -ENODEV; 3448 } 3449 3450 if (!hwp_active && hwp_only) 3451 return -ENOTSUPP; 3452 3453 pr_info("Intel P-state driver initializing\n"); 3454 3455 _all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus())); 3456 if (!_all_cpu_data) 3457 return -ENOMEM; 3458 3459 WRITE_ONCE(all_cpu_data, _all_cpu_data); 3460 3461 intel_pstate_request_control_from_smm(); 3462 3463 intel_pstate_sysfs_expose_params(); 3464 3465 if (hwp_active) { 3466 const struct x86_cpu_id *id = x86_match_cpu(intel_epp_balance_perf); 3467 3468 if (id) 3469 epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = id->driver_data; 3470 } 3471 3472 mutex_lock(&intel_pstate_driver_lock); 3473 rc = intel_pstate_register_driver(default_driver); 3474 mutex_unlock(&intel_pstate_driver_lock); 3475 if (rc) { 3476 intel_pstate_sysfs_remove(); 3477 return rc; 3478 } 3479 3480 if (hwp_active) { 3481 const struct x86_cpu_id *id; 3482 3483 id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids); 3484 if (id) { 3485 set_power_ctl_ee_state(false); 3486 pr_info("Disabling energy efficiency optimization\n"); 3487 } 3488 3489 pr_info("HWP enabled\n"); 3490 } else if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) { 3491 pr_warn("Problematic setup: Hybrid processor with disabled HWP\n"); 3492 } 3493 3494 return 0; 3495 } 3496 device_initcall(intel_pstate_init); 3497 3498 static int __init intel_pstate_setup(char *str) 3499 { 3500 if (!str) 3501 return -EINVAL; 3502 3503 if (!strcmp(str, "disable")) 3504 no_load = 1; 3505 else if (!strcmp(str, "active")) 3506 default_driver = &intel_pstate; 3507 else if (!strcmp(str, "passive")) 3508 default_driver = &intel_cpufreq; 3509 3510 if (!strcmp(str, "no_hwp")) 3511 no_hwp = 1; 3512 3513 if (!strcmp(str, "force")) 3514 force_load = 1; 3515 if (!strcmp(str, "hwp_only")) 3516 hwp_only = 1; 3517 if (!strcmp(str, "per_cpu_perf_limits")) 3518 per_cpu_limits = true; 3519 3520 #ifdef CONFIG_ACPI 3521 if (!strcmp(str, "support_acpi_ppc")) 3522 acpi_ppc = true; 3523 #endif 3524 3525 return 0; 3526 } 3527 early_param("intel_pstate", intel_pstate_setup); 3528 3529 MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>"); 3530 MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors"); 3531 MODULE_LICENSE("GPL"); 3532