// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched/cpufreq.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <linux/pm_qos.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>

#define INTEL_PSTATE_SAMPLING_INTERVAL	(10 * NSEC_PER_MSEC)

#define INTEL_CPUFREQ_TRANSITION_LATENCY	20000
#define INTEL_CPUFREQ_TRANSITION_DELAY_HWP	5000
#define INTEL_CPUFREQ_TRANSITION_DELAY		500

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
#endif

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))

#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

static inline u64 mul_ext_fp(u64 x, u64 y)
{
	return (x * y) >> EXT_FRAC_BITS;
}

static inline u64 div_ext_fp(u64 x, u64 y)
{
	return div64_u64(x << EXT_FRAC_BITS, y);
}

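/*
 * Illustrative note (not in the original source): with FRAC_BITS == 8, one
 * fixed-point unit is 1/256, so for example (hypothetical values):
 *
 *	int_tofp(3)                      == 768
 *	mul_fp(int_tofp(3), int_tofp(2)) == int_tofp(6) == 1536
 *	div_fp(1, 8)                     == 32 == ONE_EIGHTH_FP
 *	ceiling_fp(int_tofp(2) + 1)      == 3
 *
 * The "ext" variants use EXT_FRAC_BITS == 14 fractional bits for the
 * higher-precision APERF/MPERF ratios computed further below.
 */
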
/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate next
 *			P state. This can be different from core_avg_perf
 *			to account for cpu idle period
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing next P State.
 */
struct sample {
	int32_t core_avg_perf;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	u64 time;
};

/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical: This is physical Max P state for a processor
 *			This can be higher than the max_pstate which can
 *			be limited by platform thermal design power limits
 * @perf_ctl_scaling:	PERF_CTL P-state to frequency scaling factor
 * @scaling:		Scaling factor between performance and frequency
 * @turbo_pstate:	Max Turbo P state possible for this platform
 * @min_freq:		@min_pstate frequency in cpufreq units
 * @max_freq:		@max_pstate frequency in cpufreq units
 * @turbo_freq:		@turbo_pstate frequency in cpufreq units
 *
 * Stores the per cpu model P state limits and current P state.
 */
struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	max_pstate_physical;
	int	perf_ctl_scaling;
	int	scaling;
	int	turbo_pstate;
	unsigned int min_freq;
	unsigned int max_freq;
	unsigned int turbo_freq;
};

/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P State.
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - Min P State)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling)
 * This data is used in Atom platforms, where in addition to target P state,
 * the voltage data needs to be specified to select next P State.
 */
struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

/**
 * struct global_params - Global parameters, mostly tunable via sysfs.
 * @no_turbo:		Whether or not to use turbo P-states.
 * @turbo_disabled:	Whether or not turbo P-states are available at all,
 *			based on the MSR_IA32_MISC_ENABLE value and whether or
 *			not the maximum reported turbo P-state is different from
 *			the maximum reported non-turbo one.
 * @turbo_disabled_mf:	The @turbo_disabled value reflected by cpuinfo.max_freq.
 * @min_perf_pct:	Minimum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 * @max_perf_pct:	Maximum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 */
struct global_params {
	bool no_turbo;
	bool turbo_disabled;
	bool turbo_disabled_mf;
	int max_perf_pct;
	int min_perf_pct;
};

/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @policy:		CPUFreq policy value
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @iowait_boost:	iowait-related boost fraction
 * @last_update:	Time of the last update.
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @last_sample_time:	Last Sample time
 * @aperf_mperf_shift:	APERF vs MPERF counting frequency difference
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: IO Wait time difference from last and
 *			current sample
 * @sample:		Storage for storing last Sample data
 * @min_perf_ratio:	Minimum capacity in terms of PERF or HWP ratios
 * @max_perf_ratio:	Maximum capacity in terms of PERF or HWP ratios
 * @acpi_perf_data:	Stores ACPI perf information read from _PSS
 * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
 * @epp_powersave:	Last saved HWP energy performance preference
 *			(EPP) or energy performance bias (EPB),
 *			when policy switched to performance
 * @epp_policy:		Last saved policy used to set EPP/EPB
 * @epp_default:	Power on default HWP energy performance
 *			preference/bias
 * @epp_cached:		Cached HWP energy-performance preference value
 * @hwp_req_cached:	Cached value of the last HWP Request MSR
 * @hwp_cap_cached:	Cached value of the last HWP Capabilities MSR
 * @last_io_update:	Last time when IO wake flag was set
 * @sched_flags:	Store scheduler flags for possible cross CPU update
 * @hwp_boost_min:	Last HWP boosted min performance
 * @suspended:		Whether or not the driver has been suspended.
 *
 * This structure stores per CPU instance data for all CPUs.
 */
struct cpudata {
	int cpu;

	unsigned int policy;
	struct update_util_data update_util;
	bool update_util_set;

	struct pstate_data pstate;
	struct vid_data vid;

	u64	last_update;
	u64	last_sample_time;
	u64	aperf_mperf_shift;
	u64	prev_aperf;
	u64	prev_mperf;
	u64	prev_tsc;
	u64	prev_cummulative_iowait;
	struct sample sample;
	int32_t	min_perf_ratio;
	int32_t	max_perf_ratio;
#ifdef CONFIG_ACPI
	struct acpi_processor_performance acpi_perf_data;
	bool valid_pss_table;
#endif
	unsigned int iowait_boost;
	s16 epp_powersave;
	s16 epp_policy;
	s16 epp_default;
	s16 epp_cached;
	u64 hwp_req_cached;
	u64 hwp_cap_cached;
	u64 last_io_update;
	unsigned int sched_flags;
	u32 hwp_boost_min;
	bool suspended;
};

static struct cpudata **all_cpu_data;

/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non turbo effective P state
 * @get_max_physical:	Callback to get maximum non turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_cpu_scaling:	Get frequency scaling factor for a given cpu
 * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 *
 * Core and Atom CPU models have different ways to get P State limits. This
 * structure is used to store those callbacks.
 */
struct pstate_funcs {
	int (*get_max)(int cpu);
	int (*get_max_physical)(int cpu);
	int (*get_min)(int cpu);
	int (*get_turbo)(int cpu);
	int (*get_scaling)(void);
	int (*get_cpu_scaling)(int cpu);
	int (*get_aperf_mperf_shift)(void);
	u64 (*get_val)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
};

static struct pstate_funcs pstate_funcs __read_mostly;

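/*
 * Illustrative sketch (not part of the original source): a Core-family
 * callback set would be wired up roughly like the per-model tables defined
 * later in this file, e.g.:
 *
 *	static const struct pstate_funcs core_funcs = {
 *		.get_max		= core_get_max_pstate,
 *		.get_max_physical	= core_get_max_pstate_physical,
 *		.get_min		= core_get_min_pstate,
 *		.get_turbo		= core_get_turbo_pstate,
 *		.get_scaling		= core_get_scaling,
 *		.get_val		= core_get_val,
 *	};
 *
 * The exact tables and the CPU models they are matched to live further down;
 * the snippet above only shows how the callbacks are meant to be filled in.
 */
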
static int hwp_active __read_mostly;
static int hwp_mode_bdw __read_mostly;
static bool per_cpu_limits __read_mostly;
static bool hwp_boost __read_mostly;

static struct cpufreq_driver *intel_pstate_driver __read_mostly;

#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif

static struct global_params global;

static DEFINE_MUTEX(intel_pstate_driver_lock);
static DEFINE_MUTEX(intel_pstate_limits_lock);

#ifdef CONFIG_ACPI

static bool intel_pstate_acpi_pm_profile_server(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
	    acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
		return true;

	return false;
}

static bool intel_pstate_get_ppc_enable_status(void)
{
	if (intel_pstate_acpi_pm_profile_server())
		return true;

	return acpi_ppc;
}

#ifdef CONFIG_ACPI_CPPC_LIB

/* The work item is needed to avoid CPU hotplug locking issues */
static void intel_pstate_sched_itmt_work_fn(struct work_struct *work)
{
	sched_set_itmt_support();
}

static DECLARE_WORK(sched_itmt_work, intel_pstate_sched_itmt_work_fn);

#define CPPC_MAX_PERF	U8_MAX

static void intel_pstate_set_itmt_prio(int cpu)
{
	struct cppc_perf_caps cppc_perf;
	static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
	int ret;

	ret = cppc_get_perf_caps(cpu, &cppc_perf);
	if (ret)
		return;

	/*
	 * On some systems with overclocking enabled, CPPC.highest_perf is
	 * hardcoded to 0xff. In that case we can't use CPPC.highest_perf to
	 * enable ITMT, so look at MSR_HWP_CAPABILITIES bits [8:0] instead.
	 */
	if (cppc_perf.highest_perf == CPPC_MAX_PERF)
		cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));

	/*
	 * The priorities can be set regardless of whether or not
	 * sched_set_itmt_support(true) has been called and it is valid to
	 * update them at any time after it has been called.
	 */
	sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);

	if (max_highest_perf <= min_highest_perf) {
		if (cppc_perf.highest_perf > max_highest_perf)
			max_highest_perf = cppc_perf.highest_perf;

		if (cppc_perf.highest_perf < min_highest_perf)
			min_highest_perf = cppc_perf.highest_perf;

		if (max_highest_perf > min_highest_perf) {
			/*
			 * This code can be run during CPU online under the
			 * CPU hotplug locks, so sched_set_itmt_support()
			 * cannot be called from here. Queue up a work item
			 * to invoke it.
			 */
			schedule_work(&sched_itmt_work);
		}
	}
}

static int intel_pstate_get_cppc_guaranteed(int cpu)
{
	struct cppc_perf_caps cppc_perf;
	int ret;

	ret = cppc_get_perf_caps(cpu, &cppc_perf);
	if (ret)
		return ret;

	if (cppc_perf.guaranteed_perf)
		return cppc_perf.guaranteed_perf;

	return cppc_perf.nominal_perf;
}

static u32 intel_pstate_cppc_nominal(int cpu)
{
	u64 nominal_perf;

	if (cppc_get_nominal_perf(cpu, &nominal_perf))
		return 0;

	return nominal_perf;
}
#else /* CONFIG_ACPI_CPPC_LIB */
static inline void intel_pstate_set_itmt_prio(int cpu)
{
}
#endif /* CONFIG_ACPI_CPPC_LIB */

static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int ret;
	int i;

	if (hwp_active) {
		intel_pstate_set_itmt_prio(policy->cpu);
		return;
	}

	if (!intel_pstate_get_ppc_enable_status())
		return;

	cpu = all_cpu_data[policy->cpu];

	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
						  policy->cpu);
	if (ret)
		return;

	/*
	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
	 * guarantee that the states returned by it map to the states in our
	 * list directly.
	 */
	if (cpu->acpi_perf_data.control_register.space_id !=
						ACPI_ADR_SPACE_FIXED_HARDWARE)
		goto err;

	/*
	 * If there is only one entry in _PSS, simply ignore _PSS and continue
	 * as usual without taking _PSS into account.
	 */
	if (cpu->acpi_perf_data.state_count < 2)
		goto err;

	pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
			 (u32) cpu->acpi_perf_data.states[i].power,
			 (u32) cpu->acpi_perf_data.states[i].control);
	}

	/*
	 * The _PSS table doesn't contain the whole turbo frequency range;
	 * it just contains +1 MHz above the max non-turbo frequency, with a
	 * control value corresponding to the max turbo ratio. But when
	 * cpufreq's set_policy is called with that max frequency, performance
	 * is reduced, because this driver uses the real max turbo frequency
	 * as the max frequency. So correct this entry in the _PSS table to
	 * the real max turbo frequency based on the turbo state.
	 * Also convert to MHz, as _PSS frequencies are in MHz.
	 */
	if (!global.turbo_disabled)
		cpu->acpi_perf_data.states[0].core_frequency =
					policy->cpuinfo.max_freq / 1000;
	cpu->valid_pss_table = true;
	pr_debug("_PPC limits will be enforced\n");

	return;

 err:
	cpu->valid_pss_table = false;
	acpi_processor_unregister_performance(policy->cpu);
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[policy->cpu];
	if (!cpu->valid_pss_table)
		return;

	acpi_processor_unregister_performance(policy->cpu);
}
#else /* CONFIG_ACPI */
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}

static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}

static inline bool intel_pstate_acpi_pm_profile_server(void)
{
	return false;
}
#endif /* CONFIG_ACPI */

#ifndef CONFIG_ACPI_CPPC_LIB
static inline int intel_pstate_get_cppc_guaranteed(int cpu)
{
	return -ENOTSUPP;
}
#endif /* CONFIG_ACPI_CPPC_LIB */

/**
 * intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels.
 * @cpu: Target CPU.
 *
 * On hybrid processors, HWP may expose more performance levels than there are
 * P-states accessible through the PERF_CTL interface. If that happens, the
 * scaling factor between HWP performance levels and CPU frequency will be less
 * than the scaling factor between P-state values and CPU frequency.
 *
 * In that case, adjust the CPU parameters used in computations accordingly.
 */
static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
{
	int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
	int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
	int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu);
	int turbo_freq = perf_ctl_turbo * perf_ctl_scaling;
	int scaling = cpu->pstate.scaling;

	pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
	pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, pstate_funcs.get_max(cpu->cpu));
	pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
	pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
	pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
	pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
	pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);

	/*
	 * If the product of the HWP performance scaling factor and the HWP_CAP
	 * highest performance is greater than the maximum turbo frequency
	 * corresponding to the pstate_funcs.get_turbo() return value, the
	 * scaling factor is too high, so recompute it to make the HWP_CAP
	 * highest performance correspond to the maximum turbo frequency.
	 */
	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
	if (turbo_freq < cpu->pstate.turbo_freq) {
		cpu->pstate.turbo_freq = turbo_freq;
		scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate);
		cpu->pstate.scaling = scaling;

		pr_debug("CPU%d: refined HWP-to-frequency scaling factor: %d\n",
			 cpu->cpu, scaling);
	}

	cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
					 perf_ctl_scaling);

	cpu->pstate.max_pstate_physical =
			DIV_ROUND_UP(perf_ctl_max_phys * perf_ctl_scaling,
				     scaling);

	cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
	/*
	 * Cast the min P-state value retrieved via pstate_funcs.get_min() to
	 * the effective range of HWP performance levels.
	 */
	cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling);
}

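/*
 * Illustrative example (hypothetical numbers, not from the original source):
 * assume HWP_CAP.highest (turbo_pstate) = 57 with an initial HWP scaling
 * factor of 80000, giving an HWP-based turbo frequency of 4,560,000 kHz.
 * If pstate_funcs.get_turbo() returns 45 with perf_ctl_scaling = 100000,
 * the PERF_CTL turbo frequency is 4,500,000 kHz, which is lower, so the
 * scaling factor is refined to DIV_ROUND_UP(4500000, 57) = 78948 and the
 * HWP turbo frequency is clamped to 4,500,000 kHz.
 */
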
static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	global.turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static int min_perf_pct_min(void)
{
	struct cpudata *cpu = all_cpu_data[0];
	int turbo_pstate = cpu->pstate.turbo_pstate;

	return turbo_pstate ?
		(cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
}

static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
{
	u64 epb;
	int ret;

	if (!boot_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return (s16)ret;

	return (s16)(epb & 0x0f);
}

static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
	s16 epp;

	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * When hwp_req_data is 0, it means the caller didn't read
		 * MSR_HWP_REQUEST, so read it here to get EPP.
		 */
		if (!hwp_req_data) {
			epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
					    &hwp_req_data);
			if (epp)
				return epp;
		}
		epp = (hwp_req_data >> 24) & 0xff;
	} else {
		/* When there is no EPP present, HWP uses EPB settings */
		epp = intel_pstate_get_epb(cpu_data);
	}

	return epp;
}

static int intel_pstate_set_epb(int cpu, s16 pref)
{
	u64 epb;
	int ret;

	if (!boot_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return ret;

	epb = (epb & ~0x0f) | pref;
	wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);

	return 0;
}

/*
 * EPP/EPB display strings corresponding to EPP index in the
 * energy_perf_strings[]
 *	index		String
 *-------------------------------------
 *	0		default
 *	1		performance
 *	2		balance_performance
 *	3		balance_power
 *	4		power
 */
static const char * const energy_perf_strings[] = {
	"default",
	"performance",
	"balance_performance",
	"balance_power",
	"power",
	NULL
};
static const unsigned int epp_values[] = {
	HWP_EPP_PERFORMANCE,
	HWP_EPP_BALANCE_PERFORMANCE,
	HWP_EPP_BALANCE_POWERSAVE,
	HWP_EPP_POWERSAVE
};

static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp)
{
	s16 epp;
	int index = -EINVAL;

	*raw_epp = 0;
	epp = intel_pstate_get_epp(cpu_data, 0);
	if (epp < 0)
		return epp;

	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		if (epp == HWP_EPP_PERFORMANCE)
			return 1;
		if (epp == HWP_EPP_BALANCE_PERFORMANCE)
			return 2;
		if (epp == HWP_EPP_BALANCE_POWERSAVE)
			return 3;
		if (epp == HWP_EPP_POWERSAVE)
			return 4;
		*raw_epp = epp;
		return 0;
	} else if (boot_cpu_has(X86_FEATURE_EPB)) {
		/*
		 * Range:
		 *	0x00-0x03	:	Performance
		 *	0x04-0x07	:	Balance performance
		 *	0x08-0x0B	:	Balance power
		 *	0x0C-0x0F	:	Power
		 * The EPB is a 4 bit value, but our ranges restrict the
		 * values that can be set; effectively only the top two bits
		 * are used here.
		 */
		index = (epp >> 2) + 1;
	}

	return index;
}

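/*
 * Illustrative example (not in the original source): on an EPB-only system,
 * index = (epp >> 2) + 1 maps the 4-bit EPB value onto the table above, e.g.
 * EPB 0 -> 1 ("performance"), EPB 6 -> 2 ("balance_performance"),
 * EPB 10 -> 3 ("balance_power"), EPB 15 -> 4 ("power").
 */
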
static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
{
	int ret;

	/*
	 * Use the cached HWP Request MSR value, because in the active mode the
	 * register itself may be updated by intel_pstate_hwp_boost_up() or
	 * intel_pstate_hwp_boost_down() at any time.
	 */
	u64 value = READ_ONCE(cpu->hwp_req_cached);

	value &= ~GENMASK_ULL(31, 24);
	value |= (u64)epp << 24;
	/*
	 * The only other updater of hwp_req_cached in the active mode,
	 * intel_pstate_hwp_set(), is called under the same lock as this
	 * function, so it cannot run in parallel with the update below.
	 */
	WRITE_ONCE(cpu->hwp_req_cached, value);
	ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
	if (!ret)
		cpu->epp_cached = epp;

	return ret;
}

static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
					      int pref_index, bool use_raw,
					      u32 raw_epp)
{
	int epp = -EINVAL;
	int ret;

	if (!pref_index)
		epp = cpu_data->epp_default;

	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		if (use_raw)
			epp = raw_epp;
		else if (epp == -EINVAL)
			epp = epp_values[pref_index - 1];

		/*
		 * To avoid confusion, refuse to set EPP to any values different
		 * from 0 (performance) if the current policy is "performance",
		 * because those values would be overridden.
		 */
		if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
			return -EBUSY;

		ret = intel_pstate_set_epp(cpu_data, epp);
	} else {
		if (epp == -EINVAL)
			epp = (pref_index - 1) << 2;
		ret = intel_pstate_set_epb(cpu_data->cpu, epp);
	}

	return ret;
}

static ssize_t show_energy_performance_available_preferences(
				struct cpufreq_policy *policy, char *buf)
{
	int i = 0;
	int ret = 0;

	while (energy_perf_strings[i] != NULL)
		ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);

	ret += sprintf(&buf[ret], "\n");

	return ret;
}

cpufreq_freq_attr_ro(energy_performance_available_preferences);

static struct cpufreq_driver intel_pstate;

static ssize_t store_energy_performance_preference(
		struct cpufreq_policy *policy, const char *buf, size_t count)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	char str_preference[21];
	bool raw = false;
	ssize_t ret;
	u32 epp = 0;

	ret = sscanf(buf, "%20s", str_preference);
	if (ret != 1)
		return -EINVAL;

	ret = match_string(energy_perf_strings, -1, str_preference);
	if (ret < 0) {
		if (!boot_cpu_has(X86_FEATURE_HWP_EPP))
			return ret;

		ret = kstrtouint(buf, 10, &epp);
		if (ret)
			return ret;

		if (epp > 255)
			return -EINVAL;

		raw = true;
	}

	/*
	 * This function runs with the policy R/W semaphore held, which
	 * guarantees that the driver pointer will not change while it is
	 * running.
	 */
	if (!intel_pstate_driver)
		return -EAGAIN;

	mutex_lock(&intel_pstate_limits_lock);

	if (intel_pstate_driver == &intel_pstate) {
		ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp);
	} else {
		/*
		 * In the passive mode the governor needs to be stopped on the
		 * target CPU before the EPP update and restarted after it,
		 * which is super-heavy-weight, so make sure it is worth doing
		 * upfront.
		 */
		if (!raw)
			epp = ret ? epp_values[ret - 1] : cpu->epp_default;

		if (cpu->epp_cached != epp) {
			int err;

			cpufreq_stop_governor(policy);
			ret = intel_pstate_set_epp(cpu, epp);
			err = cpufreq_start_governor(policy);
			if (!ret)
				ret = err;
		}
	}

	mutex_unlock(&intel_pstate_limits_lock);

	return ret ?: count;
}

static ssize_t show_energy_performance_preference(
				struct cpufreq_policy *policy, char *buf)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
	int preference, raw_epp;

	preference = intel_pstate_get_energy_pref_index(cpu_data, &raw_epp);
	if (preference < 0)
		return preference;

	if (raw_epp)
		return sprintf(buf, "%d\n", raw_epp);
	else
		return sprintf(buf, "%s\n", energy_perf_strings[preference]);
}

cpufreq_freq_attr_rw(energy_performance_preference);

static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int ratio, freq;

	ratio = intel_pstate_get_cppc_guaranteed(policy->cpu);
	if (ratio <= 0) {
		u64 cap;

		rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
		ratio = HWP_GUARANTEED_PERF(cap);
	}

	freq = ratio * cpu->pstate.scaling;
	if (cpu->pstate.scaling != cpu->pstate.perf_ctl_scaling)
		freq = rounddown(freq, cpu->pstate.perf_ctl_scaling);

	return sprintf(buf, "%d\n", freq);
}

cpufreq_freq_attr_ro(base_frequency);

static struct freq_attr *hwp_cpufreq_attrs[] = {
	&energy_performance_preference,
	&energy_performance_available_preferences,
	&base_frequency,
	NULL,
};

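/*
 * Usage sketch (assumes the standard cpufreq sysfs layout, not part of the
 * original source): with HWP active, the attributes above appear per policy,
 * e.g.:
 *
 *	# cat /sys/devices/system/cpu/cpufreq/policy0/energy_performance_available_preferences
 *	default performance balance_performance balance_power power
 *	# echo balance_power > /sys/devices/system/cpu/cpufreq/policy0/energy_performance_preference
 *
 * On systems with HWP EPP, a raw value in the 0-255 range may also be written,
 * as handled by store_energy_performance_preference() above.
 */
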
static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
	u64 cap;

	rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
	WRITE_ONCE(cpu->hwp_cap_cached, cap);
	cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap);
	cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap);
}

static void intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
	int scaling = cpu->pstate.scaling;

	__intel_pstate_get_hwp_cap(cpu);

	cpu->pstate.max_freq = cpu->pstate.max_pstate * scaling;
	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
	if (scaling != cpu->pstate.perf_ctl_scaling) {
		int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;

		cpu->pstate.max_freq = rounddown(cpu->pstate.max_freq,
						 perf_ctl_scaling);
		cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_freq,
						   perf_ctl_scaling);
	}
}

static void intel_pstate_hwp_set(unsigned int cpu)
{
	struct cpudata *cpu_data = all_cpu_data[cpu];
	int max, min;
	u64 value;
	s16 epp;

	max = cpu_data->max_perf_ratio;
	min = cpu_data->min_perf_ratio;

	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
		min = max;

	rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);

	value &= ~HWP_MIN_PERF(~0L);
	value |= HWP_MIN_PERF(min);

	value &= ~HWP_MAX_PERF(~0L);
	value |= HWP_MAX_PERF(max);

	if (cpu_data->epp_policy == cpu_data->policy)
		goto skip_epp;

	cpu_data->epp_policy = cpu_data->policy;

	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
		epp = intel_pstate_get_epp(cpu_data, value);
		cpu_data->epp_powersave = epp;
		/* If the EPP read failed, don't try to write. */
		if (epp < 0)
			goto skip_epp;

		epp = 0;
	} else {
		/* Skip setting EPP when the saved value is invalid. */
		if (cpu_data->epp_powersave < 0)
			goto skip_epp;

		/*
		 * No need to restore EPP when it is not zero. This
		 * means one of the following:
		 * - the policy is not changed,
		 * - the user has changed it manually,
		 * - there was an error reading EPB.
		 */
		epp = intel_pstate_get_epp(cpu_data, value);
		if (epp)
			goto skip_epp;

		epp = cpu_data->epp_powersave;
	}
	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		value &= ~GENMASK_ULL(31, 24);
		value |= (u64)epp << 24;
	} else {
		intel_pstate_set_epb(cpu, epp);
	}
skip_epp:
	WRITE_ONCE(cpu_data->hwp_req_cached, value);
	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}

static void intel_pstate_hwp_offline(struct cpudata *cpu)
{
	u64 value = READ_ONCE(cpu->hwp_req_cached);
	int min_perf;

	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * In case the EPP has been set to "performance" by the
		 * active mode "performance" scaling algorithm, replace that
		 * temporary value with the cached EPP one.
		 */
		value &= ~GENMASK_ULL(31, 24);
		value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
		/*
		 * However, make sure that EPP will be set to "performance" when
		 * the CPU is brought back online again and the "performance"
		 * scaling algorithm is still in effect.
		 */
		cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
	}

	/*
	 * Clear the desired perf field in the cached HWP request value to
	 * prevent nonzero desired values from being leaked into the active
	 * mode.
	 */
	value &= ~HWP_DESIRED_PERF(~0L);
	WRITE_ONCE(cpu->hwp_req_cached, value);

	value &= ~GENMASK_ULL(31, 0);
	min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached));

	/* Set hwp_max = hwp_min */
	value |= HWP_MAX_PERF(min_perf);
	value |= HWP_MIN_PERF(min_perf);

	/* Set EPP to min */
	if (boot_cpu_has(X86_FEATURE_HWP_EPP))
		value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);

	wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}

#define POWER_CTL_EE_ENABLE	1
#define POWER_CTL_EE_DISABLE	2

static int power_ctl_ee_state;

static void set_power_ctl_ee_state(bool input)
{
	u64 power_ctl;

	mutex_lock(&intel_pstate_driver_lock);
	rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
	if (input) {
		power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
		power_ctl_ee_state = POWER_CTL_EE_ENABLE;
	} else {
		power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
		power_ctl_ee_state = POWER_CTL_EE_DISABLE;
	}
	wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
	mutex_unlock(&intel_pstate_driver_lock);
}

static void intel_pstate_hwp_enable(struct cpudata *cpudata);

static void intel_pstate_hwp_reenable(struct cpudata *cpu)
{
	intel_pstate_hwp_enable(cpu);
	wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
}

static int intel_pstate_suspend(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	pr_debug("CPU %d suspending\n", cpu->cpu);

	cpu->suspended = true;

	return 0;
}

static int intel_pstate_resume(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	pr_debug("CPU %d resuming\n", cpu->cpu);

	/* Only restore if the system default is changed */
	if (power_ctl_ee_state == POWER_CTL_EE_ENABLE)
		set_power_ctl_ee_state(true);
	else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE)
		set_power_ctl_ee_state(false);

	if (cpu->suspended && hwp_active) {
		mutex_lock(&intel_pstate_limits_lock);

		/* Re-enable HWP, because "online" has not done that. */
		intel_pstate_hwp_reenable(cpu);

		mutex_unlock(&intel_pstate_limits_lock);
	}

	cpu->suspended = false;

	return 0;
}

static void intel_pstate_update_policies(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpufreq_update_policy(cpu);
}

static void intel_pstate_update_max_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
	struct cpudata *cpudata;

	if (!policy)
		return;

	cpudata = all_cpu_data[cpu];
	policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
			cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;

	refresh_frequency_limits(policy);

	cpufreq_cpu_release(policy);
}

static void intel_pstate_update_limits(unsigned int cpu)
{
	mutex_lock(&intel_pstate_driver_lock);

	update_turbo_state();
	/*
	 * If turbo has been turned on or off globally, policy limits for
	 * all CPUs need to be updated to reflect that.
	 */
	if (global.turbo_disabled_mf != global.turbo_disabled) {
		global.turbo_disabled_mf = global.turbo_disabled;
		arch_set_max_freq_ratio(global.turbo_disabled);
		for_each_possible_cpu(cpu)
			intel_pstate_update_max_freq(cpu);
	} else {
		cpufreq_update_policy(cpu);
	}

	mutex_unlock(&intel_pstate_driver_lock);
}

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct kobj_attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", global.object);		\
	}

static ssize_t intel_pstate_show_status(char *buf);
static int intel_pstate_update_status(const char *buf, size_t size);

static ssize_t show_status(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	ssize_t ret;

	mutex_lock(&intel_pstate_driver_lock);
	ret = intel_pstate_show_status(buf);
	mutex_unlock(&intel_pstate_driver_lock);

	return ret;
}

static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
			    const char *buf, size_t count)
{
	char *p = memchr(buf, '\n', count);
	int ret;

	mutex_lock(&intel_pstate_driver_lock);
	ret = intel_pstate_update_status(buf, p ? p - buf : count);
	mutex_unlock(&intel_pstate_driver_lock);

	return ret < 0 ? ret : count;
}

static ssize_t show_turbo_pct(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(no_turbo, total);
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));

	mutex_unlock(&intel_pstate_driver_lock);

	return sprintf(buf, "%u\n", turbo_pct);
}

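/*
 * Illustrative example (hypothetical numbers, not from the original source):
 * with min_pstate = 8, max_pstate = 36 and turbo_pstate = 45, the driver sees
 * total = 38 P-states of which no_turbo = 29 are non-turbo, so
 * turbo_pct = 100 - fp_toint(mul_fp(div_fp(29, 38), int_tofp(100))) = 24,
 * i.e. roughly a quarter of the P-state range is turbo-only.
 */
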
static ssize_t show_num_pstates(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;

	mutex_unlock(&intel_pstate_driver_lock);

	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct kobj_attribute *attr, char *buf)
{
	ssize_t ret;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	update_turbo_state();
	if (global.turbo_disabled)
		ret = sprintf(buf, "%u\n", global.turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", global.no_turbo);

	mutex_unlock(&intel_pstate_driver_lock);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	mutex_lock(&intel_pstate_limits_lock);

	update_turbo_state();
	if (global.turbo_disabled) {
		pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
		mutex_unlock(&intel_pstate_limits_lock);
		mutex_unlock(&intel_pstate_driver_lock);
		return -EPERM;
	}

	global.no_turbo = clamp_t(int, input, 0, 1);

	if (global.no_turbo) {
		struct cpudata *cpu = all_cpu_data[0];
		int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;

		/* Squash the global minimum into the permitted range. */
		if (global.min_perf_pct > pct)
			global.min_perf_pct = pct;
	}

	mutex_unlock(&intel_pstate_limits_lock);

	intel_pstate_update_policies();

	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}

static void update_qos_request(enum freq_qos_req_type type)
{
	struct freq_qos_request *req;
	struct cpufreq_policy *policy;
	int i;

	for_each_possible_cpu(i) {
		struct cpudata *cpu = all_cpu_data[i];
		unsigned int freq, perf_pct;

		policy = cpufreq_cpu_get(i);
		if (!policy)
			continue;

		req = policy->driver_data;
		cpufreq_cpu_put(policy);

		if (!req)
			continue;

		if (hwp_active)
			intel_pstate_get_hwp_cap(cpu);

		if (type == FREQ_QOS_MIN) {
			perf_pct = global.min_perf_pct;
		} else {
			req++;
			perf_pct = global.max_perf_pct;
		}

		freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100);

		if (freq_qos_update_request(req, freq) < 0)
			pr_warn("Failed to update freq constraint: CPU%d\n", i);
	}
}

static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	mutex_lock(&intel_pstate_limits_lock);

	global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100);

	mutex_unlock(&intel_pstate_limits_lock);

	if (intel_pstate_driver == &intel_pstate)
		intel_pstate_update_policies();
	else
		update_qos_request(FREQ_QOS_MAX);

	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	mutex_lock(&intel_pstate_limits_lock);

	global.min_perf_pct = clamp_t(int, input,
				      min_perf_pct_min(), global.max_perf_pct);

	mutex_unlock(&intel_pstate_limits_lock);

	if (intel_pstate_driver == &intel_pstate)
		intel_pstate_update_policies();
	else
		update_qos_request(FREQ_QOS_MIN);

	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}

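/*
 * Usage sketch (assumes the usual intel_pstate sysfs location, not part of
 * the original source): when per_cpu_limits is not set, the global limits
 * handled by the store callbacks above are exposed under
 * /sys/devices/system/cpu/intel_pstate/, e.g.:
 *
 *	# echo 75 > /sys/devices/system/cpu/intel_pstate/max_perf_pct
 *	# echo 25 > /sys/devices/system/cpu/intel_pstate/min_perf_pct
 *
 * Both values are percentages of the maximum turbo P-state capacity and are
 * clamped against each other as shown above.
 */
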
static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hwp_boost);
}

static ssize_t store_hwp_dynamic_boost(struct kobject *a,
				       struct kobj_attribute *b,
				       const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = kstrtouint(buf, 10, &input);
	if (ret)
		return ret;

	mutex_lock(&intel_pstate_driver_lock);
	hwp_boost = !!input;
	intel_pstate_update_policies();
	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}

static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr,
				      char *buf)
{
	u64 power_ctl;
	int enable;

	rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
	enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE));
	return sprintf(buf, "%d\n", !enable);
}

static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b,
				       const char *buf, size_t count)
{
	bool input;
	int ret;

	ret = kstrtobool(buf, &input);
	if (ret)
		return ret;

	set_power_ctl_ee_state(input);

	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(status);
define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);
define_one_global_rw(hwp_dynamic_boost);
define_one_global_rw(energy_efficiency);

static struct attribute *intel_pstate_attributes[] = {
	&status.attr,
	&no_turbo.attr,
	NULL
};

static const struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[];

static struct kobject *intel_pstate_kobject;

static void __init intel_pstate_sysfs_expose_params(void)
{
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	if (WARN_ON(!intel_pstate_kobject))
		return;

	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	if (WARN_ON(rc))
		return;

	if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
		rc = sysfs_create_file(intel_pstate_kobject, &turbo_pct.attr);
		WARN_ON(rc);

		rc = sysfs_create_file(intel_pstate_kobject, &num_pstates.attr);
		WARN_ON(rc);
	}

	/*
	 * If per cpu limits are enforced there are no global limits, so
	 * return without creating max/min_perf_pct attributes
	 */
	if (per_cpu_limits)
		return;

	rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
	WARN_ON(rc);

	rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
	WARN_ON(rc);

	if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) {
		rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr);
		WARN_ON(rc);
	}
}

static void __init intel_pstate_sysfs_remove(void)
{
	if (!intel_pstate_kobject)
		return;

	sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group);

	if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
		sysfs_remove_file(intel_pstate_kobject, &num_pstates.attr);
		sysfs_remove_file(intel_pstate_kobject, &turbo_pct.attr);
	}

	if (!per_cpu_limits) {
		sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr);
		sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr);

		if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids))
			sysfs_remove_file(intel_pstate_kobject, &energy_efficiency.attr);
	}

	kobject_put(intel_pstate_kobject);
}

static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void)
{
	int rc;

	if (!hwp_active)
		return;

	rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
	WARN_ON_ONCE(rc);
}

static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
{
	if (!hwp_active)
		return;

	sysfs_remove_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
}

/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable HWP notification interrupts as we don't process them */
	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
	if (cpudata->epp_default == -EINVAL)
		cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
}

static int atom_get_min_pstate(int not_used)
{
	u64 value;

	rdmsrl(MSR_ATOM_CORE_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(int not_used)
{
	u64 value;

	rdmsrl(MSR_ATOM_CORE_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(int not_used)
{
	u64 value;

	rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
	return value & 0x7F;
}

static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (global.no_turbo && !global.turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	return val | vid;
}

static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 from SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	WARN_ON(i > 4);

	return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 from SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}

static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(MSR_ATOM_CORE_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			cpudata->pstate.min_pstate));

	rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

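/*
 * Illustrative example (hypothetical numbers, not from the original source):
 * if MSR_ATOM_CORE_VIDS yields raw VIDs 24 (min) and 52 (max) while the
 * P-state range is 6..20, then vid.ratio = div_fp(int_tofp(28), int_tofp(14))
 * = int_tofp(2). For pstate 13, atom_get_val() computes
 * ceiling_fp(int_tofp(24) + mul_fp(int_tofp(7), int_tofp(2))) = 38, clamped
 * to [vid.min, vid.max]; above max_pstate the precomputed vid.turbo is used
 * instead.
 */
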
static int core_get_min_pstate(int cpu)
{
	u64 value;

	rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(int cpu)
{
	u64 value;

	rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
	return (value >> 8) & 0xFF;
}

static int core_get_tdp_ratio(int cpu, u64 plat_info)
{
	/* Check how many TDP levels are present */
	if (plat_info & 0x600000000) {
		u64 tdp_ctrl;
		u64 tdp_ratio;
		int tdp_msr;
		int err;

		/* Get the TDP level (0, 1, 2) to get ratios */
		err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
		if (err)
			return err;

		/* TDP MSRs are contiguous starting at 0x648 */
		tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
		err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
		if (err)
			return err;

		/* For levels 1 and 2, bits [23:16] contain the ratio */
		if (tdp_ctrl & 0x03)
			tdp_ratio >>= 16;

		tdp_ratio &= 0xff; /* ratios are only 8 bits long */
		pr_debug("tdp_ratio %x\n", (int)tdp_ratio);

		return (int)tdp_ratio;
	}

	return -ENXIO;
}

static int core_get_max_pstate(int cpu)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int tdp_ratio;
	int err;

	rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	tdp_ratio = core_get_tdp_ratio(cpu, plat_info);
	if (tdp_ratio <= 0)
		return max_pstate;

	if (hwp_active) {
		/* Turbo activation ratio is not used on HWP platforms */
		return tdp_ratio;
	}

	err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		int tar_levels;

		/* Do some sanity checking for safety */
		tar_levels = tar & 0xff;
		if (tdp_ratio - 1 == tar_levels) {
			max_pstate = tar_levels;
			pr_debug("max_pstate=TAC %x\n", max_pstate);
		}
	}

	return max_pstate;
}

static int core_get_turbo_pstate(int cpu)
{
	u64 value;
	int nont, ret;

	rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
	nont = core_get_max_pstate(cpu);
	ret = (value) & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}

static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (global.no_turbo && !global.turbo_disabled)
		val |= (u64)1 << 32;

	return val;
}

static int knl_get_aperf_mperf_shift(void)
{
	return 10;
}

static int knl_get_turbo_pstate(int cpu)
{
	u64 value;
	int nont, ret;

	rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
	nont = core_get_max_pstate(cpu);
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}

#ifdef CONFIG_ACPI_CPPC_LIB
static u32 hybrid_ref_perf;

static int hybrid_get_cpu_scaling(int cpu)
{
	return DIV_ROUND_UP(core_get_scaling() * hybrid_ref_perf,
			    intel_pstate_cppc_nominal(cpu));
}

static void intel_pstate_cppc_set_cpu_scaling(void)
{
	u32 min_nominal_perf = U32_MAX;
	int cpu;

	for_each_present_cpu(cpu) {
		u32 nominal_perf = intel_pstate_cppc_nominal(cpu);

		if (nominal_perf && nominal_perf < min_nominal_perf)
			min_nominal_perf = nominal_perf;
	}

	if (min_nominal_perf < U32_MAX) {
		hybrid_ref_perf = min_nominal_perf;
		pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling;
	}
}
#else
static inline void intel_pstate_cppc_set_cpu_scaling(void)
{
}
#endif /* CONFIG_ACPI_CPPC_LIB */

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	cpu->pstate.current_pstate = pstate;
	/*
	 * Generally, there is no guarantee that this code will always run on
	 * the CPU being updated, so force the register update to run on the
	 * right CPU.
	 */
	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
		      pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static void intel_pstate_max_within_limits(struct cpudata *cpu)
{
	int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);

	update_turbo_state();
	intel_pstate_set_pstate(cpu, pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
	int perf_ctl_scaling = pstate_funcs.get_scaling();

	cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu);
	cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
	cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;

	if (hwp_active && !hwp_mode_bdw) {
		__intel_pstate_get_hwp_cap(cpu);

		if (pstate_funcs.get_cpu_scaling) {
			cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu);
			if (cpu->pstate.scaling != perf_ctl_scaling)
				intel_pstate_hybrid_hwp_adjust(cpu);
		} else {
			cpu->pstate.scaling = perf_ctl_scaling;
		}
	} else {
		cpu->pstate.scaling = perf_ctl_scaling;
		cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu);
		cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(cpu->cpu);
	}

	if (cpu->pstate.scaling == perf_ctl_scaling) {
		cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
		cpu->pstate.max_freq = cpu->pstate.max_pstate * perf_ctl_scaling;
		cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * perf_ctl_scaling;
	}

	if (pstate_funcs.get_aperf_mperf_shift)
		cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	intel_pstate_set_min_pstate(cpu);
}

/*
 * A long hold time keeps the high perf limits in place for a long time,
 * which negatively impacts perf/watt for some workloads, like specpower.
 * 3ms is based on experiments on some workloads.
 */
static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;

static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
{
	u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
	u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
	u32 max_limit = (hwp_req & 0xff00) >> 8;
	u32 min_limit = (hwp_req & 0xff);
	u32 boost_level1;

	/*
	 * Cases to consider (User changes via sysfs or boot time):
	 * If, P0 (Turbo max) = P1 (Guaranteed max) = min:
	 *	No boost, return.
	 * If, P0 (Turbo max) > P1 (Guaranteed max) = min:
	 *     Should result in one level boost only for P0.
	 * If, P0 (Turbo max) = P1 (Guaranteed max) > min:
	 *     Should result in two level boost:
	 *         (min + p1)/2 and P1.
	 * If, P0 (Turbo max) > P1 (Guaranteed max) > min:
	 *     Should result in three level boost:
	 *         (min + p1)/2, P1 and P0.
	 */

	/* If max and min are equal or already at max, nothing to boost */
	if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit)
		return;

	if (!cpu->hwp_boost_min)
		cpu->hwp_boost_min = min_limit;

	/* level at the halfway mark between min and guaranteed */
	boost_level1 = (HWP_GUARANTEED_PERF(hwp_cap) + min_limit) >> 1;

	if (cpu->hwp_boost_min < boost_level1)
		cpu->hwp_boost_min = boost_level1;
	else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(hwp_cap))
		cpu->hwp_boost_min = HWP_GUARANTEED_PERF(hwp_cap);
	else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(hwp_cap) &&
		 max_limit != HWP_GUARANTEED_PERF(hwp_cap))
		cpu->hwp_boost_min = max_limit;
	else
		return;

	hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
	wrmsrl(MSR_HWP_REQUEST, hwp_req);
	cpu->last_update = cpu->sample.time;
}

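/*
 * Illustrative example (hypothetical numbers, not from the original source):
 * with min_limit = 10, HWP_GUARANTEED_PERF(hwp_cap) = 30 and max_limit = 36,
 * boost_level1 = (30 + 10) >> 1 = 20, so successive boost-up calls walk
 * hwp_boost_min through 10 -> 20 -> 30 -> 36, matching the three-level boost
 * described in the comment above.
 */
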
1993 */ 1994 if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC)) 1995 do_io = true; 1996 1997 cpu->last_io_update = time; 1998 1999 if (do_io) 2000 intel_pstate_hwp_boost_up(cpu); 2001 2002 } else { 2003 intel_pstate_hwp_boost_down(cpu); 2004 } 2005 } 2006 2007 static inline void intel_pstate_update_util_hwp(struct update_util_data *data, 2008 u64 time, unsigned int flags) 2009 { 2010 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 2011 2012 cpu->sched_flags |= flags; 2013 2014 if (smp_processor_id() == cpu->cpu) 2015 intel_pstate_update_util_hwp_local(cpu, time); 2016 } 2017 2018 static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu) 2019 { 2020 struct sample *sample = &cpu->sample; 2021 2022 sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf); 2023 } 2024 2025 static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) 2026 { 2027 u64 aperf, mperf; 2028 unsigned long flags; 2029 u64 tsc; 2030 2031 local_irq_save(flags); 2032 rdmsrl(MSR_IA32_APERF, aperf); 2033 rdmsrl(MSR_IA32_MPERF, mperf); 2034 tsc = rdtsc(); 2035 if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) { 2036 local_irq_restore(flags); 2037 return false; 2038 } 2039 local_irq_restore(flags); 2040 2041 cpu->last_sample_time = cpu->sample.time; 2042 cpu->sample.time = time; 2043 cpu->sample.aperf = aperf; 2044 cpu->sample.mperf = mperf; 2045 cpu->sample.tsc = tsc; 2046 cpu->sample.aperf -= cpu->prev_aperf; 2047 cpu->sample.mperf -= cpu->prev_mperf; 2048 cpu->sample.tsc -= cpu->prev_tsc; 2049 2050 cpu->prev_aperf = aperf; 2051 cpu->prev_mperf = mperf; 2052 cpu->prev_tsc = tsc; 2053 /* 2054 * First time this function is invoked in a given cycle, all of the 2055 * previous sample data fields are equal to zero or stale and they must 2056 * be populated with meaningful numbers for things to work, so assume 2057 * that sample.time will always be reset before setting the utilization 2058 * update hook and make the caller skip the sample then. 2059 */ 2060 if (cpu->last_sample_time) { 2061 intel_pstate_calc_avg_perf(cpu); 2062 return true; 2063 } 2064 return false; 2065 } 2066 2067 static inline int32_t get_avg_frequency(struct cpudata *cpu) 2068 { 2069 return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz); 2070 } 2071 2072 static inline int32_t get_avg_pstate(struct cpudata *cpu) 2073 { 2074 return mul_ext_fp(cpu->pstate.max_pstate_physical, 2075 cpu->sample.core_avg_perf); 2076 } 2077 2078 static inline int32_t get_target_pstate(struct cpudata *cpu) 2079 { 2080 struct sample *sample = &cpu->sample; 2081 int32_t busy_frac; 2082 int target, avg_pstate; 2083 2084 busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift, 2085 sample->tsc); 2086 2087 if (busy_frac < cpu->iowait_boost) 2088 busy_frac = cpu->iowait_boost; 2089 2090 sample->busy_scaled = busy_frac * 100; 2091 2092 target = global.no_turbo || global.turbo_disabled ? 2093 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; 2094 target += target >> 2; 2095 target = mul_fp(target, busy_frac); 2096 if (target < cpu->pstate.min_pstate) 2097 target = cpu->pstate.min_pstate; 2098 2099 /* 2100 * If the average P-state during the previous cycle was higher than the 2101 * current target, add 50% of the difference to the target to reduce 2102 * possible performance oscillations and offset possible performance 2103 * loss related to moving the workload from one CPU to another within 2104 * a package/module. 
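* As a purely hypothetical illustration, if the previous-cycle average P-state
* was 28 and the current target is 20, the final target becomes
* 20 + (28 - 20) / 2 = 24.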
2105 */ 2106 avg_pstate = get_avg_pstate(cpu); 2107 if (avg_pstate > target) 2108 target += (avg_pstate - target) >> 1; 2109 2110 return target; 2111 } 2112 2113 static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) 2114 { 2115 int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio); 2116 int max_pstate = max(min_pstate, cpu->max_perf_ratio); 2117 2118 return clamp_t(int, pstate, min_pstate, max_pstate); 2119 } 2120 2121 static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) 2122 { 2123 if (pstate == cpu->pstate.current_pstate) 2124 return; 2125 2126 cpu->pstate.current_pstate = pstate; 2127 wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); 2128 } 2129 2130 static void intel_pstate_adjust_pstate(struct cpudata *cpu) 2131 { 2132 int from = cpu->pstate.current_pstate; 2133 struct sample *sample; 2134 int target_pstate; 2135 2136 update_turbo_state(); 2137 2138 target_pstate = get_target_pstate(cpu); 2139 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); 2140 trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); 2141 intel_pstate_update_pstate(cpu, target_pstate); 2142 2143 sample = &cpu->sample; 2144 trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf), 2145 fp_toint(sample->busy_scaled), 2146 from, 2147 cpu->pstate.current_pstate, 2148 sample->mperf, 2149 sample->aperf, 2150 sample->tsc, 2151 get_avg_frequency(cpu), 2152 fp_toint(cpu->iowait_boost * 100)); 2153 } 2154 2155 static void intel_pstate_update_util(struct update_util_data *data, u64 time, 2156 unsigned int flags) 2157 { 2158 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 2159 u64 delta_ns; 2160 2161 /* Don't allow remote callbacks */ 2162 if (smp_processor_id() != cpu->cpu) 2163 return; 2164 2165 delta_ns = time - cpu->last_update; 2166 if (flags & SCHED_CPUFREQ_IOWAIT) { 2167 /* Start over if the CPU may have been idle. */ 2168 if (delta_ns > TICK_NSEC) { 2169 cpu->iowait_boost = ONE_EIGHTH_FP; 2170 } else if (cpu->iowait_boost >= ONE_EIGHTH_FP) { 2171 cpu->iowait_boost <<= 1; 2172 if (cpu->iowait_boost > int_tofp(1)) 2173 cpu->iowait_boost = int_tofp(1); 2174 } else { 2175 cpu->iowait_boost = ONE_EIGHTH_FP; 2176 } 2177 } else if (cpu->iowait_boost) { 2178 /* Clear iowait_boost if the CPU may have been idle. 
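* (otherwise the boost is halved on every update so that it decays gradually).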
*/ 2179 if (delta_ns > TICK_NSEC) 2180 cpu->iowait_boost = 0; 2181 else 2182 cpu->iowait_boost >>= 1; 2183 } 2184 cpu->last_update = time; 2185 delta_ns = time - cpu->sample.time; 2186 if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL) 2187 return; 2188 2189 if (intel_pstate_sample(cpu, time)) 2190 intel_pstate_adjust_pstate(cpu); 2191 } 2192 2193 static struct pstate_funcs core_funcs = { 2194 .get_max = core_get_max_pstate, 2195 .get_max_physical = core_get_max_pstate_physical, 2196 .get_min = core_get_min_pstate, 2197 .get_turbo = core_get_turbo_pstate, 2198 .get_scaling = core_get_scaling, 2199 .get_val = core_get_val, 2200 }; 2201 2202 static const struct pstate_funcs silvermont_funcs = { 2203 .get_max = atom_get_max_pstate, 2204 .get_max_physical = atom_get_max_pstate, 2205 .get_min = atom_get_min_pstate, 2206 .get_turbo = atom_get_turbo_pstate, 2207 .get_val = atom_get_val, 2208 .get_scaling = silvermont_get_scaling, 2209 .get_vid = atom_get_vid, 2210 }; 2211 2212 static const struct pstate_funcs airmont_funcs = { 2213 .get_max = atom_get_max_pstate, 2214 .get_max_physical = atom_get_max_pstate, 2215 .get_min = atom_get_min_pstate, 2216 .get_turbo = atom_get_turbo_pstate, 2217 .get_val = atom_get_val, 2218 .get_scaling = airmont_get_scaling, 2219 .get_vid = atom_get_vid, 2220 }; 2221 2222 static const struct pstate_funcs knl_funcs = { 2223 .get_max = core_get_max_pstate, 2224 .get_max_physical = core_get_max_pstate_physical, 2225 .get_min = core_get_min_pstate, 2226 .get_turbo = knl_get_turbo_pstate, 2227 .get_aperf_mperf_shift = knl_get_aperf_mperf_shift, 2228 .get_scaling = core_get_scaling, 2229 .get_val = core_get_val, 2230 }; 2231 2232 #define X86_MATCH(model, policy) \ 2233 X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ 2234 X86_FEATURE_APERFMPERF, &policy) 2235 2236 static const struct x86_cpu_id intel_pstate_cpu_ids[] = { 2237 X86_MATCH(SANDYBRIDGE, core_funcs), 2238 X86_MATCH(SANDYBRIDGE_X, core_funcs), 2239 X86_MATCH(ATOM_SILVERMONT, silvermont_funcs), 2240 X86_MATCH(IVYBRIDGE, core_funcs), 2241 X86_MATCH(HASWELL, core_funcs), 2242 X86_MATCH(BROADWELL, core_funcs), 2243 X86_MATCH(IVYBRIDGE_X, core_funcs), 2244 X86_MATCH(HASWELL_X, core_funcs), 2245 X86_MATCH(HASWELL_L, core_funcs), 2246 X86_MATCH(HASWELL_G, core_funcs), 2247 X86_MATCH(BROADWELL_G, core_funcs), 2248 X86_MATCH(ATOM_AIRMONT, airmont_funcs), 2249 X86_MATCH(SKYLAKE_L, core_funcs), 2250 X86_MATCH(BROADWELL_X, core_funcs), 2251 X86_MATCH(SKYLAKE, core_funcs), 2252 X86_MATCH(BROADWELL_D, core_funcs), 2253 X86_MATCH(XEON_PHI_KNL, knl_funcs), 2254 X86_MATCH(XEON_PHI_KNM, knl_funcs), 2255 X86_MATCH(ATOM_GOLDMONT, core_funcs), 2256 X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs), 2257 X86_MATCH(SKYLAKE_X, core_funcs), 2258 X86_MATCH(COMETLAKE, core_funcs), 2259 X86_MATCH(ICELAKE_X, core_funcs), 2260 X86_MATCH(TIGERLAKE, core_funcs), 2261 {} 2262 }; 2263 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); 2264 2265 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { 2266 X86_MATCH(BROADWELL_D, core_funcs), 2267 X86_MATCH(BROADWELL_X, core_funcs), 2268 X86_MATCH(SKYLAKE_X, core_funcs), 2269 X86_MATCH(ICELAKE_X, core_funcs), 2270 {} 2271 }; 2272 2273 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { 2274 X86_MATCH(KABYLAKE, core_funcs), 2275 {} 2276 }; 2277 2278 static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = { 2279 X86_MATCH(SKYLAKE_X, core_funcs), 2280 X86_MATCH(SKYLAKE, core_funcs), 2281 {} 2282 }; 2283 2284 static int intel_pstate_init_cpu(unsigned 
int cpunum) 2285 { 2286 struct cpudata *cpu; 2287 2288 cpu = all_cpu_data[cpunum]; 2289 2290 if (!cpu) { 2291 cpu = kzalloc(sizeof(*cpu), GFP_KERNEL); 2292 if (!cpu) 2293 return -ENOMEM; 2294 2295 all_cpu_data[cpunum] = cpu; 2296 2297 cpu->cpu = cpunum; 2298 2299 cpu->epp_default = -EINVAL; 2300 2301 if (hwp_active) { 2302 const struct x86_cpu_id *id; 2303 2304 intel_pstate_hwp_enable(cpu); 2305 2306 id = x86_match_cpu(intel_pstate_hwp_boost_ids); 2307 if (id && intel_pstate_acpi_pm_profile_server()) 2308 hwp_boost = true; 2309 } 2310 } else if (hwp_active) { 2311 /* 2312 * Re-enable HWP in case this happens after a resume from ACPI 2313 * S3 if the CPU was offline during the whole system/resume 2314 * cycle. 2315 */ 2316 intel_pstate_hwp_reenable(cpu); 2317 } 2318 2319 cpu->epp_powersave = -EINVAL; 2320 cpu->epp_policy = 0; 2321 2322 intel_pstate_get_cpu_pstates(cpu); 2323 2324 pr_debug("controlling: cpu %d\n", cpunum); 2325 2326 return 0; 2327 } 2328 2329 static void intel_pstate_set_update_util_hook(unsigned int cpu_num) 2330 { 2331 struct cpudata *cpu = all_cpu_data[cpu_num]; 2332 2333 if (hwp_active && !hwp_boost) 2334 return; 2335 2336 if (cpu->update_util_set) 2337 return; 2338 2339 /* Prevent intel_pstate_update_util() from using stale data. */ 2340 cpu->sample.time = 0; 2341 cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, 2342 (hwp_active ? 2343 intel_pstate_update_util_hwp : 2344 intel_pstate_update_util)); 2345 cpu->update_util_set = true; 2346 } 2347 2348 static void intel_pstate_clear_update_util_hook(unsigned int cpu) 2349 { 2350 struct cpudata *cpu_data = all_cpu_data[cpu]; 2351 2352 if (!cpu_data->update_util_set) 2353 return; 2354 2355 cpufreq_remove_update_util_hook(cpu); 2356 cpu_data->update_util_set = false; 2357 synchronize_rcu(); 2358 } 2359 2360 static int intel_pstate_get_max_freq(struct cpudata *cpu) 2361 { 2362 return global.turbo_disabled || global.no_turbo ? 2363 cpu->pstate.max_freq : cpu->pstate.turbo_freq; 2364 } 2365 2366 static void intel_pstate_update_perf_limits(struct cpudata *cpu, 2367 unsigned int policy_min, 2368 unsigned int policy_max) 2369 { 2370 int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; 2371 int32_t max_policy_perf, min_policy_perf; 2372 2373 max_policy_perf = policy_max / perf_ctl_scaling; 2374 if (policy_max == policy_min) { 2375 min_policy_perf = max_policy_perf; 2376 } else { 2377 min_policy_perf = policy_min / perf_ctl_scaling; 2378 min_policy_perf = clamp_t(int32_t, min_policy_perf, 2379 0, max_policy_perf); 2380 } 2381 2382 /* 2383 * HWP needs some special consideration, because HWP_REQUEST uses 2384 * abstract values to represent performance rather than pure ratios. 2385 */ 2386 if (hwp_active) { 2387 intel_pstate_get_hwp_cap(cpu); 2388 2389 if (cpu->pstate.scaling != perf_ctl_scaling) { 2390 int scaling = cpu->pstate.scaling; 2391 int freq; 2392 2393 freq = max_policy_perf * perf_ctl_scaling; 2394 max_policy_perf = DIV_ROUND_UP(freq, scaling); 2395 freq = min_policy_perf * perf_ctl_scaling; 2396 min_policy_perf = DIV_ROUND_UP(freq, scaling); 2397 } 2398 } 2399 2400 pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n", 2401 cpu->cpu, min_policy_perf, max_policy_perf); 2402 2403 /* Normalize user input to [min_perf, max_perf] */ 2404 if (per_cpu_limits) { 2405 cpu->min_perf_ratio = min_policy_perf; 2406 cpu->max_perf_ratio = max_policy_perf; 2407 } else { 2408 int turbo_max = cpu->pstate.turbo_pstate; 2409 int32_t global_min, global_max; 2410 2411 /* Global limits are in percent of the maximum turbo P-state. 
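* As a purely hypothetical illustration, with turbo_max = 40 and
* max_perf_pct = 75, global_max below becomes DIV_ROUND_UP(40 * 75, 100) = 30.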
*/ 2412 global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100); 2413 global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100); 2414 global_min = clamp_t(int32_t, global_min, 0, global_max); 2415 2416 pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu, 2417 global_min, global_max); 2418 2419 cpu->min_perf_ratio = max(min_policy_perf, global_min); 2420 cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf); 2421 cpu->max_perf_ratio = min(max_policy_perf, global_max); 2422 cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio); 2423 2424 /* Make sure min_perf <= max_perf */ 2425 cpu->min_perf_ratio = min(cpu->min_perf_ratio, 2426 cpu->max_perf_ratio); 2427 2428 } 2429 pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu, 2430 cpu->max_perf_ratio, 2431 cpu->min_perf_ratio); 2432 } 2433 2434 static int intel_pstate_set_policy(struct cpufreq_policy *policy) 2435 { 2436 struct cpudata *cpu; 2437 2438 if (!policy->cpuinfo.max_freq) 2439 return -ENODEV; 2440 2441 pr_debug("set_policy cpuinfo.max %u policy->max %u\n", 2442 policy->cpuinfo.max_freq, policy->max); 2443 2444 cpu = all_cpu_data[policy->cpu]; 2445 cpu->policy = policy->policy; 2446 2447 mutex_lock(&intel_pstate_limits_lock); 2448 2449 intel_pstate_update_perf_limits(cpu, policy->min, policy->max); 2450 2451 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { 2452 /* 2453 * NOHZ_FULL CPUs need this as the governor callback may not 2454 * be invoked on them. 2455 */ 2456 intel_pstate_clear_update_util_hook(policy->cpu); 2457 intel_pstate_max_within_limits(cpu); 2458 } else { 2459 intel_pstate_set_update_util_hook(policy->cpu); 2460 } 2461 2462 if (hwp_active) { 2463 /* 2464 * When hwp_boost was active before and dynamically it 2465 * was turned off, in that case we need to clear the 2466 * update util hook. 2467 */ 2468 if (!hwp_boost) 2469 intel_pstate_clear_update_util_hook(policy->cpu); 2470 intel_pstate_hwp_set(policy->cpu); 2471 } 2472 2473 mutex_unlock(&intel_pstate_limits_lock); 2474 2475 return 0; 2476 } 2477 2478 static void intel_pstate_adjust_policy_max(struct cpudata *cpu, 2479 struct cpufreq_policy_data *policy) 2480 { 2481 if (!hwp_active && 2482 cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && 2483 policy->max < policy->cpuinfo.max_freq && 2484 policy->max > cpu->pstate.max_freq) { 2485 pr_debug("policy->max > max non turbo frequency\n"); 2486 policy->max = policy->cpuinfo.max_freq; 2487 } 2488 } 2489 2490 static void intel_pstate_verify_cpu_policy(struct cpudata *cpu, 2491 struct cpufreq_policy_data *policy) 2492 { 2493 int max_freq; 2494 2495 update_turbo_state(); 2496 if (hwp_active) { 2497 intel_pstate_get_hwp_cap(cpu); 2498 max_freq = global.no_turbo || global.turbo_disabled ? 
2499 cpu->pstate.max_freq : cpu->pstate.turbo_freq; 2500 } else { 2501 max_freq = intel_pstate_get_max_freq(cpu); 2502 } 2503 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq); 2504 2505 intel_pstate_adjust_policy_max(cpu, policy); 2506 } 2507 2508 static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy) 2509 { 2510 intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy); 2511 2512 return 0; 2513 } 2514 2515 static int intel_cpufreq_cpu_offline(struct cpufreq_policy *policy) 2516 { 2517 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2518 2519 pr_debug("CPU %d going offline\n", cpu->cpu); 2520 2521 if (cpu->suspended) 2522 return 0; 2523 2524 /* 2525 * If the CPU is an SMT thread and it goes offline with the performance 2526 * settings different from the minimum, it will prevent its sibling 2527 * from getting to lower performance levels, so force the minimum 2528 * performance on CPU offline to prevent that from happening. 2529 */ 2530 if (hwp_active) 2531 intel_pstate_hwp_offline(cpu); 2532 else 2533 intel_pstate_set_min_pstate(cpu); 2534 2535 intel_pstate_exit_perf_limits(policy); 2536 2537 return 0; 2538 } 2539 2540 static int intel_pstate_cpu_online(struct cpufreq_policy *policy) 2541 { 2542 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2543 2544 pr_debug("CPU %d going online\n", cpu->cpu); 2545 2546 intel_pstate_init_acpi_perf_limits(policy); 2547 2548 if (hwp_active) { 2549 /* 2550 * Re-enable HWP and clear the "suspended" flag to let "resume" 2551 * know that it need not do that. 2552 */ 2553 intel_pstate_hwp_reenable(cpu); 2554 cpu->suspended = false; 2555 } 2556 2557 return 0; 2558 } 2559 2560 static int intel_pstate_cpu_offline(struct cpufreq_policy *policy) 2561 { 2562 intel_pstate_clear_update_util_hook(policy->cpu); 2563 2564 return intel_cpufreq_cpu_offline(policy); 2565 } 2566 2567 static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) 2568 { 2569 pr_debug("CPU %d exiting\n", policy->cpu); 2570 2571 policy->fast_switch_possible = false; 2572 2573 return 0; 2574 } 2575 2576 static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) 2577 { 2578 struct cpudata *cpu; 2579 int rc; 2580 2581 rc = intel_pstate_init_cpu(policy->cpu); 2582 if (rc) 2583 return rc; 2584 2585 cpu = all_cpu_data[policy->cpu]; 2586 2587 cpu->max_perf_ratio = 0xFF; 2588 cpu->min_perf_ratio = 0; 2589 2590 /* cpuinfo and default policy values */ 2591 policy->cpuinfo.min_freq = cpu->pstate.min_freq; 2592 update_turbo_state(); 2593 global.turbo_disabled_mf = global.turbo_disabled; 2594 policy->cpuinfo.max_freq = global.turbo_disabled ? 2595 cpu->pstate.max_freq : cpu->pstate.turbo_freq; 2596 2597 policy->min = policy->cpuinfo.min_freq; 2598 policy->max = policy->cpuinfo.max_freq; 2599 2600 intel_pstate_init_acpi_perf_limits(policy); 2601 2602 policy->fast_switch_possible = true; 2603 2604 return 0; 2605 } 2606 2607 static int intel_pstate_cpu_init(struct cpufreq_policy *policy) 2608 { 2609 int ret = __intel_pstate_cpu_init(policy); 2610 2611 if (ret) 2612 return ret; 2613 2614 /* 2615 * Set the policy to powersave to provide a valid fallback value in case 2616 * the default cpufreq governor is neither powersave nor performance. 
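* (user space can still switch between the performance and powersave policies
* later via the scaling_governor cpufreq sysfs attribute).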
2617 */ 2618 policy->policy = CPUFREQ_POLICY_POWERSAVE; 2619 2620 if (hwp_active) { 2621 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2622 2623 cpu->epp_cached = intel_pstate_get_epp(cpu, 0); 2624 } 2625 2626 return 0; 2627 } 2628 2629 static struct cpufreq_driver intel_pstate = { 2630 .flags = CPUFREQ_CONST_LOOPS, 2631 .verify = intel_pstate_verify_policy, 2632 .setpolicy = intel_pstate_set_policy, 2633 .suspend = intel_pstate_suspend, 2634 .resume = intel_pstate_resume, 2635 .init = intel_pstate_cpu_init, 2636 .exit = intel_pstate_cpu_exit, 2637 .offline = intel_pstate_cpu_offline, 2638 .online = intel_pstate_cpu_online, 2639 .update_limits = intel_pstate_update_limits, 2640 .name = "intel_pstate", 2641 }; 2642 2643 static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy) 2644 { 2645 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2646 2647 intel_pstate_verify_cpu_policy(cpu, policy); 2648 intel_pstate_update_perf_limits(cpu, policy->min, policy->max); 2649 2650 return 0; 2651 } 2652 2653 /* Use of trace in passive mode: 2654 * 2655 * In passive mode the trace core_busy field (also known as the 2656 * performance field, and labelled as such on the graphs; also known as 2657 * core_avg_perf) is not needed and so is re-assigned to indicate if the 2658 * driver call was via the normal or fast switch path. Various graphs 2659 * output from the intel_pstate_tracer.py utility that include core_busy 2660 * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%, 2661 * so we use 10 to indicate the normal path through the driver, and 2662 * 90 to indicate the fast switch path through the driver. 2663 * The scaled_busy field is not used, and is set to 0. 2664 */ 2665 2666 #define INTEL_PSTATE_TRACE_TARGET 10 2667 #define INTEL_PSTATE_TRACE_FAST_SWITCH 90 2668 2669 static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate) 2670 { 2671 struct sample *sample; 2672 2673 if (!trace_pstate_sample_enabled()) 2674 return; 2675 2676 if (!intel_pstate_sample(cpu, ktime_get())) 2677 return; 2678 2679 sample = &cpu->sample; 2680 trace_pstate_sample(trace_type, 2681 0, 2682 old_pstate, 2683 cpu->pstate.current_pstate, 2684 sample->mperf, 2685 sample->aperf, 2686 sample->tsc, 2687 get_avg_frequency(cpu), 2688 fp_toint(cpu->iowait_boost * 100)); 2689 } 2690 2691 static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max, 2692 u32 desired, bool fast_switch) 2693 { 2694 u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev; 2695 2696 value &= ~HWP_MIN_PERF(~0L); 2697 value |= HWP_MIN_PERF(min); 2698 2699 value &= ~HWP_MAX_PERF(~0L); 2700 value |= HWP_MAX_PERF(max); 2701 2702 value &= ~HWP_DESIRED_PERF(~0L); 2703 value |= HWP_DESIRED_PERF(desired); 2704 2705 if (value == prev) 2706 return; 2707 2708 WRITE_ONCE(cpu->hwp_req_cached, value); 2709 if (fast_switch) 2710 wrmsrl(MSR_HWP_REQUEST, value); 2711 else 2712 wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); 2713 } 2714 2715 static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu, 2716 u32 target_pstate, bool fast_switch) 2717 { 2718 if (fast_switch) 2719 wrmsrl(MSR_IA32_PERF_CTL, 2720 pstate_funcs.get_val(cpu, target_pstate)); 2721 else 2722 wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, 2723 pstate_funcs.get_val(cpu, target_pstate)); 2724 } 2725 2726 static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy, 2727 int target_pstate, bool fast_switch) 2728 { 2729 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2730 int old_pstate = cpu->pstate.current_pstate;
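/* Clamp the requested P-state into the [min_perf_ratio, max_perf_ratio] window before programming the hardware via HWP or PERF_CTL below. */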
2731 2732 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); 2733 if (hwp_active) { 2734 int max_pstate = policy->strict_target ? 2735 target_pstate : cpu->max_perf_ratio; 2736 2737 intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0, 2738 fast_switch); 2739 } else if (target_pstate != old_pstate) { 2740 intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch); 2741 } 2742 2743 cpu->pstate.current_pstate = target_pstate; 2744 2745 intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH : 2746 INTEL_PSTATE_TRACE_TARGET, old_pstate); 2747 2748 return target_pstate; 2749 } 2750 2751 static int intel_cpufreq_target(struct cpufreq_policy *policy, 2752 unsigned int target_freq, 2753 unsigned int relation) 2754 { 2755 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2756 struct cpufreq_freqs freqs; 2757 int target_pstate; 2758 2759 update_turbo_state(); 2760 2761 freqs.old = policy->cur; 2762 freqs.new = target_freq; 2763 2764 cpufreq_freq_transition_begin(policy, &freqs); 2765 2766 switch (relation) { 2767 case CPUFREQ_RELATION_L: 2768 target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling); 2769 break; 2770 case CPUFREQ_RELATION_H: 2771 target_pstate = freqs.new / cpu->pstate.scaling; 2772 break; 2773 default: 2774 target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling); 2775 break; 2776 } 2777 2778 target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false); 2779 2780 freqs.new = target_pstate * cpu->pstate.scaling; 2781 2782 cpufreq_freq_transition_end(policy, &freqs, false); 2783 2784 return 0; 2785 } 2786 2787 static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, 2788 unsigned int target_freq) 2789 { 2790 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2791 int target_pstate; 2792 2793 update_turbo_state(); 2794 2795 target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); 2796 2797 target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true); 2798 2799 return target_pstate * cpu->pstate.scaling; 2800 } 2801 2802 static void intel_cpufreq_adjust_perf(unsigned int cpunum, 2803 unsigned long min_perf, 2804 unsigned long target_perf, 2805 unsigned long capacity) 2806 { 2807 struct cpudata *cpu = all_cpu_data[cpunum]; 2808 u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached); 2809 int old_pstate = cpu->pstate.current_pstate; 2810 int cap_pstate, min_pstate, max_pstate, target_pstate; 2811 2812 update_turbo_state(); 2813 cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) : 2814 HWP_HIGHEST_PERF(hwp_cap); 2815 2816 /* Optimization: Avoid unnecessary divisions. 
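* As a purely hypothetical illustration, with cap_pstate = 40, capacity = 1024
* and target_perf = 512, the target ratio below is
* DIV_ROUND_UP(40 * 512, 1024) = 20.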
*/ 2817 2818 target_pstate = cap_pstate; 2819 if (target_perf < capacity) 2820 target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity); 2821 2822 min_pstate = cap_pstate; 2823 if (min_perf < capacity) 2824 min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity); 2825 2826 if (min_pstate < cpu->pstate.min_pstate) 2827 min_pstate = cpu->pstate.min_pstate; 2828 2829 if (min_pstate < cpu->min_perf_ratio) 2830 min_pstate = cpu->min_perf_ratio; 2831 2832 max_pstate = min(cap_pstate, cpu->max_perf_ratio); 2833 if (max_pstate < min_pstate) 2834 max_pstate = min_pstate; 2835 2836 target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate); 2837 2838 intel_cpufreq_hwp_update(cpu, min_pstate, max_pstate, target_pstate, true); 2839 2840 cpu->pstate.current_pstate = target_pstate; 2841 intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate); 2842 } 2843 2844 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) 2845 { 2846 struct freq_qos_request *req; 2847 struct cpudata *cpu; 2848 struct device *dev; 2849 int ret, freq; 2850 2851 dev = get_cpu_device(policy->cpu); 2852 if (!dev) 2853 return -ENODEV; 2854 2855 ret = __intel_pstate_cpu_init(policy); 2856 if (ret) 2857 return ret; 2858 2859 policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY; 2860 /* This reflects the intel_pstate_get_cpu_pstates() setting. */ 2861 policy->cur = policy->cpuinfo.min_freq; 2862 2863 req = kcalloc(2, sizeof(*req), GFP_KERNEL); 2864 if (!req) { 2865 ret = -ENOMEM; 2866 goto pstate_exit; 2867 } 2868 2869 cpu = all_cpu_data[policy->cpu]; 2870 2871 if (hwp_active) { 2872 u64 value; 2873 2874 policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP; 2875 2876 intel_pstate_get_hwp_cap(cpu); 2877 2878 rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value); 2879 WRITE_ONCE(cpu->hwp_req_cached, value); 2880 2881 cpu->epp_cached = intel_pstate_get_epp(cpu, value); 2882 } else { 2883 policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY; 2884 } 2885 2886 freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.min_perf_pct, 100); 2887 2888 ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN, 2889 freq); 2890 if (ret < 0) { 2891 dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret); 2892 goto free_req; 2893 } 2894 2895 freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.max_perf_pct, 100); 2896 2897 ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX, 2898 freq); 2899 if (ret < 0) { 2900 dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret); 2901 goto remove_min_req; 2902 } 2903 2904 policy->driver_data = req; 2905 2906 return 0; 2907 2908 remove_min_req: 2909 freq_qos_remove_request(req); 2910 free_req: 2911 kfree(req); 2912 pstate_exit: 2913 intel_pstate_exit_perf_limits(policy); 2914 2915 return ret; 2916 } 2917 2918 static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy) 2919 { 2920 struct freq_qos_request *req; 2921 2922 req = policy->driver_data; 2923 2924 freq_qos_remove_request(req + 1); 2925 freq_qos_remove_request(req); 2926 kfree(req); 2927 2928 return intel_pstate_cpu_exit(policy); 2929 } 2930 2931 static int intel_cpufreq_suspend(struct cpufreq_policy *policy) 2932 { 2933 intel_pstate_suspend(policy); 2934 2935 if (hwp_active) { 2936 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2937 u64 value = READ_ONCE(cpu->hwp_req_cached); 2938 2939 /* 2940 * Clear the desired perf field in MSR_HWP_REQUEST in case 2941 * intel_cpufreq_adjust_perf() is in use and the last value 2942 * written by it may not be 
suitable. 2943 */ 2944 value &= ~HWP_DESIRED_PERF(~0L); 2945 wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); 2946 WRITE_ONCE(cpu->hwp_req_cached, value); 2947 } 2948 2949 return 0; 2950 } 2951 2952 static struct cpufreq_driver intel_cpufreq = { 2953 .flags = CPUFREQ_CONST_LOOPS, 2954 .verify = intel_cpufreq_verify_policy, 2955 .target = intel_cpufreq_target, 2956 .fast_switch = intel_cpufreq_fast_switch, 2957 .init = intel_cpufreq_cpu_init, 2958 .exit = intel_cpufreq_cpu_exit, 2959 .offline = intel_cpufreq_cpu_offline, 2960 .online = intel_pstate_cpu_online, 2961 .suspend = intel_cpufreq_suspend, 2962 .resume = intel_pstate_resume, 2963 .update_limits = intel_pstate_update_limits, 2964 .name = "intel_cpufreq", 2965 }; 2966 2967 static struct cpufreq_driver *default_driver; 2968 2969 static void intel_pstate_driver_cleanup(void) 2970 { 2971 unsigned int cpu; 2972 2973 cpus_read_lock(); 2974 for_each_online_cpu(cpu) { 2975 if (all_cpu_data[cpu]) { 2976 if (intel_pstate_driver == &intel_pstate) 2977 intel_pstate_clear_update_util_hook(cpu); 2978 2979 kfree(all_cpu_data[cpu]); 2980 all_cpu_data[cpu] = NULL; 2981 } 2982 } 2983 cpus_read_unlock(); 2984 2985 intel_pstate_driver = NULL; 2986 } 2987 2988 static int intel_pstate_register_driver(struct cpufreq_driver *driver) 2989 { 2990 int ret; 2991 2992 if (driver == &intel_pstate) 2993 intel_pstate_sysfs_expose_hwp_dynamic_boost(); 2994 2995 memset(&global, 0, sizeof(global)); 2996 global.max_perf_pct = 100; 2997 2998 intel_pstate_driver = driver; 2999 ret = cpufreq_register_driver(intel_pstate_driver); 3000 if (ret) { 3001 intel_pstate_driver_cleanup(); 3002 return ret; 3003 } 3004 3005 global.min_perf_pct = min_perf_pct_min(); 3006 3007 return 0; 3008 } 3009 3010 static ssize_t intel_pstate_show_status(char *buf) 3011 { 3012 if (!intel_pstate_driver) 3013 return sprintf(buf, "off\n"); 3014 3015 return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ? 
3016 "active" : "passive"); 3017 } 3018 3019 static int intel_pstate_update_status(const char *buf, size_t size) 3020 { 3021 if (size == 3 && !strncmp(buf, "off", size)) { 3022 if (!intel_pstate_driver) 3023 return -EINVAL; 3024 3025 if (hwp_active) 3026 return -EBUSY; 3027 3028 cpufreq_unregister_driver(intel_pstate_driver); 3029 intel_pstate_driver_cleanup(); 3030 return 0; 3031 } 3032 3033 if (size == 6 && !strncmp(buf, "active", size)) { 3034 if (intel_pstate_driver) { 3035 if (intel_pstate_driver == &intel_pstate) 3036 return 0; 3037 3038 cpufreq_unregister_driver(intel_pstate_driver); 3039 } 3040 3041 return intel_pstate_register_driver(&intel_pstate); 3042 } 3043 3044 if (size == 7 && !strncmp(buf, "passive", size)) { 3045 if (intel_pstate_driver) { 3046 if (intel_pstate_driver == &intel_cpufreq) 3047 return 0; 3048 3049 cpufreq_unregister_driver(intel_pstate_driver); 3050 intel_pstate_sysfs_hide_hwp_dynamic_boost(); 3051 } 3052 3053 return intel_pstate_register_driver(&intel_cpufreq); 3054 } 3055 3056 return -EINVAL; 3057 } 3058 3059 static int no_load __initdata; 3060 static int no_hwp __initdata; 3061 static int hwp_only __initdata; 3062 static unsigned int force_load __initdata; 3063 3064 static int __init intel_pstate_msrs_not_valid(void) 3065 { 3066 if (!pstate_funcs.get_max(0) || 3067 !pstate_funcs.get_min(0) || 3068 !pstate_funcs.get_turbo(0)) 3069 return -ENODEV; 3070 3071 return 0; 3072 } 3073 3074 static void __init copy_cpu_funcs(struct pstate_funcs *funcs) 3075 { 3076 pstate_funcs.get_max = funcs->get_max; 3077 pstate_funcs.get_max_physical = funcs->get_max_physical; 3078 pstate_funcs.get_min = funcs->get_min; 3079 pstate_funcs.get_turbo = funcs->get_turbo; 3080 pstate_funcs.get_scaling = funcs->get_scaling; 3081 pstate_funcs.get_val = funcs->get_val; 3082 pstate_funcs.get_vid = funcs->get_vid; 3083 pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift; 3084 } 3085 3086 #ifdef CONFIG_ACPI 3087 3088 static bool __init intel_pstate_no_acpi_pss(void) 3089 { 3090 int i; 3091 3092 for_each_possible_cpu(i) { 3093 acpi_status status; 3094 union acpi_object *pss; 3095 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 3096 struct acpi_processor *pr = per_cpu(processors, i); 3097 3098 if (!pr) 3099 continue; 3100 3101 status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); 3102 if (ACPI_FAILURE(status)) 3103 continue; 3104 3105 pss = buffer.pointer; 3106 if (pss && pss->type == ACPI_TYPE_PACKAGE) { 3107 kfree(pss); 3108 return false; 3109 } 3110 3111 kfree(pss); 3112 } 3113 3114 pr_debug("ACPI _PSS not found\n"); 3115 return true; 3116 } 3117 3118 static bool __init intel_pstate_no_acpi_pcch(void) 3119 { 3120 acpi_status status; 3121 acpi_handle handle; 3122 3123 status = acpi_get_handle(NULL, "\\_SB", &handle); 3124 if (ACPI_FAILURE(status)) 3125 goto not_found; 3126 3127 if (acpi_has_method(handle, "PCCH")) 3128 return false; 3129 3130 not_found: 3131 pr_debug("ACPI PCCH not found\n"); 3132 return true; 3133 } 3134 3135 static bool __init intel_pstate_has_acpi_ppc(void) 3136 { 3137 int i; 3138 3139 for_each_possible_cpu(i) { 3140 struct acpi_processor *pr = per_cpu(processors, i); 3141 3142 if (!pr) 3143 continue; 3144 if (acpi_has_method(pr->handle, "_PPC")) 3145 return true; 3146 } 3147 pr_debug("ACPI _PPC not found\n"); 3148 return false; 3149 } 3150 3151 enum { 3152 PSS, 3153 PPC, 3154 }; 3155 3156 /* Hardware vendor-specific info that has its own power management modes */ 3157 static struct acpi_platform_list plat_info[] __initdata = { 3158 
{"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS}, 3159 {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3160 {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3161 {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3162 {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3163 {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3164 {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3165 {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3166 {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3167 {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3168 {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3169 {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3170 {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3171 {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3172 {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3173 { } /* End */ 3174 }; 3175 3176 #define BITMASK_OOB (BIT(8) | BIT(18)) 3177 3178 static bool __init intel_pstate_platform_pwr_mgmt_exists(void) 3179 { 3180 const struct x86_cpu_id *id; 3181 u64 misc_pwr; 3182 int idx; 3183 3184 id = x86_match_cpu(intel_pstate_cpu_oob_ids); 3185 if (id) { 3186 rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); 3187 if (misc_pwr & BITMASK_OOB) { 3188 pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n"); 3189 pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n"); 3190 return true; 3191 } 3192 } 3193 3194 idx = acpi_match_platform_list(plat_info); 3195 if (idx < 0) 3196 return false; 3197 3198 switch (plat_info[idx].data) { 3199 case PSS: 3200 if (!intel_pstate_no_acpi_pss()) 3201 return false; 3202 3203 return intel_pstate_no_acpi_pcch(); 3204 case PPC: 3205 return intel_pstate_has_acpi_ppc() && !force_load; 3206 } 3207 3208 return false; 3209 } 3210 3211 static void intel_pstate_request_control_from_smm(void) 3212 { 3213 /* 3214 * It may be unsafe to request P-states control from SMM if _PPC support 3215 * has not been enabled. 
3216 */ 3217 if (acpi_ppc) 3218 acpi_processor_pstate_control(); 3219 } 3220 #else /* CONFIG_ACPI not enabled */ 3221 static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; } 3222 static inline bool intel_pstate_has_acpi_ppc(void) { return false; } 3223 static inline void intel_pstate_request_control_from_smm(void) {} 3224 #endif /* CONFIG_ACPI */ 3225 3226 #define INTEL_PSTATE_HWP_BROADWELL 0x01 3227 3228 #define X86_MATCH_HWP(model, hwp_mode) \ 3229 X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ 3230 X86_FEATURE_HWP, hwp_mode) 3231 3232 static const struct x86_cpu_id hwp_support_ids[] __initconst = { 3233 X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), 3234 X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL), 3235 X86_MATCH_HWP(ANY, 0), 3236 {} 3237 }; 3238 3239 static bool intel_pstate_hwp_is_enabled(void) 3240 { 3241 u64 value; 3242 3243 rdmsrl(MSR_PM_ENABLE, value); 3244 return !!(value & 0x1); 3245 } 3246 3247 static int __init intel_pstate_init(void) 3248 { 3249 const struct x86_cpu_id *id; 3250 int rc; 3251 3252 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) 3253 return -ENODEV; 3254 3255 id = x86_match_cpu(hwp_support_ids); 3256 if (id) { 3257 bool hwp_forced = intel_pstate_hwp_is_enabled(); 3258 3259 if (hwp_forced) 3260 pr_info("HWP enabled by BIOS\n"); 3261 else if (no_load) 3262 return -ENODEV; 3263 3264 copy_cpu_funcs(&core_funcs); 3265 /* 3266 * Avoid enabling HWP for processors without EPP support, 3267 * because that means incomplete HWP implementation which is a 3268 * corner case and supporting it is generally problematic. 3269 * 3270 * If HWP is enabled already, though, there is no choice but to 3271 * deal with it. 3272 */ 3273 if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) { 3274 hwp_active++; 3275 hwp_mode_bdw = id->driver_data; 3276 intel_pstate.attr = hwp_cpufreq_attrs; 3277 intel_cpufreq.attr = hwp_cpufreq_attrs; 3278 intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS; 3279 intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf; 3280 if (!default_driver) 3281 default_driver = &intel_pstate; 3282 3283 if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) 3284 intel_pstate_cppc_set_cpu_scaling(); 3285 3286 goto hwp_cpu_matched; 3287 } 3288 pr_info("HWP not enabled\n"); 3289 } else { 3290 if (no_load) 3291 return -ENODEV; 3292 3293 id = x86_match_cpu(intel_pstate_cpu_ids); 3294 if (!id) { 3295 pr_info("CPU model not supported\n"); 3296 return -ENODEV; 3297 } 3298 3299 copy_cpu_funcs((struct pstate_funcs *)id->driver_data); 3300 } 3301 3302 if (intel_pstate_msrs_not_valid()) { 3303 pr_info("Invalid MSRs\n"); 3304 return -ENODEV; 3305 } 3306 /* Without HWP start in the passive mode. */ 3307 if (!default_driver) 3308 default_driver = &intel_cpufreq; 3309 3310 hwp_cpu_matched: 3311 /* 3312 * The Intel pstate driver will be ignored if the platform 3313 * firmware has its own power management modes. 
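* (e.g. the server platforms listed in plat_info[] above, or systems where the
* MISC_PWR_MGMT MSR indicates out-of-band P-state control).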
3314 */ 3315 if (intel_pstate_platform_pwr_mgmt_exists()) { 3316 pr_info("P-states controlled by the platform\n"); 3317 return -ENODEV; 3318 } 3319 3320 if (!hwp_active && hwp_only) 3321 return -ENOTSUPP; 3322 3323 pr_info("Intel P-state driver initializing\n"); 3324 3325 all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus())); 3326 if (!all_cpu_data) 3327 return -ENOMEM; 3328 3329 intel_pstate_request_control_from_smm(); 3330 3331 intel_pstate_sysfs_expose_params(); 3332 3333 mutex_lock(&intel_pstate_driver_lock); 3334 rc = intel_pstate_register_driver(default_driver); 3335 mutex_unlock(&intel_pstate_driver_lock); 3336 if (rc) { 3337 intel_pstate_sysfs_remove(); 3338 return rc; 3339 } 3340 3341 if (hwp_active) { 3342 const struct x86_cpu_id *id; 3343 3344 id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids); 3345 if (id) { 3346 set_power_ctl_ee_state(false); 3347 pr_info("Disabling energy efficiency optimization\n"); 3348 } 3349 3350 pr_info("HWP enabled\n"); 3351 } else if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) { 3352 pr_warn("Problematic setup: Hybrid processor with disabled HWP\n"); 3353 } 3354 3355 return 0; 3356 } 3357 device_initcall(intel_pstate_init); 3358 3359 static int __init intel_pstate_setup(char *str) 3360 { 3361 if (!str) 3362 return -EINVAL; 3363 3364 if (!strcmp(str, "disable")) 3365 no_load = 1; 3366 else if (!strcmp(str, "active")) 3367 default_driver = &intel_pstate; 3368 else if (!strcmp(str, "passive")) 3369 default_driver = &intel_cpufreq; 3370 3371 if (!strcmp(str, "no_hwp")) 3372 no_hwp = 1; 3373 3374 if (!strcmp(str, "force")) 3375 force_load = 1; 3376 if (!strcmp(str, "hwp_only")) 3377 hwp_only = 1; 3378 if (!strcmp(str, "per_cpu_perf_limits")) 3379 per_cpu_limits = true; 3380 3381 #ifdef CONFIG_ACPI 3382 if (!strcmp(str, "support_acpi_ppc")) 3383 acpi_ppc = true; 3384 #endif 3385 3386 return 0; 3387 } 3388 early_param("intel_pstate", intel_pstate_setup); 3389 3390 MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>"); 3391 MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors"); 3392 MODULE_LICENSE("GPL"); 3393