// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched/cpufreq.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <linux/pm_qos.h>
#include <trace/events/power.h>

#include <asm/cpu.h>
#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>

#define INTEL_PSTATE_SAMPLING_INTERVAL	(10 * NSEC_PER_MSEC)

#define INTEL_CPUFREQ_TRANSITION_LATENCY	20000
#define INTEL_CPUFREQ_TRANSITION_DELAY_HWP	5000
#define INTEL_CPUFREQ_TRANSITION_DELAY		500

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
#endif

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))

#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

static inline u64 mul_ext_fp(u64 x, u64 y)
{
	return (x * y) >> EXT_FRAC_BITS;
}

static inline u64 div_ext_fp(u64 x, u64 y)
{
	return div64_u64(x << EXT_FRAC_BITS, y);
}

/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate next
 *			P state. This can be different than core_avg_perf
 *			to account for cpu idle period
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing next P State.
107 */ 108 struct sample { 109 int32_t core_avg_perf; 110 int32_t busy_scaled; 111 u64 aperf; 112 u64 mperf; 113 u64 tsc; 114 u64 time; 115 }; 116 117 /** 118 * struct pstate_data - Store P state data 119 * @current_pstate: Current requested P state 120 * @min_pstate: Min P state possible for this platform 121 * @max_pstate: Max P state possible for this platform 122 * @max_pstate_physical:This is physical Max P state for a processor 123 * This can be higher than the max_pstate which can 124 * be limited by platform thermal design power limits 125 * @perf_ctl_scaling: PERF_CTL P-state to frequency scaling factor 126 * @scaling: Scaling factor between performance and frequency 127 * @turbo_pstate: Max Turbo P state possible for this platform 128 * @min_freq: @min_pstate frequency in cpufreq units 129 * @max_freq: @max_pstate frequency in cpufreq units 130 * @turbo_freq: @turbo_pstate frequency in cpufreq units 131 * 132 * Stores the per cpu model P state limits and current P state. 133 */ 134 struct pstate_data { 135 int current_pstate; 136 int min_pstate; 137 int max_pstate; 138 int max_pstate_physical; 139 int perf_ctl_scaling; 140 int scaling; 141 int turbo_pstate; 142 unsigned int min_freq; 143 unsigned int max_freq; 144 unsigned int turbo_freq; 145 }; 146 147 /** 148 * struct vid_data - Stores voltage information data 149 * @min: VID data for this platform corresponding to 150 * the lowest P state 151 * @max: VID data corresponding to the highest P State. 152 * @turbo: VID data for turbo P state 153 * @ratio: Ratio of (vid max - vid min) / 154 * (max P state - Min P State) 155 * 156 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling) 157 * This data is used in Atom platforms, where in addition to target P state, 158 * the voltage data needs to be specified to select next P State. 159 */ 160 struct vid_data { 161 int min; 162 int max; 163 int turbo; 164 int32_t ratio; 165 }; 166 167 /** 168 * struct global_params - Global parameters, mostly tunable via sysfs. 169 * @no_turbo: Whether or not to use turbo P-states. 170 * @turbo_disabled: Whether or not turbo P-states are available at all, 171 * based on the MSR_IA32_MISC_ENABLE value and whether or 172 * not the maximum reported turbo P-state is different from 173 * the maximum reported non-turbo one. 174 * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq. 175 * @min_perf_pct: Minimum capacity limit in percent of the maximum turbo 176 * P-state capacity. 177 * @max_perf_pct: Maximum capacity limit in percent of the maximum turbo 178 * P-state capacity. 179 */ 180 struct global_params { 181 bool no_turbo; 182 bool turbo_disabled; 183 bool turbo_disabled_mf; 184 int max_perf_pct; 185 int min_perf_pct; 186 }; 187 188 /** 189 * struct cpudata - Per CPU instance data storage 190 * @cpu: CPU number for this instance data 191 * @policy: CPUFreq policy value 192 * @update_util: CPUFreq utility callback information 193 * @update_util_set: CPUFreq utility callback is set 194 * @iowait_boost: iowait-related boost fraction 195 * @last_update: Time of the last update. 
196 * @pstate: Stores P state limits for this CPU 197 * @vid: Stores VID limits for this CPU 198 * @last_sample_time: Last Sample time 199 * @aperf_mperf_shift: APERF vs MPERF counting frequency difference 200 * @prev_aperf: Last APERF value read from APERF MSR 201 * @prev_mperf: Last MPERF value read from MPERF MSR 202 * @prev_tsc: Last timestamp counter (TSC) value 203 * @prev_cummulative_iowait: IO Wait time difference from last and 204 * current sample 205 * @sample: Storage for storing last Sample data 206 * @min_perf_ratio: Minimum capacity in terms of PERF or HWP ratios 207 * @max_perf_ratio: Maximum capacity in terms of PERF or HWP ratios 208 * @acpi_perf_data: Stores ACPI perf information read from _PSS 209 * @valid_pss_table: Set to true for valid ACPI _PSS entries found 210 * @epp_powersave: Last saved HWP energy performance preference 211 * (EPP) or energy performance bias (EPB), 212 * when policy switched to performance 213 * @epp_policy: Last saved policy used to set EPP/EPB 214 * @epp_default: Power on default HWP energy performance 215 * preference/bias 216 * @epp_cached Cached HWP energy-performance preference value 217 * @hwp_req_cached: Cached value of the last HWP Request MSR 218 * @hwp_cap_cached: Cached value of the last HWP Capabilities MSR 219 * @last_io_update: Last time when IO wake flag was set 220 * @sched_flags: Store scheduler flags for possible cross CPU update 221 * @hwp_boost_min: Last HWP boosted min performance 222 * @suspended: Whether or not the driver has been suspended. 223 * 224 * This structure stores per CPU instance data for all CPUs. 225 */ 226 struct cpudata { 227 int cpu; 228 229 unsigned int policy; 230 struct update_util_data update_util; 231 bool update_util_set; 232 233 struct pstate_data pstate; 234 struct vid_data vid; 235 236 u64 last_update; 237 u64 last_sample_time; 238 u64 aperf_mperf_shift; 239 u64 prev_aperf; 240 u64 prev_mperf; 241 u64 prev_tsc; 242 u64 prev_cummulative_iowait; 243 struct sample sample; 244 int32_t min_perf_ratio; 245 int32_t max_perf_ratio; 246 #ifdef CONFIG_ACPI 247 struct acpi_processor_performance acpi_perf_data; 248 bool valid_pss_table; 249 #endif 250 unsigned int iowait_boost; 251 s16 epp_powersave; 252 s16 epp_policy; 253 s16 epp_default; 254 s16 epp_cached; 255 u64 hwp_req_cached; 256 u64 hwp_cap_cached; 257 u64 last_io_update; 258 unsigned int sched_flags; 259 u32 hwp_boost_min; 260 bool suspended; 261 }; 262 263 static struct cpudata **all_cpu_data; 264 265 /** 266 * struct pstate_funcs - Per CPU model specific callbacks 267 * @get_max: Callback to get maximum non turbo effective P state 268 * @get_max_physical: Callback to get maximum non turbo physical P state 269 * @get_min: Callback to get minimum P state 270 * @get_turbo: Callback to get turbo P state 271 * @get_scaling: Callback to get frequency scaling factor 272 * @get_cpu_scaling: Get frequency scaling factor for a given cpu 273 * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference 274 * @get_val: Callback to convert P state to actual MSR write value 275 * @get_vid: Callback to get VID data for Atom platforms 276 * 277 * Core and Atom CPU models have different way to get P State limits. This 278 * structure is used to store those callbacks. 
 */
struct pstate_funcs {
	int (*get_max)(int cpu);
	int (*get_max_physical)(int cpu);
	int (*get_min)(int cpu);
	int (*get_turbo)(int cpu);
	int (*get_scaling)(void);
	int (*get_cpu_scaling)(int cpu);
	int (*get_aperf_mperf_shift)(void);
	u64 (*get_val)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
};

static struct pstate_funcs pstate_funcs __read_mostly;

static int hwp_active __read_mostly;
static int hwp_mode_bdw __read_mostly;
static bool per_cpu_limits __read_mostly;
static bool hwp_boost __read_mostly;

static struct cpufreq_driver *intel_pstate_driver __read_mostly;

#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif

static struct global_params global;

static DEFINE_MUTEX(intel_pstate_driver_lock);
static DEFINE_MUTEX(intel_pstate_limits_lock);

#ifdef CONFIG_ACPI

static bool intel_pstate_acpi_pm_profile_server(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
	    acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
		return true;

	return false;
}

static bool intel_pstate_get_ppc_enable_status(void)
{
	if (intel_pstate_acpi_pm_profile_server())
		return true;

	return acpi_ppc;
}

#ifdef CONFIG_ACPI_CPPC_LIB

/* The work item is needed to avoid CPU hotplug locking issues */
static void intel_pstate_sched_itmt_work_fn(struct work_struct *work)
{
	sched_set_itmt_support();
}

static DECLARE_WORK(sched_itmt_work, intel_pstate_sched_itmt_work_fn);

#define CPPC_MAX_PERF	U8_MAX

static void intel_pstate_set_itmt_prio(int cpu)
{
	struct cppc_perf_caps cppc_perf;
	static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
	int ret;

	ret = cppc_get_perf_caps(cpu, &cppc_perf);
	if (ret)
		return;

	/*
	 * On some systems with overclocking enabled, CPPC.highest_perf is
	 * hardcoded to 0xff and cannot be used to enable ITMT. In that case,
	 * look at MSR_HWP_CAPABILITIES bits [8:0] to decide instead.
	 */
	if (cppc_perf.highest_perf == CPPC_MAX_PERF)
		cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));

	/*
	 * The priorities can be set regardless of whether or not
	 * sched_set_itmt_support(true) has been called and it is valid to
	 * update them at any time after it has been called.
	 */
	sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);

	if (max_highest_perf <= min_highest_perf) {
		if (cppc_perf.highest_perf > max_highest_perf)
			max_highest_perf = cppc_perf.highest_perf;

		if (cppc_perf.highest_perf < min_highest_perf)
			min_highest_perf = cppc_perf.highest_perf;

		if (max_highest_perf > min_highest_perf) {
			/*
			 * This code can be run during CPU online under the
			 * CPU hotplug locks, so sched_set_itmt_support()
			 * cannot be called from here. Queue up a work item
			 * to invoke it.
379 */ 380 schedule_work(&sched_itmt_work); 381 } 382 } 383 } 384 385 static int intel_pstate_get_cppc_guaranteed(int cpu) 386 { 387 struct cppc_perf_caps cppc_perf; 388 int ret; 389 390 ret = cppc_get_perf_caps(cpu, &cppc_perf); 391 if (ret) 392 return ret; 393 394 if (cppc_perf.guaranteed_perf) 395 return cppc_perf.guaranteed_perf; 396 397 return cppc_perf.nominal_perf; 398 } 399 #else /* CONFIG_ACPI_CPPC_LIB */ 400 static inline void intel_pstate_set_itmt_prio(int cpu) 401 { 402 } 403 #endif /* CONFIG_ACPI_CPPC_LIB */ 404 405 static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 406 { 407 struct cpudata *cpu; 408 int ret; 409 int i; 410 411 if (hwp_active) { 412 intel_pstate_set_itmt_prio(policy->cpu); 413 return; 414 } 415 416 if (!intel_pstate_get_ppc_enable_status()) 417 return; 418 419 cpu = all_cpu_data[policy->cpu]; 420 421 ret = acpi_processor_register_performance(&cpu->acpi_perf_data, 422 policy->cpu); 423 if (ret) 424 return; 425 426 /* 427 * Check if the control value in _PSS is for PERF_CTL MSR, which should 428 * guarantee that the states returned by it map to the states in our 429 * list directly. 430 */ 431 if (cpu->acpi_perf_data.control_register.space_id != 432 ACPI_ADR_SPACE_FIXED_HARDWARE) 433 goto err; 434 435 /* 436 * If there is only one entry _PSS, simply ignore _PSS and continue as 437 * usual without taking _PSS into account 438 */ 439 if (cpu->acpi_perf_data.state_count < 2) 440 goto err; 441 442 pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu); 443 for (i = 0; i < cpu->acpi_perf_data.state_count; i++) { 444 pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n", 445 (i == cpu->acpi_perf_data.state ? '*' : ' '), i, 446 (u32) cpu->acpi_perf_data.states[i].core_frequency, 447 (u32) cpu->acpi_perf_data.states[i].power, 448 (u32) cpu->acpi_perf_data.states[i].control); 449 } 450 451 /* 452 * The _PSS table doesn't contain whole turbo frequency range. 453 * This just contains +1 MHZ above the max non turbo frequency, 454 * with control value corresponding to max turbo ratio. But 455 * when cpufreq set policy is called, it will call with this 456 * max frequency, which will cause a reduced performance as 457 * this driver uses real max turbo frequency as the max 458 * frequency. So correct this frequency in _PSS table to 459 * correct max turbo frequency based on the turbo state. 460 * Also need to convert to MHz as _PSS freq is in MHz. 
461 */ 462 if (!global.turbo_disabled) 463 cpu->acpi_perf_data.states[0].core_frequency = 464 policy->cpuinfo.max_freq / 1000; 465 cpu->valid_pss_table = true; 466 pr_debug("_PPC limits will be enforced\n"); 467 468 return; 469 470 err: 471 cpu->valid_pss_table = false; 472 acpi_processor_unregister_performance(policy->cpu); 473 } 474 475 static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 476 { 477 struct cpudata *cpu; 478 479 cpu = all_cpu_data[policy->cpu]; 480 if (!cpu->valid_pss_table) 481 return; 482 483 acpi_processor_unregister_performance(policy->cpu); 484 } 485 #else /* CONFIG_ACPI */ 486 static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 487 { 488 } 489 490 static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 491 { 492 } 493 494 static inline bool intel_pstate_acpi_pm_profile_server(void) 495 { 496 return false; 497 } 498 #endif /* CONFIG_ACPI */ 499 500 #ifndef CONFIG_ACPI_CPPC_LIB 501 static inline int intel_pstate_get_cppc_guaranteed(int cpu) 502 { 503 return -ENOTSUPP; 504 } 505 #endif /* CONFIG_ACPI_CPPC_LIB */ 506 507 /** 508 * intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels. 509 * @cpu: Target CPU. 510 * 511 * On hybrid processors, HWP may expose more performance levels than there are 512 * P-states accessible through the PERF_CTL interface. If that happens, the 513 * scaling factor between HWP performance levels and CPU frequency will be less 514 * than the scaling factor between P-state values and CPU frequency. 515 * 516 * In that case, adjust the CPU parameters used in computations accordingly. 517 */ 518 static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu) 519 { 520 int perf_ctl_max_phys = cpu->pstate.max_pstate_physical; 521 int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; 522 int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu); 523 int scaling = cpu->pstate.scaling; 524 525 pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys); 526 pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo); 527 pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling); 528 pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate); 529 pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate); 530 pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling); 531 532 cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_pstate * scaling, 533 perf_ctl_scaling); 534 cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling, 535 perf_ctl_scaling); 536 537 cpu->pstate.max_pstate_physical = 538 DIV_ROUND_UP(perf_ctl_max_phys * perf_ctl_scaling, 539 scaling); 540 541 cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling; 542 /* 543 * Cast the min P-state value retrieved via pstate_funcs.get_min() to 544 * the effective range of HWP performance levels. 545 */ 546 cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling); 547 } 548 549 static inline void update_turbo_state(void) 550 { 551 u64 misc_en; 552 struct cpudata *cpu; 553 554 cpu = all_cpu_data[0]; 555 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); 556 global.turbo_disabled = 557 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || 558 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); 559 } 560 561 static int min_perf_pct_min(void) 562 { 563 struct cpudata *cpu = all_cpu_data[0]; 564 int turbo_pstate = cpu->pstate.turbo_pstate; 565 566 return turbo_pstate ? 
567 (cpu->pstate.min_pstate * 100 / turbo_pstate) : 0; 568 } 569 570 static s16 intel_pstate_get_epb(struct cpudata *cpu_data) 571 { 572 u64 epb; 573 int ret; 574 575 if (!boot_cpu_has(X86_FEATURE_EPB)) 576 return -ENXIO; 577 578 ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); 579 if (ret) 580 return (s16)ret; 581 582 return (s16)(epb & 0x0f); 583 } 584 585 static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data) 586 { 587 s16 epp; 588 589 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { 590 /* 591 * When hwp_req_data is 0, means that caller didn't read 592 * MSR_HWP_REQUEST, so need to read and get EPP. 593 */ 594 if (!hwp_req_data) { 595 epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, 596 &hwp_req_data); 597 if (epp) 598 return epp; 599 } 600 epp = (hwp_req_data >> 24) & 0xff; 601 } else { 602 /* When there is no EPP present, HWP uses EPB settings */ 603 epp = intel_pstate_get_epb(cpu_data); 604 } 605 606 return epp; 607 } 608 609 static int intel_pstate_set_epb(int cpu, s16 pref) 610 { 611 u64 epb; 612 int ret; 613 614 if (!boot_cpu_has(X86_FEATURE_EPB)) 615 return -ENXIO; 616 617 ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); 618 if (ret) 619 return ret; 620 621 epb = (epb & ~0x0f) | pref; 622 wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb); 623 624 return 0; 625 } 626 627 /* 628 * EPP/EPB display strings corresponding to EPP index in the 629 * energy_perf_strings[] 630 * index String 631 *------------------------------------- 632 * 0 default 633 * 1 performance 634 * 2 balance_performance 635 * 3 balance_power 636 * 4 power 637 */ 638 static const char * const energy_perf_strings[] = { 639 "default", 640 "performance", 641 "balance_performance", 642 "balance_power", 643 "power", 644 NULL 645 }; 646 static const unsigned int epp_values[] = { 647 HWP_EPP_PERFORMANCE, 648 HWP_EPP_BALANCE_PERFORMANCE, 649 HWP_EPP_BALANCE_POWERSAVE, 650 HWP_EPP_POWERSAVE 651 }; 652 653 static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp) 654 { 655 s16 epp; 656 int index = -EINVAL; 657 658 *raw_epp = 0; 659 epp = intel_pstate_get_epp(cpu_data, 0); 660 if (epp < 0) 661 return epp; 662 663 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { 664 if (epp == HWP_EPP_PERFORMANCE) 665 return 1; 666 if (epp == HWP_EPP_BALANCE_PERFORMANCE) 667 return 2; 668 if (epp == HWP_EPP_BALANCE_POWERSAVE) 669 return 3; 670 if (epp == HWP_EPP_POWERSAVE) 671 return 4; 672 *raw_epp = epp; 673 return 0; 674 } else if (boot_cpu_has(X86_FEATURE_EPB)) { 675 /* 676 * Range: 677 * 0x00-0x03 : Performance 678 * 0x04-0x07 : Balance performance 679 * 0x08-0x0B : Balance power 680 * 0x0C-0x0F : Power 681 * The EPB is a 4 bit value, but our ranges restrict the 682 * value which can be set. Here only using top two bits 683 * effectively. 684 */ 685 index = (epp >> 2) + 1; 686 } 687 688 return index; 689 } 690 691 static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp) 692 { 693 int ret; 694 695 /* 696 * Use the cached HWP Request MSR value, because in the active mode the 697 * register itself may be updated by intel_pstate_hwp_boost_up() or 698 * intel_pstate_hwp_boost_down() at any time. 699 */ 700 u64 value = READ_ONCE(cpu->hwp_req_cached); 701 702 value &= ~GENMASK_ULL(31, 24); 703 value |= (u64)epp << 24; 704 /* 705 * The only other updater of hwp_req_cached in the active mode, 706 * intel_pstate_hwp_set(), is called under the same lock as this 707 * function, so it cannot run in parallel with the update below. 
708 */ 709 WRITE_ONCE(cpu->hwp_req_cached, value); 710 ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); 711 if (!ret) 712 cpu->epp_cached = epp; 713 714 return ret; 715 } 716 717 static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data, 718 int pref_index, bool use_raw, 719 u32 raw_epp) 720 { 721 int epp = -EINVAL; 722 int ret; 723 724 if (!pref_index) 725 epp = cpu_data->epp_default; 726 727 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { 728 if (use_raw) 729 epp = raw_epp; 730 else if (epp == -EINVAL) 731 epp = epp_values[pref_index - 1]; 732 733 /* 734 * To avoid confusion, refuse to set EPP to any values different 735 * from 0 (performance) if the current policy is "performance", 736 * because those values would be overridden. 737 */ 738 if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) 739 return -EBUSY; 740 741 ret = intel_pstate_set_epp(cpu_data, epp); 742 } else { 743 if (epp == -EINVAL) 744 epp = (pref_index - 1) << 2; 745 ret = intel_pstate_set_epb(cpu_data->cpu, epp); 746 } 747 748 return ret; 749 } 750 751 static ssize_t show_energy_performance_available_preferences( 752 struct cpufreq_policy *policy, char *buf) 753 { 754 int i = 0; 755 int ret = 0; 756 757 while (energy_perf_strings[i] != NULL) 758 ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]); 759 760 ret += sprintf(&buf[ret], "\n"); 761 762 return ret; 763 } 764 765 cpufreq_freq_attr_ro(energy_performance_available_preferences); 766 767 static struct cpufreq_driver intel_pstate; 768 769 static ssize_t store_energy_performance_preference( 770 struct cpufreq_policy *policy, const char *buf, size_t count) 771 { 772 struct cpudata *cpu = all_cpu_data[policy->cpu]; 773 char str_preference[21]; 774 bool raw = false; 775 ssize_t ret; 776 u32 epp = 0; 777 778 ret = sscanf(buf, "%20s", str_preference); 779 if (ret != 1) 780 return -EINVAL; 781 782 ret = match_string(energy_perf_strings, -1, str_preference); 783 if (ret < 0) { 784 if (!boot_cpu_has(X86_FEATURE_HWP_EPP)) 785 return ret; 786 787 ret = kstrtouint(buf, 10, &epp); 788 if (ret) 789 return ret; 790 791 if (epp > 255) 792 return -EINVAL; 793 794 raw = true; 795 } 796 797 /* 798 * This function runs with the policy R/W semaphore held, which 799 * guarantees that the driver pointer will not change while it is 800 * running. 801 */ 802 if (!intel_pstate_driver) 803 return -EAGAIN; 804 805 mutex_lock(&intel_pstate_limits_lock); 806 807 if (intel_pstate_driver == &intel_pstate) { 808 ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp); 809 } else { 810 /* 811 * In the passive mode the governor needs to be stopped on the 812 * target CPU before the EPP update and restarted after it, 813 * which is super-heavy-weight, so make sure it is worth doing 814 * upfront. 815 */ 816 if (!raw) 817 epp = ret ? 
epp_values[ret - 1] : cpu->epp_default; 818 819 if (cpu->epp_cached != epp) { 820 int err; 821 822 cpufreq_stop_governor(policy); 823 ret = intel_pstate_set_epp(cpu, epp); 824 err = cpufreq_start_governor(policy); 825 if (!ret) 826 ret = err; 827 } 828 } 829 830 mutex_unlock(&intel_pstate_limits_lock); 831 832 return ret ?: count; 833 } 834 835 static ssize_t show_energy_performance_preference( 836 struct cpufreq_policy *policy, char *buf) 837 { 838 struct cpudata *cpu_data = all_cpu_data[policy->cpu]; 839 int preference, raw_epp; 840 841 preference = intel_pstate_get_energy_pref_index(cpu_data, &raw_epp); 842 if (preference < 0) 843 return preference; 844 845 if (raw_epp) 846 return sprintf(buf, "%d\n", raw_epp); 847 else 848 return sprintf(buf, "%s\n", energy_perf_strings[preference]); 849 } 850 851 cpufreq_freq_attr_rw(energy_performance_preference); 852 853 static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf) 854 { 855 struct cpudata *cpu = all_cpu_data[policy->cpu]; 856 int ratio, freq; 857 858 ratio = intel_pstate_get_cppc_guaranteed(policy->cpu); 859 if (ratio <= 0) { 860 u64 cap; 861 862 rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap); 863 ratio = HWP_GUARANTEED_PERF(cap); 864 } 865 866 freq = ratio * cpu->pstate.scaling; 867 if (cpu->pstate.scaling != cpu->pstate.perf_ctl_scaling) 868 freq = rounddown(freq, cpu->pstate.perf_ctl_scaling); 869 870 return sprintf(buf, "%d\n", freq); 871 } 872 873 cpufreq_freq_attr_ro(base_frequency); 874 875 static struct freq_attr *hwp_cpufreq_attrs[] = { 876 &energy_performance_preference, 877 &energy_performance_available_preferences, 878 &base_frequency, 879 NULL, 880 }; 881 882 static void __intel_pstate_get_hwp_cap(struct cpudata *cpu) 883 { 884 u64 cap; 885 886 rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap); 887 WRITE_ONCE(cpu->hwp_cap_cached, cap); 888 cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap); 889 cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap); 890 } 891 892 static void intel_pstate_get_hwp_cap(struct cpudata *cpu) 893 { 894 int scaling = cpu->pstate.scaling; 895 896 __intel_pstate_get_hwp_cap(cpu); 897 898 cpu->pstate.max_freq = cpu->pstate.max_pstate * scaling; 899 cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling; 900 if (scaling != cpu->pstate.perf_ctl_scaling) { 901 int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; 902 903 cpu->pstate.max_freq = rounddown(cpu->pstate.max_freq, 904 perf_ctl_scaling); 905 cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_freq, 906 perf_ctl_scaling); 907 } 908 } 909 910 static void intel_pstate_hwp_set(unsigned int cpu) 911 { 912 struct cpudata *cpu_data = all_cpu_data[cpu]; 913 int max, min; 914 u64 value; 915 s16 epp; 916 917 max = cpu_data->max_perf_ratio; 918 min = cpu_data->min_perf_ratio; 919 920 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) 921 min = max; 922 923 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); 924 925 value &= ~HWP_MIN_PERF(~0L); 926 value |= HWP_MIN_PERF(min); 927 928 value &= ~HWP_MAX_PERF(~0L); 929 value |= HWP_MAX_PERF(max); 930 931 if (cpu_data->epp_policy == cpu_data->policy) 932 goto skip_epp; 933 934 cpu_data->epp_policy = cpu_data->policy; 935 936 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) { 937 epp = intel_pstate_get_epp(cpu_data, value); 938 cpu_data->epp_powersave = epp; 939 /* If EPP read was failed, then don't try to write */ 940 if (epp < 0) 941 goto skip_epp; 942 943 epp = 0; 944 } else { 945 /* skip setting EPP, when saved value is invalid */ 946 if (cpu_data->epp_powersave < 0) 947 goto 
skip_epp; 948 949 /* 950 * No need to restore EPP when it is not zero. This 951 * means: 952 * - Policy is not changed 953 * - user has manually changed 954 * - Error reading EPB 955 */ 956 epp = intel_pstate_get_epp(cpu_data, value); 957 if (epp) 958 goto skip_epp; 959 960 epp = cpu_data->epp_powersave; 961 } 962 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { 963 value &= ~GENMASK_ULL(31, 24); 964 value |= (u64)epp << 24; 965 } else { 966 intel_pstate_set_epb(cpu, epp); 967 } 968 skip_epp: 969 WRITE_ONCE(cpu_data->hwp_req_cached, value); 970 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); 971 } 972 973 static void intel_pstate_hwp_offline(struct cpudata *cpu) 974 { 975 u64 value = READ_ONCE(cpu->hwp_req_cached); 976 int min_perf; 977 978 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { 979 /* 980 * In case the EPP has been set to "performance" by the 981 * active mode "performance" scaling algorithm, replace that 982 * temporary value with the cached EPP one. 983 */ 984 value &= ~GENMASK_ULL(31, 24); 985 value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached); 986 /* 987 * However, make sure that EPP will be set to "performance" when 988 * the CPU is brought back online again and the "performance" 989 * scaling algorithm is still in effect. 990 */ 991 cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN; 992 } 993 994 /* 995 * Clear the desired perf field in the cached HWP request value to 996 * prevent nonzero desired values from being leaked into the active 997 * mode. 998 */ 999 value &= ~HWP_DESIRED_PERF(~0L); 1000 WRITE_ONCE(cpu->hwp_req_cached, value); 1001 1002 value &= ~GENMASK_ULL(31, 0); 1003 min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached)); 1004 1005 /* Set hwp_max = hwp_min */ 1006 value |= HWP_MAX_PERF(min_perf); 1007 value |= HWP_MIN_PERF(min_perf); 1008 1009 /* Set EPP to min */ 1010 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) 1011 value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE); 1012 1013 wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); 1014 } 1015 1016 #define POWER_CTL_EE_ENABLE 1 1017 #define POWER_CTL_EE_DISABLE 2 1018 1019 static int power_ctl_ee_state; 1020 1021 static void set_power_ctl_ee_state(bool input) 1022 { 1023 u64 power_ctl; 1024 1025 mutex_lock(&intel_pstate_driver_lock); 1026 rdmsrl(MSR_IA32_POWER_CTL, power_ctl); 1027 if (input) { 1028 power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE); 1029 power_ctl_ee_state = POWER_CTL_EE_ENABLE; 1030 } else { 1031 power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE); 1032 power_ctl_ee_state = POWER_CTL_EE_DISABLE; 1033 } 1034 wrmsrl(MSR_IA32_POWER_CTL, power_ctl); 1035 mutex_unlock(&intel_pstate_driver_lock); 1036 } 1037 1038 static void intel_pstate_hwp_enable(struct cpudata *cpudata); 1039 1040 static void intel_pstate_hwp_reenable(struct cpudata *cpu) 1041 { 1042 intel_pstate_hwp_enable(cpu); 1043 wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached)); 1044 } 1045 1046 static int intel_pstate_suspend(struct cpufreq_policy *policy) 1047 { 1048 struct cpudata *cpu = all_cpu_data[policy->cpu]; 1049 1050 pr_debug("CPU %d suspending\n", cpu->cpu); 1051 1052 cpu->suspended = true; 1053 1054 return 0; 1055 } 1056 1057 static int intel_pstate_resume(struct cpufreq_policy *policy) 1058 { 1059 struct cpudata *cpu = all_cpu_data[policy->cpu]; 1060 1061 pr_debug("CPU %d resuming\n", cpu->cpu); 1062 1063 /* Only restore if the system default is changed */ 1064 if (power_ctl_ee_state == POWER_CTL_EE_ENABLE) 1065 set_power_ctl_ee_state(true); 1066 else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE) 1067 set_power_ctl_ee_state(false); 1068 1069 
if (cpu->suspended && hwp_active) { 1070 mutex_lock(&intel_pstate_limits_lock); 1071 1072 /* Re-enable HWP, because "online" has not done that. */ 1073 intel_pstate_hwp_reenable(cpu); 1074 1075 mutex_unlock(&intel_pstate_limits_lock); 1076 } 1077 1078 cpu->suspended = false; 1079 1080 return 0; 1081 } 1082 1083 static void intel_pstate_update_policies(void) 1084 { 1085 int cpu; 1086 1087 for_each_possible_cpu(cpu) 1088 cpufreq_update_policy(cpu); 1089 } 1090 1091 static void intel_pstate_update_max_freq(unsigned int cpu) 1092 { 1093 struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu); 1094 struct cpudata *cpudata; 1095 1096 if (!policy) 1097 return; 1098 1099 cpudata = all_cpu_data[cpu]; 1100 policy->cpuinfo.max_freq = global.turbo_disabled_mf ? 1101 cpudata->pstate.max_freq : cpudata->pstate.turbo_freq; 1102 1103 refresh_frequency_limits(policy); 1104 1105 cpufreq_cpu_release(policy); 1106 } 1107 1108 static void intel_pstate_update_limits(unsigned int cpu) 1109 { 1110 mutex_lock(&intel_pstate_driver_lock); 1111 1112 update_turbo_state(); 1113 /* 1114 * If turbo has been turned on or off globally, policy limits for 1115 * all CPUs need to be updated to reflect that. 1116 */ 1117 if (global.turbo_disabled_mf != global.turbo_disabled) { 1118 global.turbo_disabled_mf = global.turbo_disabled; 1119 arch_set_max_freq_ratio(global.turbo_disabled); 1120 for_each_possible_cpu(cpu) 1121 intel_pstate_update_max_freq(cpu); 1122 } else { 1123 cpufreq_update_policy(cpu); 1124 } 1125 1126 mutex_unlock(&intel_pstate_driver_lock); 1127 } 1128 1129 /************************** sysfs begin ************************/ 1130 #define show_one(file_name, object) \ 1131 static ssize_t show_##file_name \ 1132 (struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ 1133 { \ 1134 return sprintf(buf, "%u\n", global.object); \ 1135 } 1136 1137 static ssize_t intel_pstate_show_status(char *buf); 1138 static int intel_pstate_update_status(const char *buf, size_t size); 1139 1140 static ssize_t show_status(struct kobject *kobj, 1141 struct kobj_attribute *attr, char *buf) 1142 { 1143 ssize_t ret; 1144 1145 mutex_lock(&intel_pstate_driver_lock); 1146 ret = intel_pstate_show_status(buf); 1147 mutex_unlock(&intel_pstate_driver_lock); 1148 1149 return ret; 1150 } 1151 1152 static ssize_t store_status(struct kobject *a, struct kobj_attribute *b, 1153 const char *buf, size_t count) 1154 { 1155 char *p = memchr(buf, '\n', count); 1156 int ret; 1157 1158 mutex_lock(&intel_pstate_driver_lock); 1159 ret = intel_pstate_update_status(buf, p ? p - buf : count); 1160 mutex_unlock(&intel_pstate_driver_lock); 1161 1162 return ret < 0 ? 
ret : count; 1163 } 1164 1165 static ssize_t show_turbo_pct(struct kobject *kobj, 1166 struct kobj_attribute *attr, char *buf) 1167 { 1168 struct cpudata *cpu; 1169 int total, no_turbo, turbo_pct; 1170 uint32_t turbo_fp; 1171 1172 mutex_lock(&intel_pstate_driver_lock); 1173 1174 if (!intel_pstate_driver) { 1175 mutex_unlock(&intel_pstate_driver_lock); 1176 return -EAGAIN; 1177 } 1178 1179 cpu = all_cpu_data[0]; 1180 1181 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; 1182 no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1; 1183 turbo_fp = div_fp(no_turbo, total); 1184 turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100))); 1185 1186 mutex_unlock(&intel_pstate_driver_lock); 1187 1188 return sprintf(buf, "%u\n", turbo_pct); 1189 } 1190 1191 static ssize_t show_num_pstates(struct kobject *kobj, 1192 struct kobj_attribute *attr, char *buf) 1193 { 1194 struct cpudata *cpu; 1195 int total; 1196 1197 mutex_lock(&intel_pstate_driver_lock); 1198 1199 if (!intel_pstate_driver) { 1200 mutex_unlock(&intel_pstate_driver_lock); 1201 return -EAGAIN; 1202 } 1203 1204 cpu = all_cpu_data[0]; 1205 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; 1206 1207 mutex_unlock(&intel_pstate_driver_lock); 1208 1209 return sprintf(buf, "%u\n", total); 1210 } 1211 1212 static ssize_t show_no_turbo(struct kobject *kobj, 1213 struct kobj_attribute *attr, char *buf) 1214 { 1215 ssize_t ret; 1216 1217 mutex_lock(&intel_pstate_driver_lock); 1218 1219 if (!intel_pstate_driver) { 1220 mutex_unlock(&intel_pstate_driver_lock); 1221 return -EAGAIN; 1222 } 1223 1224 update_turbo_state(); 1225 if (global.turbo_disabled) 1226 ret = sprintf(buf, "%u\n", global.turbo_disabled); 1227 else 1228 ret = sprintf(buf, "%u\n", global.no_turbo); 1229 1230 mutex_unlock(&intel_pstate_driver_lock); 1231 1232 return ret; 1233 } 1234 1235 static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, 1236 const char *buf, size_t count) 1237 { 1238 unsigned int input; 1239 int ret; 1240 1241 ret = sscanf(buf, "%u", &input); 1242 if (ret != 1) 1243 return -EINVAL; 1244 1245 mutex_lock(&intel_pstate_driver_lock); 1246 1247 if (!intel_pstate_driver) { 1248 mutex_unlock(&intel_pstate_driver_lock); 1249 return -EAGAIN; 1250 } 1251 1252 mutex_lock(&intel_pstate_limits_lock); 1253 1254 update_turbo_state(); 1255 if (global.turbo_disabled) { 1256 pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n"); 1257 mutex_unlock(&intel_pstate_limits_lock); 1258 mutex_unlock(&intel_pstate_driver_lock); 1259 return -EPERM; 1260 } 1261 1262 global.no_turbo = clamp_t(int, input, 0, 1); 1263 1264 if (global.no_turbo) { 1265 struct cpudata *cpu = all_cpu_data[0]; 1266 int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate; 1267 1268 /* Squash the global minimum into the permitted range. 
*/ 1269 if (global.min_perf_pct > pct) 1270 global.min_perf_pct = pct; 1271 } 1272 1273 mutex_unlock(&intel_pstate_limits_lock); 1274 1275 intel_pstate_update_policies(); 1276 1277 mutex_unlock(&intel_pstate_driver_lock); 1278 1279 return count; 1280 } 1281 1282 static void update_qos_request(enum freq_qos_req_type type) 1283 { 1284 struct freq_qos_request *req; 1285 struct cpufreq_policy *policy; 1286 int i; 1287 1288 for_each_possible_cpu(i) { 1289 struct cpudata *cpu = all_cpu_data[i]; 1290 unsigned int freq, perf_pct; 1291 1292 policy = cpufreq_cpu_get(i); 1293 if (!policy) 1294 continue; 1295 1296 req = policy->driver_data; 1297 cpufreq_cpu_put(policy); 1298 1299 if (!req) 1300 continue; 1301 1302 if (hwp_active) 1303 intel_pstate_get_hwp_cap(cpu); 1304 1305 if (type == FREQ_QOS_MIN) { 1306 perf_pct = global.min_perf_pct; 1307 } else { 1308 req++; 1309 perf_pct = global.max_perf_pct; 1310 } 1311 1312 freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100); 1313 1314 if (freq_qos_update_request(req, freq) < 0) 1315 pr_warn("Failed to update freq constraint: CPU%d\n", i); 1316 } 1317 } 1318 1319 static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b, 1320 const char *buf, size_t count) 1321 { 1322 unsigned int input; 1323 int ret; 1324 1325 ret = sscanf(buf, "%u", &input); 1326 if (ret != 1) 1327 return -EINVAL; 1328 1329 mutex_lock(&intel_pstate_driver_lock); 1330 1331 if (!intel_pstate_driver) { 1332 mutex_unlock(&intel_pstate_driver_lock); 1333 return -EAGAIN; 1334 } 1335 1336 mutex_lock(&intel_pstate_limits_lock); 1337 1338 global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100); 1339 1340 mutex_unlock(&intel_pstate_limits_lock); 1341 1342 if (intel_pstate_driver == &intel_pstate) 1343 intel_pstate_update_policies(); 1344 else 1345 update_qos_request(FREQ_QOS_MAX); 1346 1347 mutex_unlock(&intel_pstate_driver_lock); 1348 1349 return count; 1350 } 1351 1352 static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b, 1353 const char *buf, size_t count) 1354 { 1355 unsigned int input; 1356 int ret; 1357 1358 ret = sscanf(buf, "%u", &input); 1359 if (ret != 1) 1360 return -EINVAL; 1361 1362 mutex_lock(&intel_pstate_driver_lock); 1363 1364 if (!intel_pstate_driver) { 1365 mutex_unlock(&intel_pstate_driver_lock); 1366 return -EAGAIN; 1367 } 1368 1369 mutex_lock(&intel_pstate_limits_lock); 1370 1371 global.min_perf_pct = clamp_t(int, input, 1372 min_perf_pct_min(), global.max_perf_pct); 1373 1374 mutex_unlock(&intel_pstate_limits_lock); 1375 1376 if (intel_pstate_driver == &intel_pstate) 1377 intel_pstate_update_policies(); 1378 else 1379 update_qos_request(FREQ_QOS_MIN); 1380 1381 mutex_unlock(&intel_pstate_driver_lock); 1382 1383 return count; 1384 } 1385 1386 static ssize_t show_hwp_dynamic_boost(struct kobject *kobj, 1387 struct kobj_attribute *attr, char *buf) 1388 { 1389 return sprintf(buf, "%u\n", hwp_boost); 1390 } 1391 1392 static ssize_t store_hwp_dynamic_boost(struct kobject *a, 1393 struct kobj_attribute *b, 1394 const char *buf, size_t count) 1395 { 1396 unsigned int input; 1397 int ret; 1398 1399 ret = kstrtouint(buf, 10, &input); 1400 if (ret) 1401 return ret; 1402 1403 mutex_lock(&intel_pstate_driver_lock); 1404 hwp_boost = !!input; 1405 intel_pstate_update_policies(); 1406 mutex_unlock(&intel_pstate_driver_lock); 1407 1408 return count; 1409 } 1410 1411 static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr, 1412 char *buf) 1413 { 1414 u64 power_ctl; 1415 int enable; 1416 1417 
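	/*
	 * Note: MSR_IA32_POWER_CTL_BIT_EE acts as an "energy efficiency
	 * disable" bit (set_power_ctl_ee_state() clears it to enable the
	 * optimization), so the value shown here is the inverse of the
	 * raw bit.
	 */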
rdmsrl(MSR_IA32_POWER_CTL, power_ctl); 1418 enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE)); 1419 return sprintf(buf, "%d\n", !enable); 1420 } 1421 1422 static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b, 1423 const char *buf, size_t count) 1424 { 1425 bool input; 1426 int ret; 1427 1428 ret = kstrtobool(buf, &input); 1429 if (ret) 1430 return ret; 1431 1432 set_power_ctl_ee_state(input); 1433 1434 return count; 1435 } 1436 1437 show_one(max_perf_pct, max_perf_pct); 1438 show_one(min_perf_pct, min_perf_pct); 1439 1440 define_one_global_rw(status); 1441 define_one_global_rw(no_turbo); 1442 define_one_global_rw(max_perf_pct); 1443 define_one_global_rw(min_perf_pct); 1444 define_one_global_ro(turbo_pct); 1445 define_one_global_ro(num_pstates); 1446 define_one_global_rw(hwp_dynamic_boost); 1447 define_one_global_rw(energy_efficiency); 1448 1449 static struct attribute *intel_pstate_attributes[] = { 1450 &status.attr, 1451 &no_turbo.attr, 1452 NULL 1453 }; 1454 1455 static const struct attribute_group intel_pstate_attr_group = { 1456 .attrs = intel_pstate_attributes, 1457 }; 1458 1459 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[]; 1460 1461 static struct kobject *intel_pstate_kobject; 1462 1463 static void __init intel_pstate_sysfs_expose_params(void) 1464 { 1465 int rc; 1466 1467 intel_pstate_kobject = kobject_create_and_add("intel_pstate", 1468 &cpu_subsys.dev_root->kobj); 1469 if (WARN_ON(!intel_pstate_kobject)) 1470 return; 1471 1472 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group); 1473 if (WARN_ON(rc)) 1474 return; 1475 1476 if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) { 1477 rc = sysfs_create_file(intel_pstate_kobject, &turbo_pct.attr); 1478 WARN_ON(rc); 1479 1480 rc = sysfs_create_file(intel_pstate_kobject, &num_pstates.attr); 1481 WARN_ON(rc); 1482 } 1483 1484 /* 1485 * If per cpu limits are enforced there are no global limits, so 1486 * return without creating max/min_perf_pct attributes 1487 */ 1488 if (per_cpu_limits) 1489 return; 1490 1491 rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr); 1492 WARN_ON(rc); 1493 1494 rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr); 1495 WARN_ON(rc); 1496 1497 if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) { 1498 rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr); 1499 WARN_ON(rc); 1500 } 1501 } 1502 1503 static void __init intel_pstate_sysfs_remove(void) 1504 { 1505 if (!intel_pstate_kobject) 1506 return; 1507 1508 sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group); 1509 1510 if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) { 1511 sysfs_remove_file(intel_pstate_kobject, &num_pstates.attr); 1512 sysfs_remove_file(intel_pstate_kobject, &turbo_pct.attr); 1513 } 1514 1515 if (!per_cpu_limits) { 1516 sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr); 1517 sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr); 1518 1519 if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) 1520 sysfs_remove_file(intel_pstate_kobject, &energy_efficiency.attr); 1521 } 1522 1523 kobject_put(intel_pstate_kobject); 1524 } 1525 1526 static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void) 1527 { 1528 int rc; 1529 1530 if (!hwp_active) 1531 return; 1532 1533 rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr); 1534 WARN_ON_ONCE(rc); 1535 } 1536 1537 static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void) 1538 { 1539 if (!hwp_active) 1540 return; 1541 1542 sysfs_remove_file(intel_pstate_kobject, 
&hwp_dynamic_boost.attr); 1543 } 1544 1545 /************************** sysfs end ************************/ 1546 1547 static void intel_pstate_hwp_enable(struct cpudata *cpudata) 1548 { 1549 /* First disable HWP notification interrupt as we don't process them */ 1550 if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) 1551 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); 1552 1553 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); 1554 if (cpudata->epp_default == -EINVAL) 1555 cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); 1556 } 1557 1558 static int atom_get_min_pstate(int not_used) 1559 { 1560 u64 value; 1561 1562 rdmsrl(MSR_ATOM_CORE_RATIOS, value); 1563 return (value >> 8) & 0x7F; 1564 } 1565 1566 static int atom_get_max_pstate(int not_used) 1567 { 1568 u64 value; 1569 1570 rdmsrl(MSR_ATOM_CORE_RATIOS, value); 1571 return (value >> 16) & 0x7F; 1572 } 1573 1574 static int atom_get_turbo_pstate(int not_used) 1575 { 1576 u64 value; 1577 1578 rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value); 1579 return value & 0x7F; 1580 } 1581 1582 static u64 atom_get_val(struct cpudata *cpudata, int pstate) 1583 { 1584 u64 val; 1585 int32_t vid_fp; 1586 u32 vid; 1587 1588 val = (u64)pstate << 8; 1589 if (global.no_turbo && !global.turbo_disabled) 1590 val |= (u64)1 << 32; 1591 1592 vid_fp = cpudata->vid.min + mul_fp( 1593 int_tofp(pstate - cpudata->pstate.min_pstate), 1594 cpudata->vid.ratio); 1595 1596 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); 1597 vid = ceiling_fp(vid_fp); 1598 1599 if (pstate > cpudata->pstate.max_pstate) 1600 vid = cpudata->vid.turbo; 1601 1602 return val | vid; 1603 } 1604 1605 static int silvermont_get_scaling(void) 1606 { 1607 u64 value; 1608 int i; 1609 /* Defined in Table 35-6 from SDM (Sept 2015) */ 1610 static int silvermont_freq_table[] = { 1611 83300, 100000, 133300, 116700, 80000}; 1612 1613 rdmsrl(MSR_FSB_FREQ, value); 1614 i = value & 0x7; 1615 WARN_ON(i > 4); 1616 1617 return silvermont_freq_table[i]; 1618 } 1619 1620 static int airmont_get_scaling(void) 1621 { 1622 u64 value; 1623 int i; 1624 /* Defined in Table 35-10 from SDM (Sept 2015) */ 1625 static int airmont_freq_table[] = { 1626 83300, 100000, 133300, 116700, 80000, 1627 93300, 90000, 88900, 87500}; 1628 1629 rdmsrl(MSR_FSB_FREQ, value); 1630 i = value & 0xF; 1631 WARN_ON(i > 8); 1632 1633 return airmont_freq_table[i]; 1634 } 1635 1636 static void atom_get_vid(struct cpudata *cpudata) 1637 { 1638 u64 value; 1639 1640 rdmsrl(MSR_ATOM_CORE_VIDS, value); 1641 cpudata->vid.min = int_tofp((value >> 8) & 0x7f); 1642 cpudata->vid.max = int_tofp((value >> 16) & 0x7f); 1643 cpudata->vid.ratio = div_fp( 1644 cpudata->vid.max - cpudata->vid.min, 1645 int_tofp(cpudata->pstate.max_pstate - 1646 cpudata->pstate.min_pstate)); 1647 1648 rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value); 1649 cpudata->vid.turbo = value & 0x7f; 1650 } 1651 1652 static int core_get_min_pstate(int cpu) 1653 { 1654 u64 value; 1655 1656 rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value); 1657 return (value >> 40) & 0xFF; 1658 } 1659 1660 static int core_get_max_pstate_physical(int cpu) 1661 { 1662 u64 value; 1663 1664 rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value); 1665 return (value >> 8) & 0xFF; 1666 } 1667 1668 static int core_get_tdp_ratio(int cpu, u64 plat_info) 1669 { 1670 /* Check how many TDP levels present */ 1671 if (plat_info & 0x600000000) { 1672 u64 tdp_ctrl; 1673 u64 tdp_ratio; 1674 int tdp_msr; 1675 int err; 1676 1677 /* Get the TDP level (0, 1, 2) to get ratios */ 1678 err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, 
&tdp_ctrl); 1679 if (err) 1680 return err; 1681 1682 /* TDP MSR are continuous starting at 0x648 */ 1683 tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03); 1684 err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio); 1685 if (err) 1686 return err; 1687 1688 /* For level 1 and 2, bits[23:16] contain the ratio */ 1689 if (tdp_ctrl & 0x03) 1690 tdp_ratio >>= 16; 1691 1692 tdp_ratio &= 0xff; /* ratios are only 8 bits long */ 1693 pr_debug("tdp_ratio %x\n", (int)tdp_ratio); 1694 1695 return (int)tdp_ratio; 1696 } 1697 1698 return -ENXIO; 1699 } 1700 1701 static int core_get_max_pstate(int cpu) 1702 { 1703 u64 tar; 1704 u64 plat_info; 1705 int max_pstate; 1706 int tdp_ratio; 1707 int err; 1708 1709 rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info); 1710 max_pstate = (plat_info >> 8) & 0xFF; 1711 1712 tdp_ratio = core_get_tdp_ratio(cpu, plat_info); 1713 if (tdp_ratio <= 0) 1714 return max_pstate; 1715 1716 if (hwp_active) { 1717 /* Turbo activation ratio is not used on HWP platforms */ 1718 return tdp_ratio; 1719 } 1720 1721 err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar); 1722 if (!err) { 1723 int tar_levels; 1724 1725 /* Do some sanity checking for safety */ 1726 tar_levels = tar & 0xff; 1727 if (tdp_ratio - 1 == tar_levels) { 1728 max_pstate = tar_levels; 1729 pr_debug("max_pstate=TAC %x\n", max_pstate); 1730 } 1731 } 1732 1733 return max_pstate; 1734 } 1735 1736 static int core_get_turbo_pstate(int cpu) 1737 { 1738 u64 value; 1739 int nont, ret; 1740 1741 rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value); 1742 nont = core_get_max_pstate(cpu); 1743 ret = (value) & 255; 1744 if (ret <= nont) 1745 ret = nont; 1746 return ret; 1747 } 1748 1749 static inline int core_get_scaling(void) 1750 { 1751 return 100000; 1752 } 1753 1754 static u64 core_get_val(struct cpudata *cpudata, int pstate) 1755 { 1756 u64 val; 1757 1758 val = (u64)pstate << 8; 1759 if (global.no_turbo && !global.turbo_disabled) 1760 val |= (u64)1 << 32; 1761 1762 return val; 1763 } 1764 1765 static int knl_get_aperf_mperf_shift(void) 1766 { 1767 return 10; 1768 } 1769 1770 static int knl_get_turbo_pstate(int cpu) 1771 { 1772 u64 value; 1773 int nont, ret; 1774 1775 rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value); 1776 nont = core_get_max_pstate(cpu); 1777 ret = (((value) >> 8) & 0xFF); 1778 if (ret <= nont) 1779 ret = nont; 1780 return ret; 1781 } 1782 1783 static void hybrid_get_type(void *data) 1784 { 1785 u8 *cpu_type = data; 1786 1787 *cpu_type = get_this_hybrid_cpu_type(); 1788 } 1789 1790 static int hybrid_get_cpu_scaling(int cpu) 1791 { 1792 u8 cpu_type = 0; 1793 1794 smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1); 1795 /* P-cores have a smaller perf level-to-freqency scaling factor. */ 1796 if (cpu_type == 0x40) 1797 return 78741; 1798 1799 return core_get_scaling(); 1800 } 1801 1802 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) 1803 { 1804 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); 1805 cpu->pstate.current_pstate = pstate; 1806 /* 1807 * Generally, there is no guarantee that this code will always run on 1808 * the CPU being updated, so force the register update to run on the 1809 * right CPU. 
	 */
	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
		      pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static void intel_pstate_max_within_limits(struct cpudata *cpu)
{
	int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);

	update_turbo_state();
	intel_pstate_set_pstate(cpu, pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
	int perf_ctl_scaling = pstate_funcs.get_scaling();

	cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu);
	cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
	cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;

	if (hwp_active && !hwp_mode_bdw) {
		__intel_pstate_get_hwp_cap(cpu);

		if (pstate_funcs.get_cpu_scaling) {
			cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu);
			if (cpu->pstate.scaling != perf_ctl_scaling)
				intel_pstate_hybrid_hwp_adjust(cpu);
		} else {
			cpu->pstate.scaling = perf_ctl_scaling;
		}
	} else {
		cpu->pstate.scaling = perf_ctl_scaling;
		cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu);
		cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(cpu->cpu);
	}

	if (cpu->pstate.scaling == perf_ctl_scaling) {
		cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
		cpu->pstate.max_freq = cpu->pstate.max_pstate * perf_ctl_scaling;
		cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * perf_ctl_scaling;
	}

	if (pstate_funcs.get_aperf_mperf_shift)
		cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	intel_pstate_set_min_pstate(cpu);
}

/*
 * A long hold time keeps the raised performance limits in place for a long
 * time, which negatively impacts perf/watt for some workloads, like
 * specpower. 3ms is based on experiments with some workloads.
 */
static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;

static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
{
	u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
	u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
	u32 max_limit = (hwp_req & 0xff00) >> 8;
	u32 min_limit = (hwp_req & 0xff);
	u32 boost_level1;

	/*
	 * Cases to consider (user changes via sysfs or boot time):
	 * If, P0 (Turbo max) = P1 (Guaranteed max) = min:
	 *	No boost, return.
	 * If, P0 (Turbo max) > P1 (Guaranteed max) = min:
	 *	Should result in one level boost only for P0.
	 * If, P0 (Turbo max) = P1 (Guaranteed max) > min:
	 *	Should result in two level boost:
	 *		(min + p1)/2 and P1.
	 * If, P0 (Turbo max) > P1 (Guaranteed max) > min:
	 *	Should result in three level boost:
	 *		(min + p1)/2, P1 and P0.
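	 *
	 * Illustrative example (hypothetical ratios): with min = 10,
	 * P1 = 20 and P0 = 25, successive boost-up calls raise
	 * hwp_boost_min to 15, then 20, then 25.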
	 */

	/* If max and min are equal or already at max, nothing to boost */
	if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit)
		return;

	if (!cpu->hwp_boost_min)
		cpu->hwp_boost_min = min_limit;

	/* level at the halfway mark between min and guaranteed */
	boost_level1 = (HWP_GUARANTEED_PERF(hwp_cap) + min_limit) >> 1;

	if (cpu->hwp_boost_min < boost_level1)
		cpu->hwp_boost_min = boost_level1;
	else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(hwp_cap))
		cpu->hwp_boost_min = HWP_GUARANTEED_PERF(hwp_cap);
	else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(hwp_cap) &&
		 max_limit != HWP_GUARANTEED_PERF(hwp_cap))
		cpu->hwp_boost_min = max_limit;
	else
		return;

	hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
	wrmsrl(MSR_HWP_REQUEST, hwp_req);
	cpu->last_update = cpu->sample.time;
}

static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
{
	if (cpu->hwp_boost_min) {
		bool expired;

		/* Check if we have been idle for the hold time; if so, boost down */
		expired = time_after64(cpu->sample.time, cpu->last_update +
				       hwp_boost_hold_time_ns);
		if (expired) {
			wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
			cpu->hwp_boost_min = 0;
		}
	}
	cpu->last_update = cpu->sample.time;
}

static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu,
						      u64 time)
{
	cpu->sample.time = time;

	if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) {
		bool do_io = false;

		cpu->sched_flags = 0;
		/*
		 * Set the iowait_boost flag and update the time. Since the
		 * IOWAIT flag is set with every update, a single occurrence
		 * is not enough to conclude that IO-bound activity is
		 * scheduled on this CPU. If we receive at least two in two
		 * consecutive ticks, then we treat it as a boost candidate.
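		 * (The time_before64() check below implements this: a second
		 * IOWAIT update arriving within two tick periods of the
		 * previous one counts as the repeat occurrence.)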
1954 */ 1955 if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC)) 1956 do_io = true; 1957 1958 cpu->last_io_update = time; 1959 1960 if (do_io) 1961 intel_pstate_hwp_boost_up(cpu); 1962 1963 } else { 1964 intel_pstate_hwp_boost_down(cpu); 1965 } 1966 } 1967 1968 static inline void intel_pstate_update_util_hwp(struct update_util_data *data, 1969 u64 time, unsigned int flags) 1970 { 1971 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 1972 1973 cpu->sched_flags |= flags; 1974 1975 if (smp_processor_id() == cpu->cpu) 1976 intel_pstate_update_util_hwp_local(cpu, time); 1977 } 1978 1979 static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu) 1980 { 1981 struct sample *sample = &cpu->sample; 1982 1983 sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf); 1984 } 1985 1986 static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) 1987 { 1988 u64 aperf, mperf; 1989 unsigned long flags; 1990 u64 tsc; 1991 1992 local_irq_save(flags); 1993 rdmsrl(MSR_IA32_APERF, aperf); 1994 rdmsrl(MSR_IA32_MPERF, mperf); 1995 tsc = rdtsc(); 1996 if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) { 1997 local_irq_restore(flags); 1998 return false; 1999 } 2000 local_irq_restore(flags); 2001 2002 cpu->last_sample_time = cpu->sample.time; 2003 cpu->sample.time = time; 2004 cpu->sample.aperf = aperf; 2005 cpu->sample.mperf = mperf; 2006 cpu->sample.tsc = tsc; 2007 cpu->sample.aperf -= cpu->prev_aperf; 2008 cpu->sample.mperf -= cpu->prev_mperf; 2009 cpu->sample.tsc -= cpu->prev_tsc; 2010 2011 cpu->prev_aperf = aperf; 2012 cpu->prev_mperf = mperf; 2013 cpu->prev_tsc = tsc; 2014 /* 2015 * First time this function is invoked in a given cycle, all of the 2016 * previous sample data fields are equal to zero or stale and they must 2017 * be populated with meaningful numbers for things to work, so assume 2018 * that sample.time will always be reset before setting the utilization 2019 * update hook and make the caller skip the sample then. 2020 */ 2021 if (cpu->last_sample_time) { 2022 intel_pstate_calc_avg_perf(cpu); 2023 return true; 2024 } 2025 return false; 2026 } 2027 2028 static inline int32_t get_avg_frequency(struct cpudata *cpu) 2029 { 2030 return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz); 2031 } 2032 2033 static inline int32_t get_avg_pstate(struct cpudata *cpu) 2034 { 2035 return mul_ext_fp(cpu->pstate.max_pstate_physical, 2036 cpu->sample.core_avg_perf); 2037 } 2038 2039 static inline int32_t get_target_pstate(struct cpudata *cpu) 2040 { 2041 struct sample *sample = &cpu->sample; 2042 int32_t busy_frac; 2043 int target, avg_pstate; 2044 2045 busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift, 2046 sample->tsc); 2047 2048 if (busy_frac < cpu->iowait_boost) 2049 busy_frac = cpu->iowait_boost; 2050 2051 sample->busy_scaled = busy_frac * 100; 2052 2053 target = global.no_turbo || global.turbo_disabled ? 2054 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; 2055 target += target >> 2; 2056 target = mul_fp(target, busy_frac); 2057 if (target < cpu->pstate.min_pstate) 2058 target = cpu->pstate.min_pstate; 2059 2060 /* 2061 * If the average P-state during the previous cycle was higher than the 2062 * current target, add 50% of the difference to the target to reduce 2063 * possible performance oscillations and offset possible performance 2064 * loss related to moving the workload from one CPU to another within 2065 * a package/module. 
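	 *
	 * For example, if the previous-cycle average P-state is 24 and the
	 * computed target is 20, the final target becomes 20 + (24 - 20)/2 = 22.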
2066 */ 2067 avg_pstate = get_avg_pstate(cpu); 2068 if (avg_pstate > target) 2069 target += (avg_pstate - target) >> 1; 2070 2071 return target; 2072 } 2073 2074 static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) 2075 { 2076 int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio); 2077 int max_pstate = max(min_pstate, cpu->max_perf_ratio); 2078 2079 return clamp_t(int, pstate, min_pstate, max_pstate); 2080 } 2081 2082 static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) 2083 { 2084 if (pstate == cpu->pstate.current_pstate) 2085 return; 2086 2087 cpu->pstate.current_pstate = pstate; 2088 wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); 2089 } 2090 2091 static void intel_pstate_adjust_pstate(struct cpudata *cpu) 2092 { 2093 int from = cpu->pstate.current_pstate; 2094 struct sample *sample; 2095 int target_pstate; 2096 2097 update_turbo_state(); 2098 2099 target_pstate = get_target_pstate(cpu); 2100 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); 2101 trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); 2102 intel_pstate_update_pstate(cpu, target_pstate); 2103 2104 sample = &cpu->sample; 2105 trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf), 2106 fp_toint(sample->busy_scaled), 2107 from, 2108 cpu->pstate.current_pstate, 2109 sample->mperf, 2110 sample->aperf, 2111 sample->tsc, 2112 get_avg_frequency(cpu), 2113 fp_toint(cpu->iowait_boost * 100)); 2114 } 2115 2116 static void intel_pstate_update_util(struct update_util_data *data, u64 time, 2117 unsigned int flags) 2118 { 2119 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 2120 u64 delta_ns; 2121 2122 /* Don't allow remote callbacks */ 2123 if (smp_processor_id() != cpu->cpu) 2124 return; 2125 2126 delta_ns = time - cpu->last_update; 2127 if (flags & SCHED_CPUFREQ_IOWAIT) { 2128 /* Start over if the CPU may have been idle. */ 2129 if (delta_ns > TICK_NSEC) { 2130 cpu->iowait_boost = ONE_EIGHTH_FP; 2131 } else if (cpu->iowait_boost >= ONE_EIGHTH_FP) { 2132 cpu->iowait_boost <<= 1; 2133 if (cpu->iowait_boost > int_tofp(1)) 2134 cpu->iowait_boost = int_tofp(1); 2135 } else { 2136 cpu->iowait_boost = ONE_EIGHTH_FP; 2137 } 2138 } else if (cpu->iowait_boost) { 2139 /* Clear iowait_boost if the CPU may have been idle. 
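 * If it has (more than a tick since the last update), drop the boost to
 * zero; otherwise decay it by halving on each update, mirroring the
 * doubling ramp used on the SCHED_CPUFREQ_IOWAIT path above
 * (ONE_EIGHTH_FP up to int_tofp(1), i.e. 12.5% to 100% of the busy
 * fraction).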
*/ 2140 if (delta_ns > TICK_NSEC) 2141 cpu->iowait_boost = 0; 2142 else 2143 cpu->iowait_boost >>= 1; 2144 } 2145 cpu->last_update = time; 2146 delta_ns = time - cpu->sample.time; 2147 if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL) 2148 return; 2149 2150 if (intel_pstate_sample(cpu, time)) 2151 intel_pstate_adjust_pstate(cpu); 2152 } 2153 2154 static struct pstate_funcs core_funcs = { 2155 .get_max = core_get_max_pstate, 2156 .get_max_physical = core_get_max_pstate_physical, 2157 .get_min = core_get_min_pstate, 2158 .get_turbo = core_get_turbo_pstate, 2159 .get_scaling = core_get_scaling, 2160 .get_val = core_get_val, 2161 }; 2162 2163 static const struct pstate_funcs silvermont_funcs = { 2164 .get_max = atom_get_max_pstate, 2165 .get_max_physical = atom_get_max_pstate, 2166 .get_min = atom_get_min_pstate, 2167 .get_turbo = atom_get_turbo_pstate, 2168 .get_val = atom_get_val, 2169 .get_scaling = silvermont_get_scaling, 2170 .get_vid = atom_get_vid, 2171 }; 2172 2173 static const struct pstate_funcs airmont_funcs = { 2174 .get_max = atom_get_max_pstate, 2175 .get_max_physical = atom_get_max_pstate, 2176 .get_min = atom_get_min_pstate, 2177 .get_turbo = atom_get_turbo_pstate, 2178 .get_val = atom_get_val, 2179 .get_scaling = airmont_get_scaling, 2180 .get_vid = atom_get_vid, 2181 }; 2182 2183 static const struct pstate_funcs knl_funcs = { 2184 .get_max = core_get_max_pstate, 2185 .get_max_physical = core_get_max_pstate_physical, 2186 .get_min = core_get_min_pstate, 2187 .get_turbo = knl_get_turbo_pstate, 2188 .get_aperf_mperf_shift = knl_get_aperf_mperf_shift, 2189 .get_scaling = core_get_scaling, 2190 .get_val = core_get_val, 2191 }; 2192 2193 #define X86_MATCH(model, policy) \ 2194 X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ 2195 X86_FEATURE_APERFMPERF, &policy) 2196 2197 static const struct x86_cpu_id intel_pstate_cpu_ids[] = { 2198 X86_MATCH(SANDYBRIDGE, core_funcs), 2199 X86_MATCH(SANDYBRIDGE_X, core_funcs), 2200 X86_MATCH(ATOM_SILVERMONT, silvermont_funcs), 2201 X86_MATCH(IVYBRIDGE, core_funcs), 2202 X86_MATCH(HASWELL, core_funcs), 2203 X86_MATCH(BROADWELL, core_funcs), 2204 X86_MATCH(IVYBRIDGE_X, core_funcs), 2205 X86_MATCH(HASWELL_X, core_funcs), 2206 X86_MATCH(HASWELL_L, core_funcs), 2207 X86_MATCH(HASWELL_G, core_funcs), 2208 X86_MATCH(BROADWELL_G, core_funcs), 2209 X86_MATCH(ATOM_AIRMONT, airmont_funcs), 2210 X86_MATCH(SKYLAKE_L, core_funcs), 2211 X86_MATCH(BROADWELL_X, core_funcs), 2212 X86_MATCH(SKYLAKE, core_funcs), 2213 X86_MATCH(BROADWELL_D, core_funcs), 2214 X86_MATCH(XEON_PHI_KNL, knl_funcs), 2215 X86_MATCH(XEON_PHI_KNM, knl_funcs), 2216 X86_MATCH(ATOM_GOLDMONT, core_funcs), 2217 X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs), 2218 X86_MATCH(SKYLAKE_X, core_funcs), 2219 X86_MATCH(COMETLAKE, core_funcs), 2220 X86_MATCH(ICELAKE_X, core_funcs), 2221 X86_MATCH(TIGERLAKE, core_funcs), 2222 {} 2223 }; 2224 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); 2225 2226 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { 2227 X86_MATCH(BROADWELL_D, core_funcs), 2228 X86_MATCH(BROADWELL_X, core_funcs), 2229 X86_MATCH(SKYLAKE_X, core_funcs), 2230 X86_MATCH(ICELAKE_X, core_funcs), 2231 {} 2232 }; 2233 2234 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { 2235 X86_MATCH(KABYLAKE, core_funcs), 2236 {} 2237 }; 2238 2239 static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = { 2240 X86_MATCH(SKYLAKE_X, core_funcs), 2241 X86_MATCH(SKYLAKE, core_funcs), 2242 {} 2243 }; 2244 2245 static int intel_pstate_init_cpu(unsigned 
int cpunum) 2246 { 2247 struct cpudata *cpu; 2248 2249 cpu = all_cpu_data[cpunum]; 2250 2251 if (!cpu) { 2252 cpu = kzalloc(sizeof(*cpu), GFP_KERNEL); 2253 if (!cpu) 2254 return -ENOMEM; 2255 2256 all_cpu_data[cpunum] = cpu; 2257 2258 cpu->cpu = cpunum; 2259 2260 cpu->epp_default = -EINVAL; 2261 2262 if (hwp_active) { 2263 const struct x86_cpu_id *id; 2264 2265 intel_pstate_hwp_enable(cpu); 2266 2267 id = x86_match_cpu(intel_pstate_hwp_boost_ids); 2268 if (id && intel_pstate_acpi_pm_profile_server()) 2269 hwp_boost = true; 2270 } 2271 } else if (hwp_active) { 2272 /* 2273 * Re-enable HWP in case this happens after a resume from ACPI 2274 * S3 if the CPU was offline during the whole system/resume 2275 * cycle. 2276 */ 2277 intel_pstate_hwp_reenable(cpu); 2278 } 2279 2280 cpu->epp_powersave = -EINVAL; 2281 cpu->epp_policy = 0; 2282 2283 intel_pstate_get_cpu_pstates(cpu); 2284 2285 pr_debug("controlling: cpu %d\n", cpunum); 2286 2287 return 0; 2288 } 2289 2290 static void intel_pstate_set_update_util_hook(unsigned int cpu_num) 2291 { 2292 struct cpudata *cpu = all_cpu_data[cpu_num]; 2293 2294 if (hwp_active && !hwp_boost) 2295 return; 2296 2297 if (cpu->update_util_set) 2298 return; 2299 2300 /* Prevent intel_pstate_update_util() from using stale data. */ 2301 cpu->sample.time = 0; 2302 cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, 2303 (hwp_active ? 2304 intel_pstate_update_util_hwp : 2305 intel_pstate_update_util)); 2306 cpu->update_util_set = true; 2307 } 2308 2309 static void intel_pstate_clear_update_util_hook(unsigned int cpu) 2310 { 2311 struct cpudata *cpu_data = all_cpu_data[cpu]; 2312 2313 if (!cpu_data->update_util_set) 2314 return; 2315 2316 cpufreq_remove_update_util_hook(cpu); 2317 cpu_data->update_util_set = false; 2318 synchronize_rcu(); 2319 } 2320 2321 static int intel_pstate_get_max_freq(struct cpudata *cpu) 2322 { 2323 return global.turbo_disabled || global.no_turbo ? 2324 cpu->pstate.max_freq : cpu->pstate.turbo_freq; 2325 } 2326 2327 static void intel_pstate_update_perf_limits(struct cpudata *cpu, 2328 unsigned int policy_min, 2329 unsigned int policy_max) 2330 { 2331 int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; 2332 int32_t max_policy_perf, min_policy_perf; 2333 2334 max_policy_perf = policy_max / perf_ctl_scaling; 2335 if (policy_max == policy_min) { 2336 min_policy_perf = max_policy_perf; 2337 } else { 2338 min_policy_perf = policy_min / perf_ctl_scaling; 2339 min_policy_perf = clamp_t(int32_t, min_policy_perf, 2340 0, max_policy_perf); 2341 } 2342 2343 /* 2344 * HWP needs some special consideration, because HWP_REQUEST uses 2345 * abstract values to represent performance rather than pure ratios. 2346 */ 2347 if (hwp_active) { 2348 intel_pstate_get_hwp_cap(cpu); 2349 2350 if (cpu->pstate.scaling != perf_ctl_scaling) { 2351 int scaling = cpu->pstate.scaling; 2352 int freq; 2353 2354 freq = max_policy_perf * perf_ctl_scaling; 2355 max_policy_perf = DIV_ROUND_UP(freq, scaling); 2356 freq = min_policy_perf * perf_ctl_scaling; 2357 min_policy_perf = DIV_ROUND_UP(freq, scaling); 2358 } 2359 } 2360 2361 pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n", 2362 cpu->cpu, min_policy_perf, max_policy_perf); 2363 2364 /* Normalize user input to [min_perf, max_perf] */ 2365 if (per_cpu_limits) { 2366 cpu->min_perf_ratio = min_policy_perf; 2367 cpu->max_perf_ratio = max_policy_perf; 2368 } else { 2369 int turbo_max = cpu->pstate.turbo_pstate; 2370 int32_t global_min, global_max; 2371 2372 /* Global limits are in percent of the maximum turbo P-state. 
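 * For example, with turbo_pstate = 40, max_perf_pct = 75 and
 * min_perf_pct = 25, the limits become DIV_ROUND_UP(40 * 75, 100) = 30
 * and DIV_ROUND_UP(40 * 25, 100) = 10 respectively.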
*/ 2373 global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100); 2374 global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100); 2375 global_min = clamp_t(int32_t, global_min, 0, global_max); 2376 2377 pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu, 2378 global_min, global_max); 2379 2380 cpu->min_perf_ratio = max(min_policy_perf, global_min); 2381 cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf); 2382 cpu->max_perf_ratio = min(max_policy_perf, global_max); 2383 cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio); 2384 2385 /* Make sure min_perf <= max_perf */ 2386 cpu->min_perf_ratio = min(cpu->min_perf_ratio, 2387 cpu->max_perf_ratio); 2388 2389 } 2390 pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu, 2391 cpu->max_perf_ratio, 2392 cpu->min_perf_ratio); 2393 } 2394 2395 static int intel_pstate_set_policy(struct cpufreq_policy *policy) 2396 { 2397 struct cpudata *cpu; 2398 2399 if (!policy->cpuinfo.max_freq) 2400 return -ENODEV; 2401 2402 pr_debug("set_policy cpuinfo.max %u policy->max %u\n", 2403 policy->cpuinfo.max_freq, policy->max); 2404 2405 cpu = all_cpu_data[policy->cpu]; 2406 cpu->policy = policy->policy; 2407 2408 mutex_lock(&intel_pstate_limits_lock); 2409 2410 intel_pstate_update_perf_limits(cpu, policy->min, policy->max); 2411 2412 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { 2413 /* 2414 * NOHZ_FULL CPUs need this as the governor callback may not 2415 * be invoked on them. 2416 */ 2417 intel_pstate_clear_update_util_hook(policy->cpu); 2418 intel_pstate_max_within_limits(cpu); 2419 } else { 2420 intel_pstate_set_update_util_hook(policy->cpu); 2421 } 2422 2423 if (hwp_active) { 2424 /* 2425 * When hwp_boost was active before and dynamically it 2426 * was turned off, in that case we need to clear the 2427 * update util hook. 2428 */ 2429 if (!hwp_boost) 2430 intel_pstate_clear_update_util_hook(policy->cpu); 2431 intel_pstate_hwp_set(policy->cpu); 2432 } 2433 2434 mutex_unlock(&intel_pstate_limits_lock); 2435 2436 return 0; 2437 } 2438 2439 static void intel_pstate_adjust_policy_max(struct cpudata *cpu, 2440 struct cpufreq_policy_data *policy) 2441 { 2442 if (!hwp_active && 2443 cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && 2444 policy->max < policy->cpuinfo.max_freq && 2445 policy->max > cpu->pstate.max_freq) { 2446 pr_debug("policy->max > max non turbo frequency\n"); 2447 policy->max = policy->cpuinfo.max_freq; 2448 } 2449 } 2450 2451 static void intel_pstate_verify_cpu_policy(struct cpudata *cpu, 2452 struct cpufreq_policy_data *policy) 2453 { 2454 int max_freq; 2455 2456 update_turbo_state(); 2457 if (hwp_active) { 2458 intel_pstate_get_hwp_cap(cpu); 2459 max_freq = global.no_turbo || global.turbo_disabled ? 
2460 cpu->pstate.max_freq : cpu->pstate.turbo_freq; 2461 } else { 2462 max_freq = intel_pstate_get_max_freq(cpu); 2463 } 2464 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq); 2465 2466 intel_pstate_adjust_policy_max(cpu, policy); 2467 } 2468 2469 static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy) 2470 { 2471 intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy); 2472 2473 return 0; 2474 } 2475 2476 static int intel_cpufreq_cpu_offline(struct cpufreq_policy *policy) 2477 { 2478 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2479 2480 pr_debug("CPU %d going offline\n", cpu->cpu); 2481 2482 if (cpu->suspended) 2483 return 0; 2484 2485 /* 2486 * If the CPU is an SMT thread and it goes offline with the performance 2487 * settings different from the minimum, it will prevent its sibling 2488 * from getting to lower performance levels, so force the minimum 2489 * performance on CPU offline to prevent that from happening. 2490 */ 2491 if (hwp_active) 2492 intel_pstate_hwp_offline(cpu); 2493 else 2494 intel_pstate_set_min_pstate(cpu); 2495 2496 intel_pstate_exit_perf_limits(policy); 2497 2498 return 0; 2499 } 2500 2501 static int intel_pstate_cpu_online(struct cpufreq_policy *policy) 2502 { 2503 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2504 2505 pr_debug("CPU %d going online\n", cpu->cpu); 2506 2507 intel_pstate_init_acpi_perf_limits(policy); 2508 2509 if (hwp_active) { 2510 /* 2511 * Re-enable HWP and clear the "suspended" flag to let "resume" 2512 * know that it need not do that. 2513 */ 2514 intel_pstate_hwp_reenable(cpu); 2515 cpu->suspended = false; 2516 } 2517 2518 return 0; 2519 } 2520 2521 static int intel_pstate_cpu_offline(struct cpufreq_policy *policy) 2522 { 2523 intel_pstate_clear_update_util_hook(policy->cpu); 2524 2525 return intel_cpufreq_cpu_offline(policy); 2526 } 2527 2528 static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) 2529 { 2530 pr_debug("CPU %d exiting\n", policy->cpu); 2531 2532 policy->fast_switch_possible = false; 2533 2534 return 0; 2535 } 2536 2537 static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) 2538 { 2539 struct cpudata *cpu; 2540 int rc; 2541 2542 rc = intel_pstate_init_cpu(policy->cpu); 2543 if (rc) 2544 return rc; 2545 2546 cpu = all_cpu_data[policy->cpu]; 2547 2548 cpu->max_perf_ratio = 0xFF; 2549 cpu->min_perf_ratio = 0; 2550 2551 /* cpuinfo and default policy values */ 2552 policy->cpuinfo.min_freq = cpu->pstate.min_freq; 2553 update_turbo_state(); 2554 global.turbo_disabled_mf = global.turbo_disabled; 2555 policy->cpuinfo.max_freq = global.turbo_disabled ? 2556 cpu->pstate.max_freq : cpu->pstate.turbo_freq; 2557 2558 policy->min = policy->cpuinfo.min_freq; 2559 policy->max = policy->cpuinfo.max_freq; 2560 2561 intel_pstate_init_acpi_perf_limits(policy); 2562 2563 policy->fast_switch_possible = true; 2564 2565 return 0; 2566 } 2567 2568 static int intel_pstate_cpu_init(struct cpufreq_policy *policy) 2569 { 2570 int ret = __intel_pstate_cpu_init(policy); 2571 2572 if (ret) 2573 return ret; 2574 2575 /* 2576 * Set the policy to powersave to provide a valid fallback value in case 2577 * the default cpufreq governor is neither powersave nor performance. 
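 * In the active mode ->setpolicy() only distinguishes
 * CPUFREQ_POLICY_PERFORMANCE from everything else, which it treats as
 * powersave, so powersave is a safe default here.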
2578 */
2579 policy->policy = CPUFREQ_POLICY_POWERSAVE;
2580
2581 if (hwp_active) {
2582 struct cpudata *cpu = all_cpu_data[policy->cpu];
2583
2584 cpu->epp_cached = intel_pstate_get_epp(cpu, 0);
2585 }
2586
2587 return 0;
2588 }
2589
2590 static struct cpufreq_driver intel_pstate = {
2591 .flags = CPUFREQ_CONST_LOOPS,
2592 .verify = intel_pstate_verify_policy,
2593 .setpolicy = intel_pstate_set_policy,
2594 .suspend = intel_pstate_suspend,
2595 .resume = intel_pstate_resume,
2596 .init = intel_pstate_cpu_init,
2597 .exit = intel_pstate_cpu_exit,
2598 .offline = intel_pstate_cpu_offline,
2599 .online = intel_pstate_cpu_online,
2600 .update_limits = intel_pstate_update_limits,
2601 .name = "intel_pstate",
2602 };
2603
2604 static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
2605 {
2606 struct cpudata *cpu = all_cpu_data[policy->cpu];
2607
2608 intel_pstate_verify_cpu_policy(cpu, policy);
2609 intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
2610
2611 return 0;
2612 }
2613
2614 /* Use of trace in passive mode:
2615 *
2616 * In passive mode the trace core_busy field (also known as the
2617 * performance field, and labelled as such on the graphs; also known as
2618 * core_avg_perf) is not needed, so it is re-assigned to indicate whether the
2619 * driver call was via the normal or fast switch path. Various graphs
2620 * output from the intel_pstate_tracer.py utility that include core_busy
2621 * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%,
2622 * so we use 10 to indicate the normal path through the driver, and
2623 * 90 to indicate the fast switch path through the driver.
2624 * The scaled_busy field is not used, and is set to 0.
2625 */
2626
2627 #define INTEL_PSTATE_TRACE_TARGET 10
2628 #define INTEL_PSTATE_TRACE_FAST_SWITCH 90
2629
2630 static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate)
2631 {
2632 struct sample *sample;
2633
2634 if (!trace_pstate_sample_enabled())
2635 return;
2636
2637 if (!intel_pstate_sample(cpu, ktime_get()))
2638 return;
2639
2640 sample = &cpu->sample;
2641 trace_pstate_sample(trace_type,
2642 0,
2643 old_pstate,
2644 cpu->pstate.current_pstate,
2645 sample->mperf,
2646 sample->aperf,
2647 sample->tsc,
2648 get_avg_frequency(cpu),
2649 fp_toint(cpu->iowait_boost * 100));
2650 }
2651
2652 static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max,
2653 u32 desired, bool fast_switch)
2654 {
2655 u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
2656
2657 value &= ~HWP_MIN_PERF(~0L);
2658 value |= HWP_MIN_PERF(min);
2659
2660 value &= ~HWP_MAX_PERF(~0L);
2661 value |= HWP_MAX_PERF(max);
2662
2663 value &= ~HWP_DESIRED_PERF(~0L);
2664 value |= HWP_DESIRED_PERF(desired);
2665
2666 if (value == prev)
2667 return;
2668
2669 WRITE_ONCE(cpu->hwp_req_cached, value);
2670 if (fast_switch)
2671 wrmsrl(MSR_HWP_REQUEST, value);
2672 else
2673 wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
2674 }
2675
2676 static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu,
2677 u32 target_pstate, bool fast_switch)
2678 {
2679 if (fast_switch)
2680 wrmsrl(MSR_IA32_PERF_CTL,
2681 pstate_funcs.get_val(cpu, target_pstate));
2682 else
2683 wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
2684 pstate_funcs.get_val(cpu, target_pstate));
2685 }
2686
2687 static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
2688 int target_pstate, bool fast_switch)
2689 {
2690 struct cpudata *cpu = all_cpu_data[policy->cpu];
2691 int old_pstate = cpu->pstate.current_pstate;
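
/*
 * Clamp the request to the current performance limits, then program it:
 * with HWP, rewrite the min/max fields of MSR_HWP_REQUEST (desired is
 * left at 0 so the hardware picks the operating point within the
 * window); without HWP, write MSR_IA32_PERF_CTL directly, but only if
 * the ratio actually changed.
 */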
2692 2693 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); 2694 if (hwp_active) { 2695 int max_pstate = policy->strict_target ? 2696 target_pstate : cpu->max_perf_ratio; 2697 2698 intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0, 2699 fast_switch); 2700 } else if (target_pstate != old_pstate) { 2701 intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch); 2702 } 2703 2704 cpu->pstate.current_pstate = target_pstate; 2705 2706 intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH : 2707 INTEL_PSTATE_TRACE_TARGET, old_pstate); 2708 2709 return target_pstate; 2710 } 2711 2712 static int intel_cpufreq_target(struct cpufreq_policy *policy, 2713 unsigned int target_freq, 2714 unsigned int relation) 2715 { 2716 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2717 struct cpufreq_freqs freqs; 2718 int target_pstate; 2719 2720 update_turbo_state(); 2721 2722 freqs.old = policy->cur; 2723 freqs.new = target_freq; 2724 2725 cpufreq_freq_transition_begin(policy, &freqs); 2726 2727 switch (relation) { 2728 case CPUFREQ_RELATION_L: 2729 target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling); 2730 break; 2731 case CPUFREQ_RELATION_H: 2732 target_pstate = freqs.new / cpu->pstate.scaling; 2733 break; 2734 default: 2735 target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling); 2736 break; 2737 } 2738 2739 target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false); 2740 2741 freqs.new = target_pstate * cpu->pstate.scaling; 2742 2743 cpufreq_freq_transition_end(policy, &freqs, false); 2744 2745 return 0; 2746 } 2747 2748 static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, 2749 unsigned int target_freq) 2750 { 2751 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2752 int target_pstate; 2753 2754 update_turbo_state(); 2755 2756 target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); 2757 2758 target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true); 2759 2760 return target_pstate * cpu->pstate.scaling; 2761 } 2762 2763 static void intel_cpufreq_adjust_perf(unsigned int cpunum, 2764 unsigned long min_perf, 2765 unsigned long target_perf, 2766 unsigned long capacity) 2767 { 2768 struct cpudata *cpu = all_cpu_data[cpunum]; 2769 u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached); 2770 int old_pstate = cpu->pstate.current_pstate; 2771 int cap_pstate, min_pstate, max_pstate, target_pstate; 2772 2773 update_turbo_state(); 2774 cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) : 2775 HWP_HIGHEST_PERF(hwp_cap); 2776 2777 /* Optimization: Avoid unnecessary divisions. 
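 * The min_perf/target_perf/capacity arguments are mapped to HWP ratios
 * proportionally: e.g. with cap_pstate = 40, target_perf = 512 and
 * capacity = 1024, the desired ratio is DIV_ROUND_UP(40 * 512, 1024) = 20.
 * When target_perf (or min_perf) is not below capacity, the division is
 * skipped and cap_pstate is used as is.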
*/ 2778 2779 target_pstate = cap_pstate; 2780 if (target_perf < capacity) 2781 target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity); 2782 2783 min_pstate = cap_pstate; 2784 if (min_perf < capacity) 2785 min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity); 2786 2787 if (min_pstate < cpu->pstate.min_pstate) 2788 min_pstate = cpu->pstate.min_pstate; 2789 2790 if (min_pstate < cpu->min_perf_ratio) 2791 min_pstate = cpu->min_perf_ratio; 2792 2793 max_pstate = min(cap_pstate, cpu->max_perf_ratio); 2794 if (max_pstate < min_pstate) 2795 max_pstate = min_pstate; 2796 2797 target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate); 2798 2799 intel_cpufreq_hwp_update(cpu, min_pstate, max_pstate, target_pstate, true); 2800 2801 cpu->pstate.current_pstate = target_pstate; 2802 intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate); 2803 } 2804 2805 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) 2806 { 2807 struct freq_qos_request *req; 2808 struct cpudata *cpu; 2809 struct device *dev; 2810 int ret, freq; 2811 2812 dev = get_cpu_device(policy->cpu); 2813 if (!dev) 2814 return -ENODEV; 2815 2816 ret = __intel_pstate_cpu_init(policy); 2817 if (ret) 2818 return ret; 2819 2820 policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY; 2821 /* This reflects the intel_pstate_get_cpu_pstates() setting. */ 2822 policy->cur = policy->cpuinfo.min_freq; 2823 2824 req = kcalloc(2, sizeof(*req), GFP_KERNEL); 2825 if (!req) { 2826 ret = -ENOMEM; 2827 goto pstate_exit; 2828 } 2829 2830 cpu = all_cpu_data[policy->cpu]; 2831 2832 if (hwp_active) { 2833 u64 value; 2834 2835 policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP; 2836 2837 intel_pstate_get_hwp_cap(cpu); 2838 2839 rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value); 2840 WRITE_ONCE(cpu->hwp_req_cached, value); 2841 2842 cpu->epp_cached = intel_pstate_get_epp(cpu, value); 2843 } else { 2844 policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY; 2845 } 2846 2847 freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.min_perf_pct, 100); 2848 2849 ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN, 2850 freq); 2851 if (ret < 0) { 2852 dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret); 2853 goto free_req; 2854 } 2855 2856 freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.max_perf_pct, 100); 2857 2858 ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX, 2859 freq); 2860 if (ret < 0) { 2861 dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret); 2862 goto remove_min_req; 2863 } 2864 2865 policy->driver_data = req; 2866 2867 return 0; 2868 2869 remove_min_req: 2870 freq_qos_remove_request(req); 2871 free_req: 2872 kfree(req); 2873 pstate_exit: 2874 intel_pstate_exit_perf_limits(policy); 2875 2876 return ret; 2877 } 2878 2879 static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy) 2880 { 2881 struct freq_qos_request *req; 2882 2883 req = policy->driver_data; 2884 2885 freq_qos_remove_request(req + 1); 2886 freq_qos_remove_request(req); 2887 kfree(req); 2888 2889 return intel_pstate_cpu_exit(policy); 2890 } 2891 2892 static int intel_cpufreq_suspend(struct cpufreq_policy *policy) 2893 { 2894 intel_pstate_suspend(policy); 2895 2896 if (hwp_active) { 2897 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2898 u64 value = READ_ONCE(cpu->hwp_req_cached); 2899 2900 /* 2901 * Clear the desired perf field in MSR_HWP_REQUEST in case 2902 * intel_cpufreq_adjust_perf() is in use and the last value 2903 * written by it may not be 
suitable. 2904 */ 2905 value &= ~HWP_DESIRED_PERF(~0L); 2906 wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); 2907 WRITE_ONCE(cpu->hwp_req_cached, value); 2908 } 2909 2910 return 0; 2911 } 2912 2913 static struct cpufreq_driver intel_cpufreq = { 2914 .flags = CPUFREQ_CONST_LOOPS, 2915 .verify = intel_cpufreq_verify_policy, 2916 .target = intel_cpufreq_target, 2917 .fast_switch = intel_cpufreq_fast_switch, 2918 .init = intel_cpufreq_cpu_init, 2919 .exit = intel_cpufreq_cpu_exit, 2920 .offline = intel_cpufreq_cpu_offline, 2921 .online = intel_pstate_cpu_online, 2922 .suspend = intel_cpufreq_suspend, 2923 .resume = intel_pstate_resume, 2924 .update_limits = intel_pstate_update_limits, 2925 .name = "intel_cpufreq", 2926 }; 2927 2928 static struct cpufreq_driver *default_driver; 2929 2930 static void intel_pstate_driver_cleanup(void) 2931 { 2932 unsigned int cpu; 2933 2934 cpus_read_lock(); 2935 for_each_online_cpu(cpu) { 2936 if (all_cpu_data[cpu]) { 2937 if (intel_pstate_driver == &intel_pstate) 2938 intel_pstate_clear_update_util_hook(cpu); 2939 2940 kfree(all_cpu_data[cpu]); 2941 all_cpu_data[cpu] = NULL; 2942 } 2943 } 2944 cpus_read_unlock(); 2945 2946 intel_pstate_driver = NULL; 2947 } 2948 2949 static int intel_pstate_register_driver(struct cpufreq_driver *driver) 2950 { 2951 int ret; 2952 2953 if (driver == &intel_pstate) 2954 intel_pstate_sysfs_expose_hwp_dynamic_boost(); 2955 2956 memset(&global, 0, sizeof(global)); 2957 global.max_perf_pct = 100; 2958 2959 intel_pstate_driver = driver; 2960 ret = cpufreq_register_driver(intel_pstate_driver); 2961 if (ret) { 2962 intel_pstate_driver_cleanup(); 2963 return ret; 2964 } 2965 2966 global.min_perf_pct = min_perf_pct_min(); 2967 2968 return 0; 2969 } 2970 2971 static ssize_t intel_pstate_show_status(char *buf) 2972 { 2973 if (!intel_pstate_driver) 2974 return sprintf(buf, "off\n"); 2975 2976 return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ? 
2977 "active" : "passive"); 2978 } 2979 2980 static int intel_pstate_update_status(const char *buf, size_t size) 2981 { 2982 if (size == 3 && !strncmp(buf, "off", size)) { 2983 if (!intel_pstate_driver) 2984 return -EINVAL; 2985 2986 if (hwp_active) 2987 return -EBUSY; 2988 2989 cpufreq_unregister_driver(intel_pstate_driver); 2990 intel_pstate_driver_cleanup(); 2991 return 0; 2992 } 2993 2994 if (size == 6 && !strncmp(buf, "active", size)) { 2995 if (intel_pstate_driver) { 2996 if (intel_pstate_driver == &intel_pstate) 2997 return 0; 2998 2999 cpufreq_unregister_driver(intel_pstate_driver); 3000 } 3001 3002 return intel_pstate_register_driver(&intel_pstate); 3003 } 3004 3005 if (size == 7 && !strncmp(buf, "passive", size)) { 3006 if (intel_pstate_driver) { 3007 if (intel_pstate_driver == &intel_cpufreq) 3008 return 0; 3009 3010 cpufreq_unregister_driver(intel_pstate_driver); 3011 intel_pstate_sysfs_hide_hwp_dynamic_boost(); 3012 } 3013 3014 return intel_pstate_register_driver(&intel_cpufreq); 3015 } 3016 3017 return -EINVAL; 3018 } 3019 3020 static int no_load __initdata; 3021 static int no_hwp __initdata; 3022 static int hwp_only __initdata; 3023 static unsigned int force_load __initdata; 3024 3025 static int __init intel_pstate_msrs_not_valid(void) 3026 { 3027 if (!pstate_funcs.get_max(0) || 3028 !pstate_funcs.get_min(0) || 3029 !pstate_funcs.get_turbo(0)) 3030 return -ENODEV; 3031 3032 return 0; 3033 } 3034 3035 static void __init copy_cpu_funcs(struct pstate_funcs *funcs) 3036 { 3037 pstate_funcs.get_max = funcs->get_max; 3038 pstate_funcs.get_max_physical = funcs->get_max_physical; 3039 pstate_funcs.get_min = funcs->get_min; 3040 pstate_funcs.get_turbo = funcs->get_turbo; 3041 pstate_funcs.get_scaling = funcs->get_scaling; 3042 pstate_funcs.get_val = funcs->get_val; 3043 pstate_funcs.get_vid = funcs->get_vid; 3044 pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift; 3045 } 3046 3047 #ifdef CONFIG_ACPI 3048 3049 static bool __init intel_pstate_no_acpi_pss(void) 3050 { 3051 int i; 3052 3053 for_each_possible_cpu(i) { 3054 acpi_status status; 3055 union acpi_object *pss; 3056 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 3057 struct acpi_processor *pr = per_cpu(processors, i); 3058 3059 if (!pr) 3060 continue; 3061 3062 status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); 3063 if (ACPI_FAILURE(status)) 3064 continue; 3065 3066 pss = buffer.pointer; 3067 if (pss && pss->type == ACPI_TYPE_PACKAGE) { 3068 kfree(pss); 3069 return false; 3070 } 3071 3072 kfree(pss); 3073 } 3074 3075 pr_debug("ACPI _PSS not found\n"); 3076 return true; 3077 } 3078 3079 static bool __init intel_pstate_no_acpi_pcch(void) 3080 { 3081 acpi_status status; 3082 acpi_handle handle; 3083 3084 status = acpi_get_handle(NULL, "\\_SB", &handle); 3085 if (ACPI_FAILURE(status)) 3086 goto not_found; 3087 3088 if (acpi_has_method(handle, "PCCH")) 3089 return false; 3090 3091 not_found: 3092 pr_debug("ACPI PCCH not found\n"); 3093 return true; 3094 } 3095 3096 static bool __init intel_pstate_has_acpi_ppc(void) 3097 { 3098 int i; 3099 3100 for_each_possible_cpu(i) { 3101 struct acpi_processor *pr = per_cpu(processors, i); 3102 3103 if (!pr) 3104 continue; 3105 if (acpi_has_method(pr->handle, "_PPC")) 3106 return true; 3107 } 3108 pr_debug("ACPI _PPC not found\n"); 3109 return false; 3110 } 3111 3112 enum { 3113 PSS, 3114 PPC, 3115 }; 3116 3117 /* Hardware vendor-specific info that has its own power management modes */ 3118 static struct acpi_platform_list plat_info[] __initdata = { 3119 
{"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS}, 3120 {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3121 {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3122 {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3123 {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3124 {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3125 {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3126 {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3127 {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3128 {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3129 {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3130 {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3131 {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3132 {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3133 {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3134 { } /* End */ 3135 }; 3136 3137 #define BITMASK_OOB (BIT(8) | BIT(18)) 3138 3139 static bool __init intel_pstate_platform_pwr_mgmt_exists(void) 3140 { 3141 const struct x86_cpu_id *id; 3142 u64 misc_pwr; 3143 int idx; 3144 3145 id = x86_match_cpu(intel_pstate_cpu_oob_ids); 3146 if (id) { 3147 rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); 3148 if (misc_pwr & BITMASK_OOB) { 3149 pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n"); 3150 pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n"); 3151 return true; 3152 } 3153 } 3154 3155 idx = acpi_match_platform_list(plat_info); 3156 if (idx < 0) 3157 return false; 3158 3159 switch (plat_info[idx].data) { 3160 case PSS: 3161 if (!intel_pstate_no_acpi_pss()) 3162 return false; 3163 3164 return intel_pstate_no_acpi_pcch(); 3165 case PPC: 3166 return intel_pstate_has_acpi_ppc() && !force_load; 3167 } 3168 3169 return false; 3170 } 3171 3172 static void intel_pstate_request_control_from_smm(void) 3173 { 3174 /* 3175 * It may be unsafe to request P-states control from SMM if _PPC support 3176 * has not been enabled. 
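 * When acpi_ppc is set, acpi_processor_pstate_control() below uses the
 * FADT SMI command to ask the firmware to hand P-state control over to
 * the OS.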
3177 */ 3178 if (acpi_ppc) 3179 acpi_processor_pstate_control(); 3180 } 3181 #else /* CONFIG_ACPI not enabled */ 3182 static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; } 3183 static inline bool intel_pstate_has_acpi_ppc(void) { return false; } 3184 static inline void intel_pstate_request_control_from_smm(void) {} 3185 #endif /* CONFIG_ACPI */ 3186 3187 #define INTEL_PSTATE_HWP_BROADWELL 0x01 3188 3189 #define X86_MATCH_HWP(model, hwp_mode) \ 3190 X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ 3191 X86_FEATURE_HWP, hwp_mode) 3192 3193 static const struct x86_cpu_id hwp_support_ids[] __initconst = { 3194 X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), 3195 X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL), 3196 X86_MATCH_HWP(ANY, 0), 3197 {} 3198 }; 3199 3200 static bool intel_pstate_hwp_is_enabled(void) 3201 { 3202 u64 value; 3203 3204 rdmsrl(MSR_PM_ENABLE, value); 3205 return !!(value & 0x1); 3206 } 3207 3208 static int __init intel_pstate_init(void) 3209 { 3210 const struct x86_cpu_id *id; 3211 int rc; 3212 3213 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) 3214 return -ENODEV; 3215 3216 id = x86_match_cpu(hwp_support_ids); 3217 if (id) { 3218 bool hwp_forced = intel_pstate_hwp_is_enabled(); 3219 3220 if (hwp_forced) 3221 pr_info("HWP enabled by BIOS\n"); 3222 else if (no_load) 3223 return -ENODEV; 3224 3225 copy_cpu_funcs(&core_funcs); 3226 /* 3227 * Avoid enabling HWP for processors without EPP support, 3228 * because that means incomplete HWP implementation which is a 3229 * corner case and supporting it is generally problematic. 3230 * 3231 * If HWP is enabled already, though, there is no choice but to 3232 * deal with it. 3233 */ 3234 if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) { 3235 hwp_active++; 3236 hwp_mode_bdw = id->driver_data; 3237 intel_pstate.attr = hwp_cpufreq_attrs; 3238 intel_cpufreq.attr = hwp_cpufreq_attrs; 3239 intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS; 3240 intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf; 3241 if (!default_driver) 3242 default_driver = &intel_pstate; 3243 3244 if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) 3245 pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling; 3246 3247 goto hwp_cpu_matched; 3248 } 3249 pr_info("HWP not enabled\n"); 3250 } else { 3251 if (no_load) 3252 return -ENODEV; 3253 3254 id = x86_match_cpu(intel_pstate_cpu_ids); 3255 if (!id) { 3256 pr_info("CPU model not supported\n"); 3257 return -ENODEV; 3258 } 3259 3260 copy_cpu_funcs((struct pstate_funcs *)id->driver_data); 3261 } 3262 3263 if (intel_pstate_msrs_not_valid()) { 3264 pr_info("Invalid MSRs\n"); 3265 return -ENODEV; 3266 } 3267 /* Without HWP start in the passive mode. */ 3268 if (!default_driver) 3269 default_driver = &intel_cpufreq; 3270 3271 hwp_cpu_matched: 3272 /* 3273 * The Intel pstate driver will be ignored if the platform 3274 * firmware has its own power management modes. 
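 * This covers both the out-of-band mode signalled through
 * MSR_MISC_PWR_MGMT and the vendor platforms in plat_info[] that manage
 * P-states via ACPI _PSS/PCCH or _PPC; see
 * intel_pstate_platform_pwr_mgmt_exists() above.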
3275 */ 3276 if (intel_pstate_platform_pwr_mgmt_exists()) { 3277 pr_info("P-states controlled by the platform\n"); 3278 return -ENODEV; 3279 } 3280 3281 if (!hwp_active && hwp_only) 3282 return -ENOTSUPP; 3283 3284 pr_info("Intel P-state driver initializing\n"); 3285 3286 all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus())); 3287 if (!all_cpu_data) 3288 return -ENOMEM; 3289 3290 intel_pstate_request_control_from_smm(); 3291 3292 intel_pstate_sysfs_expose_params(); 3293 3294 mutex_lock(&intel_pstate_driver_lock); 3295 rc = intel_pstate_register_driver(default_driver); 3296 mutex_unlock(&intel_pstate_driver_lock); 3297 if (rc) { 3298 intel_pstate_sysfs_remove(); 3299 return rc; 3300 } 3301 3302 if (hwp_active) { 3303 const struct x86_cpu_id *id; 3304 3305 id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids); 3306 if (id) { 3307 set_power_ctl_ee_state(false); 3308 pr_info("Disabling energy efficiency optimization\n"); 3309 } 3310 3311 pr_info("HWP enabled\n"); 3312 } else if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) { 3313 pr_warn("Problematic setup: Hybrid processor with disabled HWP\n"); 3314 } 3315 3316 return 0; 3317 } 3318 device_initcall(intel_pstate_init); 3319 3320 static int __init intel_pstate_setup(char *str) 3321 { 3322 if (!str) 3323 return -EINVAL; 3324 3325 if (!strcmp(str, "disable")) 3326 no_load = 1; 3327 else if (!strcmp(str, "active")) 3328 default_driver = &intel_pstate; 3329 else if (!strcmp(str, "passive")) 3330 default_driver = &intel_cpufreq; 3331 3332 if (!strcmp(str, "no_hwp")) 3333 no_hwp = 1; 3334 3335 if (!strcmp(str, "force")) 3336 force_load = 1; 3337 if (!strcmp(str, "hwp_only")) 3338 hwp_only = 1; 3339 if (!strcmp(str, "per_cpu_perf_limits")) 3340 per_cpu_limits = true; 3341 3342 #ifdef CONFIG_ACPI 3343 if (!strcmp(str, "support_acpi_ppc")) 3344 acpi_ppc = true; 3345 #endif 3346 3347 return 0; 3348 } 3349 early_param("intel_pstate", intel_pstate_setup); 3350 3351 MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>"); 3352 MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors"); 3353 MODULE_LICENSE("GPL"); 3354
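
/*
 * Quick reference for the "intel_pstate=" early parameter handled by
 * intel_pstate_setup() above; each instance of the parameter takes a
 * single keyword:
 *
 *   intel_pstate=disable              do not load the driver (ignored if the
 *                                     BIOS has force-enabled HWP)
 *   intel_pstate=active               default to the intel_pstate driver
 *   intel_pstate=passive              default to the intel_cpufreq driver
 *   intel_pstate=no_hwp               do not enable HWP (unless the BIOS
 *                                     already has)
 *   intel_pstate=force                load even on listed vendor platforms
 *                                     that expose ACPI _PPC
 *   intel_pstate=hwp_only             load only if HWP is active
 *   intel_pstate=per_cpu_perf_limits  use per-CPU rather than global limits
 *   intel_pstate=support_acpi_ppc     request P-states control from the
 *                                     firmware (CONFIG_ACPI only)
 */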