// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched/cpufreq.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <linux/pm_qos.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>

#define INTEL_PSTATE_SAMPLING_INTERVAL	(10 * NSEC_PER_MSEC)

#define INTEL_CPUFREQ_TRANSITION_LATENCY	20000
#define INTEL_CPUFREQ_TRANSITION_DELAY_HWP	5000
#define INTEL_CPUFREQ_TRANSITION_DELAY		500

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
#endif

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))

#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
        return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
        return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
        int mask, ret;

        ret = fp_toint(x);
        mask = (1 << FRAC_BITS) - 1;
        if (x & mask)
                ret += 1;
        return ret;
}

static inline u64 mul_ext_fp(u64 x, u64 y)
{
        return (x * y) >> EXT_FRAC_BITS;
}

static inline u64 div_ext_fp(u64 x, u64 y)
{
        return div64_u64(x << EXT_FRAC_BITS, y);
}
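
/*
 * Worked example of the fixed-point helpers above (illustrative values):
 * with FRAC_BITS == 8, int_tofp(3) == 768 and
 * mul_fp(int_tofp(3), int_tofp(2)) == int_tofp(6), because
 * (768 * 512) >> 8 == 1536.  div_fp(1, 4) == 64, i.e. 0.25 in fixed point,
 * and ceiling_fp(div_fp(1, 4)) == 1 because the fractional bits are
 * non-zero.  The "ext" variants carry EXT_BITS == 6 extra fractional bits
 * and are used for the APERF/MPERF ratios computed later in this file.
 */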

/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate next
 *			P state. This can be different from core_avg_perf
 *			to account for cpu idle period
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing next P State.
 */
struct sample {
        int32_t core_avg_perf;
        int32_t busy_scaled;
        u64 aperf;
        u64 mperf;
        u64 tsc;
        u64 time;
};
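
/*
 * Consumption example (hypothetical counter deltas): if @aperf is 2000000
 * and @mperf is 1000000 over the same period, the CPU ran on average at
 * twice its guaranteed frequency, so
 *
 *	sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
 *
 * encodes 2.0 in extended fixed point and get_avg_frequency() later in this
 * file reports roughly 2 * cpu_khz.
 */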

/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical: This is physical Max P state for a processor
 *			This can be higher than the max_pstate which can
 *			be limited by platform thermal design power limits
 * @perf_ctl_scaling:	PERF_CTL P-state to frequency scaling factor
 * @scaling:		Scaling factor between performance and frequency
 * @turbo_pstate:	Max Turbo P state possible for this platform
 * @min_freq:		@min_pstate frequency in cpufreq units
 * @max_freq:		@max_pstate frequency in cpufreq units
 * @turbo_freq:		@turbo_pstate frequency in cpufreq units
 *
 * Stores the per cpu model P state limits and current P state.
 */
struct pstate_data {
        int current_pstate;
        int min_pstate;
        int max_pstate;
        int max_pstate_physical;
        int perf_ctl_scaling;
        int scaling;
        int turbo_pstate;
        unsigned int min_freq;
        unsigned int max_freq;
        unsigned int turbo_freq;
};

/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P State.
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - Min P State)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling)
 * This data is used in Atom platforms, where in addition to target P state,
 * the voltage data needs to be specified to select next P State.
 */
struct vid_data {
        int min;
        int max;
        int turbo;
        int32_t ratio;
};
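
/*
 * Interpolation example (hypothetical values): with vid.min == 20,
 * vid.max == 54, min_pstate == 6 and max_pstate == 23, @ratio works out to
 * (54 - 20) / (23 - 6) == 2 VID steps per P-state, so atom_get_val() later
 * in this file requests roughly VID 20 + 2 * (pstate - 6) for a non-turbo
 * target P-state.
 */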

/**
 * struct global_params - Global parameters, mostly tunable via sysfs.
 * @no_turbo:		Whether or not to use turbo P-states.
 * @turbo_disabled:	Whether or not turbo P-states are available at all,
 *			based on the MSR_IA32_MISC_ENABLE value and whether or
 *			not the maximum reported turbo P-state is different from
 *			the maximum reported non-turbo one.
 * @turbo_disabled_mf:	The @turbo_disabled value reflected by cpuinfo.max_freq.
 * @min_perf_pct:	Minimum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 * @max_perf_pct:	Maximum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 */
struct global_params {
        bool no_turbo;
        bool turbo_disabled;
        bool turbo_disabled_mf;
        int max_perf_pct;
        int min_perf_pct;
};

/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @policy:		CPUFreq policy value
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @iowait_boost:	iowait-related boost fraction
 * @last_update:	Time of the last update.
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @last_sample_time:	Last Sample time
 * @aperf_mperf_shift:	APERF vs MPERF counting frequency difference
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: IO Wait time difference from last and
 *			current sample
 * @sample:		Storage for storing last Sample data
 * @min_perf_ratio:	Minimum capacity in terms of PERF or HWP ratios
 * @max_perf_ratio:	Maximum capacity in terms of PERF or HWP ratios
 * @acpi_perf_data:	Stores ACPI perf information read from _PSS
 * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
 * @epp_powersave:	Last saved HWP energy performance preference
 *			(EPP) or energy performance bias (EPB),
 *			when policy switched to performance
 * @epp_policy:		Last saved policy used to set EPP/EPB
 * @epp_default:	Power on default HWP energy performance
 *			preference/bias
 * @epp_cached:		Cached HWP energy-performance preference value
 * @hwp_req_cached:	Cached value of the last HWP Request MSR
 * @hwp_cap_cached:	Cached value of the last HWP Capabilities MSR
 * @last_io_update:	Last time when IO wake flag was set
 * @sched_flags:	Store scheduler flags for possible cross CPU update
 * @hwp_boost_min:	Last HWP boosted min performance
 * @suspended:		Whether or not the driver has been suspended.
 *
 * This structure stores per CPU instance data for all CPUs.
 */
struct cpudata {
        int cpu;

        unsigned int policy;
        struct update_util_data update_util;
        bool update_util_set;

        struct pstate_data pstate;
        struct vid_data vid;

        u64 last_update;
        u64 last_sample_time;
        u64 aperf_mperf_shift;
        u64 prev_aperf;
        u64 prev_mperf;
        u64 prev_tsc;
        u64 prev_cummulative_iowait;
        struct sample sample;
        int32_t min_perf_ratio;
        int32_t max_perf_ratio;
#ifdef CONFIG_ACPI
        struct acpi_processor_performance acpi_perf_data;
        bool valid_pss_table;
#endif
        unsigned int iowait_boost;
        s16 epp_powersave;
        s16 epp_policy;
        s16 epp_default;
        s16 epp_cached;
        u64 hwp_req_cached;
        u64 hwp_cap_cached;
        u64 last_io_update;
        unsigned int sched_flags;
        u32 hwp_boost_min;
        bool suspended;
};

static struct cpudata **all_cpu_data;

/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non turbo effective P state
 * @get_max_physical:	Callback to get maximum non turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 *
 * Core and Atom CPU models have different ways to get P State limits. This
 * structure is used to store those callbacks.
 */
struct pstate_funcs {
        int (*get_max)(void);
        int (*get_max_physical)(void);
        int (*get_min)(void);
        int (*get_turbo)(void);
        int (*get_scaling)(void);
        int (*get_aperf_mperf_shift)(void);
        u64 (*get_val)(struct cpudata*, int pstate);
        void (*get_vid)(struct cpudata *);
};

static struct pstate_funcs pstate_funcs __read_mostly;

static int hwp_active __read_mostly;
static int hwp_mode_bdw __read_mostly;
static bool per_cpu_limits __read_mostly;
static bool hwp_boost __read_mostly;

static struct cpufreq_driver *intel_pstate_driver __read_mostly;

#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif

static struct global_params global;

static DEFINE_MUTEX(intel_pstate_driver_lock);
static DEFINE_MUTEX(intel_pstate_limits_lock);

#ifdef CONFIG_ACPI

static bool intel_pstate_acpi_pm_profile_server(void)
{
        if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
            acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
                return true;

        return false;
}

static bool intel_pstate_get_ppc_enable_status(void)
{
        if (intel_pstate_acpi_pm_profile_server())
                return true;

        return acpi_ppc;
}

#ifdef CONFIG_ACPI_CPPC_LIB

/* The work item is needed to avoid CPU hotplug locking issues */
static void intel_pstate_sched_itmt_work_fn(struct work_struct *work)
{
        sched_set_itmt_support();
}

static DECLARE_WORK(sched_itmt_work, intel_pstate_sched_itmt_work_fn);

static void intel_pstate_set_itmt_prio(int cpu)
{
        struct cppc_perf_caps cppc_perf;
        static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
        int ret;

        ret = cppc_get_perf_caps(cpu, &cppc_perf);
        if (ret)
                return;

        /*
         * The priorities can be set regardless of whether or not
         * sched_set_itmt_support(true) has been called and it is valid to
         * update them at any time after it has been called.
         */
        sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);

        if (max_highest_perf <= min_highest_perf) {
                if (cppc_perf.highest_perf > max_highest_perf)
                        max_highest_perf = cppc_perf.highest_perf;

                if (cppc_perf.highest_perf < min_highest_perf)
                        min_highest_perf = cppc_perf.highest_perf;

                if (max_highest_perf > min_highest_perf) {
                        /*
                         * This code can be run during CPU online under the
                         * CPU hotplug locks, so sched_set_itmt_support()
                         * cannot be called from here.  Queue up a work item
                         * to invoke it.
                         */
                        schedule_work(&sched_itmt_work);
                }
        }
}

static int intel_pstate_get_cppc_guaranteed(int cpu)
{
        struct cppc_perf_caps cppc_perf;
        int ret;

        ret = cppc_get_perf_caps(cpu, &cppc_perf);
        if (ret)
                return ret;

        if (cppc_perf.guaranteed_perf)
                return cppc_perf.guaranteed_perf;

        return cppc_perf.nominal_perf;
}

#else /* CONFIG_ACPI_CPPC_LIB */
static void intel_pstate_set_itmt_prio(int cpu)
{
}
#endif /* CONFIG_ACPI_CPPC_LIB */

static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
        struct cpudata *cpu;
        int ret;
        int i;

        if (hwp_active) {
                intel_pstate_set_itmt_prio(policy->cpu);
                return;
        }

        if (!intel_pstate_get_ppc_enable_status())
                return;

        cpu = all_cpu_data[policy->cpu];

        ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
                                                  policy->cpu);
        if (ret)
                return;

        /*
         * Check if the control value in _PSS is for PERF_CTL MSR, which should
         * guarantee that the states returned by it map to the states in our
         * list directly.
         */
        if (cpu->acpi_perf_data.control_register.space_id !=
                                        ACPI_ADR_SPACE_FIXED_HARDWARE)
                goto err;

        /*
         * If there is only one entry in _PSS, simply ignore _PSS and continue
         * as usual without taking it into account.
         */
        if (cpu->acpi_perf_data.state_count < 2)
                goto err;

        pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
        for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
                pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n",
                         (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
                         (u32) cpu->acpi_perf_data.states[i].core_frequency,
                         (u32) cpu->acpi_perf_data.states[i].power,
                         (u32) cpu->acpi_perf_data.states[i].control);
        }

        /*
         * The _PSS table doesn't contain the whole turbo frequency range.
         * It just contains +1 MHz above the max non turbo frequency,
         * with the control value corresponding to the max turbo ratio. But
         * when cpufreq set policy is called, it will call with this
         * max frequency, which will cause reduced performance, as this
         * driver uses the real max turbo frequency as the max frequency.
         * So correct this frequency in the _PSS table to the max turbo
         * frequency based on the turbo state.
         * Also need to convert to MHz, as the _PSS frequencies are in MHz.
         */
        if (!global.turbo_disabled)
                cpu->acpi_perf_data.states[0].core_frequency =
                                        policy->cpuinfo.max_freq / 1000;
        cpu->valid_pss_table = true;
        pr_debug("_PPC limits will be enforced\n");

        return;

err:
        cpu->valid_pss_table = false;
        acpi_processor_unregister_performance(policy->cpu);
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
        struct cpudata *cpu;

        cpu = all_cpu_data[policy->cpu];
        if (!cpu->valid_pss_table)
                return;

        acpi_processor_unregister_performance(policy->cpu);
}
#else /* CONFIG_ACPI */
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}

static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}

static inline bool intel_pstate_acpi_pm_profile_server(void)
{
        return false;
}
#endif /* CONFIG_ACPI */

#ifndef CONFIG_ACPI_CPPC_LIB
static int intel_pstate_get_cppc_guaranteed(int cpu)
{
        return -ENOTSUPP;
}
#endif /* CONFIG_ACPI_CPPC_LIB */

static bool intel_pstate_cppc_perf_valid(u32 perf, struct cppc_perf_caps *caps)
{
        return perf && perf <= caps->highest_perf && perf >= caps->lowest_perf;
}

static bool intel_pstate_cppc_perf_caps(struct cpudata *cpu,
                                        struct cppc_perf_caps *caps)
{
        if (cppc_get_perf_caps(cpu->cpu, caps))
                return false;

        return caps->highest_perf && caps->lowest_perf <= caps->highest_perf;
}

static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu)
{
        pr_debug("CPU%d: Using PERF_CTL scaling for HWP\n", cpu->cpu);

        cpu->pstate.scaling = cpu->pstate.perf_ctl_scaling;
}

/**
 * intel_pstate_hybrid_hwp_calibrate - Calibrate HWP performance levels.
 * @cpu: Target CPU.
 *
 * On hybrid processors, HWP may expose more performance levels than there are
 * P-states accessible through the PERF_CTL interface.  If that happens, the
 * scaling factor between HWP performance levels and CPU frequency will be less
 * than the scaling factor between P-state values and CPU frequency.
 *
 * In that case, the scaling factor between HWP performance levels and CPU
 * frequency needs to be determined, which can be done with the help of the
 * observation that certain HWP performance levels should correspond to certain
 * P-states, like for example the HWP highest performance should correspond
 * to the maximum turbo P-state of the CPU.
 */
static void intel_pstate_hybrid_hwp_calibrate(struct cpudata *cpu)
{
        struct cppc_perf_caps caps;
        int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
        int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
        int perf_ctl_turbo = pstate_funcs.get_turbo();
        int turbo_freq = perf_ctl_turbo * perf_ctl_scaling;
        int perf_ctl_max = pstate_funcs.get_max();
        int max_freq = perf_ctl_max * perf_ctl_scaling;
        int scaling = INT_MAX;
        int freq;

        pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
        pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, perf_ctl_max);
        pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
        pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);

        pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
        pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);

        if (intel_pstate_cppc_perf_caps(cpu, &caps)) {
                if (intel_pstate_cppc_perf_valid(caps.nominal_perf, &caps)) {
                        pr_debug("CPU%d: Using CPPC nominal\n", cpu->cpu);

                        /*
                         * If the CPPC nominal performance is valid, it can be
                         * assumed to correspond to cpu_khz.
                         */
                        if (caps.nominal_perf == perf_ctl_max_phys) {
                                intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
                                return;
                        }
                        scaling = DIV_ROUND_UP(cpu_khz, caps.nominal_perf);
                } else if (intel_pstate_cppc_perf_valid(caps.guaranteed_perf, &caps)) {
                        pr_debug("CPU%d: Using CPPC guaranteed\n", cpu->cpu);

                        /*
                         * If the CPPC guaranteed performance is valid, it can
                         * be assumed to correspond to max_freq.
                         */
                        if (caps.guaranteed_perf == perf_ctl_max) {
                                intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
                                return;
                        }
                        scaling = DIV_ROUND_UP(max_freq, caps.guaranteed_perf);
                }
        }
        /*
         * If using the CPPC data to compute the HWP-to-frequency scaling factor
         * doesn't work, use the HWP_CAP guaranteed perf for this purpose with
         * the assumption that it corresponds to max_freq.
         */
        if (scaling > perf_ctl_scaling) {
                pr_debug("CPU%d: Using HWP_CAP guaranteed\n", cpu->cpu);

                if (cpu->pstate.max_pstate == perf_ctl_max) {
                        intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
                        return;
                }
                scaling = DIV_ROUND_UP(max_freq, cpu->pstate.max_pstate);
                if (scaling > perf_ctl_scaling) {
                        /*
                         * This should not happen, because it would mean that
                         * the number of HWP perf levels was less than the
                         * number of P-states, so use the PERF_CTL scaling in
                         * that case.
                         */
                        pr_debug("CPU%d: scaling (%d) out of range\n", cpu->cpu,
                                 scaling);

                        intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
                        return;
                }
        }

        /*
         * If the product of the HWP performance scaling factor obtained above
         * and the HWP_CAP highest performance is greater than the maximum turbo
         * frequency corresponding to the pstate_funcs.get_turbo() return value,
         * the scaling factor is too high, so recompute it so that the HWP_CAP
         * highest performance corresponds to the maximum turbo frequency.
         */
        if (turbo_freq < cpu->pstate.turbo_pstate * scaling) {
                pr_debug("CPU%d: scaling too high (%d)\n", cpu->cpu, scaling);

                cpu->pstate.turbo_freq = turbo_freq;
                scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate);
        }

        cpu->pstate.scaling = scaling;

        pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);

        cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
                                         perf_ctl_scaling);

        freq = perf_ctl_max_phys * perf_ctl_scaling;
        cpu->pstate.max_pstate_physical = DIV_ROUND_UP(freq, scaling);

        cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
        /*
         * Convert the min P-state value retrieved via pstate_funcs.get_min()
         * to the effective range of HWP performance levels.
         */
        cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling);
}
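
/*
 * Worked example for the calibration above (hypothetical values): with
 * perf_ctl_scaling == 100000 (100 MHz per P-state step), cpu_khz == 3000000
 * and a valid CPPC nominal_perf of 38, scaling is computed as
 * DIV_ROUND_UP(3000000, 38) == 78948, i.e. one HWP performance level spans
 * roughly 79 MHz, consistent with HWP exposing more levels than the
 * PERF_CTL interface exposes P-states.
 */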

static inline void update_turbo_state(void)
{
        u64 misc_en;
        struct cpudata *cpu;

        cpu = all_cpu_data[0];
        rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
        global.turbo_disabled =
                (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
                 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static int min_perf_pct_min(void)
{
        struct cpudata *cpu = all_cpu_data[0];
        int turbo_pstate = cpu->pstate.turbo_pstate;

        return turbo_pstate ?
                (cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
}

static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
{
        u64 epb;
        int ret;

        if (!boot_cpu_has(X86_FEATURE_EPB))
                return -ENXIO;

        ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
        if (ret)
                return (s16)ret;

        return (s16)(epb & 0x0f);
}

static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
        s16 epp;

        if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
                /*
                 * When hwp_req_data is 0, it means that the caller has not
                 * read MSR_HWP_REQUEST, so read it here to get the EPP.
                 */
                if (!hwp_req_data) {
                        epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
                                            &hwp_req_data);
                        if (epp)
                                return epp;
                }
                epp = (hwp_req_data >> 24) & 0xff;
        } else {
                /* When there is no EPP present, HWP uses EPB settings */
                epp = intel_pstate_get_epb(cpu_data);
        }

        return epp;
}

static int intel_pstate_set_epb(int cpu, s16 pref)
{
        u64 epb;
        int ret;

        if (!boot_cpu_has(X86_FEATURE_EPB))
                return -ENXIO;

        ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
        if (ret)
                return ret;

        epb = (epb & ~0x0f) | pref;
        wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);

        return 0;
}

/*
 * EPP/EPB display strings corresponding to EPP index in the
 * energy_perf_strings[]
 *	index		String
 *-------------------------------------
 *	0		default
 *	1		performance
 *	2		balance_performance
 *	3		balance_power
 *	4		power
 */
static const char * const energy_perf_strings[] = {
        "default",
        "performance",
        "balance_performance",
        "balance_power",
        "power",
        NULL
};
static const unsigned int epp_values[] = {
        HWP_EPP_PERFORMANCE,
        HWP_EPP_BALANCE_PERFORMANCE,
        HWP_EPP_BALANCE_POWERSAVE,
        HWP_EPP_POWERSAVE
};

static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp)
{
        s16 epp;
        int index = -EINVAL;

        *raw_epp = 0;
        epp = intel_pstate_get_epp(cpu_data, 0);
        if (epp < 0)
                return epp;

        if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
                if (epp == HWP_EPP_PERFORMANCE)
                        return 1;
                if (epp == HWP_EPP_BALANCE_PERFORMANCE)
                        return 2;
                if (epp == HWP_EPP_BALANCE_POWERSAVE)
                        return 3;
                if (epp == HWP_EPP_POWERSAVE)
                        return 4;
                *raw_epp = epp;
                return 0;
        } else if (boot_cpu_has(X86_FEATURE_EPB)) {
                /*
                 * Range:
                 *	0x00-0x03	:	Performance
                 *	0x04-0x07	:	Balance performance
                 *	0x08-0x0B	:	Balance power
                 *	0x0C-0x0F	:	Power
                 * The EPB is a 4 bit value, but our ranges restrict the
                 * values that can be set, so effectively only the top two
                 * bits are used here.
                 */
                index = (epp >> 2) + 1;
        }

        return index;
}
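
/*
 * Mapping example for the lookup above: on an EPB-only system a raw bias of
 * 0x06 falls into the 0x04-0x07 "balance performance" range, so the index is
 * (0x06 >> 2) + 1 == 2, which selects "balance_performance" in
 * energy_perf_strings[].  On HWP_EPP systems only exact matches map to an
 * index; any other raw EPP value is reported through *raw_epp with index 0.
 */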

static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
{
        int ret;

        /*
         * Use the cached HWP Request MSR value, because in the active mode the
         * register itself may be updated by intel_pstate_hwp_boost_up() or
         * intel_pstate_hwp_boost_down() at any time.
         */
        u64 value = READ_ONCE(cpu->hwp_req_cached);

        value &= ~GENMASK_ULL(31, 24);
        value |= (u64)epp << 24;
        /*
         * The only other updater of hwp_req_cached in the active mode,
         * intel_pstate_hwp_set(), is called under the same lock as this
         * function, so it cannot run in parallel with the update below.
         */
        WRITE_ONCE(cpu->hwp_req_cached, value);
        ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
        if (!ret)
                cpu->epp_cached = epp;

        return ret;
}
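
/*
 * MSR_HWP_REQUEST packing illustrated (example values): EPP occupies bits
 * 31:24, the maximum performance level bits 15:8 and the minimum performance
 * level bits 7:0.  Writing EPP 0x80 into a cached value of 0x1f08 as done
 * above yields 0x80001f08: min ratio 0x08, max ratio 0x1f, EPP 0x80.
 */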

static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
                                              int pref_index, bool use_raw,
                                              u32 raw_epp)
{
        int epp = -EINVAL;
        int ret;

        if (!pref_index)
                epp = cpu_data->epp_default;

        if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
                if (use_raw)
                        epp = raw_epp;
                else if (epp == -EINVAL)
                        epp = epp_values[pref_index - 1];

                /*
                 * To avoid confusion, refuse to set EPP to any values different
                 * from 0 (performance) if the current policy is "performance",
                 * because those values would be overridden.
                 */
                if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
                        return -EBUSY;

                ret = intel_pstate_set_epp(cpu_data, epp);
        } else {
                if (epp == -EINVAL)
                        epp = (pref_index - 1) << 2;
                ret = intel_pstate_set_epb(cpu_data->cpu, epp);
        }

        return ret;
}

static ssize_t show_energy_performance_available_preferences(
                                struct cpufreq_policy *policy, char *buf)
{
        int i = 0;
        int ret = 0;

        while (energy_perf_strings[i] != NULL)
                ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);

        ret += sprintf(&buf[ret], "\n");

        return ret;
}

cpufreq_freq_attr_ro(energy_performance_available_preferences);

static struct cpufreq_driver intel_pstate;

static ssize_t store_energy_performance_preference(
                struct cpufreq_policy *policy, const char *buf, size_t count)
{
        struct cpudata *cpu = all_cpu_data[policy->cpu];
        char str_preference[21];
        bool raw = false;
        ssize_t ret;
        u32 epp = 0;

        ret = sscanf(buf, "%20s", str_preference);
        if (ret != 1)
                return -EINVAL;

        ret = match_string(energy_perf_strings, -1, str_preference);
        if (ret < 0) {
                if (!boot_cpu_has(X86_FEATURE_HWP_EPP))
                        return ret;

                ret = kstrtouint(buf, 10, &epp);
                if (ret)
                        return ret;

                if (epp > 255)
                        return -EINVAL;

                raw = true;
        }

        /*
         * This function runs with the policy R/W semaphore held, which
         * guarantees that the driver pointer will not change while it is
         * running.
         */
        if (!intel_pstate_driver)
                return -EAGAIN;

        mutex_lock(&intel_pstate_limits_lock);

        if (intel_pstate_driver == &intel_pstate) {
                ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp);
        } else {
                /*
                 * In the passive mode the governor needs to be stopped on the
                 * target CPU before the EPP update and restarted after it,
                 * which is super-heavy-weight, so make sure it is worth doing
                 * upfront.
                 */
                if (!raw)
                        epp = ret ? epp_values[ret - 1] : cpu->epp_default;

                if (cpu->epp_cached != epp) {
                        int err;

                        cpufreq_stop_governor(policy);
                        ret = intel_pstate_set_epp(cpu, epp);
                        err = cpufreq_start_governor(policy);
                        if (!ret)
                                ret = err;
                }
        }

        mutex_unlock(&intel_pstate_limits_lock);

        return ret ?: count;
}

static ssize_t show_energy_performance_preference(
                                struct cpufreq_policy *policy, char *buf)
{
        struct cpudata *cpu_data = all_cpu_data[policy->cpu];
        int preference, raw_epp;

        preference = intel_pstate_get_energy_pref_index(cpu_data, &raw_epp);
        if (preference < 0)
                return preference;

        if (raw_epp)
                return sprintf(buf, "%d\n", raw_epp);
        else
                return sprintf(buf, "%s\n", energy_perf_strings[preference]);
}

cpufreq_freq_attr_rw(energy_performance_preference);

static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
{
        struct cpudata *cpu = all_cpu_data[policy->cpu];
        int ratio, freq;

        ratio = intel_pstate_get_cppc_guaranteed(policy->cpu);
        if (ratio <= 0) {
                u64 cap;

                rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
                ratio = HWP_GUARANTEED_PERF(cap);
        }

        freq = ratio * cpu->pstate.scaling;
        if (cpu->pstate.scaling != cpu->pstate.perf_ctl_scaling)
                freq = rounddown(freq, cpu->pstate.perf_ctl_scaling);

        return sprintf(buf, "%d\n", freq);
}

cpufreq_freq_attr_ro(base_frequency);

static struct freq_attr *hwp_cpufreq_attrs[] = {
        &energy_performance_preference,
        &energy_performance_available_preferences,
        &base_frequency,
        NULL,
};

static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
        u64 cap;

        rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
        WRITE_ONCE(cpu->hwp_cap_cached, cap);
        cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap);
        cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap);
}

static void intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
        int scaling = cpu->pstate.scaling;

        __intel_pstate_get_hwp_cap(cpu);

        cpu->pstate.max_freq = cpu->pstate.max_pstate * scaling;
        cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
        if (scaling != cpu->pstate.perf_ctl_scaling) {
                int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;

                cpu->pstate.max_freq = rounddown(cpu->pstate.max_freq,
                                                 perf_ctl_scaling);
                cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_freq,
                                                   perf_ctl_scaling);
        }
}

static void intel_pstate_hwp_set(unsigned int cpu)
{
        struct cpudata *cpu_data = all_cpu_data[cpu];
        int max, min;
        u64 value;
        s16 epp;

        max = cpu_data->max_perf_ratio;
        min = cpu_data->min_perf_ratio;

        if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
                min = max;

        rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);

        value &= ~HWP_MIN_PERF(~0L);
        value |= HWP_MIN_PERF(min);

        value &= ~HWP_MAX_PERF(~0L);
        value |= HWP_MAX_PERF(max);

        if (cpu_data->epp_policy == cpu_data->policy)
                goto skip_epp;

        cpu_data->epp_policy = cpu_data->policy;

        if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
                epp = intel_pstate_get_epp(cpu_data, value);
                cpu_data->epp_powersave = epp;
                /* If the EPP read failed, then don't try to write. */
                if (epp < 0)
                        goto skip_epp;

                epp = 0;
        } else {
                /* Skip setting EPP when the saved value is invalid. */
                if (cpu_data->epp_powersave < 0)
                        goto skip_epp;

                /*
                 * No need to restore EPP when it is not zero.  This means:
                 *  - the policy is not changed,
                 *  - the user has manually changed it, or
                 *  - there was an error reading the EPB.
                 */
                epp = intel_pstate_get_epp(cpu_data, value);
                if (epp)
                        goto skip_epp;

                epp = cpu_data->epp_powersave;
        }
        if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
                value &= ~GENMASK_ULL(31, 24);
                value |= (u64)epp << 24;
        } else {
                intel_pstate_set_epb(cpu, epp);
        }
skip_epp:
        WRITE_ONCE(cpu_data->hwp_req_cached, value);
        wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}

static void intel_pstate_hwp_offline(struct cpudata *cpu)
{
        u64 value = READ_ONCE(cpu->hwp_req_cached);
        int min_perf;

        if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
                /*
                 * In case the EPP has been set to "performance" by the
                 * active mode "performance" scaling algorithm, replace that
                 * temporary value with the cached EPP one.
                 */
                value &= ~GENMASK_ULL(31, 24);
                value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
                WRITE_ONCE(cpu->hwp_req_cached, value);
        }

        value &= ~GENMASK_ULL(31, 0);
        min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached));

        /* Set hwp_max = hwp_min */
        value |= HWP_MAX_PERF(min_perf);
        value |= HWP_MIN_PERF(min_perf);

        /* Set EPP to min */
        if (boot_cpu_has(X86_FEATURE_HWP_EPP))
                value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);

        wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}

#define POWER_CTL_EE_ENABLE	1
#define POWER_CTL_EE_DISABLE	2

static int power_ctl_ee_state;

static void set_power_ctl_ee_state(bool input)
{
        u64 power_ctl;

        mutex_lock(&intel_pstate_driver_lock);
        rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
        if (input) {
                power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
                power_ctl_ee_state = POWER_CTL_EE_ENABLE;
        } else {
                power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
                power_ctl_ee_state = POWER_CTL_EE_DISABLE;
        }
        wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
        mutex_unlock(&intel_pstate_driver_lock);
}

static void intel_pstate_hwp_enable(struct cpudata *cpudata);

static void intel_pstate_hwp_reenable(struct cpudata *cpu)
{
        intel_pstate_hwp_enable(cpu);
        wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
}

static int intel_pstate_suspend(struct cpufreq_policy *policy)
{
        struct cpudata *cpu = all_cpu_data[policy->cpu];

        pr_debug("CPU %d suspending\n", cpu->cpu);

        cpu->suspended = true;

        return 0;
}

static int intel_pstate_resume(struct cpufreq_policy *policy)
{
        struct cpudata *cpu = all_cpu_data[policy->cpu];

        pr_debug("CPU %d resuming\n", cpu->cpu);

        /* Only restore if the system default is changed */
        if (power_ctl_ee_state == POWER_CTL_EE_ENABLE)
                set_power_ctl_ee_state(true);
        else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE)
                set_power_ctl_ee_state(false);

        if (cpu->suspended && hwp_active) {
                mutex_lock(&intel_pstate_limits_lock);

                /* Re-enable HWP, because "online" has not done that. */
                intel_pstate_hwp_reenable(cpu);

                mutex_unlock(&intel_pstate_limits_lock);
        }

        cpu->suspended = false;

        return 0;
}

static void intel_pstate_update_policies(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                cpufreq_update_policy(cpu);
}

static void intel_pstate_update_max_freq(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
        struct cpudata *cpudata;

        if (!policy)
                return;

        cpudata = all_cpu_data[cpu];
        policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
                        cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;

        refresh_frequency_limits(policy);

        cpufreq_cpu_release(policy);
}

static void intel_pstate_update_limits(unsigned int cpu)
{
        mutex_lock(&intel_pstate_driver_lock);

        update_turbo_state();
        /*
         * If turbo has been turned on or off globally, policy limits for
         * all CPUs need to be updated to reflect that.
         */
        if (global.turbo_disabled_mf != global.turbo_disabled) {
                global.turbo_disabled_mf = global.turbo_disabled;
                arch_set_max_freq_ratio(global.turbo_disabled);
                for_each_possible_cpu(cpu)
                        intel_pstate_update_max_freq(cpu);
        } else {
                cpufreq_update_policy(cpu);
        }

        mutex_unlock(&intel_pstate_driver_lock);
}

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
        static ssize_t show_##file_name					\
        (struct kobject *kobj, struct kobj_attribute *attr, char *buf)	\
        {								\
                return sprintf(buf, "%u\n", global.object);		\
        }

static ssize_t intel_pstate_show_status(char *buf);
static int intel_pstate_update_status(const char *buf, size_t size);

static ssize_t show_status(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
{
        ssize_t ret;

        mutex_lock(&intel_pstate_driver_lock);
        ret = intel_pstate_show_status(buf);
        mutex_unlock(&intel_pstate_driver_lock);

        return ret;
}

static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
                            const char *buf, size_t count)
{
        char *p = memchr(buf, '\n', count);
        int ret;

        mutex_lock(&intel_pstate_driver_lock);
        ret = intel_pstate_update_status(buf, p ? p - buf : count);
        mutex_unlock(&intel_pstate_driver_lock);

        return ret < 0 ? ret : count;
}

static ssize_t show_turbo_pct(struct kobject *kobj,
                              struct kobj_attribute *attr, char *buf)
{
        struct cpudata *cpu;
        int total, no_turbo, turbo_pct;
        uint32_t turbo_fp;

        mutex_lock(&intel_pstate_driver_lock);

        if (!intel_pstate_driver) {
                mutex_unlock(&intel_pstate_driver_lock);
                return -EAGAIN;
        }

        cpu = all_cpu_data[0];

        total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
        no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
        turbo_fp = div_fp(no_turbo, total);
        turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));

        mutex_unlock(&intel_pstate_driver_lock);

        return sprintf(buf, "%u\n", turbo_pct);
}
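
/*
 * Example (hypothetical ratios): with min_pstate == 8, max_pstate == 24 and
 * turbo_pstate == 32 there are 25 P-states in total, 17 of them non-turbo,
 * so the exact turbo share is 32%; the fixed-point truncation above makes
 * turbo_pct read 33 here, which may be off by one percent from the exact
 * integer result.
 */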

static ssize_t show_num_pstates(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf)
{
        struct cpudata *cpu;
        int total;

        mutex_lock(&intel_pstate_driver_lock);

        if (!intel_pstate_driver) {
                mutex_unlock(&intel_pstate_driver_lock);
                return -EAGAIN;
        }

        cpu = all_cpu_data[0];
        total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;

        mutex_unlock(&intel_pstate_driver_lock);

        return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
                             struct kobj_attribute *attr, char *buf)
{
        ssize_t ret;

        mutex_lock(&intel_pstate_driver_lock);

        if (!intel_pstate_driver) {
                mutex_unlock(&intel_pstate_driver_lock);
                return -EAGAIN;
        }

        update_turbo_state();
        if (global.turbo_disabled)
                ret = sprintf(buf, "%u\n", global.turbo_disabled);
        else
                ret = sprintf(buf, "%u\n", global.no_turbo);

        mutex_unlock(&intel_pstate_driver_lock);

        return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
                              const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        mutex_lock(&intel_pstate_driver_lock);

        if (!intel_pstate_driver) {
                mutex_unlock(&intel_pstate_driver_lock);
                return -EAGAIN;
        }

        mutex_lock(&intel_pstate_limits_lock);

        update_turbo_state();
        if (global.turbo_disabled) {
                pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
                mutex_unlock(&intel_pstate_limits_lock);
                mutex_unlock(&intel_pstate_driver_lock);
                return -EPERM;
        }

        global.no_turbo = clamp_t(int, input, 0, 1);

        if (global.no_turbo) {
                struct cpudata *cpu = all_cpu_data[0];
                int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;

                /* Squash the global minimum into the permitted range. */
                if (global.min_perf_pct > pct)
                        global.min_perf_pct = pct;
        }

        mutex_unlock(&intel_pstate_limits_lock);

        intel_pstate_update_policies();

        mutex_unlock(&intel_pstate_driver_lock);

        return count;
}

static void update_qos_request(enum freq_qos_req_type type)
{
        struct freq_qos_request *req;
        struct cpufreq_policy *policy;
        int i;

        for_each_possible_cpu(i) {
                struct cpudata *cpu = all_cpu_data[i];
                unsigned int freq, perf_pct;

                policy = cpufreq_cpu_get(i);
                if (!policy)
                        continue;

                req = policy->driver_data;
                cpufreq_cpu_put(policy);

                if (!req)
                        continue;

                if (hwp_active)
                        intel_pstate_get_hwp_cap(cpu);

                if (type == FREQ_QOS_MIN) {
                        perf_pct = global.min_perf_pct;
                } else {
                        req++;
                        perf_pct = global.max_perf_pct;
                }

                freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100);

                if (freq_qos_update_request(req, freq) < 0)
                        pr_warn("Failed to update freq constraint: CPU%d\n", i);
        }
}

static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
                                  const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        mutex_lock(&intel_pstate_driver_lock);

        if (!intel_pstate_driver) {
                mutex_unlock(&intel_pstate_driver_lock);
                return -EAGAIN;
        }

        mutex_lock(&intel_pstate_limits_lock);

        global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100);

        mutex_unlock(&intel_pstate_limits_lock);

        if (intel_pstate_driver == &intel_pstate)
                intel_pstate_update_policies();
        else
                update_qos_request(FREQ_QOS_MAX);

        mutex_unlock(&intel_pstate_driver_lock);

        return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
                                  const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        mutex_lock(&intel_pstate_driver_lock);

        if (!intel_pstate_driver) {
                mutex_unlock(&intel_pstate_driver_lock);
                return -EAGAIN;
        }

        mutex_lock(&intel_pstate_limits_lock);

        global.min_perf_pct = clamp_t(int, input,
                                      min_perf_pct_min(), global.max_perf_pct);

        mutex_unlock(&intel_pstate_limits_lock);

        if (intel_pstate_driver == &intel_pstate)
                intel_pstate_update_policies();
        else
                update_qos_request(FREQ_QOS_MIN);

        mutex_unlock(&intel_pstate_driver_lock);

        return count;
}

static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
                                      struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", hwp_boost);
}

static ssize_t store_hwp_dynamic_boost(struct kobject *a,
                                       struct kobj_attribute *b,
                                       const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = kstrtouint(buf, 10, &input);
        if (ret)
                return ret;

        mutex_lock(&intel_pstate_driver_lock);
        hwp_boost = !!input;
        intel_pstate_update_policies();
        mutex_unlock(&intel_pstate_driver_lock);

        return count;
}

static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr,
                                      char *buf)
{
        u64 power_ctl;
        int enable;

        rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
        enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE));
        return sprintf(buf, "%d\n", !enable);
}

static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b,
                                       const char *buf, size_t count)
{
        bool input;
        int ret;

        ret = kstrtobool(buf, &input);
        if (ret)
                return ret;

        set_power_ctl_ee_state(input);

        return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(status);
define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);
define_one_global_rw(hwp_dynamic_boost);
define_one_global_rw(energy_efficiency);

static struct attribute *intel_pstate_attributes[] = {
        &status.attr,
        &no_turbo.attr,
        NULL
};

static const struct attribute_group intel_pstate_attr_group = {
        .attrs = intel_pstate_attributes,
};

static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[];

static struct kobject *intel_pstate_kobject;

static void __init intel_pstate_sysfs_expose_params(void)
{
        int rc;

        intel_pstate_kobject = kobject_create_and_add("intel_pstate",
                                                &cpu_subsys.dev_root->kobj);
        if (WARN_ON(!intel_pstate_kobject))
                return;

        rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
        if (WARN_ON(rc))
                return;

        if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
                rc = sysfs_create_file(intel_pstate_kobject, &turbo_pct.attr);
                WARN_ON(rc);

                rc = sysfs_create_file(intel_pstate_kobject, &num_pstates.attr);
                WARN_ON(rc);
        }

        /*
         * If per cpu limits are enforced there are no global limits, so
         * return without creating max/min_perf_pct attributes
         */
        if (per_cpu_limits)
                return;

        rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
        WARN_ON(rc);

        rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
        WARN_ON(rc);

        if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) {
                rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr);
                WARN_ON(rc);
        }
}

static void __init intel_pstate_sysfs_remove(void)
{
        if (!intel_pstate_kobject)
                return;

        sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group);

        if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
                sysfs_remove_file(intel_pstate_kobject, &num_pstates.attr);
                sysfs_remove_file(intel_pstate_kobject, &turbo_pct.attr);
        }

        if (!per_cpu_limits) {
                sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr);
                sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr);

                if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids))
                        sysfs_remove_file(intel_pstate_kobject, &energy_efficiency.attr);
        }

        kobject_put(intel_pstate_kobject);
}

static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void)
{
        int rc;

        if (!hwp_active)
                return;

        rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
        WARN_ON_ONCE(rc);
}

static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
{
        if (!hwp_active)
                return;

        sysfs_remove_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
}

/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
        /* First disable HWP notification interrupt as we don't process them */
        if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
                wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

        wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
        if (cpudata->epp_default == -EINVAL)
                cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
}

static int atom_get_min_pstate(void)
{
        u64 value;

        rdmsrl(MSR_ATOM_CORE_RATIOS, value);
        return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
        u64 value;

        rdmsrl(MSR_ATOM_CORE_RATIOS, value);
        return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
        u64 value;

        rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
        return value & 0x7F;
}

static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
        u64 val;
        int32_t vid_fp;
        u32 vid;

        val = (u64)pstate << 8;
        if (global.no_turbo && !global.turbo_disabled)
                val |= (u64)1 << 32;

        vid_fp = cpudata->vid.min + mul_fp(
                int_tofp(pstate - cpudata->pstate.min_pstate),
                cpudata->vid.ratio);

        vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
        vid = ceiling_fp(vid_fp);

        if (pstate > cpudata->pstate.max_pstate)
                vid = cpudata->vid.turbo;

        return val | vid;
}

static int silvermont_get_scaling(void)
{
        u64 value;
        int i;
        /* Defined in Table 35-6 from SDM (Sept 2015) */
        static int silvermont_freq_table[] = {
                83300, 100000, 133300, 116700, 80000};

        rdmsrl(MSR_FSB_FREQ, value);
        i = value & 0x7;
        WARN_ON(i > 4);

        return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
        u64 value;
        int i;
        /* Defined in Table 35-10 from SDM (Sept 2015) */
        static int airmont_freq_table[] = {
                83300, 100000, 133300, 116700, 80000,
                93300, 90000, 88900, 87500};

        rdmsrl(MSR_FSB_FREQ, value);
        i = value & 0xF;
        WARN_ON(i > 8);

        return airmont_freq_table[i];
}
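
/*
 * Example (hypothetical MSR contents): if MSR_FSB_FREQ reports index 1, the
 * bus-clock tables above yield 100000 kHz per ratio unit, so P-state 20
 * corresponds to a 2 GHz target frequency.
 */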

static void atom_get_vid(struct cpudata *cpudata)
{
        u64 value;

        rdmsrl(MSR_ATOM_CORE_VIDS, value);
        cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
        cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
        cpudata->vid.ratio = div_fp(
                cpudata->vid.max - cpudata->vid.min,
                int_tofp(cpudata->pstate.max_pstate -
                        cpudata->pstate.min_pstate));

        rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
        cpudata->vid.turbo = value & 0x7f;
}

static int core_get_min_pstate(void)
{
        u64 value;

        rdmsrl(MSR_PLATFORM_INFO, value);
        return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
        u64 value;

        rdmsrl(MSR_PLATFORM_INFO, value);
        return (value >> 8) & 0xFF;
}

static int core_get_tdp_ratio(u64 plat_info)
{
        /* Check how many TDP levels are present */
        if (plat_info & 0x600000000) {
                u64 tdp_ctrl;
                u64 tdp_ratio;
                int tdp_msr;
                int err;

                /* Get the TDP level (0, 1, 2) to get ratios */
                err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
                if (err)
                        return err;

                /* The TDP MSRs are contiguous, starting at 0x648 */
                tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
                err = rdmsrl_safe(tdp_msr, &tdp_ratio);
                if (err)
                        return err;

                /* For levels 1 and 2, bits [23:16] contain the ratio */
                if (tdp_ctrl & 0x03)
                        tdp_ratio >>= 16;

                tdp_ratio &= 0xff; /* ratios are only 8 bits long */
                pr_debug("tdp_ratio %x\n", (int)tdp_ratio);

                return (int)tdp_ratio;
        }

        return -ENXIO;
}

static int core_get_max_pstate(void)
{
        u64 tar;
        u64 plat_info;
        int max_pstate;
        int tdp_ratio;
        int err;

        rdmsrl(MSR_PLATFORM_INFO, plat_info);
        max_pstate = (plat_info >> 8) & 0xFF;

        tdp_ratio = core_get_tdp_ratio(plat_info);
        if (tdp_ratio <= 0)
                return max_pstate;

        if (hwp_active) {
                /* Turbo activation ratio is not used on HWP platforms */
                return tdp_ratio;
        }

        err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
        if (!err) {
                int tar_levels;

                /* Do some sanity checking for safety */
                tar_levels = tar & 0xff;
                if (tdp_ratio - 1 == tar_levels) {
                        max_pstate = tar_levels;
                        pr_debug("max_pstate=TAC %x\n", max_pstate);
                }
        }

        return max_pstate;
}

static int core_get_turbo_pstate(void)
{
        u64 value;
        int nont, ret;

        rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
        nont = core_get_max_pstate();
        ret = (value) & 255;
        if (ret <= nont)
                ret = nont;
        return ret;
}

static inline int core_get_scaling(void)
{
        return 100000;
}

static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
        u64 val;

        val = (u64)pstate << 8;
        if (global.no_turbo && !global.turbo_disabled)
                val |= (u64)1 << 32;

        return val;
}

static int knl_get_aperf_mperf_shift(void)
{
        return 10;
}

static int knl_get_turbo_pstate(void)
{
        u64 value;
        int nont, ret;

        rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
        nont = core_get_max_pstate();
        ret = (((value) >> 8) & 0xFF);
        if (ret <= nont)
                ret = nont;
        return ret;
}

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
        trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
        cpu->pstate.current_pstate = pstate;
        /*
         * Generally, there is no guarantee that this code will always run on
         * the CPU being updated, so force the register update to run on the
         * right CPU.
         */
        wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
                      pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
        intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static void intel_pstate_max_within_limits(struct cpudata *cpu)
{
        int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);

        update_turbo_state();
        intel_pstate_set_pstate(cpu, pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
        bool hybrid_cpu = boot_cpu_has(X86_FEATURE_HYBRID_CPU);
        int perf_ctl_max_phys = pstate_funcs.get_max_physical();
        int perf_ctl_scaling = hybrid_cpu ? cpu_khz / perf_ctl_max_phys :
                                            pstate_funcs.get_scaling();

        cpu->pstate.min_pstate = pstate_funcs.get_min();
        cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
        cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;

        if (hwp_active && !hwp_mode_bdw) {
                __intel_pstate_get_hwp_cap(cpu);

                if (hybrid_cpu)
                        intel_pstate_hybrid_hwp_calibrate(cpu);
                else
                        cpu->pstate.scaling = perf_ctl_scaling;
        } else {
                cpu->pstate.scaling = perf_ctl_scaling;
                cpu->pstate.max_pstate = pstate_funcs.get_max();
                cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
        }

        if (cpu->pstate.scaling == perf_ctl_scaling) {
                cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
                cpu->pstate.max_freq = cpu->pstate.max_pstate * perf_ctl_scaling;
                cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * perf_ctl_scaling;
        }

        if (pstate_funcs.get_aperf_mperf_shift)
                cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();

        if (pstate_funcs.get_vid)
                pstate_funcs.get_vid(cpu);

        intel_pstate_set_min_pstate(cpu);
}
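
/*
 * Example of the hybrid scaling fallback above (hypothetical values): with
 * cpu_khz == 3200000 and a physical max P-state of 32, perf_ctl_scaling is
 * computed as 3200000 / 32 == 100000 kHz per P-state, matching the classic
 * 100 MHz bus-clock granularity returned by core_get_scaling().
 */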

/*
 * A long hold time will keep high perf limits for a long time, which
 * negatively impacts perf/watt for some workloads, like specpower.
 * 3ms is based on experiments on some workloads.
 */
static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;

static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
{
        u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
        u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
        u32 max_limit = (hwp_req & 0xff00) >> 8;
        u32 min_limit = (hwp_req & 0xff);
        u32 boost_level1;

        /*
         * Cases to consider (User changes via sysfs or boot time):
         * If, P0 (Turbo max) = P1 (Guaranteed max) = min:
         *	No boost, return.
         * If, P0 (Turbo max) > P1 (Guaranteed max) = min:
         *     Should result in one level boost only for P0.
         * If, P0 (Turbo max) = P1 (Guaranteed max) > min:
         *     Should result in two level boost:
         *		(min + p1)/2 and P1.
         * If, P0 (Turbo max) > P1 (Guaranteed max) > min:
         *     Should result in three level boost:
         *		(min + p1)/2, P1 and P0.
         */

        /* If max and min are equal or already at max, nothing to boost */
        if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit)
                return;

        if (!cpu->hwp_boost_min)
                cpu->hwp_boost_min = min_limit;

        /* level at the halfway mark between min and guaranteed */
        boost_level1 = (HWP_GUARANTEED_PERF(hwp_cap) + min_limit) >> 1;

        if (cpu->hwp_boost_min < boost_level1)
                cpu->hwp_boost_min = boost_level1;
        else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(hwp_cap))
                cpu->hwp_boost_min = HWP_GUARANTEED_PERF(hwp_cap);
        else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(hwp_cap) &&
                 max_limit != HWP_GUARANTEED_PERF(hwp_cap))
                cpu->hwp_boost_min = max_limit;
        else
                return;

        hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
        wrmsrl(MSR_HWP_REQUEST, hwp_req);
        cpu->last_update = cpu->sample.time;
}
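
/*
 * Boost ladder example (hypothetical limits): with min == 8, guaranteed
 * (P1) == 20 and max (P0) == 28, successive boost_up calls raise
 * hwp_boost_min through (8 + 20) / 2 == 14, then 20, then 28, matching the
 * three-level case described in the comment above.
 */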
2010 */ 2011 if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC)) 2012 do_io = true; 2013 2014 cpu->last_io_update = time; 2015 2016 if (do_io) 2017 intel_pstate_hwp_boost_up(cpu); 2018 2019 } else { 2020 intel_pstate_hwp_boost_down(cpu); 2021 } 2022 } 2023 2024 static inline void intel_pstate_update_util_hwp(struct update_util_data *data, 2025 u64 time, unsigned int flags) 2026 { 2027 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 2028 2029 cpu->sched_flags |= flags; 2030 2031 if (smp_processor_id() == cpu->cpu) 2032 intel_pstate_update_util_hwp_local(cpu, time); 2033 } 2034 2035 static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu) 2036 { 2037 struct sample *sample = &cpu->sample; 2038 2039 sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf); 2040 } 2041 2042 static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) 2043 { 2044 u64 aperf, mperf; 2045 unsigned long flags; 2046 u64 tsc; 2047 2048 local_irq_save(flags); 2049 rdmsrl(MSR_IA32_APERF, aperf); 2050 rdmsrl(MSR_IA32_MPERF, mperf); 2051 tsc = rdtsc(); 2052 if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) { 2053 local_irq_restore(flags); 2054 return false; 2055 } 2056 local_irq_restore(flags); 2057 2058 cpu->last_sample_time = cpu->sample.time; 2059 cpu->sample.time = time; 2060 cpu->sample.aperf = aperf; 2061 cpu->sample.mperf = mperf; 2062 cpu->sample.tsc = tsc; 2063 cpu->sample.aperf -= cpu->prev_aperf; 2064 cpu->sample.mperf -= cpu->prev_mperf; 2065 cpu->sample.tsc -= cpu->prev_tsc; 2066 2067 cpu->prev_aperf = aperf; 2068 cpu->prev_mperf = mperf; 2069 cpu->prev_tsc = tsc; 2070 /* 2071 * First time this function is invoked in a given cycle, all of the 2072 * previous sample data fields are equal to zero or stale and they must 2073 * be populated with meaningful numbers for things to work, so assume 2074 * that sample.time will always be reset before setting the utilization 2075 * update hook and make the caller skip the sample then. 2076 */ 2077 if (cpu->last_sample_time) { 2078 intel_pstate_calc_avg_perf(cpu); 2079 return true; 2080 } 2081 return false; 2082 } 2083 2084 static inline int32_t get_avg_frequency(struct cpudata *cpu) 2085 { 2086 return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz); 2087 } 2088 2089 static inline int32_t get_avg_pstate(struct cpudata *cpu) 2090 { 2091 return mul_ext_fp(cpu->pstate.max_pstate_physical, 2092 cpu->sample.core_avg_perf); 2093 } 2094 2095 static inline int32_t get_target_pstate(struct cpudata *cpu) 2096 { 2097 struct sample *sample = &cpu->sample; 2098 int32_t busy_frac; 2099 int target, avg_pstate; 2100 2101 busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift, 2102 sample->tsc); 2103 2104 if (busy_frac < cpu->iowait_boost) 2105 busy_frac = cpu->iowait_boost; 2106 2107 sample->busy_scaled = busy_frac * 100; 2108 2109 target = global.no_turbo || global.turbo_disabled ? 2110 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; 2111 target += target >> 2; 2112 target = mul_fp(target, busy_frac); 2113 if (target < cpu->pstate.min_pstate) 2114 target = cpu->pstate.min_pstate; 2115 2116 /* 2117 * If the average P-state during the previous cycle was higher than the 2118 * current target, add 50% of the difference to the target to reduce 2119 * possible performance oscillations and offset possible performance 2120 * loss related to moving the workload from one CPU to another within 2121 * a package/module. 
2122 */ 2123 avg_pstate = get_avg_pstate(cpu); 2124 if (avg_pstate > target) 2125 target += (avg_pstate - target) >> 1; 2126 2127 return target; 2128 } 2129 2130 static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) 2131 { 2132 int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio); 2133 int max_pstate = max(min_pstate, cpu->max_perf_ratio); 2134 2135 return clamp_t(int, pstate, min_pstate, max_pstate); 2136 } 2137 2138 static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) 2139 { 2140 if (pstate == cpu->pstate.current_pstate) 2141 return; 2142 2143 cpu->pstate.current_pstate = pstate; 2144 wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); 2145 } 2146 2147 static void intel_pstate_adjust_pstate(struct cpudata *cpu) 2148 { 2149 int from = cpu->pstate.current_pstate; 2150 struct sample *sample; 2151 int target_pstate; 2152 2153 update_turbo_state(); 2154 2155 target_pstate = get_target_pstate(cpu); 2156 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); 2157 trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); 2158 intel_pstate_update_pstate(cpu, target_pstate); 2159 2160 sample = &cpu->sample; 2161 trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf), 2162 fp_toint(sample->busy_scaled), 2163 from, 2164 cpu->pstate.current_pstate, 2165 sample->mperf, 2166 sample->aperf, 2167 sample->tsc, 2168 get_avg_frequency(cpu), 2169 fp_toint(cpu->iowait_boost * 100)); 2170 } 2171 2172 static void intel_pstate_update_util(struct update_util_data *data, u64 time, 2173 unsigned int flags) 2174 { 2175 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 2176 u64 delta_ns; 2177 2178 /* Don't allow remote callbacks */ 2179 if (smp_processor_id() != cpu->cpu) 2180 return; 2181 2182 delta_ns = time - cpu->last_update; 2183 if (flags & SCHED_CPUFREQ_IOWAIT) { 2184 /* Start over if the CPU may have been idle. */ 2185 if (delta_ns > TICK_NSEC) { 2186 cpu->iowait_boost = ONE_EIGHTH_FP; 2187 } else if (cpu->iowait_boost >= ONE_EIGHTH_FP) { 2188 cpu->iowait_boost <<= 1; 2189 if (cpu->iowait_boost > int_tofp(1)) 2190 cpu->iowait_boost = int_tofp(1); 2191 } else { 2192 cpu->iowait_boost = ONE_EIGHTH_FP; 2193 } 2194 } else if (cpu->iowait_boost) { 2195 /* Clear iowait_boost if the CPU may have been idle. 
static struct pstate_funcs core_funcs = {
	.get_max = core_get_max_pstate,
	.get_max_physical = core_get_max_pstate_physical,
	.get_min = core_get_min_pstate,
	.get_turbo = core_get_turbo_pstate,
	.get_scaling = core_get_scaling,
	.get_val = core_get_val,
};

static const struct pstate_funcs silvermont_funcs = {
	.get_max = atom_get_max_pstate,
	.get_max_physical = atom_get_max_pstate,
	.get_min = atom_get_min_pstate,
	.get_turbo = atom_get_turbo_pstate,
	.get_val = atom_get_val,
	.get_scaling = silvermont_get_scaling,
	.get_vid = atom_get_vid,
};

static const struct pstate_funcs airmont_funcs = {
	.get_max = atom_get_max_pstate,
	.get_max_physical = atom_get_max_pstate,
	.get_min = atom_get_min_pstate,
	.get_turbo = atom_get_turbo_pstate,
	.get_val = atom_get_val,
	.get_scaling = airmont_get_scaling,
	.get_vid = atom_get_vid,
};

static const struct pstate_funcs knl_funcs = {
	.get_max = core_get_max_pstate,
	.get_max_physical = core_get_max_pstate_physical,
	.get_min = core_get_min_pstate,
	.get_turbo = knl_get_turbo_pstate,
	.get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
	.get_scaling = core_get_scaling,
	.get_val = core_get_val,
};

#define X86_MATCH(model, policy)					 \
	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
					   X86_FEATURE_APERFMPERF, &policy)

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	X86_MATCH(SANDYBRIDGE, core_funcs),
	X86_MATCH(SANDYBRIDGE_X, core_funcs),
	X86_MATCH(ATOM_SILVERMONT, silvermont_funcs),
	X86_MATCH(IVYBRIDGE, core_funcs),
	X86_MATCH(HASWELL, core_funcs),
	X86_MATCH(BROADWELL, core_funcs),
	X86_MATCH(IVYBRIDGE_X, core_funcs),
	X86_MATCH(HASWELL_X, core_funcs),
	X86_MATCH(HASWELL_L, core_funcs),
	X86_MATCH(HASWELL_G, core_funcs),
	X86_MATCH(BROADWELL_G, core_funcs),
	X86_MATCH(ATOM_AIRMONT, airmont_funcs),
	X86_MATCH(SKYLAKE_L, core_funcs),
	X86_MATCH(BROADWELL_X, core_funcs),
	X86_MATCH(SKYLAKE, core_funcs),
	X86_MATCH(BROADWELL_D, core_funcs),
	X86_MATCH(XEON_PHI_KNL, knl_funcs),
	X86_MATCH(XEON_PHI_KNM, knl_funcs),
	X86_MATCH(ATOM_GOLDMONT, core_funcs),
	X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs),
	X86_MATCH(SKYLAKE_X, core_funcs),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
	X86_MATCH(BROADWELL_D, core_funcs),
	X86_MATCH(BROADWELL_X, core_funcs),
	X86_MATCH(SKYLAKE_X, core_funcs),
	{}
};

static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
	X86_MATCH(KABYLAKE, core_funcs),
	{}
};

static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = {
	X86_MATCH(SKYLAKE_X, core_funcs),
	X86_MATCH(SKYLAKE, core_funcs),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[cpunum];

	if (!cpu) {
		cpu = kzalloc(sizeof(*cpu), GFP_KERNEL);
if (!cpu) 2306 return -ENOMEM; 2307 2308 all_cpu_data[cpunum] = cpu; 2309 2310 cpu->cpu = cpunum; 2311 2312 cpu->epp_default = -EINVAL; 2313 2314 if (hwp_active) { 2315 const struct x86_cpu_id *id; 2316 2317 intel_pstate_hwp_enable(cpu); 2318 2319 id = x86_match_cpu(intel_pstate_hwp_boost_ids); 2320 if (id && intel_pstate_acpi_pm_profile_server()) 2321 hwp_boost = true; 2322 } 2323 } else if (hwp_active) { 2324 /* 2325 * Re-enable HWP in case this happens after a resume from ACPI 2326 * S3 if the CPU was offline during the whole system/resume 2327 * cycle. 2328 */ 2329 intel_pstate_hwp_reenable(cpu); 2330 } 2331 2332 cpu->epp_powersave = -EINVAL; 2333 cpu->epp_policy = 0; 2334 2335 intel_pstate_get_cpu_pstates(cpu); 2336 2337 pr_debug("controlling: cpu %d\n", cpunum); 2338 2339 return 0; 2340 } 2341 2342 static void intel_pstate_set_update_util_hook(unsigned int cpu_num) 2343 { 2344 struct cpudata *cpu = all_cpu_data[cpu_num]; 2345 2346 if (hwp_active && !hwp_boost) 2347 return; 2348 2349 if (cpu->update_util_set) 2350 return; 2351 2352 /* Prevent intel_pstate_update_util() from using stale data. */ 2353 cpu->sample.time = 0; 2354 cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, 2355 (hwp_active ? 2356 intel_pstate_update_util_hwp : 2357 intel_pstate_update_util)); 2358 cpu->update_util_set = true; 2359 } 2360 2361 static void intel_pstate_clear_update_util_hook(unsigned int cpu) 2362 { 2363 struct cpudata *cpu_data = all_cpu_data[cpu]; 2364 2365 if (!cpu_data->update_util_set) 2366 return; 2367 2368 cpufreq_remove_update_util_hook(cpu); 2369 cpu_data->update_util_set = false; 2370 synchronize_rcu(); 2371 } 2372 2373 static int intel_pstate_get_max_freq(struct cpudata *cpu) 2374 { 2375 return global.turbo_disabled || global.no_turbo ? 2376 cpu->pstate.max_freq : cpu->pstate.turbo_freq; 2377 } 2378 2379 static void intel_pstate_update_perf_limits(struct cpudata *cpu, 2380 unsigned int policy_min, 2381 unsigned int policy_max) 2382 { 2383 int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; 2384 int32_t max_policy_perf, min_policy_perf; 2385 2386 max_policy_perf = policy_max / perf_ctl_scaling; 2387 if (policy_max == policy_min) { 2388 min_policy_perf = max_policy_perf; 2389 } else { 2390 min_policy_perf = policy_min / perf_ctl_scaling; 2391 min_policy_perf = clamp_t(int32_t, min_policy_perf, 2392 0, max_policy_perf); 2393 } 2394 2395 /* 2396 * HWP needs some special consideration, because HWP_REQUEST uses 2397 * abstract values to represent performance rather than pure ratios. 2398 */ 2399 if (hwp_active) { 2400 intel_pstate_get_hwp_cap(cpu); 2401 2402 if (cpu->pstate.scaling != perf_ctl_scaling) { 2403 int scaling = cpu->pstate.scaling; 2404 int freq; 2405 2406 freq = max_policy_perf * perf_ctl_scaling; 2407 max_policy_perf = DIV_ROUND_UP(freq, scaling); 2408 freq = min_policy_perf * perf_ctl_scaling; 2409 min_policy_perf = DIV_ROUND_UP(freq, scaling); 2410 } 2411 } 2412 2413 pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n", 2414 cpu->cpu, min_policy_perf, max_policy_perf); 2415 2416 /* Normalize user input to [min_perf, max_perf] */ 2417 if (per_cpu_limits) { 2418 cpu->min_perf_ratio = min_policy_perf; 2419 cpu->max_perf_ratio = max_policy_perf; 2420 } else { 2421 int turbo_max = cpu->pstate.turbo_pstate; 2422 int32_t global_min, global_max; 2423 2424 /* Global limits are in percent of the maximum turbo P-state. 
		 */
		global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
		global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
		global_min = clamp_t(int32_t, global_min, 0, global_max);

		pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu,
			 global_min, global_max);

		cpu->min_perf_ratio = max(min_policy_perf, global_min);
		cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
		cpu->max_perf_ratio = min(max_policy_perf, global_max);
		cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);

		/* Make sure min_perf <= max_perf */
		cpu->min_perf_ratio = min(cpu->min_perf_ratio,
					  cpu->max_perf_ratio);

	}
	pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu,
		 cpu->max_perf_ratio,
		 cpu->min_perf_ratio);
}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
		 policy->cpuinfo.max_freq, policy->max);

	cpu = all_cpu_data[policy->cpu];
	cpu->policy = policy->policy;

	mutex_lock(&intel_pstate_limits_lock);

	intel_pstate_update_perf_limits(cpu, policy->min, policy->max);

	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
		/*
		 * NOHZ_FULL CPUs need this as the governor callback may not
		 * be invoked on them.
		 */
		intel_pstate_clear_update_util_hook(policy->cpu);
		intel_pstate_max_within_limits(cpu);
	} else {
		intel_pstate_set_update_util_hook(policy->cpu);
	}

	if (hwp_active) {
		/*
		 * If hwp_boost was active before and has been turned off
		 * dynamically, the update util hook needs to be cleared.
		 */
		if (!hwp_boost)
			intel_pstate_clear_update_util_hook(policy->cpu);
		intel_pstate_hwp_set(policy->cpu);
	}

	mutex_unlock(&intel_pstate_limits_lock);

	return 0;
}

static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
					   struct cpufreq_policy_data *policy)
{
	if (!hwp_active &&
	    cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
	    policy->max < policy->cpuinfo.max_freq &&
	    policy->max > cpu->pstate.max_freq) {
		pr_debug("policy->max > max non turbo frequency\n");
		policy->max = policy->cpuinfo.max_freq;
	}
}

static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
					   struct cpufreq_policy_data *policy)
{
	int max_freq;

	update_turbo_state();
	if (hwp_active) {
		intel_pstate_get_hwp_cap(cpu);
		max_freq = global.no_turbo || global.turbo_disabled ?
2512 cpu->pstate.max_freq : cpu->pstate.turbo_freq; 2513 } else { 2514 max_freq = intel_pstate_get_max_freq(cpu); 2515 } 2516 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq); 2517 2518 intel_pstate_adjust_policy_max(cpu, policy); 2519 } 2520 2521 static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy) 2522 { 2523 intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy); 2524 2525 return 0; 2526 } 2527 2528 static int intel_pstate_cpu_offline(struct cpufreq_policy *policy) 2529 { 2530 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2531 2532 pr_debug("CPU %d going offline\n", cpu->cpu); 2533 2534 if (cpu->suspended) 2535 return 0; 2536 2537 /* 2538 * If the CPU is an SMT thread and it goes offline with the performance 2539 * settings different from the minimum, it will prevent its sibling 2540 * from getting to lower performance levels, so force the minimum 2541 * performance on CPU offline to prevent that from happening. 2542 */ 2543 if (hwp_active) 2544 intel_pstate_hwp_offline(cpu); 2545 else 2546 intel_pstate_set_min_pstate(cpu); 2547 2548 intel_pstate_exit_perf_limits(policy); 2549 2550 return 0; 2551 } 2552 2553 static int intel_pstate_cpu_online(struct cpufreq_policy *policy) 2554 { 2555 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2556 2557 pr_debug("CPU %d going online\n", cpu->cpu); 2558 2559 intel_pstate_init_acpi_perf_limits(policy); 2560 2561 if (hwp_active) { 2562 /* 2563 * Re-enable HWP and clear the "suspended" flag to let "resume" 2564 * know that it need not do that. 2565 */ 2566 intel_pstate_hwp_reenable(cpu); 2567 cpu->suspended = false; 2568 } 2569 2570 return 0; 2571 } 2572 2573 static void intel_pstate_stop_cpu(struct cpufreq_policy *policy) 2574 { 2575 pr_debug("CPU %d stopping\n", policy->cpu); 2576 2577 intel_pstate_clear_update_util_hook(policy->cpu); 2578 } 2579 2580 static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) 2581 { 2582 pr_debug("CPU %d exiting\n", policy->cpu); 2583 2584 policy->fast_switch_possible = false; 2585 2586 return 0; 2587 } 2588 2589 static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) 2590 { 2591 struct cpudata *cpu; 2592 int rc; 2593 2594 rc = intel_pstate_init_cpu(policy->cpu); 2595 if (rc) 2596 return rc; 2597 2598 cpu = all_cpu_data[policy->cpu]; 2599 2600 cpu->max_perf_ratio = 0xFF; 2601 cpu->min_perf_ratio = 0; 2602 2603 /* cpuinfo and default policy values */ 2604 policy->cpuinfo.min_freq = cpu->pstate.min_freq; 2605 update_turbo_state(); 2606 global.turbo_disabled_mf = global.turbo_disabled; 2607 policy->cpuinfo.max_freq = global.turbo_disabled ? 2608 cpu->pstate.max_freq : cpu->pstate.turbo_freq; 2609 2610 policy->min = policy->cpuinfo.min_freq; 2611 policy->max = policy->cpuinfo.max_freq; 2612 2613 intel_pstate_init_acpi_perf_limits(policy); 2614 2615 policy->fast_switch_possible = true; 2616 2617 return 0; 2618 } 2619 2620 static int intel_pstate_cpu_init(struct cpufreq_policy *policy) 2621 { 2622 int ret = __intel_pstate_cpu_init(policy); 2623 2624 if (ret) 2625 return ret; 2626 2627 /* 2628 * Set the policy to powersave to provide a valid fallback value in case 2629 * the default cpufreq governor is neither powersave nor performance. 
2630 */ 2631 policy->policy = CPUFREQ_POLICY_POWERSAVE; 2632 2633 if (hwp_active) { 2634 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2635 2636 cpu->epp_cached = intel_pstate_get_epp(cpu, 0); 2637 } 2638 2639 return 0; 2640 } 2641 2642 static struct cpufreq_driver intel_pstate = { 2643 .flags = CPUFREQ_CONST_LOOPS, 2644 .verify = intel_pstate_verify_policy, 2645 .setpolicy = intel_pstate_set_policy, 2646 .suspend = intel_pstate_suspend, 2647 .resume = intel_pstate_resume, 2648 .init = intel_pstate_cpu_init, 2649 .exit = intel_pstate_cpu_exit, 2650 .stop_cpu = intel_pstate_stop_cpu, 2651 .offline = intel_pstate_cpu_offline, 2652 .online = intel_pstate_cpu_online, 2653 .update_limits = intel_pstate_update_limits, 2654 .name = "intel_pstate", 2655 }; 2656 2657 static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy) 2658 { 2659 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2660 2661 intel_pstate_verify_cpu_policy(cpu, policy); 2662 intel_pstate_update_perf_limits(cpu, policy->min, policy->max); 2663 2664 return 0; 2665 } 2666 2667 /* Use of trace in passive mode: 2668 * 2669 * In passive mode the trace core_busy field (also known as the 2670 * performance field, and lablelled as such on the graphs; also known as 2671 * core_avg_perf) is not needed and so is re-assigned to indicate if the 2672 * driver call was via the normal or fast switch path. Various graphs 2673 * output from the intel_pstate_tracer.py utility that include core_busy 2674 * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%, 2675 * so we use 10 to indicate the normal path through the driver, and 2676 * 90 to indicate the fast switch path through the driver. 2677 * The scaled_busy field is not used, and is set to 0. 2678 */ 2679 2680 #define INTEL_PSTATE_TRACE_TARGET 10 2681 #define INTEL_PSTATE_TRACE_FAST_SWITCH 90 2682 2683 static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate) 2684 { 2685 struct sample *sample; 2686 2687 if (!trace_pstate_sample_enabled()) 2688 return; 2689 2690 if (!intel_pstate_sample(cpu, ktime_get())) 2691 return; 2692 2693 sample = &cpu->sample; 2694 trace_pstate_sample(trace_type, 2695 0, 2696 old_pstate, 2697 cpu->pstate.current_pstate, 2698 sample->mperf, 2699 sample->aperf, 2700 sample->tsc, 2701 get_avg_frequency(cpu), 2702 fp_toint(cpu->iowait_boost * 100)); 2703 } 2704 2705 static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max, 2706 u32 desired, bool fast_switch) 2707 { 2708 u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev; 2709 2710 value &= ~HWP_MIN_PERF(~0L); 2711 value |= HWP_MIN_PERF(min); 2712 2713 value &= ~HWP_MAX_PERF(~0L); 2714 value |= HWP_MAX_PERF(max); 2715 2716 value &= ~HWP_DESIRED_PERF(~0L); 2717 value |= HWP_DESIRED_PERF(desired); 2718 2719 if (value == prev) 2720 return; 2721 2722 WRITE_ONCE(cpu->hwp_req_cached, value); 2723 if (fast_switch) 2724 wrmsrl(MSR_HWP_REQUEST, value); 2725 else 2726 wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); 2727 } 2728 2729 static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu, 2730 u32 target_pstate, bool fast_switch) 2731 { 2732 if (fast_switch) 2733 wrmsrl(MSR_IA32_PERF_CTL, 2734 pstate_funcs.get_val(cpu, target_pstate)); 2735 else 2736 wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, 2737 pstate_funcs.get_val(cpu, target_pstate)); 2738 } 2739 2740 static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy, 2741 int target_pstate, bool fast_switch) 2742 { 2743 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2744 int 
static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
				       int target_pstate, bool fast_switch)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int old_pstate = cpu->pstate.current_pstate;

	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	if (hwp_active) {
		int max_pstate = policy->strict_target ?
					target_pstate : cpu->max_perf_ratio;

		intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0,
					 fast_switch);
	} else if (target_pstate != old_pstate) {
		intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch);
	}

	cpu->pstate.current_pstate = target_pstate;

	intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH :
			    INTEL_PSTATE_TRACE_TARGET, old_pstate);

	return target_pstate;
}

static int intel_cpufreq_target(struct cpufreq_policy *policy,
				unsigned int target_freq,
				unsigned int relation)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	struct cpufreq_freqs freqs;
	int target_pstate;

	update_turbo_state();

	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_freq_transition_begin(policy, &freqs);

	switch (relation) {
	case CPUFREQ_RELATION_L:
		target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
		break;
	case CPUFREQ_RELATION_H:
		target_pstate = freqs.new / cpu->pstate.scaling;
		break;
	default:
		target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
		break;
	}

	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);

	freqs.new = target_pstate * cpu->pstate.scaling;

	cpufreq_freq_transition_end(policy, &freqs, false);

	return 0;
}

static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int target_pstate;

	update_turbo_state();

	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);

	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);

	return target_pstate * cpu->pstate.scaling;
}

static void intel_cpufreq_adjust_perf(unsigned int cpunum,
				      unsigned long min_perf,
				      unsigned long target_perf,
				      unsigned long capacity)
{
	struct cpudata *cpu = all_cpu_data[cpunum];
	u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
	int old_pstate = cpu->pstate.current_pstate;
	int cap_pstate, min_pstate, max_pstate, target_pstate;

	update_turbo_state();
	cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
					     HWP_HIGHEST_PERF(hwp_cap);

	/* Optimization: Avoid unnecessary divisions.
*/ 2831 2832 target_pstate = cap_pstate; 2833 if (target_perf < capacity) 2834 target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity); 2835 2836 min_pstate = cap_pstate; 2837 if (min_perf < capacity) 2838 min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity); 2839 2840 if (min_pstate < cpu->pstate.min_pstate) 2841 min_pstate = cpu->pstate.min_pstate; 2842 2843 if (min_pstate < cpu->min_perf_ratio) 2844 min_pstate = cpu->min_perf_ratio; 2845 2846 max_pstate = min(cap_pstate, cpu->max_perf_ratio); 2847 if (max_pstate < min_pstate) 2848 max_pstate = min_pstate; 2849 2850 target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate); 2851 2852 intel_cpufreq_hwp_update(cpu, min_pstate, max_pstate, target_pstate, true); 2853 2854 cpu->pstate.current_pstate = target_pstate; 2855 intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate); 2856 } 2857 2858 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) 2859 { 2860 struct freq_qos_request *req; 2861 struct cpudata *cpu; 2862 struct device *dev; 2863 int ret, freq; 2864 2865 dev = get_cpu_device(policy->cpu); 2866 if (!dev) 2867 return -ENODEV; 2868 2869 ret = __intel_pstate_cpu_init(policy); 2870 if (ret) 2871 return ret; 2872 2873 policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY; 2874 /* This reflects the intel_pstate_get_cpu_pstates() setting. */ 2875 policy->cur = policy->cpuinfo.min_freq; 2876 2877 req = kcalloc(2, sizeof(*req), GFP_KERNEL); 2878 if (!req) { 2879 ret = -ENOMEM; 2880 goto pstate_exit; 2881 } 2882 2883 cpu = all_cpu_data[policy->cpu]; 2884 2885 if (hwp_active) { 2886 u64 value; 2887 2888 policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP; 2889 2890 intel_pstate_get_hwp_cap(cpu); 2891 2892 rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value); 2893 WRITE_ONCE(cpu->hwp_req_cached, value); 2894 2895 cpu->epp_cached = intel_pstate_get_epp(cpu, value); 2896 } else { 2897 policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY; 2898 } 2899 2900 freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.min_perf_pct, 100); 2901 2902 ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN, 2903 freq); 2904 if (ret < 0) { 2905 dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret); 2906 goto free_req; 2907 } 2908 2909 freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.max_perf_pct, 100); 2910 2911 ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX, 2912 freq); 2913 if (ret < 0) { 2914 dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret); 2915 goto remove_min_req; 2916 } 2917 2918 policy->driver_data = req; 2919 2920 return 0; 2921 2922 remove_min_req: 2923 freq_qos_remove_request(req); 2924 free_req: 2925 kfree(req); 2926 pstate_exit: 2927 intel_pstate_exit_perf_limits(policy); 2928 2929 return ret; 2930 } 2931 2932 static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy) 2933 { 2934 struct freq_qos_request *req; 2935 2936 req = policy->driver_data; 2937 2938 freq_qos_remove_request(req + 1); 2939 freq_qos_remove_request(req); 2940 kfree(req); 2941 2942 return intel_pstate_cpu_exit(policy); 2943 } 2944 2945 static struct cpufreq_driver intel_cpufreq = { 2946 .flags = CPUFREQ_CONST_LOOPS, 2947 .verify = intel_cpufreq_verify_policy, 2948 .target = intel_cpufreq_target, 2949 .fast_switch = intel_cpufreq_fast_switch, 2950 .init = intel_cpufreq_cpu_init, 2951 .exit = intel_cpufreq_cpu_exit, 2952 .offline = intel_pstate_cpu_offline, 2953 .online = intel_pstate_cpu_online, 2954 .suspend = intel_pstate_suspend, 2955 
.resume = intel_pstate_resume, 2956 .update_limits = intel_pstate_update_limits, 2957 .name = "intel_cpufreq", 2958 }; 2959 2960 static struct cpufreq_driver *default_driver; 2961 2962 static void intel_pstate_driver_cleanup(void) 2963 { 2964 unsigned int cpu; 2965 2966 get_online_cpus(); 2967 for_each_online_cpu(cpu) { 2968 if (all_cpu_data[cpu]) { 2969 if (intel_pstate_driver == &intel_pstate) 2970 intel_pstate_clear_update_util_hook(cpu); 2971 2972 kfree(all_cpu_data[cpu]); 2973 all_cpu_data[cpu] = NULL; 2974 } 2975 } 2976 put_online_cpus(); 2977 2978 intel_pstate_driver = NULL; 2979 } 2980 2981 static int intel_pstate_register_driver(struct cpufreq_driver *driver) 2982 { 2983 int ret; 2984 2985 if (driver == &intel_pstate) 2986 intel_pstate_sysfs_expose_hwp_dynamic_boost(); 2987 2988 memset(&global, 0, sizeof(global)); 2989 global.max_perf_pct = 100; 2990 2991 intel_pstate_driver = driver; 2992 ret = cpufreq_register_driver(intel_pstate_driver); 2993 if (ret) { 2994 intel_pstate_driver_cleanup(); 2995 return ret; 2996 } 2997 2998 global.min_perf_pct = min_perf_pct_min(); 2999 3000 return 0; 3001 } 3002 3003 static ssize_t intel_pstate_show_status(char *buf) 3004 { 3005 if (!intel_pstate_driver) 3006 return sprintf(buf, "off\n"); 3007 3008 return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ? 3009 "active" : "passive"); 3010 } 3011 3012 static int intel_pstate_update_status(const char *buf, size_t size) 3013 { 3014 if (size == 3 && !strncmp(buf, "off", size)) { 3015 if (!intel_pstate_driver) 3016 return -EINVAL; 3017 3018 if (hwp_active) 3019 return -EBUSY; 3020 3021 cpufreq_unregister_driver(intel_pstate_driver); 3022 intel_pstate_driver_cleanup(); 3023 return 0; 3024 } 3025 3026 if (size == 6 && !strncmp(buf, "active", size)) { 3027 if (intel_pstate_driver) { 3028 if (intel_pstate_driver == &intel_pstate) 3029 return 0; 3030 3031 cpufreq_unregister_driver(intel_pstate_driver); 3032 } 3033 3034 return intel_pstate_register_driver(&intel_pstate); 3035 } 3036 3037 if (size == 7 && !strncmp(buf, "passive", size)) { 3038 if (intel_pstate_driver) { 3039 if (intel_pstate_driver == &intel_cpufreq) 3040 return 0; 3041 3042 cpufreq_unregister_driver(intel_pstate_driver); 3043 intel_pstate_sysfs_hide_hwp_dynamic_boost(); 3044 } 3045 3046 return intel_pstate_register_driver(&intel_cpufreq); 3047 } 3048 3049 return -EINVAL; 3050 } 3051 3052 static int no_load __initdata; 3053 static int no_hwp __initdata; 3054 static int hwp_only __initdata; 3055 static unsigned int force_load __initdata; 3056 3057 static int __init intel_pstate_msrs_not_valid(void) 3058 { 3059 if (!pstate_funcs.get_max() || 3060 !pstate_funcs.get_min() || 3061 !pstate_funcs.get_turbo()) 3062 return -ENODEV; 3063 3064 return 0; 3065 } 3066 3067 static void __init copy_cpu_funcs(struct pstate_funcs *funcs) 3068 { 3069 pstate_funcs.get_max = funcs->get_max; 3070 pstate_funcs.get_max_physical = funcs->get_max_physical; 3071 pstate_funcs.get_min = funcs->get_min; 3072 pstate_funcs.get_turbo = funcs->get_turbo; 3073 pstate_funcs.get_scaling = funcs->get_scaling; 3074 pstate_funcs.get_val = funcs->get_val; 3075 pstate_funcs.get_vid = funcs->get_vid; 3076 pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift; 3077 } 3078 3079 #ifdef CONFIG_ACPI 3080 3081 static bool __init intel_pstate_no_acpi_pss(void) 3082 { 3083 int i; 3084 3085 for_each_possible_cpu(i) { 3086 acpi_status status; 3087 union acpi_object *pss; 3088 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 3089 struct acpi_processor *pr = 
per_cpu(processors, i); 3090 3091 if (!pr) 3092 continue; 3093 3094 status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); 3095 if (ACPI_FAILURE(status)) 3096 continue; 3097 3098 pss = buffer.pointer; 3099 if (pss && pss->type == ACPI_TYPE_PACKAGE) { 3100 kfree(pss); 3101 return false; 3102 } 3103 3104 kfree(pss); 3105 } 3106 3107 pr_debug("ACPI _PSS not found\n"); 3108 return true; 3109 } 3110 3111 static bool __init intel_pstate_no_acpi_pcch(void) 3112 { 3113 acpi_status status; 3114 acpi_handle handle; 3115 3116 status = acpi_get_handle(NULL, "\\_SB", &handle); 3117 if (ACPI_FAILURE(status)) 3118 goto not_found; 3119 3120 if (acpi_has_method(handle, "PCCH")) 3121 return false; 3122 3123 not_found: 3124 pr_debug("ACPI PCCH not found\n"); 3125 return true; 3126 } 3127 3128 static bool __init intel_pstate_has_acpi_ppc(void) 3129 { 3130 int i; 3131 3132 for_each_possible_cpu(i) { 3133 struct acpi_processor *pr = per_cpu(processors, i); 3134 3135 if (!pr) 3136 continue; 3137 if (acpi_has_method(pr->handle, "_PPC")) 3138 return true; 3139 } 3140 pr_debug("ACPI _PPC not found\n"); 3141 return false; 3142 } 3143 3144 enum { 3145 PSS, 3146 PPC, 3147 }; 3148 3149 /* Hardware vendor-specific info that has its own power management modes */ 3150 static struct acpi_platform_list plat_info[] __initdata = { 3151 {"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS}, 3152 {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3153 {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3154 {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3155 {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3156 {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3157 {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3158 {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3159 {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3160 {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3161 {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3162 {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3163 {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3164 {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3165 {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, 3166 { } /* End */ 3167 }; 3168 3169 #define BITMASK_OOB (BIT(8) | BIT(18)) 3170 3171 static bool __init intel_pstate_platform_pwr_mgmt_exists(void) 3172 { 3173 const struct x86_cpu_id *id; 3174 u64 misc_pwr; 3175 int idx; 3176 3177 id = x86_match_cpu(intel_pstate_cpu_oob_ids); 3178 if (id) { 3179 rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); 3180 if (misc_pwr & BITMASK_OOB) { 3181 pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n"); 3182 pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n"); 3183 return true; 3184 } 3185 } 3186 3187 idx = acpi_match_platform_list(plat_info); 3188 if (idx < 0) 3189 return false; 3190 3191 switch (plat_info[idx].data) { 3192 case PSS: 3193 if (!intel_pstate_no_acpi_pss()) 3194 return false; 3195 3196 return intel_pstate_no_acpi_pcch(); 3197 case PPC: 3198 return intel_pstate_has_acpi_ppc() && !force_load; 3199 } 3200 3201 return false; 3202 } 3203 3204 static void intel_pstate_request_control_from_smm(void) 3205 { 3206 /* 3207 * It may be unsafe to request P-states control from SMM if _PPC support 3208 * has not been enabled. 
3209 */ 3210 if (acpi_ppc) 3211 acpi_processor_pstate_control(); 3212 } 3213 #else /* CONFIG_ACPI not enabled */ 3214 static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; } 3215 static inline bool intel_pstate_has_acpi_ppc(void) { return false; } 3216 static inline void intel_pstate_request_control_from_smm(void) {} 3217 #endif /* CONFIG_ACPI */ 3218 3219 #define INTEL_PSTATE_HWP_BROADWELL 0x01 3220 3221 #define X86_MATCH_HWP(model, hwp_mode) \ 3222 X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ 3223 X86_FEATURE_HWP, hwp_mode) 3224 3225 static const struct x86_cpu_id hwp_support_ids[] __initconst = { 3226 X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), 3227 X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL), 3228 X86_MATCH_HWP(ANY, 0), 3229 {} 3230 }; 3231 3232 static bool intel_pstate_hwp_is_enabled(void) 3233 { 3234 u64 value; 3235 3236 rdmsrl(MSR_PM_ENABLE, value); 3237 return !!(value & 0x1); 3238 } 3239 3240 static int __init intel_pstate_init(void) 3241 { 3242 const struct x86_cpu_id *id; 3243 int rc; 3244 3245 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) 3246 return -ENODEV; 3247 3248 if (no_load) 3249 return -ENODEV; 3250 3251 id = x86_match_cpu(hwp_support_ids); 3252 if (id) { 3253 copy_cpu_funcs(&core_funcs); 3254 /* 3255 * Avoid enabling HWP for processors without EPP support, 3256 * because that means incomplete HWP implementation which is a 3257 * corner case and supporting it is generally problematic. 3258 * 3259 * If HWP is enabled already, though, there is no choice but to 3260 * deal with it. 3261 */ 3262 if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || 3263 intel_pstate_hwp_is_enabled()) { 3264 hwp_active++; 3265 hwp_mode_bdw = id->driver_data; 3266 intel_pstate.attr = hwp_cpufreq_attrs; 3267 intel_cpufreq.attr = hwp_cpufreq_attrs; 3268 intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS; 3269 intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf; 3270 if (!default_driver) 3271 default_driver = &intel_pstate; 3272 3273 goto hwp_cpu_matched; 3274 } 3275 } else { 3276 id = x86_match_cpu(intel_pstate_cpu_ids); 3277 if (!id) { 3278 pr_info("CPU model not supported\n"); 3279 return -ENODEV; 3280 } 3281 3282 copy_cpu_funcs((struct pstate_funcs *)id->driver_data); 3283 } 3284 3285 if (intel_pstate_msrs_not_valid()) { 3286 pr_info("Invalid MSRs\n"); 3287 return -ENODEV; 3288 } 3289 /* Without HWP start in the passive mode. */ 3290 if (!default_driver) 3291 default_driver = &intel_cpufreq; 3292 3293 hwp_cpu_matched: 3294 /* 3295 * The Intel pstate driver will be ignored if the platform 3296 * firmware has its own power management modes. 
3297 */ 3298 if (intel_pstate_platform_pwr_mgmt_exists()) { 3299 pr_info("P-states controlled by the platform\n"); 3300 return -ENODEV; 3301 } 3302 3303 if (!hwp_active && hwp_only) 3304 return -ENOTSUPP; 3305 3306 pr_info("Intel P-state driver initializing\n"); 3307 3308 all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus())); 3309 if (!all_cpu_data) 3310 return -ENOMEM; 3311 3312 intel_pstate_request_control_from_smm(); 3313 3314 intel_pstate_sysfs_expose_params(); 3315 3316 mutex_lock(&intel_pstate_driver_lock); 3317 rc = intel_pstate_register_driver(default_driver); 3318 mutex_unlock(&intel_pstate_driver_lock); 3319 if (rc) { 3320 intel_pstate_sysfs_remove(); 3321 return rc; 3322 } 3323 3324 if (hwp_active) { 3325 const struct x86_cpu_id *id; 3326 3327 id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids); 3328 if (id) { 3329 set_power_ctl_ee_state(false); 3330 pr_info("Disabling energy efficiency optimization\n"); 3331 } 3332 3333 pr_info("HWP enabled\n"); 3334 } else if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) { 3335 pr_warn("Problematic setup: Hybrid processor with disabled HWP\n"); 3336 } 3337 3338 return 0; 3339 } 3340 device_initcall(intel_pstate_init); 3341 3342 static int __init intel_pstate_setup(char *str) 3343 { 3344 if (!str) 3345 return -EINVAL; 3346 3347 if (!strcmp(str, "disable")) 3348 no_load = 1; 3349 else if (!strcmp(str, "active")) 3350 default_driver = &intel_pstate; 3351 else if (!strcmp(str, "passive")) 3352 default_driver = &intel_cpufreq; 3353 3354 if (!strcmp(str, "no_hwp")) { 3355 pr_info("HWP disabled\n"); 3356 no_hwp = 1; 3357 } 3358 if (!strcmp(str, "force")) 3359 force_load = 1; 3360 if (!strcmp(str, "hwp_only")) 3361 hwp_only = 1; 3362 if (!strcmp(str, "per_cpu_perf_limits")) 3363 per_cpu_limits = true; 3364 3365 #ifdef CONFIG_ACPI 3366 if (!strcmp(str, "support_acpi_ppc")) 3367 acpi_ppc = true; 3368 #endif 3369 3370 return 0; 3371 } 3372 early_param("intel_pstate", intel_pstate_setup); 3373 3374 MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>"); 3375 MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors"); 3376 MODULE_LICENSE("GPL"); 3377