1 /* 2 * intel_pstate.c: Native P state management for Intel processors 3 * 4 * (C) Copyright 2012 Intel Corporation 5 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com> 6 * 7 * This program is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU General Public License 9 * as published by the Free Software Foundation; version 2 10 * of the License. 11 */ 12 13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 15 #include <linux/kernel.h> 16 #include <linux/kernel_stat.h> 17 #include <linux/module.h> 18 #include <linux/ktime.h> 19 #include <linux/hrtimer.h> 20 #include <linux/tick.h> 21 #include <linux/slab.h> 22 #include <linux/sched/cpufreq.h> 23 #include <linux/list.h> 24 #include <linux/cpu.h> 25 #include <linux/cpufreq.h> 26 #include <linux/sysfs.h> 27 #include <linux/types.h> 28 #include <linux/fs.h> 29 #include <linux/debugfs.h> 30 #include <linux/acpi.h> 31 #include <linux/vmalloc.h> 32 #include <trace/events/power.h> 33 34 #include <asm/div64.h> 35 #include <asm/msr.h> 36 #include <asm/cpu_device_id.h> 37 #include <asm/cpufeature.h> 38 #include <asm/intel-family.h> 39 40 #define INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC) 41 #define INTEL_PSTATE_HWP_SAMPLING_INTERVAL (50 * NSEC_PER_MSEC) 42 43 #define INTEL_CPUFREQ_TRANSITION_LATENCY 20000 44 #define INTEL_CPUFREQ_TRANSITION_DELAY 500 45 46 #ifdef CONFIG_ACPI 47 #include <acpi/processor.h> 48 #include <acpi/cppc_acpi.h> 49 #endif 50 51 #define FRAC_BITS 8 52 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) 53 #define fp_toint(X) ((X) >> FRAC_BITS) 54 55 #define EXT_BITS 6 56 #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS) 57 #define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS) 58 #define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS) 59 60 static inline int32_t mul_fp(int32_t x, int32_t y) 61 { 62 return ((int64_t)x * (int64_t)y) >> FRAC_BITS; 63 } 64 65 static inline int32_t div_fp(s64 x, s64 y) 66 { 67 return div64_s64((int64_t)x << FRAC_BITS, y); 68 } 69 70 static inline int ceiling_fp(int32_t x) 71 { 72 int mask, ret; 73 74 ret = fp_toint(x); 75 mask = (1 << FRAC_BITS) - 1; 76 if (x & mask) 77 ret += 1; 78 return ret; 79 } 80 81 static inline int32_t percent_fp(int percent) 82 { 83 return div_fp(percent, 100); 84 } 85 86 static inline u64 mul_ext_fp(u64 x, u64 y) 87 { 88 return (x * y) >> EXT_FRAC_BITS; 89 } 90 91 static inline u64 div_ext_fp(u64 x, u64 y) 92 { 93 return div64_u64(x << EXT_FRAC_BITS, y); 94 } 95 96 static inline int32_t percent_ext_fp(int percent) 97 { 98 return div_ext_fp(percent, 100); 99 } 100 101 /** 102 * struct sample - Store performance sample 103 * @core_avg_perf: Ratio of APERF/MPERF which is the actual average 104 * performance during last sample period 105 * @busy_scaled: Scaled busy value which is used to calculate next 106 * P state. This can be different than core_avg_perf 107 * to account for cpu idle period 108 * @aperf: Difference of actual performance frequency clock count 109 * read from APERF MSR between last and current sample 110 * @mperf: Difference of maximum performance frequency clock count 111 * read from MPERF MSR between last and current sample 112 * @tsc: Difference of time stamp counter between last and 113 * current sample 114 * @time: Current time from scheduler 115 * 116 * This structure is used in the cpudata structure to store performance sample 117 * data for choosing next P State. 
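 *
 * As an illustration of the fixed point helpers above (FRAC_BITS == 8):
 * int_tofp(3) == 768, percent_fp(97) == div_fp(97, 100) == (97 << 8) / 100
 * == 248 (~0.97), and mul_fp(int_tofp(3), percent_fp(50)) ==
 * (768 * 128) >> 8 == 384, i.e. 1.5 in fixed point.  The "ext" variants
 * behave the same way but carry 6 extra fractional bits
 * (EXT_FRAC_BITS == 14), so int_ext_tofp(1) == 16384.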
118 */ 119 struct sample { 120 int32_t core_avg_perf; 121 int32_t busy_scaled; 122 u64 aperf; 123 u64 mperf; 124 u64 tsc; 125 u64 time; 126 }; 127 128 /** 129 * struct pstate_data - Store P state data 130 * @current_pstate: Current requested P state 131 * @min_pstate: Min P state possible for this platform 132 * @max_pstate: Max P state possible for this platform 133 * @max_pstate_physical:This is physical Max P state for a processor 134 * This can be higher than the max_pstate which can 135 * be limited by platform thermal design power limits 136 * @scaling: Scaling factor to convert frequency to cpufreq 137 * frequency units 138 * @turbo_pstate: Max Turbo P state possible for this platform 139 * @max_freq: @max_pstate frequency in cpufreq units 140 * @turbo_freq: @turbo_pstate frequency in cpufreq units 141 * 142 * Stores the per cpu model P state limits and current P state. 143 */ 144 struct pstate_data { 145 int current_pstate; 146 int min_pstate; 147 int max_pstate; 148 int max_pstate_physical; 149 int scaling; 150 int turbo_pstate; 151 unsigned int max_freq; 152 unsigned int turbo_freq; 153 }; 154 155 /** 156 * struct vid_data - Stores voltage information data 157 * @min: VID data for this platform corresponding to 158 * the lowest P state 159 * @max: VID data corresponding to the highest P State. 160 * @turbo: VID data for turbo P state 161 * @ratio: Ratio of (vid max - vid min) / 162 * (max P state - Min P State) 163 * 164 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling) 165 * This data is used in Atom platforms, where in addition to target P state, 166 * the voltage data needs to be specified to select next P State. 167 */ 168 struct vid_data { 169 int min; 170 int max; 171 int turbo; 172 int32_t ratio; 173 }; 174 175 /** 176 * struct _pid - Stores PID data 177 * @setpoint: Target set point for busyness or performance 178 * @integral: Storage for accumulated error values 179 * @p_gain: PID proportional gain 180 * @i_gain: PID integral gain 181 * @d_gain: PID derivative gain 182 * @deadband: PID deadband 183 * @last_err: Last error storage for integral part of PID calculation 184 * 185 * Stores PID coefficients and last error for PID controller. 186 */ 187 struct _pid { 188 int setpoint; 189 int32_t integral; 190 int32_t p_gain; 191 int32_t i_gain; 192 int32_t d_gain; 193 int deadband; 194 int32_t last_err; 195 }; 196 197 /** 198 * struct global_params - Global parameters, mostly tunable via sysfs. 199 * @no_turbo: Whether or not to use turbo P-states. 200 * @turbo_disabled: Whethet or not turbo P-states are available at all, 201 * based on the MSR_IA32_MISC_ENABLE value and whether or 202 * not the maximum reported turbo P-state is different from 203 * the maximum reported non-turbo one. 204 * @min_perf_pct: Minimum capacity limit in percent of the maximum turbo 205 * P-state capacity. 206 * @max_perf_pct: Maximum capacity limit in percent of the maximum turbo 207 * P-state capacity. 208 */ 209 struct global_params { 210 bool no_turbo; 211 bool turbo_disabled; 212 int max_perf_pct; 213 int min_perf_pct; 214 }; 215 216 /** 217 * struct cpudata - Per CPU instance data storage 218 * @cpu: CPU number for this instance data 219 * @policy: CPUFreq policy value 220 * @update_util: CPUFreq utility callback information 221 * @update_util_set: CPUFreq utility callback is set 222 * @iowait_boost: iowait-related boost fraction 223 * @last_update: Time of the last update. 
224 * @pstate: Stores P state limits for this CPU 225 * @vid: Stores VID limits for this CPU 226 * @pid: Stores PID parameters for this CPU 227 * @last_sample_time: Last Sample time 228 * @prev_aperf: Last APERF value read from APERF MSR 229 * @prev_mperf: Last MPERF value read from MPERF MSR 230 * @prev_tsc: Last timestamp counter (TSC) value 231 * @prev_cummulative_iowait: IO Wait time difference from last and 232 * current sample 233 * @sample: Storage for storing last Sample data 234 * @min_perf_ratio: Minimum capacity in terms of PERF or HWP ratios 235 * @max_perf_ratio: Maximum capacity in terms of PERF or HWP ratios 236 * @acpi_perf_data: Stores ACPI perf information read from _PSS 237 * @valid_pss_table: Set to true for valid ACPI _PSS entries found 238 * @epp_powersave: Last saved HWP energy performance preference 239 * (EPP) or energy performance bias (EPB), 240 * when policy switched to performance 241 * @epp_policy: Last saved policy used to set EPP/EPB 242 * @epp_default: Power on default HWP energy performance 243 * preference/bias 244 * @epp_saved: Saved EPP/EPB during system suspend or CPU offline 245 * operation 246 * 247 * This structure stores per CPU instance data for all CPUs. 248 */ 249 struct cpudata { 250 int cpu; 251 252 unsigned int policy; 253 struct update_util_data update_util; 254 bool update_util_set; 255 256 struct pstate_data pstate; 257 struct vid_data vid; 258 struct _pid pid; 259 260 u64 last_update; 261 u64 last_sample_time; 262 u64 prev_aperf; 263 u64 prev_mperf; 264 u64 prev_tsc; 265 u64 prev_cummulative_iowait; 266 struct sample sample; 267 int32_t min_perf_ratio; 268 int32_t max_perf_ratio; 269 #ifdef CONFIG_ACPI 270 struct acpi_processor_performance acpi_perf_data; 271 bool valid_pss_table; 272 #endif 273 unsigned int iowait_boost; 274 s16 epp_powersave; 275 s16 epp_policy; 276 s16 epp_default; 277 s16 epp_saved; 278 }; 279 280 static struct cpudata **all_cpu_data; 281 282 /** 283 * struct pstate_adjust_policy - Stores static PID configuration data 284 * @sample_rate_ms: PID calculation sample rate in ms 285 * @sample_rate_ns: Sample rate calculation in ns 286 * @deadband: PID deadband 287 * @setpoint: PID Setpoint 288 * @p_gain_pct: PID proportional gain 289 * @i_gain_pct: PID integral gain 290 * @d_gain_pct: PID derivative gain 291 * 292 * Stores per CPU model static PID configuration data. 293 */ 294 struct pstate_adjust_policy { 295 int sample_rate_ms; 296 s64 sample_rate_ns; 297 int deadband; 298 int setpoint; 299 int p_gain_pct; 300 int d_gain_pct; 301 int i_gain_pct; 302 }; 303 304 /** 305 * struct pstate_funcs - Per CPU model specific callbacks 306 * @get_max: Callback to get maximum non turbo effective P state 307 * @get_max_physical: Callback to get maximum non turbo physical P state 308 * @get_min: Callback to get minimum P state 309 * @get_turbo: Callback to get turbo P state 310 * @get_scaling: Callback to get frequency scaling factor 311 * @get_val: Callback to convert P state to actual MSR write value 312 * @get_vid: Callback to get VID data for Atom platforms 313 * @update_util: Active mode utilization update callback. 314 * 315 * Core and Atom CPU models have different way to get P State limits. This 316 * structure is used to store those callbacks. 
317 */ 318 struct pstate_funcs { 319 int (*get_max)(void); 320 int (*get_max_physical)(void); 321 int (*get_min)(void); 322 int (*get_turbo)(void); 323 int (*get_scaling)(void); 324 u64 (*get_val)(struct cpudata*, int pstate); 325 void (*get_vid)(struct cpudata *); 326 void (*update_util)(struct update_util_data *data, u64 time, 327 unsigned int flags); 328 }; 329 330 static struct pstate_funcs pstate_funcs __read_mostly; 331 static struct pstate_adjust_policy pid_params __read_mostly = { 332 .sample_rate_ms = 10, 333 .sample_rate_ns = 10 * NSEC_PER_MSEC, 334 .deadband = 0, 335 .setpoint = 97, 336 .p_gain_pct = 20, 337 .d_gain_pct = 0, 338 .i_gain_pct = 0, 339 }; 340 341 static int hwp_active __read_mostly; 342 static bool per_cpu_limits __read_mostly; 343 344 static struct cpufreq_driver *intel_pstate_driver __read_mostly; 345 346 #ifdef CONFIG_ACPI 347 static bool acpi_ppc; 348 #endif 349 350 static struct global_params global; 351 352 static DEFINE_MUTEX(intel_pstate_driver_lock); 353 static DEFINE_MUTEX(intel_pstate_limits_lock); 354 355 #ifdef CONFIG_ACPI 356 357 static bool intel_pstate_get_ppc_enable_status(void) 358 { 359 if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER || 360 acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER) 361 return true; 362 363 return acpi_ppc; 364 } 365 366 #ifdef CONFIG_ACPI_CPPC_LIB 367 368 /* The work item is needed to avoid CPU hotplug locking issues */ 369 static void intel_pstste_sched_itmt_work_fn(struct work_struct *work) 370 { 371 sched_set_itmt_support(); 372 } 373 374 static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn); 375 376 static void intel_pstate_set_itmt_prio(int cpu) 377 { 378 struct cppc_perf_caps cppc_perf; 379 static u32 max_highest_perf = 0, min_highest_perf = U32_MAX; 380 int ret; 381 382 ret = cppc_get_perf_caps(cpu, &cppc_perf); 383 if (ret) 384 return; 385 386 /* 387 * The priorities can be set regardless of whether or not 388 * sched_set_itmt_support(true) has been called and it is valid to 389 * update them at any time after it has been called. 390 */ 391 sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu); 392 393 if (max_highest_perf <= min_highest_perf) { 394 if (cppc_perf.highest_perf > max_highest_perf) 395 max_highest_perf = cppc_perf.highest_perf; 396 397 if (cppc_perf.highest_perf < min_highest_perf) 398 min_highest_perf = cppc_perf.highest_perf; 399 400 if (max_highest_perf > min_highest_perf) { 401 /* 402 * This code can be run during CPU online under the 403 * CPU hotplug locks, so sched_set_itmt_support() 404 * cannot be called from here. Queue up a work item 405 * to invoke it. 406 */ 407 schedule_work(&sched_itmt_work); 408 } 409 } 410 } 411 #else 412 static void intel_pstate_set_itmt_prio(int cpu) 413 { 414 } 415 #endif 416 417 static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 418 { 419 struct cpudata *cpu; 420 int ret; 421 int i; 422 423 if (hwp_active) { 424 intel_pstate_set_itmt_prio(policy->cpu); 425 return; 426 } 427 428 if (!intel_pstate_get_ppc_enable_status()) 429 return; 430 431 cpu = all_cpu_data[policy->cpu]; 432 433 ret = acpi_processor_register_performance(&cpu->acpi_perf_data, 434 policy->cpu); 435 if (ret) 436 return; 437 438 /* 439 * Check if the control value in _PSS is for PERF_CTL MSR, which should 440 * guarantee that the states returned by it map to the states in our 441 * list directly. 
442 */ 443 if (cpu->acpi_perf_data.control_register.space_id != 444 ACPI_ADR_SPACE_FIXED_HARDWARE) 445 goto err; 446 447 /* 448 * If there is only one entry _PSS, simply ignore _PSS and continue as 449 * usual without taking _PSS into account 450 */ 451 if (cpu->acpi_perf_data.state_count < 2) 452 goto err; 453 454 pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu); 455 for (i = 0; i < cpu->acpi_perf_data.state_count; i++) { 456 pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n", 457 (i == cpu->acpi_perf_data.state ? '*' : ' '), i, 458 (u32) cpu->acpi_perf_data.states[i].core_frequency, 459 (u32) cpu->acpi_perf_data.states[i].power, 460 (u32) cpu->acpi_perf_data.states[i].control); 461 } 462 463 /* 464 * The _PSS table doesn't contain whole turbo frequency range. 465 * This just contains +1 MHZ above the max non turbo frequency, 466 * with control value corresponding to max turbo ratio. But 467 * when cpufreq set policy is called, it will call with this 468 * max frequency, which will cause a reduced performance as 469 * this driver uses real max turbo frequency as the max 470 * frequency. So correct this frequency in _PSS table to 471 * correct max turbo frequency based on the turbo state. 472 * Also need to convert to MHz as _PSS freq is in MHz. 473 */ 474 if (!global.turbo_disabled) 475 cpu->acpi_perf_data.states[0].core_frequency = 476 policy->cpuinfo.max_freq / 1000; 477 cpu->valid_pss_table = true; 478 pr_debug("_PPC limits will be enforced\n"); 479 480 return; 481 482 err: 483 cpu->valid_pss_table = false; 484 acpi_processor_unregister_performance(policy->cpu); 485 } 486 487 static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 488 { 489 struct cpudata *cpu; 490 491 cpu = all_cpu_data[policy->cpu]; 492 if (!cpu->valid_pss_table) 493 return; 494 495 acpi_processor_unregister_performance(policy->cpu); 496 } 497 #else 498 static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 499 { 500 } 501 502 static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 503 { 504 } 505 #endif 506 507 static signed int pid_calc(struct _pid *pid, int32_t busy) 508 { 509 signed int result; 510 int32_t pterm, dterm, fp_error; 511 int32_t integral_limit; 512 513 fp_error = pid->setpoint - busy; 514 515 if (abs(fp_error) <= pid->deadband) 516 return 0; 517 518 pterm = mul_fp(pid->p_gain, fp_error); 519 520 pid->integral += fp_error; 521 522 /* 523 * We limit the integral here so that it will never 524 * get higher than 30. This prevents it from becoming 525 * too large an input over long periods of time and allows 526 * it to get factored out sooner. 527 * 528 * The value of 30 was chosen through experimentation. 
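 *
 * As a worked example with the default tuning declared below (setpoint
 * 97, p_gain_pct 20, i_gain_pct and d_gain_pct 0) and an assumed busy
 * input of int_tofp(80): fp_error = int_tofp(17) = 4352, pterm =
 * mul_fp(percent_fp(20), 4352) = (51 * 4352) >> 8 = 867, the integral
 * and derivative terms contribute nothing, and after the half-LSB
 * rounding term the function returns fp_toint(867 + 128) = 3.  The
 * caller then requests a P-state three steps below the current one,
 * since the core was less busy than the setpoint.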
529 */ 530 integral_limit = int_tofp(30); 531 if (pid->integral > integral_limit) 532 pid->integral = integral_limit; 533 if (pid->integral < -integral_limit) 534 pid->integral = -integral_limit; 535 536 dterm = mul_fp(pid->d_gain, fp_error - pid->last_err); 537 pid->last_err = fp_error; 538 539 result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; 540 result = result + (1 << (FRAC_BITS-1)); 541 return (signed int)fp_toint(result); 542 } 543 544 static inline void intel_pstate_pid_reset(struct cpudata *cpu) 545 { 546 struct _pid *pid = &cpu->pid; 547 548 pid->p_gain = percent_fp(pid_params.p_gain_pct); 549 pid->d_gain = percent_fp(pid_params.d_gain_pct); 550 pid->i_gain = percent_fp(pid_params.i_gain_pct); 551 pid->setpoint = int_tofp(pid_params.setpoint); 552 pid->last_err = pid->setpoint - int_tofp(100); 553 pid->deadband = int_tofp(pid_params.deadband); 554 pid->integral = 0; 555 } 556 557 static inline void update_turbo_state(void) 558 { 559 u64 misc_en; 560 struct cpudata *cpu; 561 562 cpu = all_cpu_data[0]; 563 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); 564 global.turbo_disabled = 565 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || 566 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); 567 } 568 569 static int min_perf_pct_min(void) 570 { 571 struct cpudata *cpu = all_cpu_data[0]; 572 int turbo_pstate = cpu->pstate.turbo_pstate; 573 574 return turbo_pstate ? 575 DIV_ROUND_UP(cpu->pstate.min_pstate * 100, turbo_pstate) : 0; 576 } 577 578 static s16 intel_pstate_get_epb(struct cpudata *cpu_data) 579 { 580 u64 epb; 581 int ret; 582 583 if (!static_cpu_has(X86_FEATURE_EPB)) 584 return -ENXIO; 585 586 ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); 587 if (ret) 588 return (s16)ret; 589 590 return (s16)(epb & 0x0f); 591 } 592 593 static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data) 594 { 595 s16 epp; 596 597 if (static_cpu_has(X86_FEATURE_HWP_EPP)) { 598 /* 599 * When hwp_req_data is 0, means that caller didn't read 600 * MSR_HWP_REQUEST, so need to read and get EPP. 
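 *
 * The EPP field lives in bits 31:24 of MSR_HWP_REQUEST, which is why the
 * extraction below is (hwp_req_data >> 24) & 0xff.  For an assumed
 * register value of 0x80002408 that yields EPP 0x80, the start of the
 * balance_power range described in intel_pstate_get_energy_pref_index().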
601 */ 602 if (!hwp_req_data) { 603 epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, 604 &hwp_req_data); 605 if (epp) 606 return epp; 607 } 608 epp = (hwp_req_data >> 24) & 0xff; 609 } else { 610 /* When there is no EPP present, HWP uses EPB settings */ 611 epp = intel_pstate_get_epb(cpu_data); 612 } 613 614 return epp; 615 } 616 617 static int intel_pstate_set_epb(int cpu, s16 pref) 618 { 619 u64 epb; 620 int ret; 621 622 if (!static_cpu_has(X86_FEATURE_EPB)) 623 return -ENXIO; 624 625 ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); 626 if (ret) 627 return ret; 628 629 epb = (epb & ~0x0f) | pref; 630 wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb); 631 632 return 0; 633 } 634 635 /* 636 * EPP/EPB display strings corresponding to EPP index in the 637 * energy_perf_strings[] 638 * index String 639 *------------------------------------- 640 * 0 default 641 * 1 performance 642 * 2 balance_performance 643 * 3 balance_power 644 * 4 power 645 */ 646 static const char * const energy_perf_strings[] = { 647 "default", 648 "performance", 649 "balance_performance", 650 "balance_power", 651 "power", 652 NULL 653 }; 654 655 static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data) 656 { 657 s16 epp; 658 int index = -EINVAL; 659 660 epp = intel_pstate_get_epp(cpu_data, 0); 661 if (epp < 0) 662 return epp; 663 664 if (static_cpu_has(X86_FEATURE_HWP_EPP)) { 665 /* 666 * Range: 667 * 0x00-0x3F : Performance 668 * 0x40-0x7F : Balance performance 669 * 0x80-0xBF : Balance power 670 * 0xC0-0xFF : Power 671 * The EPP is a 8 bit value, but our ranges restrict the 672 * value which can be set. Here only using top two bits 673 * effectively. 674 */ 675 index = (epp >> 6) + 1; 676 } else if (static_cpu_has(X86_FEATURE_EPB)) { 677 /* 678 * Range: 679 * 0x00-0x03 : Performance 680 * 0x04-0x07 : Balance performance 681 * 0x08-0x0B : Balance power 682 * 0x0C-0x0F : Power 683 * The EPB is a 4 bit value, but our ranges restrict the 684 * value which can be set. Here only using top two bits 685 * effectively. 686 */ 687 index = (epp >> 2) + 1; 688 } 689 690 return index; 691 } 692 693 static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data, 694 int pref_index) 695 { 696 int epp = -EINVAL; 697 int ret; 698 699 if (!pref_index) 700 epp = cpu_data->epp_default; 701 702 mutex_lock(&intel_pstate_limits_lock); 703 704 if (static_cpu_has(X86_FEATURE_HWP_EPP)) { 705 u64 value; 706 707 ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value); 708 if (ret) 709 goto return_pref; 710 711 value &= ~GENMASK_ULL(31, 24); 712 713 /* 714 * If epp is not default, convert from index into 715 * energy_perf_strings to epp value, by shifting 6 716 * bits left to use only top two bits in epp. 717 * The resultant epp need to shifted by 24 bits to 718 * epp position in MSR_HWP_REQUEST. 
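 *
 * For example, pref_index 3 ("balance_power") becomes
 * epp = (3 - 1) << 6 = 0x80, and the MSR update below is
 * value |= (u64)0x80 << 24 once the old EPP bits have been cleared
 * with the GENMASK_ULL(31, 24) mask above.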
719 */ 720 if (epp == -EINVAL) 721 epp = (pref_index - 1) << 6; 722 723 value |= (u64)epp << 24; 724 ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value); 725 } else { 726 if (epp == -EINVAL) 727 epp = (pref_index - 1) << 2; 728 ret = intel_pstate_set_epb(cpu_data->cpu, epp); 729 } 730 return_pref: 731 mutex_unlock(&intel_pstate_limits_lock); 732 733 return ret; 734 } 735 736 static ssize_t show_energy_performance_available_preferences( 737 struct cpufreq_policy *policy, char *buf) 738 { 739 int i = 0; 740 int ret = 0; 741 742 while (energy_perf_strings[i] != NULL) 743 ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]); 744 745 ret += sprintf(&buf[ret], "\n"); 746 747 return ret; 748 } 749 750 cpufreq_freq_attr_ro(energy_performance_available_preferences); 751 752 static ssize_t store_energy_performance_preference( 753 struct cpufreq_policy *policy, const char *buf, size_t count) 754 { 755 struct cpudata *cpu_data = all_cpu_data[policy->cpu]; 756 char str_preference[21]; 757 int ret, i = 0; 758 759 ret = sscanf(buf, "%20s", str_preference); 760 if (ret != 1) 761 return -EINVAL; 762 763 while (energy_perf_strings[i] != NULL) { 764 if (!strcmp(str_preference, energy_perf_strings[i])) { 765 intel_pstate_set_energy_pref_index(cpu_data, i); 766 return count; 767 } 768 ++i; 769 } 770 771 return -EINVAL; 772 } 773 774 static ssize_t show_energy_performance_preference( 775 struct cpufreq_policy *policy, char *buf) 776 { 777 struct cpudata *cpu_data = all_cpu_data[policy->cpu]; 778 int preference; 779 780 preference = intel_pstate_get_energy_pref_index(cpu_data); 781 if (preference < 0) 782 return preference; 783 784 return sprintf(buf, "%s\n", energy_perf_strings[preference]); 785 } 786 787 cpufreq_freq_attr_rw(energy_performance_preference); 788 789 static struct freq_attr *hwp_cpufreq_attrs[] = { 790 &energy_performance_preference, 791 &energy_performance_available_preferences, 792 NULL, 793 }; 794 795 static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max, 796 int *current_max) 797 { 798 u64 cap; 799 800 rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); 801 if (global.no_turbo) 802 *current_max = HWP_GUARANTEED_PERF(cap); 803 else 804 *current_max = HWP_HIGHEST_PERF(cap); 805 806 *phy_max = HWP_HIGHEST_PERF(cap); 807 } 808 809 static void intel_pstate_hwp_set(unsigned int cpu) 810 { 811 struct cpudata *cpu_data = all_cpu_data[cpu]; 812 int max, min; 813 u64 value; 814 s16 epp; 815 816 max = cpu_data->max_perf_ratio; 817 min = cpu_data->min_perf_ratio; 818 819 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) 820 min = max; 821 822 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); 823 824 value &= ~HWP_MIN_PERF(~0L); 825 value |= HWP_MIN_PERF(min); 826 827 value &= ~HWP_MAX_PERF(~0L); 828 value |= HWP_MAX_PERF(max); 829 830 if (cpu_data->epp_policy == cpu_data->policy) 831 goto skip_epp; 832 833 cpu_data->epp_policy = cpu_data->policy; 834 835 if (cpu_data->epp_saved >= 0) { 836 epp = cpu_data->epp_saved; 837 cpu_data->epp_saved = -EINVAL; 838 goto update_epp; 839 } 840 841 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) { 842 epp = intel_pstate_get_epp(cpu_data, value); 843 cpu_data->epp_powersave = epp; 844 /* If EPP read was failed, then don't try to write */ 845 if (epp < 0) 846 goto skip_epp; 847 848 epp = 0; 849 } else { 850 /* skip setting EPP, when saved value is invalid */ 851 if (cpu_data->epp_powersave < 0) 852 goto skip_epp; 853 854 /* 855 * No need to restore EPP when it is not zero. 
This 856 * means: 857 * - Policy is not changed 858 * - user has manually changed 859 * - Error reading EPB 860 */ 861 epp = intel_pstate_get_epp(cpu_data, value); 862 if (epp) 863 goto skip_epp; 864 865 epp = cpu_data->epp_powersave; 866 } 867 update_epp: 868 if (static_cpu_has(X86_FEATURE_HWP_EPP)) { 869 value &= ~GENMASK_ULL(31, 24); 870 value |= (u64)epp << 24; 871 } else { 872 intel_pstate_set_epb(cpu, epp); 873 } 874 skip_epp: 875 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); 876 } 877 878 static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy) 879 { 880 struct cpudata *cpu_data = all_cpu_data[policy->cpu]; 881 882 if (!hwp_active) 883 return 0; 884 885 cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0); 886 887 return 0; 888 } 889 890 static int intel_pstate_resume(struct cpufreq_policy *policy) 891 { 892 if (!hwp_active) 893 return 0; 894 895 mutex_lock(&intel_pstate_limits_lock); 896 897 all_cpu_data[policy->cpu]->epp_policy = 0; 898 intel_pstate_hwp_set(policy->cpu); 899 900 mutex_unlock(&intel_pstate_limits_lock); 901 902 return 0; 903 } 904 905 static void intel_pstate_update_policies(void) 906 { 907 int cpu; 908 909 for_each_possible_cpu(cpu) 910 cpufreq_update_policy(cpu); 911 } 912 913 /************************** debugfs begin ************************/ 914 static int pid_param_set(void *data, u64 val) 915 { 916 unsigned int cpu; 917 918 *(u32 *)data = val; 919 pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC; 920 for_each_possible_cpu(cpu) 921 if (all_cpu_data[cpu]) 922 intel_pstate_pid_reset(all_cpu_data[cpu]); 923 924 return 0; 925 } 926 927 static int pid_param_get(void *data, u64 *val) 928 { 929 *val = *(u32 *)data; 930 return 0; 931 } 932 DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n"); 933 934 static struct dentry *debugfs_parent; 935 936 struct pid_param { 937 char *name; 938 void *value; 939 struct dentry *dentry; 940 }; 941 942 static struct pid_param pid_files[] = { 943 {"sample_rate_ms", &pid_params.sample_rate_ms, }, 944 {"d_gain_pct", &pid_params.d_gain_pct, }, 945 {"i_gain_pct", &pid_params.i_gain_pct, }, 946 {"deadband", &pid_params.deadband, }, 947 {"setpoint", &pid_params.setpoint, }, 948 {"p_gain_pct", &pid_params.p_gain_pct, }, 949 {NULL, NULL, } 950 }; 951 952 static void intel_pstate_debug_expose_params(void) 953 { 954 int i; 955 956 debugfs_parent = debugfs_create_dir("pstate_snb", NULL); 957 if (IS_ERR_OR_NULL(debugfs_parent)) 958 return; 959 960 for (i = 0; pid_files[i].name; i++) { 961 struct dentry *dentry; 962 963 dentry = debugfs_create_file(pid_files[i].name, 0660, 964 debugfs_parent, pid_files[i].value, 965 &fops_pid_param); 966 if (!IS_ERR(dentry)) 967 pid_files[i].dentry = dentry; 968 } 969 } 970 971 static void intel_pstate_debug_hide_params(void) 972 { 973 int i; 974 975 if (IS_ERR_OR_NULL(debugfs_parent)) 976 return; 977 978 for (i = 0; pid_files[i].name; i++) { 979 debugfs_remove(pid_files[i].dentry); 980 pid_files[i].dentry = NULL; 981 } 982 983 debugfs_remove(debugfs_parent); 984 debugfs_parent = NULL; 985 } 986 987 /************************** debugfs end ************************/ 988 989 /************************** sysfs begin ************************/ 990 #define show_one(file_name, object) \ 991 static ssize_t show_##file_name \ 992 (struct kobject *kobj, struct attribute *attr, char *buf) \ 993 { \ 994 return sprintf(buf, "%u\n", global.object); \ 995 } 996 997 static ssize_t intel_pstate_show_status(char *buf); 998 static int intel_pstate_update_status(const char 
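/*
 * For reference, a sketch of what the show_one() macro above expands to
 * for max_perf_pct; the real definition is generated further down by
 * show_one(max_perf_pct, max_perf_pct) (kept out of the build here):
 */
#if 0
static ssize_t show_max_perf_pct(struct kobject *kobj, struct attribute *attr,
				 char *buf)
{
	return sprintf(buf, "%u\n", global.max_perf_pct);
}
#endif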
*buf, size_t size); 999 1000 static ssize_t show_status(struct kobject *kobj, 1001 struct attribute *attr, char *buf) 1002 { 1003 ssize_t ret; 1004 1005 mutex_lock(&intel_pstate_driver_lock); 1006 ret = intel_pstate_show_status(buf); 1007 mutex_unlock(&intel_pstate_driver_lock); 1008 1009 return ret; 1010 } 1011 1012 static ssize_t store_status(struct kobject *a, struct attribute *b, 1013 const char *buf, size_t count) 1014 { 1015 char *p = memchr(buf, '\n', count); 1016 int ret; 1017 1018 mutex_lock(&intel_pstate_driver_lock); 1019 ret = intel_pstate_update_status(buf, p ? p - buf : count); 1020 mutex_unlock(&intel_pstate_driver_lock); 1021 1022 return ret < 0 ? ret : count; 1023 } 1024 1025 static ssize_t show_turbo_pct(struct kobject *kobj, 1026 struct attribute *attr, char *buf) 1027 { 1028 struct cpudata *cpu; 1029 int total, no_turbo, turbo_pct; 1030 uint32_t turbo_fp; 1031 1032 mutex_lock(&intel_pstate_driver_lock); 1033 1034 if (!intel_pstate_driver) { 1035 mutex_unlock(&intel_pstate_driver_lock); 1036 return -EAGAIN; 1037 } 1038 1039 cpu = all_cpu_data[0]; 1040 1041 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; 1042 no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1; 1043 turbo_fp = div_fp(no_turbo, total); 1044 turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100))); 1045 1046 mutex_unlock(&intel_pstate_driver_lock); 1047 1048 return sprintf(buf, "%u\n", turbo_pct); 1049 } 1050 1051 static ssize_t show_num_pstates(struct kobject *kobj, 1052 struct attribute *attr, char *buf) 1053 { 1054 struct cpudata *cpu; 1055 int total; 1056 1057 mutex_lock(&intel_pstate_driver_lock); 1058 1059 if (!intel_pstate_driver) { 1060 mutex_unlock(&intel_pstate_driver_lock); 1061 return -EAGAIN; 1062 } 1063 1064 cpu = all_cpu_data[0]; 1065 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; 1066 1067 mutex_unlock(&intel_pstate_driver_lock); 1068 1069 return sprintf(buf, "%u\n", total); 1070 } 1071 1072 static ssize_t show_no_turbo(struct kobject *kobj, 1073 struct attribute *attr, char *buf) 1074 { 1075 ssize_t ret; 1076 1077 mutex_lock(&intel_pstate_driver_lock); 1078 1079 if (!intel_pstate_driver) { 1080 mutex_unlock(&intel_pstate_driver_lock); 1081 return -EAGAIN; 1082 } 1083 1084 update_turbo_state(); 1085 if (global.turbo_disabled) 1086 ret = sprintf(buf, "%u\n", global.turbo_disabled); 1087 else 1088 ret = sprintf(buf, "%u\n", global.no_turbo); 1089 1090 mutex_unlock(&intel_pstate_driver_lock); 1091 1092 return ret; 1093 } 1094 1095 static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, 1096 const char *buf, size_t count) 1097 { 1098 unsigned int input; 1099 int ret; 1100 1101 ret = sscanf(buf, "%u", &input); 1102 if (ret != 1) 1103 return -EINVAL; 1104 1105 mutex_lock(&intel_pstate_driver_lock); 1106 1107 if (!intel_pstate_driver) { 1108 mutex_unlock(&intel_pstate_driver_lock); 1109 return -EAGAIN; 1110 } 1111 1112 mutex_lock(&intel_pstate_limits_lock); 1113 1114 update_turbo_state(); 1115 if (global.turbo_disabled) { 1116 pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); 1117 mutex_unlock(&intel_pstate_limits_lock); 1118 mutex_unlock(&intel_pstate_driver_lock); 1119 return -EPERM; 1120 } 1121 1122 global.no_turbo = clamp_t(int, input, 0, 1); 1123 1124 if (global.no_turbo) { 1125 struct cpudata *cpu = all_cpu_data[0]; 1126 int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate; 1127 1128 /* Squash the global minimum into the permitted range. 
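 *
 * For example, with an assumed max_pstate of 24 and turbo_pstate of 36,
 * pct = 24 * 100 / 36 = 66, so a previously configured global.min_perf_pct
 * of 80 is lowered to 66, keeping the minimum below the new effective
 * maximum once turbo is disabled.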
*/ 1129 if (global.min_perf_pct > pct) 1130 global.min_perf_pct = pct; 1131 } 1132 1133 mutex_unlock(&intel_pstate_limits_lock); 1134 1135 intel_pstate_update_policies(); 1136 1137 mutex_unlock(&intel_pstate_driver_lock); 1138 1139 return count; 1140 } 1141 1142 static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, 1143 const char *buf, size_t count) 1144 { 1145 unsigned int input; 1146 int ret; 1147 1148 ret = sscanf(buf, "%u", &input); 1149 if (ret != 1) 1150 return -EINVAL; 1151 1152 mutex_lock(&intel_pstate_driver_lock); 1153 1154 if (!intel_pstate_driver) { 1155 mutex_unlock(&intel_pstate_driver_lock); 1156 return -EAGAIN; 1157 } 1158 1159 mutex_lock(&intel_pstate_limits_lock); 1160 1161 global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100); 1162 1163 mutex_unlock(&intel_pstate_limits_lock); 1164 1165 intel_pstate_update_policies(); 1166 1167 mutex_unlock(&intel_pstate_driver_lock); 1168 1169 return count; 1170 } 1171 1172 static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, 1173 const char *buf, size_t count) 1174 { 1175 unsigned int input; 1176 int ret; 1177 1178 ret = sscanf(buf, "%u", &input); 1179 if (ret != 1) 1180 return -EINVAL; 1181 1182 mutex_lock(&intel_pstate_driver_lock); 1183 1184 if (!intel_pstate_driver) { 1185 mutex_unlock(&intel_pstate_driver_lock); 1186 return -EAGAIN; 1187 } 1188 1189 mutex_lock(&intel_pstate_limits_lock); 1190 1191 global.min_perf_pct = clamp_t(int, input, 1192 min_perf_pct_min(), global.max_perf_pct); 1193 1194 mutex_unlock(&intel_pstate_limits_lock); 1195 1196 intel_pstate_update_policies(); 1197 1198 mutex_unlock(&intel_pstate_driver_lock); 1199 1200 return count; 1201 } 1202 1203 show_one(max_perf_pct, max_perf_pct); 1204 show_one(min_perf_pct, min_perf_pct); 1205 1206 define_one_global_rw(status); 1207 define_one_global_rw(no_turbo); 1208 define_one_global_rw(max_perf_pct); 1209 define_one_global_rw(min_perf_pct); 1210 define_one_global_ro(turbo_pct); 1211 define_one_global_ro(num_pstates); 1212 1213 static struct attribute *intel_pstate_attributes[] = { 1214 &status.attr, 1215 &no_turbo.attr, 1216 &turbo_pct.attr, 1217 &num_pstates.attr, 1218 NULL 1219 }; 1220 1221 static struct attribute_group intel_pstate_attr_group = { 1222 .attrs = intel_pstate_attributes, 1223 }; 1224 1225 static void __init intel_pstate_sysfs_expose_params(void) 1226 { 1227 struct kobject *intel_pstate_kobject; 1228 int rc; 1229 1230 intel_pstate_kobject = kobject_create_and_add("intel_pstate", 1231 &cpu_subsys.dev_root->kobj); 1232 if (WARN_ON(!intel_pstate_kobject)) 1233 return; 1234 1235 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group); 1236 if (WARN_ON(rc)) 1237 return; 1238 1239 /* 1240 * If per cpu limits are enforced there are no global limits, so 1241 * return without creating max/min_perf_pct attributes 1242 */ 1243 if (per_cpu_limits) 1244 return; 1245 1246 rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr); 1247 WARN_ON(rc); 1248 1249 rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr); 1250 WARN_ON(rc); 1251 1252 } 1253 /************************** sysfs end ************************/ 1254 1255 static void intel_pstate_hwp_enable(struct cpudata *cpudata) 1256 { 1257 /* First disable HWP notification interrupt as we don't process them */ 1258 if (static_cpu_has(X86_FEATURE_HWP_NOTIFY)) 1259 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); 1260 1261 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); 1262 cpudata->epp_policy = 0; 1263 if 
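/*
 * A worked illustration of the sysfs clamping in store_min_perf_pct()
 * above, with assumed limits: for min_pstate 8 and turbo_pstate 36,
 * min_perf_pct_min() returns DIV_ROUND_UP(8 * 100, 36) = 23, so writing
 * 10 to min_perf_pct is raised to 23, while anything above max_perf_pct
 * is pulled down to max_perf_pct by the clamp_t() call.
 */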
(cpudata->epp_default == -EINVAL) 1264 cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); 1265 } 1266 1267 #define MSR_IA32_POWER_CTL_BIT_EE 19 1268 1269 /* Disable energy efficiency optimization */ 1270 static void intel_pstate_disable_ee(int cpu) 1271 { 1272 u64 power_ctl; 1273 int ret; 1274 1275 ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl); 1276 if (ret) 1277 return; 1278 1279 if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) { 1280 pr_info("Disabling energy efficiency optimization\n"); 1281 power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE); 1282 wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl); 1283 } 1284 } 1285 1286 static int atom_get_min_pstate(void) 1287 { 1288 u64 value; 1289 1290 rdmsrl(MSR_ATOM_CORE_RATIOS, value); 1291 return (value >> 8) & 0x7F; 1292 } 1293 1294 static int atom_get_max_pstate(void) 1295 { 1296 u64 value; 1297 1298 rdmsrl(MSR_ATOM_CORE_RATIOS, value); 1299 return (value >> 16) & 0x7F; 1300 } 1301 1302 static int atom_get_turbo_pstate(void) 1303 { 1304 u64 value; 1305 1306 rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value); 1307 return value & 0x7F; 1308 } 1309 1310 static u64 atom_get_val(struct cpudata *cpudata, int pstate) 1311 { 1312 u64 val; 1313 int32_t vid_fp; 1314 u32 vid; 1315 1316 val = (u64)pstate << 8; 1317 if (global.no_turbo && !global.turbo_disabled) 1318 val |= (u64)1 << 32; 1319 1320 vid_fp = cpudata->vid.min + mul_fp( 1321 int_tofp(pstate - cpudata->pstate.min_pstate), 1322 cpudata->vid.ratio); 1323 1324 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); 1325 vid = ceiling_fp(vid_fp); 1326 1327 if (pstate > cpudata->pstate.max_pstate) 1328 vid = cpudata->vid.turbo; 1329 1330 return val | vid; 1331 } 1332 1333 static int silvermont_get_scaling(void) 1334 { 1335 u64 value; 1336 int i; 1337 /* Defined in Table 35-6 from SDM (Sept 2015) */ 1338 static int silvermont_freq_table[] = { 1339 83300, 100000, 133300, 116700, 80000}; 1340 1341 rdmsrl(MSR_FSB_FREQ, value); 1342 i = value & 0x7; 1343 WARN_ON(i > 4); 1344 1345 return silvermont_freq_table[i]; 1346 } 1347 1348 static int airmont_get_scaling(void) 1349 { 1350 u64 value; 1351 int i; 1352 /* Defined in Table 35-10 from SDM (Sept 2015) */ 1353 static int airmont_freq_table[] = { 1354 83300, 100000, 133300, 116700, 80000, 1355 93300, 90000, 88900, 87500}; 1356 1357 rdmsrl(MSR_FSB_FREQ, value); 1358 i = value & 0xF; 1359 WARN_ON(i > 8); 1360 1361 return airmont_freq_table[i]; 1362 } 1363 1364 static void atom_get_vid(struct cpudata *cpudata) 1365 { 1366 u64 value; 1367 1368 rdmsrl(MSR_ATOM_CORE_VIDS, value); 1369 cpudata->vid.min = int_tofp((value >> 8) & 0x7f); 1370 cpudata->vid.max = int_tofp((value >> 16) & 0x7f); 1371 cpudata->vid.ratio = div_fp( 1372 cpudata->vid.max - cpudata->vid.min, 1373 int_tofp(cpudata->pstate.max_pstate - 1374 cpudata->pstate.min_pstate)); 1375 1376 rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value); 1377 cpudata->vid.turbo = value & 0x7f; 1378 } 1379 1380 static int core_get_min_pstate(void) 1381 { 1382 u64 value; 1383 1384 rdmsrl(MSR_PLATFORM_INFO, value); 1385 return (value >> 40) & 0xFF; 1386 } 1387 1388 static int core_get_max_pstate_physical(void) 1389 { 1390 u64 value; 1391 1392 rdmsrl(MSR_PLATFORM_INFO, value); 1393 return (value >> 8) & 0xFF; 1394 } 1395 1396 static int core_get_tdp_ratio(u64 plat_info) 1397 { 1398 /* Check how many TDP levels present */ 1399 if (plat_info & 0x600000000) { 1400 u64 tdp_ctrl; 1401 u64 tdp_ratio; 1402 int tdp_msr; 1403 int err; 1404 1405 /* Get the TDP level (0, 1, 2) to get ratios */ 1406 err = 
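/*
 * For context on the get_val() callbacks above: the value written to
 * MSR_IA32_PERF_CTL carries the requested ratio in bits 15:8 (hence
 * "pstate << 8"), bit 32 disengages turbo when no_turbo is set in software
 * while turbo is still enabled by the BIOS, and on Atom the low byte also
 * carries the interpolated VID.  With assumed values min_pstate 10,
 * vid.min int_tofp(0x20) and vid.ratio int_tofp(1), requesting pstate 12
 * gives vid_fp = int_tofp(0x22), so the register value is
 * (12 << 8) | 0x22 = 0xc22 (assuming it stays within [vid.min, vid.max]
 * and below max_pstate).
 */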
rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); 1407 if (err) 1408 return err; 1409 1410 /* TDP MSR are continuous starting at 0x648 */ 1411 tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03); 1412 err = rdmsrl_safe(tdp_msr, &tdp_ratio); 1413 if (err) 1414 return err; 1415 1416 /* For level 1 and 2, bits[23:16] contain the ratio */ 1417 if (tdp_ctrl & 0x03) 1418 tdp_ratio >>= 16; 1419 1420 tdp_ratio &= 0xff; /* ratios are only 8 bits long */ 1421 pr_debug("tdp_ratio %x\n", (int)tdp_ratio); 1422 1423 return (int)tdp_ratio; 1424 } 1425 1426 return -ENXIO; 1427 } 1428 1429 static int core_get_max_pstate(void) 1430 { 1431 u64 tar; 1432 u64 plat_info; 1433 int max_pstate; 1434 int tdp_ratio; 1435 int err; 1436 1437 rdmsrl(MSR_PLATFORM_INFO, plat_info); 1438 max_pstate = (plat_info >> 8) & 0xFF; 1439 1440 tdp_ratio = core_get_tdp_ratio(plat_info); 1441 if (tdp_ratio <= 0) 1442 return max_pstate; 1443 1444 if (hwp_active) { 1445 /* Turbo activation ratio is not used on HWP platforms */ 1446 return tdp_ratio; 1447 } 1448 1449 err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar); 1450 if (!err) { 1451 int tar_levels; 1452 1453 /* Do some sanity checking for safety */ 1454 tar_levels = tar & 0xff; 1455 if (tdp_ratio - 1 == tar_levels) { 1456 max_pstate = tar_levels; 1457 pr_debug("max_pstate=TAC %x\n", max_pstate); 1458 } 1459 } 1460 1461 return max_pstate; 1462 } 1463 1464 static int core_get_turbo_pstate(void) 1465 { 1466 u64 value; 1467 int nont, ret; 1468 1469 rdmsrl(MSR_TURBO_RATIO_LIMIT, value); 1470 nont = core_get_max_pstate(); 1471 ret = (value) & 255; 1472 if (ret <= nont) 1473 ret = nont; 1474 return ret; 1475 } 1476 1477 static inline int core_get_scaling(void) 1478 { 1479 return 100000; 1480 } 1481 1482 static u64 core_get_val(struct cpudata *cpudata, int pstate) 1483 { 1484 u64 val; 1485 1486 val = (u64)pstate << 8; 1487 if (global.no_turbo && !global.turbo_disabled) 1488 val |= (u64)1 << 32; 1489 1490 return val; 1491 } 1492 1493 static int knl_get_turbo_pstate(void) 1494 { 1495 u64 value; 1496 int nont, ret; 1497 1498 rdmsrl(MSR_TURBO_RATIO_LIMIT, value); 1499 nont = core_get_max_pstate(); 1500 ret = (((value) >> 8) & 0xFF); 1501 if (ret <= nont) 1502 ret = nont; 1503 return ret; 1504 } 1505 1506 static int intel_pstate_get_base_pstate(struct cpudata *cpu) 1507 { 1508 return global.no_turbo || global.turbo_disabled ? 1509 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; 1510 } 1511 1512 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) 1513 { 1514 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); 1515 cpu->pstate.current_pstate = pstate; 1516 /* 1517 * Generally, there is no guarantee that this code will always run on 1518 * the CPU being updated, so force the register update to run on the 1519 * right CPU. 
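 *
 * The pstate numbers handled here are the plain ratio values read from
 * MSR_PLATFORM_INFO and MSR_TURBO_RATIO_LIMIT above; multiplying by the
 * scaling factor (100000 kHz on core platforms) converts them into
 * cpufreq frequencies.  For an assumed max non-turbo ratio of 24 and
 * 1-core turbo ratio of 36 that is 2400000 kHz and 3600000 kHz
 * respectively.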
1520 */ 1521 wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, 1522 pstate_funcs.get_val(cpu, pstate)); 1523 } 1524 1525 static void intel_pstate_set_min_pstate(struct cpudata *cpu) 1526 { 1527 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); 1528 } 1529 1530 static void intel_pstate_max_within_limits(struct cpudata *cpu) 1531 { 1532 int pstate; 1533 1534 update_turbo_state(); 1535 pstate = intel_pstate_get_base_pstate(cpu); 1536 pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); 1537 intel_pstate_set_pstate(cpu, pstate); 1538 } 1539 1540 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) 1541 { 1542 cpu->pstate.min_pstate = pstate_funcs.get_min(); 1543 cpu->pstate.max_pstate = pstate_funcs.get_max(); 1544 cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical(); 1545 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); 1546 cpu->pstate.scaling = pstate_funcs.get_scaling(); 1547 cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling; 1548 cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; 1549 1550 if (pstate_funcs.get_vid) 1551 pstate_funcs.get_vid(cpu); 1552 1553 intel_pstate_set_min_pstate(cpu); 1554 } 1555 1556 static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu) 1557 { 1558 struct sample *sample = &cpu->sample; 1559 1560 sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf); 1561 } 1562 1563 static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) 1564 { 1565 u64 aperf, mperf; 1566 unsigned long flags; 1567 u64 tsc; 1568 1569 local_irq_save(flags); 1570 rdmsrl(MSR_IA32_APERF, aperf); 1571 rdmsrl(MSR_IA32_MPERF, mperf); 1572 tsc = rdtsc(); 1573 if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) { 1574 local_irq_restore(flags); 1575 return false; 1576 } 1577 local_irq_restore(flags); 1578 1579 cpu->last_sample_time = cpu->sample.time; 1580 cpu->sample.time = time; 1581 cpu->sample.aperf = aperf; 1582 cpu->sample.mperf = mperf; 1583 cpu->sample.tsc = tsc; 1584 cpu->sample.aperf -= cpu->prev_aperf; 1585 cpu->sample.mperf -= cpu->prev_mperf; 1586 cpu->sample.tsc -= cpu->prev_tsc; 1587 1588 cpu->prev_aperf = aperf; 1589 cpu->prev_mperf = mperf; 1590 cpu->prev_tsc = tsc; 1591 /* 1592 * First time this function is invoked in a given cycle, all of the 1593 * previous sample data fields are equal to zero or stale and they must 1594 * be populated with meaningful numbers for things to work, so assume 1595 * that sample.time will always be reset before setting the utilization 1596 * update hook and make the caller skip the sample then. 
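 *
 * Once two valid snapshots exist, intel_pstate_calc_avg_perf() above turns
 * the APERF/MPERF deltas into an extended fixed-point ratio.  With assumed
 * deltas aperf = 18000000 and mperf = 12000000 the ratio is
 * div_ext_fp(18000000, 12000000) = 24576 (1.5), and the get_avg_frequency()
 * helper that follows reports mul_ext_fp(24576, 24 * 100000) = 3600000 kHz
 * for a CPU whose max_pstate_physical is 24.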
1597 */ 1598 if (cpu->last_sample_time) { 1599 intel_pstate_calc_avg_perf(cpu); 1600 return true; 1601 } 1602 return false; 1603 } 1604 1605 static inline int32_t get_avg_frequency(struct cpudata *cpu) 1606 { 1607 return mul_ext_fp(cpu->sample.core_avg_perf, 1608 cpu->pstate.max_pstate_physical * cpu->pstate.scaling); 1609 } 1610 1611 static inline int32_t get_avg_pstate(struct cpudata *cpu) 1612 { 1613 return mul_ext_fp(cpu->pstate.max_pstate_physical, 1614 cpu->sample.core_avg_perf); 1615 } 1616 1617 static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) 1618 { 1619 struct sample *sample = &cpu->sample; 1620 int32_t busy_frac, boost; 1621 int target, avg_pstate; 1622 1623 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) 1624 return cpu->pstate.turbo_pstate; 1625 1626 busy_frac = div_fp(sample->mperf, sample->tsc); 1627 1628 boost = cpu->iowait_boost; 1629 cpu->iowait_boost >>= 1; 1630 1631 if (busy_frac < boost) 1632 busy_frac = boost; 1633 1634 sample->busy_scaled = busy_frac * 100; 1635 1636 target = global.no_turbo || global.turbo_disabled ? 1637 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; 1638 target += target >> 2; 1639 target = mul_fp(target, busy_frac); 1640 if (target < cpu->pstate.min_pstate) 1641 target = cpu->pstate.min_pstate; 1642 1643 /* 1644 * If the average P-state during the previous cycle was higher than the 1645 * current target, add 50% of the difference to the target to reduce 1646 * possible performance oscillations and offset possible performance 1647 * loss related to moving the workload from one CPU to another within 1648 * a package/module. 1649 */ 1650 avg_pstate = get_avg_pstate(cpu); 1651 if (avg_pstate > target) 1652 target += (avg_pstate - target) >> 1; 1653 1654 return target; 1655 } 1656 1657 static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) 1658 { 1659 int32_t perf_scaled, max_pstate, current_pstate, sample_ratio; 1660 u64 duration_ns; 1661 1662 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) 1663 return cpu->pstate.turbo_pstate; 1664 1665 /* 1666 * perf_scaled is the ratio of the average P-state during the last 1667 * sampling period to the P-state requested last time (in percent). 1668 * 1669 * That measures the system's response to the previous P-state 1670 * selection. 1671 */ 1672 max_pstate = cpu->pstate.max_pstate_physical; 1673 current_pstate = cpu->pstate.current_pstate; 1674 perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf, 1675 div_fp(100 * max_pstate, current_pstate)); 1676 1677 /* 1678 * Since our utilization update callback will not run unless we are 1679 * in C0, check if the actual elapsed time is significantly greater (3x) 1680 * than our sample interval. If it is, then we were idle for a long 1681 * enough period of time to adjust our performance metric. 
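 *
 * For comparison, the load-based path in get_target_pstate_use_cpu_load()
 * above works directly from the MPERF/TSC ratio: with assumed deltas
 * mperf = 6000000 and tsc = 24000000, busy_frac = div_fp(6000000, 24000000)
 * = 64 (0.25), and with a turbo_pstate of 36 the target is
 * mul_fp(36 + 36 / 4, 64) = mul_fp(45, 64) = 11, which is then kept at or
 * above min_pstate and possibly pulled up towards the average P-state of
 * the previous cycle.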
1682 */ 1683 duration_ns = cpu->sample.time - cpu->last_sample_time; 1684 if ((s64)duration_ns > pid_params.sample_rate_ns * 3) { 1685 sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns); 1686 perf_scaled = mul_fp(perf_scaled, sample_ratio); 1687 } else { 1688 sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc); 1689 if (sample_ratio < int_tofp(1)) 1690 perf_scaled = 0; 1691 } 1692 1693 cpu->sample.busy_scaled = perf_scaled; 1694 return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled); 1695 } 1696 1697 static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) 1698 { 1699 int max_pstate = intel_pstate_get_base_pstate(cpu); 1700 int min_pstate; 1701 1702 min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio); 1703 max_pstate = max(min_pstate, cpu->max_perf_ratio); 1704 return clamp_t(int, pstate, min_pstate, max_pstate); 1705 } 1706 1707 static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) 1708 { 1709 if (pstate == cpu->pstate.current_pstate) 1710 return; 1711 1712 cpu->pstate.current_pstate = pstate; 1713 wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); 1714 } 1715 1716 static void intel_pstate_adjust_pstate(struct cpudata *cpu, int target_pstate) 1717 { 1718 int from = cpu->pstate.current_pstate; 1719 struct sample *sample; 1720 1721 update_turbo_state(); 1722 1723 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); 1724 trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); 1725 intel_pstate_update_pstate(cpu, target_pstate); 1726 1727 sample = &cpu->sample; 1728 trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf), 1729 fp_toint(sample->busy_scaled), 1730 from, 1731 cpu->pstate.current_pstate, 1732 sample->mperf, 1733 sample->aperf, 1734 sample->tsc, 1735 get_avg_frequency(cpu), 1736 fp_toint(cpu->iowait_boost * 100)); 1737 } 1738 1739 static void intel_pstate_update_util_hwp(struct update_util_data *data, 1740 u64 time, unsigned int flags) 1741 { 1742 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 1743 u64 delta_ns = time - cpu->sample.time; 1744 1745 if ((s64)delta_ns >= INTEL_PSTATE_HWP_SAMPLING_INTERVAL) 1746 intel_pstate_sample(cpu, time); 1747 } 1748 1749 static void intel_pstate_update_util_pid(struct update_util_data *data, 1750 u64 time, unsigned int flags) 1751 { 1752 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 1753 u64 delta_ns = time - cpu->sample.time; 1754 1755 if ((s64)delta_ns < pid_params.sample_rate_ns) 1756 return; 1757 1758 if (intel_pstate_sample(cpu, time)) { 1759 int target_pstate; 1760 1761 target_pstate = get_target_pstate_use_performance(cpu); 1762 intel_pstate_adjust_pstate(cpu, target_pstate); 1763 } 1764 } 1765 1766 static void intel_pstate_update_util(struct update_util_data *data, u64 time, 1767 unsigned int flags) 1768 { 1769 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 1770 u64 delta_ns; 1771 1772 if (flags & SCHED_CPUFREQ_IOWAIT) { 1773 cpu->iowait_boost = int_tofp(1); 1774 } else if (cpu->iowait_boost) { 1775 /* Clear iowait_boost if the CPU may have been idle. 
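 *
 * The boost itself is set to int_tofp(1) == 256 on an I/O-wait wakeup and
 * halved each time a new sample is taken (256, 128, 64, ...), so it decays
 * within a few sampling intervals unless the CPU stays idle for more than
 * a tick, in which case it is dropped here outright.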
*/ 1776 delta_ns = time - cpu->last_update; 1777 if (delta_ns > TICK_NSEC) 1778 cpu->iowait_boost = 0; 1779 } 1780 cpu->last_update = time; 1781 delta_ns = time - cpu->sample.time; 1782 if ((s64)delta_ns < INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL) 1783 return; 1784 1785 if (intel_pstate_sample(cpu, time)) { 1786 int target_pstate; 1787 1788 target_pstate = get_target_pstate_use_cpu_load(cpu); 1789 intel_pstate_adjust_pstate(cpu, target_pstate); 1790 } 1791 } 1792 1793 static struct pstate_funcs core_funcs = { 1794 .get_max = core_get_max_pstate, 1795 .get_max_physical = core_get_max_pstate_physical, 1796 .get_min = core_get_min_pstate, 1797 .get_turbo = core_get_turbo_pstate, 1798 .get_scaling = core_get_scaling, 1799 .get_val = core_get_val, 1800 .update_util = intel_pstate_update_util_pid, 1801 }; 1802 1803 static const struct pstate_funcs silvermont_funcs = { 1804 .get_max = atom_get_max_pstate, 1805 .get_max_physical = atom_get_max_pstate, 1806 .get_min = atom_get_min_pstate, 1807 .get_turbo = atom_get_turbo_pstate, 1808 .get_val = atom_get_val, 1809 .get_scaling = silvermont_get_scaling, 1810 .get_vid = atom_get_vid, 1811 .update_util = intel_pstate_update_util, 1812 }; 1813 1814 static const struct pstate_funcs airmont_funcs = { 1815 .get_max = atom_get_max_pstate, 1816 .get_max_physical = atom_get_max_pstate, 1817 .get_min = atom_get_min_pstate, 1818 .get_turbo = atom_get_turbo_pstate, 1819 .get_val = atom_get_val, 1820 .get_scaling = airmont_get_scaling, 1821 .get_vid = atom_get_vid, 1822 .update_util = intel_pstate_update_util, 1823 }; 1824 1825 static const struct pstate_funcs knl_funcs = { 1826 .get_max = core_get_max_pstate, 1827 .get_max_physical = core_get_max_pstate_physical, 1828 .get_min = core_get_min_pstate, 1829 .get_turbo = knl_get_turbo_pstate, 1830 .get_scaling = core_get_scaling, 1831 .get_val = core_get_val, 1832 .update_util = intel_pstate_update_util_pid, 1833 }; 1834 1835 static const struct pstate_funcs bxt_funcs = { 1836 .get_max = core_get_max_pstate, 1837 .get_max_physical = core_get_max_pstate_physical, 1838 .get_min = core_get_min_pstate, 1839 .get_turbo = core_get_turbo_pstate, 1840 .get_scaling = core_get_scaling, 1841 .get_val = core_get_val, 1842 .update_util = intel_pstate_update_util, 1843 }; 1844 1845 #define ICPU(model, policy) \ 1846 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\ 1847 (unsigned long)&policy } 1848 1849 static const struct x86_cpu_id intel_pstate_cpu_ids[] = { 1850 ICPU(INTEL_FAM6_SANDYBRIDGE, core_funcs), 1851 ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_funcs), 1852 ICPU(INTEL_FAM6_ATOM_SILVERMONT1, silvermont_funcs), 1853 ICPU(INTEL_FAM6_IVYBRIDGE, core_funcs), 1854 ICPU(INTEL_FAM6_HASWELL_CORE, core_funcs), 1855 ICPU(INTEL_FAM6_BROADWELL_CORE, core_funcs), 1856 ICPU(INTEL_FAM6_IVYBRIDGE_X, core_funcs), 1857 ICPU(INTEL_FAM6_HASWELL_X, core_funcs), 1858 ICPU(INTEL_FAM6_HASWELL_ULT, core_funcs), 1859 ICPU(INTEL_FAM6_HASWELL_GT3E, core_funcs), 1860 ICPU(INTEL_FAM6_BROADWELL_GT3E, core_funcs), 1861 ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_funcs), 1862 ICPU(INTEL_FAM6_SKYLAKE_MOBILE, core_funcs), 1863 ICPU(INTEL_FAM6_BROADWELL_X, core_funcs), 1864 ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_funcs), 1865 ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs), 1866 ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs), 1867 ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs), 1868 ICPU(INTEL_FAM6_ATOM_GOLDMONT, bxt_funcs), 1869 ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, bxt_funcs), 1870 {} 1871 }; 1872 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); 1873 1874 static const struct 
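/*
 * For reference, ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_funcs) in the table
 * above expands to
 *
 *	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_DESKTOP,
 *	  X86_FEATURE_APERFMPERF, (unsigned long)&core_funcs },
 *
 * so x86_match_cpu() both identifies the CPU model and hands back the
 * matching set of pstate_funcs callbacks through the driver_data field.
 */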
x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { 1875 ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs), 1876 ICPU(INTEL_FAM6_BROADWELL_X, core_funcs), 1877 ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs), 1878 {} 1879 }; 1880 1881 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { 1882 ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_funcs), 1883 {} 1884 }; 1885 1886 static bool pid_in_use(void); 1887 1888 static int intel_pstate_init_cpu(unsigned int cpunum) 1889 { 1890 struct cpudata *cpu; 1891 1892 cpu = all_cpu_data[cpunum]; 1893 1894 if (!cpu) { 1895 cpu = kzalloc(sizeof(*cpu), GFP_KERNEL); 1896 if (!cpu) 1897 return -ENOMEM; 1898 1899 all_cpu_data[cpunum] = cpu; 1900 1901 cpu->epp_default = -EINVAL; 1902 cpu->epp_powersave = -EINVAL; 1903 cpu->epp_saved = -EINVAL; 1904 } 1905 1906 cpu = all_cpu_data[cpunum]; 1907 1908 cpu->cpu = cpunum; 1909 1910 if (hwp_active) { 1911 const struct x86_cpu_id *id; 1912 1913 id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids); 1914 if (id) 1915 intel_pstate_disable_ee(cpunum); 1916 1917 intel_pstate_hwp_enable(cpu); 1918 } else if (pid_in_use()) { 1919 intel_pstate_pid_reset(cpu); 1920 } 1921 1922 intel_pstate_get_cpu_pstates(cpu); 1923 1924 pr_debug("controlling: cpu %d\n", cpunum); 1925 1926 return 0; 1927 } 1928 1929 static unsigned int intel_pstate_get(unsigned int cpu_num) 1930 { 1931 struct cpudata *cpu = all_cpu_data[cpu_num]; 1932 1933 return cpu ? get_avg_frequency(cpu) : 0; 1934 } 1935 1936 static void intel_pstate_set_update_util_hook(unsigned int cpu_num) 1937 { 1938 struct cpudata *cpu = all_cpu_data[cpu_num]; 1939 1940 if (cpu->update_util_set) 1941 return; 1942 1943 /* Prevent intel_pstate_update_util() from using stale data. */ 1944 cpu->sample.time = 0; 1945 cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, 1946 pstate_funcs.update_util); 1947 cpu->update_util_set = true; 1948 } 1949 1950 static void intel_pstate_clear_update_util_hook(unsigned int cpu) 1951 { 1952 struct cpudata *cpu_data = all_cpu_data[cpu]; 1953 1954 if (!cpu_data->update_util_set) 1955 return; 1956 1957 cpufreq_remove_update_util_hook(cpu); 1958 cpu_data->update_util_set = false; 1959 synchronize_sched(); 1960 } 1961 1962 static int intel_pstate_get_max_freq(struct cpudata *cpu) 1963 { 1964 return global.turbo_disabled || global.no_turbo ? 1965 cpu->pstate.max_freq : cpu->pstate.turbo_freq; 1966 } 1967 1968 static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, 1969 struct cpudata *cpu) 1970 { 1971 int max_freq = intel_pstate_get_max_freq(cpu); 1972 int32_t max_policy_perf, min_policy_perf; 1973 int max_state, turbo_max; 1974 1975 /* 1976 * HWP needs some special consideration, because on BDX the 1977 * HWP_REQUEST uses abstract value to represent performance 1978 * rather than pure ratios. 
1979 */ 1980 if (hwp_active) { 1981 intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state); 1982 } else { 1983 max_state = intel_pstate_get_base_pstate(cpu); 1984 turbo_max = cpu->pstate.turbo_pstate; 1985 } 1986 1987 max_policy_perf = max_state * policy->max / max_freq; 1988 if (policy->max == policy->min) { 1989 min_policy_perf = max_policy_perf; 1990 } else { 1991 min_policy_perf = max_state * policy->min / max_freq; 1992 min_policy_perf = clamp_t(int32_t, min_policy_perf, 1993 0, max_policy_perf); 1994 } 1995 1996 pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n", 1997 policy->cpu, max_state, 1998 min_policy_perf, max_policy_perf); 1999 2000 /* Normalize user input to [min_perf, max_perf] */ 2001 if (per_cpu_limits) { 2002 cpu->min_perf_ratio = min_policy_perf; 2003 cpu->max_perf_ratio = max_policy_perf; 2004 } else { 2005 int32_t global_min, global_max; 2006 2007 /* Global limits are in percent of the maximum turbo P-state. */ 2008 global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100); 2009 global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100); 2010 global_min = clamp_t(int32_t, global_min, 0, global_max); 2011 2012 pr_debug("cpu:%d global_min:%d global_max:%d\n", policy->cpu, 2013 global_min, global_max); 2014 2015 cpu->min_perf_ratio = max(min_policy_perf, global_min); 2016 cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf); 2017 cpu->max_perf_ratio = min(max_policy_perf, global_max); 2018 cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio); 2019 2020 /* Make sure min_perf <= max_perf */ 2021 cpu->min_perf_ratio = min(cpu->min_perf_ratio, 2022 cpu->max_perf_ratio); 2023 2024 } 2025 pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", policy->cpu, 2026 cpu->max_perf_ratio, 2027 cpu->min_perf_ratio); 2028 } 2029 2030 static int intel_pstate_set_policy(struct cpufreq_policy *policy) 2031 { 2032 struct cpudata *cpu; 2033 2034 if (!policy->cpuinfo.max_freq) 2035 return -ENODEV; 2036 2037 pr_debug("set_policy cpuinfo.max %u policy->max %u\n", 2038 policy->cpuinfo.max_freq, policy->max); 2039 2040 cpu = all_cpu_data[policy->cpu]; 2041 cpu->policy = policy->policy; 2042 2043 mutex_lock(&intel_pstate_limits_lock); 2044 2045 intel_pstate_update_perf_limits(policy, cpu); 2046 2047 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { 2048 /* 2049 * NOHZ_FULL CPUs need this as the governor callback may not 2050 * be invoked on them. 
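 *
 * intel_pstate_update_perf_limits() above has already translated the
 * policy and global limits into ratios at this point.  As an assumed
 * example with turbo enabled: turbo_max 36, max_freq 3600000 kHz,
 * policy->max 2400000 kHz and policy->min 1200000 kHz give
 * max_policy_perf = 36 * 2400000 / 3600000 = 24 and min_policy_perf = 12;
 * with global limits of 25% and 100% the global window is [9, 36], so the
 * CPU ends up with min_perf_ratio 12 and max_perf_ratio 24.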
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
        struct cpudata *cpu;

        if (!policy->cpuinfo.max_freq)
                return -ENODEV;

        pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
                 policy->cpuinfo.max_freq, policy->max);

        cpu = all_cpu_data[policy->cpu];
        cpu->policy = policy->policy;

        mutex_lock(&intel_pstate_limits_lock);

        intel_pstate_update_perf_limits(policy, cpu);

        if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
                /*
                 * NOHZ_FULL CPUs need this as the governor callback may not
                 * be invoked on them.
                 */
                intel_pstate_clear_update_util_hook(policy->cpu);
                intel_pstate_max_within_limits(cpu);
        }

        intel_pstate_set_update_util_hook(policy->cpu);

        if (hwp_active)
                intel_pstate_hwp_set(policy->cpu);

        mutex_unlock(&intel_pstate_limits_lock);

        return 0;
}

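/*
 * In active mode the policy above is selected through the standard cpufreq
 * governor interface, for example:
 *
 *	# echo performance > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *
 * which ends up here with CPUFREQ_POLICY_PERFORMANCE and pins the CPU to the
 * highest P-state allowed by the current limits instead of relying on the
 * utilization update callback.
 */
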
static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy,
                                           struct cpudata *cpu)
{
        if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
            policy->max < policy->cpuinfo.max_freq &&
            policy->max > cpu->pstate.max_freq) {
                pr_debug("policy->max > max non turbo frequency\n");
                policy->max = policy->cpuinfo.max_freq;
        }
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
        struct cpudata *cpu = all_cpu_data[policy->cpu];

        update_turbo_state();
        cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                     intel_pstate_get_max_freq(cpu));

        if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
            policy->policy != CPUFREQ_POLICY_PERFORMANCE)
                return -EINVAL;

        intel_pstate_adjust_policy_max(policy, cpu);

        return 0;
}

static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
        intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
        pr_debug("CPU %d exiting\n", policy->cpu);

        intel_pstate_clear_update_util_hook(policy->cpu);
        if (hwp_active)
                intel_pstate_hwp_save_state(policy);
        else
                intel_cpufreq_stop_cpu(policy);
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
        intel_pstate_exit_perf_limits(policy);

        policy->fast_switch_possible = false;

        return 0;
}

static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
        struct cpudata *cpu;
        int rc;

        rc = intel_pstate_init_cpu(policy->cpu);
        if (rc)
                return rc;

        cpu = all_cpu_data[policy->cpu];

        cpu->max_perf_ratio = 0xFF;
        cpu->min_perf_ratio = 0;

        policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
        policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
        update_turbo_state();
        policy->cpuinfo.max_freq = global.turbo_disabled ?
                        cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
        policy->cpuinfo.max_freq *= cpu->pstate.scaling;

        intel_pstate_init_acpi_perf_limits(policy);
        cpumask_set_cpu(policy->cpu, policy->cpus);

        policy->fast_switch_possible = true;

        return 0;
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
        int ret = __intel_pstate_cpu_init(policy);

        if (ret)
                return ret;

        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
        if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE))
                policy->policy = CPUFREQ_POLICY_PERFORMANCE;
        else
                policy->policy = CPUFREQ_POLICY_POWERSAVE;

        return 0;
}

static struct cpufreq_driver intel_pstate = {
        .flags          = CPUFREQ_CONST_LOOPS,
        .verify         = intel_pstate_verify_policy,
        .setpolicy      = intel_pstate_set_policy,
        .suspend        = intel_pstate_hwp_save_state,
        .resume         = intel_pstate_resume,
        .get            = intel_pstate_get,
        .init           = intel_pstate_cpu_init,
        .exit           = intel_pstate_cpu_exit,
        .stop_cpu       = intel_pstate_stop_cpu,
        .name           = "intel_pstate",
};

static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
{
        struct cpudata *cpu = all_cpu_data[policy->cpu];

        update_turbo_state();
        cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                     intel_pstate_get_max_freq(cpu));

        intel_pstate_adjust_policy_max(policy, cpu);

        intel_pstate_update_perf_limits(policy, cpu);

        return 0;
}

static int intel_cpufreq_target(struct cpufreq_policy *policy,
                                unsigned int target_freq,
                                unsigned int relation)
{
        struct cpudata *cpu = all_cpu_data[policy->cpu];
        struct cpufreq_freqs freqs;
        int target_pstate;

        update_turbo_state();

        freqs.old = policy->cur;
        freqs.new = target_freq;

        cpufreq_freq_transition_begin(policy, &freqs);
        switch (relation) {
        case CPUFREQ_RELATION_L:
                target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
                break;
        case CPUFREQ_RELATION_H:
                target_pstate = freqs.new / cpu->pstate.scaling;
                break;
        default:
                target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
                break;
        }
        target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
        if (target_pstate != cpu->pstate.current_pstate) {
                cpu->pstate.current_pstate = target_pstate;
                wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
                              pstate_funcs.get_val(cpu, target_pstate));
        }
        freqs.new = target_pstate * cpu->pstate.scaling;
        cpufreq_freq_transition_end(policy, &freqs, false);

        return 0;
}

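/*
 * Example of the relation handling above, assuming cpu->pstate.scaling is
 * 100000 (100 MHz per P-state step) and target_freq = 1230000 kHz:
 *
 *	CPUFREQ_RELATION_L:	DIV_ROUND_UP(1230000, 100000)      = 13
 *	CPUFREQ_RELATION_H:	1230000 / 100000                   = 12
 *	otherwise:		DIV_ROUND_CLOSEST(1230000, 100000) = 12
 *
 * The result is still clamped by intel_pstate_prepare_request() before it is
 * written to MSR_IA32_PERF_CTL.
 */
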
static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
                                              unsigned int target_freq)
{
        struct cpudata *cpu = all_cpu_data[policy->cpu];
        int target_pstate;

        update_turbo_state();

        target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
        target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
        intel_pstate_update_pstate(cpu, target_pstate);
        return target_pstate * cpu->pstate.scaling;
}

static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
        int ret = __intel_pstate_cpu_init(policy);

        if (ret)
                return ret;

        policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
        policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
        /* This reflects the intel_pstate_get_cpu_pstates() setting. */
        policy->cur = policy->cpuinfo.min_freq;

        return 0;
}

static struct cpufreq_driver intel_cpufreq = {
        .flags          = CPUFREQ_CONST_LOOPS,
        .verify         = intel_cpufreq_verify_policy,
        .target         = intel_cpufreq_target,
        .fast_switch    = intel_cpufreq_fast_switch,
        .init           = intel_cpufreq_cpu_init,
        .exit           = intel_pstate_cpu_exit,
        .stop_cpu       = intel_cpufreq_stop_cpu,
        .name           = "intel_cpufreq",
};

static struct cpufreq_driver *default_driver = &intel_pstate;

static bool pid_in_use(void)
{
        return intel_pstate_driver == &intel_pstate &&
                pstate_funcs.update_util == intel_pstate_update_util_pid;
}

static void intel_pstate_driver_cleanup(void)
{
        unsigned int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu) {
                if (all_cpu_data[cpu]) {
                        if (intel_pstate_driver == &intel_pstate)
                                intel_pstate_clear_update_util_hook(cpu);

                        kfree(all_cpu_data[cpu]);
                        all_cpu_data[cpu] = NULL;
                }
        }
        put_online_cpus();
        intel_pstate_driver = NULL;
}

static int intel_pstate_register_driver(struct cpufreq_driver *driver)
{
        int ret;

        memset(&global, 0, sizeof(global));
        global.max_perf_pct = 100;

        intel_pstate_driver = driver;
        ret = cpufreq_register_driver(intel_pstate_driver);
        if (ret) {
                intel_pstate_driver_cleanup();
                return ret;
        }

        global.min_perf_pct = min_perf_pct_min();

        if (pid_in_use())
                intel_pstate_debug_expose_params();

        return 0;
}

static int intel_pstate_unregister_driver(void)
{
        if (hwp_active)
                return -EBUSY;

        if (pid_in_use())
                intel_pstate_debug_hide_params();

        cpufreq_unregister_driver(intel_pstate_driver);
        intel_pstate_driver_cleanup();

        return 0;
}

static ssize_t intel_pstate_show_status(char *buf)
{
        if (!intel_pstate_driver)
                return sprintf(buf, "off\n");

        return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
                                        "active" : "passive");
}

static int intel_pstate_update_status(const char *buf, size_t size)
{
        int ret;

        if (size == 3 && !strncmp(buf, "off", size))
                return intel_pstate_driver ?
                        intel_pstate_unregister_driver() : -EINVAL;

        if (size == 6 && !strncmp(buf, "active", size)) {
                if (intel_pstate_driver) {
                        if (intel_pstate_driver == &intel_pstate)
                                return 0;

                        ret = intel_pstate_unregister_driver();
                        if (ret)
                                return ret;
                }

                return intel_pstate_register_driver(&intel_pstate);
        }

        if (size == 7 && !strncmp(buf, "passive", size)) {
                if (intel_pstate_driver) {
                        if (intel_pstate_driver == &intel_cpufreq)
                                return 0;

                        ret = intel_pstate_unregister_driver();
                        if (ret)
                                return ret;
                }

                return intel_pstate_register_driver(&intel_cpufreq);
        }

        return -EINVAL;
}

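/*
 * The driver mode is switched at run time through sysfs, for example:
 *
 *	# cat /sys/devices/system/cpu/intel_pstate/status
 *	active
 *	# echo passive > /sys/devices/system/cpu/intel_pstate/status
 *
 * Switching away from the active driver fails with -EBUSY while HWP is in
 * use, as intel_pstate_unregister_driver() above shows.
 */
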
2493 {1, "ORACLE", "X3-2B ", PPC}, 2494 {1, "ORACLE", "X4470M2 ", PPC}, 2495 {1, "ORACLE", "X4270M3 ", PPC}, 2496 {1, "ORACLE", "X4270M2 ", PPC}, 2497 {1, "ORACLE", "X4170M2 ", PPC}, 2498 {1, "ORACLE", "X4170 M3", PPC}, 2499 {1, "ORACLE", "X4275 M3", PPC}, 2500 {1, "ORACLE", "X6-2 ", PPC}, 2501 {1, "ORACLE", "Sudbury ", PPC}, 2502 {0, "", ""}, 2503 }; 2504 2505 static bool __init intel_pstate_platform_pwr_mgmt_exists(void) 2506 { 2507 struct acpi_table_header hdr; 2508 struct hw_vendor_info *v_info; 2509 const struct x86_cpu_id *id; 2510 u64 misc_pwr; 2511 2512 id = x86_match_cpu(intel_pstate_cpu_oob_ids); 2513 if (id) { 2514 rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); 2515 if ( misc_pwr & (1 << 8)) 2516 return true; 2517 } 2518 2519 if (acpi_disabled || 2520 ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr))) 2521 return false; 2522 2523 for (v_info = vendor_info; v_info->valid; v_info++) { 2524 if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) && 2525 !strncmp(hdr.oem_table_id, v_info->oem_table_id, 2526 ACPI_OEM_TABLE_ID_SIZE)) 2527 switch (v_info->oem_pwr_table) { 2528 case PSS: 2529 return intel_pstate_no_acpi_pss(); 2530 case PPC: 2531 return intel_pstate_has_acpi_ppc() && 2532 (!force_load); 2533 } 2534 } 2535 2536 return false; 2537 } 2538 2539 static void intel_pstate_request_control_from_smm(void) 2540 { 2541 /* 2542 * It may be unsafe to request P-states control from SMM if _PPC support 2543 * has not been enabled. 2544 */ 2545 if (acpi_ppc) 2546 acpi_processor_pstate_control(); 2547 } 2548 #else /* CONFIG_ACPI not enabled */ 2549 static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; } 2550 static inline bool intel_pstate_has_acpi_ppc(void) { return false; } 2551 static inline void intel_pstate_request_control_from_smm(void) {} 2552 #endif /* CONFIG_ACPI */ 2553 2554 static const struct x86_cpu_id hwp_support_ids[] __initconst = { 2555 { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP }, 2556 {} 2557 }; 2558 2559 static int __init intel_pstate_init(void) 2560 { 2561 int rc; 2562 2563 if (no_load) 2564 return -ENODEV; 2565 2566 if (x86_match_cpu(hwp_support_ids)) { 2567 copy_cpu_funcs(&core_funcs); 2568 if (no_hwp) { 2569 pstate_funcs.update_util = intel_pstate_update_util; 2570 } else { 2571 hwp_active++; 2572 intel_pstate.attr = hwp_cpufreq_attrs; 2573 pstate_funcs.update_util = intel_pstate_update_util_hwp; 2574 goto hwp_cpu_matched; 2575 } 2576 } else { 2577 const struct x86_cpu_id *id; 2578 2579 id = x86_match_cpu(intel_pstate_cpu_ids); 2580 if (!id) 2581 return -ENODEV; 2582 2583 copy_cpu_funcs((struct pstate_funcs *)id->driver_data); 2584 } 2585 2586 if (intel_pstate_msrs_not_valid()) 2587 return -ENODEV; 2588 2589 hwp_cpu_matched: 2590 /* 2591 * The Intel pstate driver will be ignored if the platform 2592 * firmware has its own power management modes. 
static int __init intel_pstate_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "disable")) {
                no_load = 1;
        } else if (!strcmp(str, "passive")) {
                pr_info("Passive mode enabled\n");
                default_driver = &intel_cpufreq;
                no_hwp = 1;
        }
        if (!strcmp(str, "no_hwp")) {
                pr_info("HWP disabled\n");
                no_hwp = 1;
        }
        if (!strcmp(str, "force"))
                force_load = 1;
        if (!strcmp(str, "hwp_only"))
                hwp_only = 1;
        if (!strcmp(str, "per_cpu_perf_limits"))
                per_cpu_limits = true;

#ifdef CONFIG_ACPI
        if (!strcmp(str, "support_acpi_ppc"))
                acpi_ppc = true;
#endif

        return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");