1 /* 2 * intel_pstate.c: Native P state management for Intel processors 3 * 4 * (C) Copyright 2012 Intel Corporation 5 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com> 6 * 7 * This program is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU General Public License 9 * as published by the Free Software Foundation; version 2 10 * of the License. 11 */ 12 13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 15 #include <linux/kernel.h> 16 #include <linux/kernel_stat.h> 17 #include <linux/module.h> 18 #include <linux/ktime.h> 19 #include <linux/hrtimer.h> 20 #include <linux/tick.h> 21 #include <linux/slab.h> 22 #include <linux/sched/cpufreq.h> 23 #include <linux/list.h> 24 #include <linux/cpu.h> 25 #include <linux/cpufreq.h> 26 #include <linux/sysfs.h> 27 #include <linux/types.h> 28 #include <linux/fs.h> 29 #include <linux/debugfs.h> 30 #include <linux/acpi.h> 31 #include <linux/vmalloc.h> 32 #include <trace/events/power.h> 33 34 #include <asm/div64.h> 35 #include <asm/msr.h> 36 #include <asm/cpu_device_id.h> 37 #include <asm/cpufeature.h> 38 #include <asm/intel-family.h> 39 40 #define INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC) 41 #define INTEL_PSTATE_HWP_SAMPLING_INTERVAL (50 * NSEC_PER_MSEC) 42 43 #define INTEL_CPUFREQ_TRANSITION_LATENCY 20000 44 #define INTEL_CPUFREQ_TRANSITION_DELAY 500 45 46 #ifdef CONFIG_ACPI 47 #include <acpi/processor.h> 48 #include <acpi/cppc_acpi.h> 49 #endif 50 51 #define FRAC_BITS 8 52 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) 53 #define fp_toint(X) ((X) >> FRAC_BITS) 54 55 #define EXT_BITS 6 56 #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS) 57 #define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS) 58 #define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS) 59 60 static inline int32_t mul_fp(int32_t x, int32_t y) 61 { 62 return ((int64_t)x * (int64_t)y) >> FRAC_BITS; 63 } 64 65 static inline int32_t div_fp(s64 x, s64 y) 66 { 67 return div64_s64((int64_t)x << FRAC_BITS, y); 68 } 69 70 static inline int ceiling_fp(int32_t x) 71 { 72 int mask, ret; 73 74 ret = fp_toint(x); 75 mask = (1 << FRAC_BITS) - 1; 76 if (x & mask) 77 ret += 1; 78 return ret; 79 } 80 81 static inline int32_t percent_fp(int percent) 82 { 83 return div_fp(percent, 100); 84 } 85 86 static inline u64 mul_ext_fp(u64 x, u64 y) 87 { 88 return (x * y) >> EXT_FRAC_BITS; 89 } 90 91 static inline u64 div_ext_fp(u64 x, u64 y) 92 { 93 return div64_u64(x << EXT_FRAC_BITS, y); 94 } 95 96 static inline int32_t percent_ext_fp(int percent) 97 { 98 return div_ext_fp(percent, 100); 99 } 100 101 /** 102 * struct sample - Store performance sample 103 * @core_avg_perf: Ratio of APERF/MPERF which is the actual average 104 * performance during last sample period 105 * @busy_scaled: Scaled busy value which is used to calculate next 106 * P state. This can be different than core_avg_perf 107 * to account for cpu idle period 108 * @aperf: Difference of actual performance frequency clock count 109 * read from APERF MSR between last and current sample 110 * @mperf: Difference of maximum performance frequency clock count 111 * read from MPERF MSR between last and current sample 112 * @tsc: Difference of time stamp counter between last and 113 * current sample 114 * @time: Current time from scheduler 115 * 116 * This structure is used in the cpudata structure to store performance sample 117 * data for choosing next P State. 118 */ 119 struct sample { 120 int32_t core_avg_perf; 121 int32_t busy_scaled; 122 u64 aperf; 123 u64 mperf; 124 u64 tsc; 125 u64 time; 126 }; 127 128 /** 129 * struct pstate_data - Store P state data 130 * @current_pstate: Current requested P state 131 * @min_pstate: Min P state possible for this platform 132 * @max_pstate: Max P state possible for this platform 133 * @max_pstate_physical:This is physical Max P state for a processor 134 * This can be higher than the max_pstate which can 135 * be limited by platform thermal design power limits 136 * @scaling: Scaling factor to convert frequency to cpufreq 137 * frequency units 138 * @turbo_pstate: Max Turbo P state possible for this platform 139 * @max_freq: @max_pstate frequency in cpufreq units 140 * @turbo_freq: @turbo_pstate frequency in cpufreq units 141 * 142 * Stores the per cpu model P state limits and current P state. 143 */ 144 struct pstate_data { 145 int current_pstate; 146 int min_pstate; 147 int max_pstate; 148 int max_pstate_physical; 149 int scaling; 150 int turbo_pstate; 151 unsigned int max_freq; 152 unsigned int turbo_freq; 153 }; 154 155 /** 156 * struct vid_data - Stores voltage information data 157 * @min: VID data for this platform corresponding to 158 * the lowest P state 159 * @max: VID data corresponding to the highest P State. 160 * @turbo: VID data for turbo P state 161 * @ratio: Ratio of (vid max - vid min) / 162 * (max P state - Min P State) 163 * 164 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling) 165 * This data is used in Atom platforms, where in addition to target P state, 166 * the voltage data needs to be specified to select next P State. 167 */ 168 struct vid_data { 169 int min; 170 int max; 171 int turbo; 172 int32_t ratio; 173 }; 174 175 /** 176 * struct _pid - Stores PID data 177 * @setpoint: Target set point for busyness or performance 178 * @integral: Storage for accumulated error values 179 * @p_gain: PID proportional gain 180 * @i_gain: PID integral gain 181 * @d_gain: PID derivative gain 182 * @deadband: PID deadband 183 * @last_err: Last error storage for integral part of PID calculation 184 * 185 * Stores PID coefficients and last error for PID controller. 186 */ 187 struct _pid { 188 int setpoint; 189 int32_t integral; 190 int32_t p_gain; 191 int32_t i_gain; 192 int32_t d_gain; 193 int deadband; 194 int32_t last_err; 195 }; 196 197 /** 198 * struct global_params - Global parameters, mostly tunable via sysfs. 199 * @no_turbo: Whether or not to use turbo P-states. 200 * @turbo_disabled: Whethet or not turbo P-states are available at all, 201 * based on the MSR_IA32_MISC_ENABLE value and whether or 202 * not the maximum reported turbo P-state is different from 203 * the maximum reported non-turbo one. 204 * @min_perf_pct: Minimum capacity limit in percent of the maximum turbo 205 * P-state capacity. 206 * @max_perf_pct: Maximum capacity limit in percent of the maximum turbo 207 * P-state capacity. 208 */ 209 struct global_params { 210 bool no_turbo; 211 bool turbo_disabled; 212 int max_perf_pct; 213 int min_perf_pct; 214 }; 215 216 /** 217 * struct cpudata - Per CPU instance data storage 218 * @cpu: CPU number for this instance data 219 * @policy: CPUFreq policy value 220 * @update_util: CPUFreq utility callback information 221 * @update_util_set: CPUFreq utility callback is set 222 * @iowait_boost: iowait-related boost fraction 223 * @last_update: Time of the last update. 224 * @pstate: Stores P state limits for this CPU 225 * @vid: Stores VID limits for this CPU 226 * @pid: Stores PID parameters for this CPU 227 * @last_sample_time: Last Sample time 228 * @prev_aperf: Last APERF value read from APERF MSR 229 * @prev_mperf: Last MPERF value read from MPERF MSR 230 * @prev_tsc: Last timestamp counter (TSC) value 231 * @prev_cummulative_iowait: IO Wait time difference from last and 232 * current sample 233 * @sample: Storage for storing last Sample data 234 * @min_perf_ratio: Minimum capacity in terms of PERF or HWP ratios 235 * @max_perf_ratio: Maximum capacity in terms of PERF or HWP ratios 236 * @acpi_perf_data: Stores ACPI perf information read from _PSS 237 * @valid_pss_table: Set to true for valid ACPI _PSS entries found 238 * @epp_powersave: Last saved HWP energy performance preference 239 * (EPP) or energy performance bias (EPB), 240 * when policy switched to performance 241 * @epp_policy: Last saved policy used to set EPP/EPB 242 * @epp_default: Power on default HWP energy performance 243 * preference/bias 244 * @epp_saved: Saved EPP/EPB during system suspend or CPU offline 245 * operation 246 * 247 * This structure stores per CPU instance data for all CPUs. 248 */ 249 struct cpudata { 250 int cpu; 251 252 unsigned int policy; 253 struct update_util_data update_util; 254 bool update_util_set; 255 256 struct pstate_data pstate; 257 struct vid_data vid; 258 struct _pid pid; 259 260 u64 last_update; 261 u64 last_sample_time; 262 u64 prev_aperf; 263 u64 prev_mperf; 264 u64 prev_tsc; 265 u64 prev_cummulative_iowait; 266 struct sample sample; 267 int32_t min_perf_ratio; 268 int32_t max_perf_ratio; 269 #ifdef CONFIG_ACPI 270 struct acpi_processor_performance acpi_perf_data; 271 bool valid_pss_table; 272 #endif 273 unsigned int iowait_boost; 274 s16 epp_powersave; 275 s16 epp_policy; 276 s16 epp_default; 277 s16 epp_saved; 278 }; 279 280 static struct cpudata **all_cpu_data; 281 282 /** 283 * struct pstate_adjust_policy - Stores static PID configuration data 284 * @sample_rate_ms: PID calculation sample rate in ms 285 * @sample_rate_ns: Sample rate calculation in ns 286 * @deadband: PID deadband 287 * @setpoint: PID Setpoint 288 * @p_gain_pct: PID proportional gain 289 * @i_gain_pct: PID integral gain 290 * @d_gain_pct: PID derivative gain 291 * 292 * Stores per CPU model static PID configuration data. 293 */ 294 struct pstate_adjust_policy { 295 int sample_rate_ms; 296 s64 sample_rate_ns; 297 int deadband; 298 int setpoint; 299 int p_gain_pct; 300 int d_gain_pct; 301 int i_gain_pct; 302 }; 303 304 /** 305 * struct pstate_funcs - Per CPU model specific callbacks 306 * @get_max: Callback to get maximum non turbo effective P state 307 * @get_max_physical: Callback to get maximum non turbo physical P state 308 * @get_min: Callback to get minimum P state 309 * @get_turbo: Callback to get turbo P state 310 * @get_scaling: Callback to get frequency scaling factor 311 * @get_val: Callback to convert P state to actual MSR write value 312 * @get_vid: Callback to get VID data for Atom platforms 313 * @update_util: Active mode utilization update callback. 314 * 315 * Core and Atom CPU models have different way to get P State limits. This 316 * structure is used to store those callbacks. 317 */ 318 struct pstate_funcs { 319 int (*get_max)(void); 320 int (*get_max_physical)(void); 321 int (*get_min)(void); 322 int (*get_turbo)(void); 323 int (*get_scaling)(void); 324 u64 (*get_val)(struct cpudata*, int pstate); 325 void (*get_vid)(struct cpudata *); 326 void (*update_util)(struct update_util_data *data, u64 time, 327 unsigned int flags); 328 }; 329 330 static struct pstate_funcs pstate_funcs __read_mostly; 331 static struct pstate_adjust_policy pid_params __read_mostly = { 332 .sample_rate_ms = 10, 333 .sample_rate_ns = 10 * NSEC_PER_MSEC, 334 .deadband = 0, 335 .setpoint = 97, 336 .p_gain_pct = 20, 337 .d_gain_pct = 0, 338 .i_gain_pct = 0, 339 }; 340 341 static int hwp_active __read_mostly; 342 static bool per_cpu_limits __read_mostly; 343 344 static struct cpufreq_driver *intel_pstate_driver __read_mostly; 345 346 #ifdef CONFIG_ACPI 347 static bool acpi_ppc; 348 #endif 349 350 static struct global_params global; 351 352 static DEFINE_MUTEX(intel_pstate_driver_lock); 353 static DEFINE_MUTEX(intel_pstate_limits_lock); 354 355 #ifdef CONFIG_ACPI 356 357 static bool intel_pstate_get_ppc_enable_status(void) 358 { 359 if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER || 360 acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER) 361 return true; 362 363 return acpi_ppc; 364 } 365 366 #ifdef CONFIG_ACPI_CPPC_LIB 367 368 /* The work item is needed to avoid CPU hotplug locking issues */ 369 static void intel_pstste_sched_itmt_work_fn(struct work_struct *work) 370 { 371 sched_set_itmt_support(); 372 } 373 374 static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn); 375 376 static void intel_pstate_set_itmt_prio(int cpu) 377 { 378 struct cppc_perf_caps cppc_perf; 379 static u32 max_highest_perf = 0, min_highest_perf = U32_MAX; 380 int ret; 381 382 ret = cppc_get_perf_caps(cpu, &cppc_perf); 383 if (ret) 384 return; 385 386 /* 387 * The priorities can be set regardless of whether or not 388 * sched_set_itmt_support(true) has been called and it is valid to 389 * update them at any time after it has been called. 390 */ 391 sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu); 392 393 if (max_highest_perf <= min_highest_perf) { 394 if (cppc_perf.highest_perf > max_highest_perf) 395 max_highest_perf = cppc_perf.highest_perf; 396 397 if (cppc_perf.highest_perf < min_highest_perf) 398 min_highest_perf = cppc_perf.highest_perf; 399 400 if (max_highest_perf > min_highest_perf) { 401 /* 402 * This code can be run during CPU online under the 403 * CPU hotplug locks, so sched_set_itmt_support() 404 * cannot be called from here. Queue up a work item 405 * to invoke it. 406 */ 407 schedule_work(&sched_itmt_work); 408 } 409 } 410 } 411 #else 412 static void intel_pstate_set_itmt_prio(int cpu) 413 { 414 } 415 #endif 416 417 static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 418 { 419 struct cpudata *cpu; 420 int ret; 421 int i; 422 423 if (hwp_active) { 424 intel_pstate_set_itmt_prio(policy->cpu); 425 return; 426 } 427 428 if (!intel_pstate_get_ppc_enable_status()) 429 return; 430 431 cpu = all_cpu_data[policy->cpu]; 432 433 ret = acpi_processor_register_performance(&cpu->acpi_perf_data, 434 policy->cpu); 435 if (ret) 436 return; 437 438 /* 439 * Check if the control value in _PSS is for PERF_CTL MSR, which should 440 * guarantee that the states returned by it map to the states in our 441 * list directly. 442 */ 443 if (cpu->acpi_perf_data.control_register.space_id != 444 ACPI_ADR_SPACE_FIXED_HARDWARE) 445 goto err; 446 447 /* 448 * If there is only one entry _PSS, simply ignore _PSS and continue as 449 * usual without taking _PSS into account 450 */ 451 if (cpu->acpi_perf_data.state_count < 2) 452 goto err; 453 454 pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu); 455 for (i = 0; i < cpu->acpi_perf_data.state_count; i++) { 456 pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n", 457 (i == cpu->acpi_perf_data.state ? '*' : ' '), i, 458 (u32) cpu->acpi_perf_data.states[i].core_frequency, 459 (u32) cpu->acpi_perf_data.states[i].power, 460 (u32) cpu->acpi_perf_data.states[i].control); 461 } 462 463 /* 464 * The _PSS table doesn't contain whole turbo frequency range. 465 * This just contains +1 MHZ above the max non turbo frequency, 466 * with control value corresponding to max turbo ratio. But 467 * when cpufreq set policy is called, it will call with this 468 * max frequency, which will cause a reduced performance as 469 * this driver uses real max turbo frequency as the max 470 * frequency. So correct this frequency in _PSS table to 471 * correct max turbo frequency based on the turbo state. 472 * Also need to convert to MHz as _PSS freq is in MHz. 473 */ 474 if (!global.turbo_disabled) 475 cpu->acpi_perf_data.states[0].core_frequency = 476 policy->cpuinfo.max_freq / 1000; 477 cpu->valid_pss_table = true; 478 pr_debug("_PPC limits will be enforced\n"); 479 480 return; 481 482 err: 483 cpu->valid_pss_table = false; 484 acpi_processor_unregister_performance(policy->cpu); 485 } 486 487 static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 488 { 489 struct cpudata *cpu; 490 491 cpu = all_cpu_data[policy->cpu]; 492 if (!cpu->valid_pss_table) 493 return; 494 495 acpi_processor_unregister_performance(policy->cpu); 496 } 497 #else 498 static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 499 { 500 } 501 502 static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 503 { 504 } 505 #endif 506 507 static signed int pid_calc(struct _pid *pid, int32_t busy) 508 { 509 signed int result; 510 int32_t pterm, dterm, fp_error; 511 int32_t integral_limit; 512 513 fp_error = pid->setpoint - busy; 514 515 if (abs(fp_error) <= pid->deadband) 516 return 0; 517 518 pterm = mul_fp(pid->p_gain, fp_error); 519 520 pid->integral += fp_error; 521 522 /* 523 * We limit the integral here so that it will never 524 * get higher than 30. This prevents it from becoming 525 * too large an input over long periods of time and allows 526 * it to get factored out sooner. 527 * 528 * The value of 30 was chosen through experimentation. 529 */ 530 integral_limit = int_tofp(30); 531 if (pid->integral > integral_limit) 532 pid->integral = integral_limit; 533 if (pid->integral < -integral_limit) 534 pid->integral = -integral_limit; 535 536 dterm = mul_fp(pid->d_gain, fp_error - pid->last_err); 537 pid->last_err = fp_error; 538 539 result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; 540 result = result + (1 << (FRAC_BITS-1)); 541 return (signed int)fp_toint(result); 542 } 543 544 static inline void intel_pstate_pid_reset(struct cpudata *cpu) 545 { 546 struct _pid *pid = &cpu->pid; 547 548 pid->p_gain = percent_fp(pid_params.p_gain_pct); 549 pid->d_gain = percent_fp(pid_params.d_gain_pct); 550 pid->i_gain = percent_fp(pid_params.i_gain_pct); 551 pid->setpoint = int_tofp(pid_params.setpoint); 552 pid->last_err = pid->setpoint - int_tofp(100); 553 pid->deadband = int_tofp(pid_params.deadband); 554 pid->integral = 0; 555 } 556 557 static inline void update_turbo_state(void) 558 { 559 u64 misc_en; 560 struct cpudata *cpu; 561 562 cpu = all_cpu_data[0]; 563 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); 564 global.turbo_disabled = 565 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || 566 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); 567 } 568 569 static int min_perf_pct_min(void) 570 { 571 struct cpudata *cpu = all_cpu_data[0]; 572 int turbo_pstate = cpu->pstate.turbo_pstate; 573 574 return turbo_pstate ? 575 (cpu->pstate.min_pstate * 100 / turbo_pstate) : 0; 576 } 577 578 static s16 intel_pstate_get_epb(struct cpudata *cpu_data) 579 { 580 u64 epb; 581 int ret; 582 583 if (!static_cpu_has(X86_FEATURE_EPB)) 584 return -ENXIO; 585 586 ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); 587 if (ret) 588 return (s16)ret; 589 590 return (s16)(epb & 0x0f); 591 } 592 593 static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data) 594 { 595 s16 epp; 596 597 if (static_cpu_has(X86_FEATURE_HWP_EPP)) { 598 /* 599 * When hwp_req_data is 0, means that caller didn't read 600 * MSR_HWP_REQUEST, so need to read and get EPP. 601 */ 602 if (!hwp_req_data) { 603 epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, 604 &hwp_req_data); 605 if (epp) 606 return epp; 607 } 608 epp = (hwp_req_data >> 24) & 0xff; 609 } else { 610 /* When there is no EPP present, HWP uses EPB settings */ 611 epp = intel_pstate_get_epb(cpu_data); 612 } 613 614 return epp; 615 } 616 617 static int intel_pstate_set_epb(int cpu, s16 pref) 618 { 619 u64 epb; 620 int ret; 621 622 if (!static_cpu_has(X86_FEATURE_EPB)) 623 return -ENXIO; 624 625 ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); 626 if (ret) 627 return ret; 628 629 epb = (epb & ~0x0f) | pref; 630 wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb); 631 632 return 0; 633 } 634 635 /* 636 * EPP/EPB display strings corresponding to EPP index in the 637 * energy_perf_strings[] 638 * index String 639 *------------------------------------- 640 * 0 default 641 * 1 performance 642 * 2 balance_performance 643 * 3 balance_power 644 * 4 power 645 */ 646 static const char * const energy_perf_strings[] = { 647 "default", 648 "performance", 649 "balance_performance", 650 "balance_power", 651 "power", 652 NULL 653 }; 654 655 static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data) 656 { 657 s16 epp; 658 int index = -EINVAL; 659 660 epp = intel_pstate_get_epp(cpu_data, 0); 661 if (epp < 0) 662 return epp; 663 664 if (static_cpu_has(X86_FEATURE_HWP_EPP)) { 665 /* 666 * Range: 667 * 0x00-0x3F : Performance 668 * 0x40-0x7F : Balance performance 669 * 0x80-0xBF : Balance power 670 * 0xC0-0xFF : Power 671 * The EPP is a 8 bit value, but our ranges restrict the 672 * value which can be set. Here only using top two bits 673 * effectively. 674 */ 675 index = (epp >> 6) + 1; 676 } else if (static_cpu_has(X86_FEATURE_EPB)) { 677 /* 678 * Range: 679 * 0x00-0x03 : Performance 680 * 0x04-0x07 : Balance performance 681 * 0x08-0x0B : Balance power 682 * 0x0C-0x0F : Power 683 * The EPB is a 4 bit value, but our ranges restrict the 684 * value which can be set. Here only using top two bits 685 * effectively. 686 */ 687 index = (epp >> 2) + 1; 688 } 689 690 return index; 691 } 692 693 static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data, 694 int pref_index) 695 { 696 int epp = -EINVAL; 697 int ret; 698 699 if (!pref_index) 700 epp = cpu_data->epp_default; 701 702 mutex_lock(&intel_pstate_limits_lock); 703 704 if (static_cpu_has(X86_FEATURE_HWP_EPP)) { 705 u64 value; 706 707 ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value); 708 if (ret) 709 goto return_pref; 710 711 value &= ~GENMASK_ULL(31, 24); 712 713 /* 714 * If epp is not default, convert from index into 715 * energy_perf_strings to epp value, by shifting 6 716 * bits left to use only top two bits in epp. 717 * The resultant epp need to shifted by 24 bits to 718 * epp position in MSR_HWP_REQUEST. 719 */ 720 if (epp == -EINVAL) 721 epp = (pref_index - 1) << 6; 722 723 value |= (u64)epp << 24; 724 ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value); 725 } else { 726 if (epp == -EINVAL) 727 epp = (pref_index - 1) << 2; 728 ret = intel_pstate_set_epb(cpu_data->cpu, epp); 729 } 730 return_pref: 731 mutex_unlock(&intel_pstate_limits_lock); 732 733 return ret; 734 } 735 736 static ssize_t show_energy_performance_available_preferences( 737 struct cpufreq_policy *policy, char *buf) 738 { 739 int i = 0; 740 int ret = 0; 741 742 while (energy_perf_strings[i] != NULL) 743 ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]); 744 745 ret += sprintf(&buf[ret], "\n"); 746 747 return ret; 748 } 749 750 cpufreq_freq_attr_ro(energy_performance_available_preferences); 751 752 static ssize_t store_energy_performance_preference( 753 struct cpufreq_policy *policy, const char *buf, size_t count) 754 { 755 struct cpudata *cpu_data = all_cpu_data[policy->cpu]; 756 char str_preference[21]; 757 int ret, i = 0; 758 759 ret = sscanf(buf, "%20s", str_preference); 760 if (ret != 1) 761 return -EINVAL; 762 763 while (energy_perf_strings[i] != NULL) { 764 if (!strcmp(str_preference, energy_perf_strings[i])) { 765 intel_pstate_set_energy_pref_index(cpu_data, i); 766 return count; 767 } 768 ++i; 769 } 770 771 return -EINVAL; 772 } 773 774 static ssize_t show_energy_performance_preference( 775 struct cpufreq_policy *policy, char *buf) 776 { 777 struct cpudata *cpu_data = all_cpu_data[policy->cpu]; 778 int preference; 779 780 preference = intel_pstate_get_energy_pref_index(cpu_data); 781 if (preference < 0) 782 return preference; 783 784 return sprintf(buf, "%s\n", energy_perf_strings[preference]); 785 } 786 787 cpufreq_freq_attr_rw(energy_performance_preference); 788 789 static struct freq_attr *hwp_cpufreq_attrs[] = { 790 &energy_performance_preference, 791 &energy_performance_available_preferences, 792 NULL, 793 }; 794 795 static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max, 796 int *current_max) 797 { 798 u64 cap; 799 800 rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); 801 if (global.no_turbo) 802 *current_max = HWP_GUARANTEED_PERF(cap); 803 else 804 *current_max = HWP_HIGHEST_PERF(cap); 805 806 *phy_max = HWP_HIGHEST_PERF(cap); 807 } 808 809 static void intel_pstate_hwp_set(unsigned int cpu) 810 { 811 struct cpudata *cpu_data = all_cpu_data[cpu]; 812 int max, min; 813 u64 value; 814 s16 epp; 815 816 max = cpu_data->max_perf_ratio; 817 min = cpu_data->min_perf_ratio; 818 819 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) 820 min = max; 821 822 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); 823 824 value &= ~HWP_MIN_PERF(~0L); 825 value |= HWP_MIN_PERF(min); 826 827 value &= ~HWP_MAX_PERF(~0L); 828 value |= HWP_MAX_PERF(max); 829 830 if (cpu_data->epp_policy == cpu_data->policy) 831 goto skip_epp; 832 833 cpu_data->epp_policy = cpu_data->policy; 834 835 if (cpu_data->epp_saved >= 0) { 836 epp = cpu_data->epp_saved; 837 cpu_data->epp_saved = -EINVAL; 838 goto update_epp; 839 } 840 841 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) { 842 epp = intel_pstate_get_epp(cpu_data, value); 843 cpu_data->epp_powersave = epp; 844 /* If EPP read was failed, then don't try to write */ 845 if (epp < 0) 846 goto skip_epp; 847 848 epp = 0; 849 } else { 850 /* skip setting EPP, when saved value is invalid */ 851 if (cpu_data->epp_powersave < 0) 852 goto skip_epp; 853 854 /* 855 * No need to restore EPP when it is not zero. This 856 * means: 857 * - Policy is not changed 858 * - user has manually changed 859 * - Error reading EPB 860 */ 861 epp = intel_pstate_get_epp(cpu_data, value); 862 if (epp) 863 goto skip_epp; 864 865 epp = cpu_data->epp_powersave; 866 } 867 update_epp: 868 if (static_cpu_has(X86_FEATURE_HWP_EPP)) { 869 value &= ~GENMASK_ULL(31, 24); 870 value |= (u64)epp << 24; 871 } else { 872 intel_pstate_set_epb(cpu, epp); 873 } 874 skip_epp: 875 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); 876 } 877 878 static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy) 879 { 880 struct cpudata *cpu_data = all_cpu_data[policy->cpu]; 881 882 if (!hwp_active) 883 return 0; 884 885 cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0); 886 887 return 0; 888 } 889 890 static int intel_pstate_resume(struct cpufreq_policy *policy) 891 { 892 if (!hwp_active) 893 return 0; 894 895 mutex_lock(&intel_pstate_limits_lock); 896 897 all_cpu_data[policy->cpu]->epp_policy = 0; 898 intel_pstate_hwp_set(policy->cpu); 899 900 mutex_unlock(&intel_pstate_limits_lock); 901 902 return 0; 903 } 904 905 static void intel_pstate_update_policies(void) 906 { 907 int cpu; 908 909 for_each_possible_cpu(cpu) 910 cpufreq_update_policy(cpu); 911 } 912 913 /************************** debugfs begin ************************/ 914 static int pid_param_set(void *data, u64 val) 915 { 916 unsigned int cpu; 917 918 *(u32 *)data = val; 919 pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC; 920 for_each_possible_cpu(cpu) 921 if (all_cpu_data[cpu]) 922 intel_pstate_pid_reset(all_cpu_data[cpu]); 923 924 return 0; 925 } 926 927 static int pid_param_get(void *data, u64 *val) 928 { 929 *val = *(u32 *)data; 930 return 0; 931 } 932 DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n"); 933 934 static struct dentry *debugfs_parent; 935 936 struct pid_param { 937 char *name; 938 void *value; 939 struct dentry *dentry; 940 }; 941 942 static struct pid_param pid_files[] = { 943 {"sample_rate_ms", &pid_params.sample_rate_ms, }, 944 {"d_gain_pct", &pid_params.d_gain_pct, }, 945 {"i_gain_pct", &pid_params.i_gain_pct, }, 946 {"deadband", &pid_params.deadband, }, 947 {"setpoint", &pid_params.setpoint, }, 948 {"p_gain_pct", &pid_params.p_gain_pct, }, 949 {NULL, NULL, } 950 }; 951 952 static void intel_pstate_debug_expose_params(void) 953 { 954 int i; 955 956 debugfs_parent = debugfs_create_dir("pstate_snb", NULL); 957 if (IS_ERR_OR_NULL(debugfs_parent)) 958 return; 959 960 for (i = 0; pid_files[i].name; i++) { 961 struct dentry *dentry; 962 963 dentry = debugfs_create_file(pid_files[i].name, 0660, 964 debugfs_parent, pid_files[i].value, 965 &fops_pid_param); 966 if (!IS_ERR(dentry)) 967 pid_files[i].dentry = dentry; 968 } 969 } 970 971 static void intel_pstate_debug_hide_params(void) 972 { 973 int i; 974 975 if (IS_ERR_OR_NULL(debugfs_parent)) 976 return; 977 978 for (i = 0; pid_files[i].name; i++) { 979 debugfs_remove(pid_files[i].dentry); 980 pid_files[i].dentry = NULL; 981 } 982 983 debugfs_remove(debugfs_parent); 984 debugfs_parent = NULL; 985 } 986 987 /************************** debugfs end ************************/ 988 989 /************************** sysfs begin ************************/ 990 #define show_one(file_name, object) \ 991 static ssize_t show_##file_name \ 992 (struct kobject *kobj, struct attribute *attr, char *buf) \ 993 { \ 994 return sprintf(buf, "%u\n", global.object); \ 995 } 996 997 static ssize_t intel_pstate_show_status(char *buf); 998 static int intel_pstate_update_status(const char *buf, size_t size); 999 1000 static ssize_t show_status(struct kobject *kobj, 1001 struct attribute *attr, char *buf) 1002 { 1003 ssize_t ret; 1004 1005 mutex_lock(&intel_pstate_driver_lock); 1006 ret = intel_pstate_show_status(buf); 1007 mutex_unlock(&intel_pstate_driver_lock); 1008 1009 return ret; 1010 } 1011 1012 static ssize_t store_status(struct kobject *a, struct attribute *b, 1013 const char *buf, size_t count) 1014 { 1015 char *p = memchr(buf, '\n', count); 1016 int ret; 1017 1018 mutex_lock(&intel_pstate_driver_lock); 1019 ret = intel_pstate_update_status(buf, p ? p - buf : count); 1020 mutex_unlock(&intel_pstate_driver_lock); 1021 1022 return ret < 0 ? ret : count; 1023 } 1024 1025 static ssize_t show_turbo_pct(struct kobject *kobj, 1026 struct attribute *attr, char *buf) 1027 { 1028 struct cpudata *cpu; 1029 int total, no_turbo, turbo_pct; 1030 uint32_t turbo_fp; 1031 1032 mutex_lock(&intel_pstate_driver_lock); 1033 1034 if (!intel_pstate_driver) { 1035 mutex_unlock(&intel_pstate_driver_lock); 1036 return -EAGAIN; 1037 } 1038 1039 cpu = all_cpu_data[0]; 1040 1041 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; 1042 no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1; 1043 turbo_fp = div_fp(no_turbo, total); 1044 turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100))); 1045 1046 mutex_unlock(&intel_pstate_driver_lock); 1047 1048 return sprintf(buf, "%u\n", turbo_pct); 1049 } 1050 1051 static ssize_t show_num_pstates(struct kobject *kobj, 1052 struct attribute *attr, char *buf) 1053 { 1054 struct cpudata *cpu; 1055 int total; 1056 1057 mutex_lock(&intel_pstate_driver_lock); 1058 1059 if (!intel_pstate_driver) { 1060 mutex_unlock(&intel_pstate_driver_lock); 1061 return -EAGAIN; 1062 } 1063 1064 cpu = all_cpu_data[0]; 1065 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; 1066 1067 mutex_unlock(&intel_pstate_driver_lock); 1068 1069 return sprintf(buf, "%u\n", total); 1070 } 1071 1072 static ssize_t show_no_turbo(struct kobject *kobj, 1073 struct attribute *attr, char *buf) 1074 { 1075 ssize_t ret; 1076 1077 mutex_lock(&intel_pstate_driver_lock); 1078 1079 if (!intel_pstate_driver) { 1080 mutex_unlock(&intel_pstate_driver_lock); 1081 return -EAGAIN; 1082 } 1083 1084 update_turbo_state(); 1085 if (global.turbo_disabled) 1086 ret = sprintf(buf, "%u\n", global.turbo_disabled); 1087 else 1088 ret = sprintf(buf, "%u\n", global.no_turbo); 1089 1090 mutex_unlock(&intel_pstate_driver_lock); 1091 1092 return ret; 1093 } 1094 1095 static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, 1096 const char *buf, size_t count) 1097 { 1098 unsigned int input; 1099 int ret; 1100 1101 ret = sscanf(buf, "%u", &input); 1102 if (ret != 1) 1103 return -EINVAL; 1104 1105 mutex_lock(&intel_pstate_driver_lock); 1106 1107 if (!intel_pstate_driver) { 1108 mutex_unlock(&intel_pstate_driver_lock); 1109 return -EAGAIN; 1110 } 1111 1112 mutex_lock(&intel_pstate_limits_lock); 1113 1114 update_turbo_state(); 1115 if (global.turbo_disabled) { 1116 pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); 1117 mutex_unlock(&intel_pstate_limits_lock); 1118 mutex_unlock(&intel_pstate_driver_lock); 1119 return -EPERM; 1120 } 1121 1122 global.no_turbo = clamp_t(int, input, 0, 1); 1123 1124 if (global.no_turbo) { 1125 struct cpudata *cpu = all_cpu_data[0]; 1126 int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate; 1127 1128 /* Squash the global minimum into the permitted range. */ 1129 if (global.min_perf_pct > pct) 1130 global.min_perf_pct = pct; 1131 } 1132 1133 mutex_unlock(&intel_pstate_limits_lock); 1134 1135 intel_pstate_update_policies(); 1136 1137 mutex_unlock(&intel_pstate_driver_lock); 1138 1139 return count; 1140 } 1141 1142 static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, 1143 const char *buf, size_t count) 1144 { 1145 unsigned int input; 1146 int ret; 1147 1148 ret = sscanf(buf, "%u", &input); 1149 if (ret != 1) 1150 return -EINVAL; 1151 1152 mutex_lock(&intel_pstate_driver_lock); 1153 1154 if (!intel_pstate_driver) { 1155 mutex_unlock(&intel_pstate_driver_lock); 1156 return -EAGAIN; 1157 } 1158 1159 mutex_lock(&intel_pstate_limits_lock); 1160 1161 global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100); 1162 1163 mutex_unlock(&intel_pstate_limits_lock); 1164 1165 intel_pstate_update_policies(); 1166 1167 mutex_unlock(&intel_pstate_driver_lock); 1168 1169 return count; 1170 } 1171 1172 static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, 1173 const char *buf, size_t count) 1174 { 1175 unsigned int input; 1176 int ret; 1177 1178 ret = sscanf(buf, "%u", &input); 1179 if (ret != 1) 1180 return -EINVAL; 1181 1182 mutex_lock(&intel_pstate_driver_lock); 1183 1184 if (!intel_pstate_driver) { 1185 mutex_unlock(&intel_pstate_driver_lock); 1186 return -EAGAIN; 1187 } 1188 1189 mutex_lock(&intel_pstate_limits_lock); 1190 1191 global.min_perf_pct = clamp_t(int, input, 1192 min_perf_pct_min(), global.max_perf_pct); 1193 1194 mutex_unlock(&intel_pstate_limits_lock); 1195 1196 intel_pstate_update_policies(); 1197 1198 mutex_unlock(&intel_pstate_driver_lock); 1199 1200 return count; 1201 } 1202 1203 show_one(max_perf_pct, max_perf_pct); 1204 show_one(min_perf_pct, min_perf_pct); 1205 1206 define_one_global_rw(status); 1207 define_one_global_rw(no_turbo); 1208 define_one_global_rw(max_perf_pct); 1209 define_one_global_rw(min_perf_pct); 1210 define_one_global_ro(turbo_pct); 1211 define_one_global_ro(num_pstates); 1212 1213 static struct attribute *intel_pstate_attributes[] = { 1214 &status.attr, 1215 &no_turbo.attr, 1216 &turbo_pct.attr, 1217 &num_pstates.attr, 1218 NULL 1219 }; 1220 1221 static struct attribute_group intel_pstate_attr_group = { 1222 .attrs = intel_pstate_attributes, 1223 }; 1224 1225 static void __init intel_pstate_sysfs_expose_params(void) 1226 { 1227 struct kobject *intel_pstate_kobject; 1228 int rc; 1229 1230 intel_pstate_kobject = kobject_create_and_add("intel_pstate", 1231 &cpu_subsys.dev_root->kobj); 1232 if (WARN_ON(!intel_pstate_kobject)) 1233 return; 1234 1235 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group); 1236 if (WARN_ON(rc)) 1237 return; 1238 1239 /* 1240 * If per cpu limits are enforced there are no global limits, so 1241 * return without creating max/min_perf_pct attributes 1242 */ 1243 if (per_cpu_limits) 1244 return; 1245 1246 rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr); 1247 WARN_ON(rc); 1248 1249 rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr); 1250 WARN_ON(rc); 1251 1252 } 1253 /************************** sysfs end ************************/ 1254 1255 static void intel_pstate_hwp_enable(struct cpudata *cpudata) 1256 { 1257 /* First disable HWP notification interrupt as we don't process them */ 1258 if (static_cpu_has(X86_FEATURE_HWP_NOTIFY)) 1259 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); 1260 1261 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); 1262 cpudata->epp_policy = 0; 1263 if (cpudata->epp_default == -EINVAL) 1264 cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); 1265 } 1266 1267 #define MSR_IA32_POWER_CTL_BIT_EE 19 1268 1269 /* Disable energy efficiency optimization */ 1270 static void intel_pstate_disable_ee(int cpu) 1271 { 1272 u64 power_ctl; 1273 int ret; 1274 1275 ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl); 1276 if (ret) 1277 return; 1278 1279 if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) { 1280 pr_info("Disabling energy efficiency optimization\n"); 1281 power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE); 1282 wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl); 1283 } 1284 } 1285 1286 static int atom_get_min_pstate(void) 1287 { 1288 u64 value; 1289 1290 rdmsrl(MSR_ATOM_CORE_RATIOS, value); 1291 return (value >> 8) & 0x7F; 1292 } 1293 1294 static int atom_get_max_pstate(void) 1295 { 1296 u64 value; 1297 1298 rdmsrl(MSR_ATOM_CORE_RATIOS, value); 1299 return (value >> 16) & 0x7F; 1300 } 1301 1302 static int atom_get_turbo_pstate(void) 1303 { 1304 u64 value; 1305 1306 rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value); 1307 return value & 0x7F; 1308 } 1309 1310 static u64 atom_get_val(struct cpudata *cpudata, int pstate) 1311 { 1312 u64 val; 1313 int32_t vid_fp; 1314 u32 vid; 1315 1316 val = (u64)pstate << 8; 1317 if (global.no_turbo && !global.turbo_disabled) 1318 val |= (u64)1 << 32; 1319 1320 vid_fp = cpudata->vid.min + mul_fp( 1321 int_tofp(pstate - cpudata->pstate.min_pstate), 1322 cpudata->vid.ratio); 1323 1324 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); 1325 vid = ceiling_fp(vid_fp); 1326 1327 if (pstate > cpudata->pstate.max_pstate) 1328 vid = cpudata->vid.turbo; 1329 1330 return val | vid; 1331 } 1332 1333 static int silvermont_get_scaling(void) 1334 { 1335 u64 value; 1336 int i; 1337 /* Defined in Table 35-6 from SDM (Sept 2015) */ 1338 static int silvermont_freq_table[] = { 1339 83300, 100000, 133300, 116700, 80000}; 1340 1341 rdmsrl(MSR_FSB_FREQ, value); 1342 i = value & 0x7; 1343 WARN_ON(i > 4); 1344 1345 return silvermont_freq_table[i]; 1346 } 1347 1348 static int airmont_get_scaling(void) 1349 { 1350 u64 value; 1351 int i; 1352 /* Defined in Table 35-10 from SDM (Sept 2015) */ 1353 static int airmont_freq_table[] = { 1354 83300, 100000, 133300, 116700, 80000, 1355 93300, 90000, 88900, 87500}; 1356 1357 rdmsrl(MSR_FSB_FREQ, value); 1358 i = value & 0xF; 1359 WARN_ON(i > 8); 1360 1361 return airmont_freq_table[i]; 1362 } 1363 1364 static void atom_get_vid(struct cpudata *cpudata) 1365 { 1366 u64 value; 1367 1368 rdmsrl(MSR_ATOM_CORE_VIDS, value); 1369 cpudata->vid.min = int_tofp((value >> 8) & 0x7f); 1370 cpudata->vid.max = int_tofp((value >> 16) & 0x7f); 1371 cpudata->vid.ratio = div_fp( 1372 cpudata->vid.max - cpudata->vid.min, 1373 int_tofp(cpudata->pstate.max_pstate - 1374 cpudata->pstate.min_pstate)); 1375 1376 rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value); 1377 cpudata->vid.turbo = value & 0x7f; 1378 } 1379 1380 static int core_get_min_pstate(void) 1381 { 1382 u64 value; 1383 1384 rdmsrl(MSR_PLATFORM_INFO, value); 1385 return (value >> 40) & 0xFF; 1386 } 1387 1388 static int core_get_max_pstate_physical(void) 1389 { 1390 u64 value; 1391 1392 rdmsrl(MSR_PLATFORM_INFO, value); 1393 return (value >> 8) & 0xFF; 1394 } 1395 1396 static int core_get_tdp_ratio(u64 plat_info) 1397 { 1398 /* Check how many TDP levels present */ 1399 if (plat_info & 0x600000000) { 1400 u64 tdp_ctrl; 1401 u64 tdp_ratio; 1402 int tdp_msr; 1403 int err; 1404 1405 /* Get the TDP level (0, 1, 2) to get ratios */ 1406 err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); 1407 if (err) 1408 return err; 1409 1410 /* TDP MSR are continuous starting at 0x648 */ 1411 tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03); 1412 err = rdmsrl_safe(tdp_msr, &tdp_ratio); 1413 if (err) 1414 return err; 1415 1416 /* For level 1 and 2, bits[23:16] contain the ratio */ 1417 if (tdp_ctrl & 0x03) 1418 tdp_ratio >>= 16; 1419 1420 tdp_ratio &= 0xff; /* ratios are only 8 bits long */ 1421 pr_debug("tdp_ratio %x\n", (int)tdp_ratio); 1422 1423 return (int)tdp_ratio; 1424 } 1425 1426 return -ENXIO; 1427 } 1428 1429 static int core_get_max_pstate(void) 1430 { 1431 u64 tar; 1432 u64 plat_info; 1433 int max_pstate; 1434 int tdp_ratio; 1435 int err; 1436 1437 rdmsrl(MSR_PLATFORM_INFO, plat_info); 1438 max_pstate = (plat_info >> 8) & 0xFF; 1439 1440 tdp_ratio = core_get_tdp_ratio(plat_info); 1441 if (tdp_ratio <= 0) 1442 return max_pstate; 1443 1444 if (hwp_active) { 1445 /* Turbo activation ratio is not used on HWP platforms */ 1446 return tdp_ratio; 1447 } 1448 1449 err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar); 1450 if (!err) { 1451 int tar_levels; 1452 1453 /* Do some sanity checking for safety */ 1454 tar_levels = tar & 0xff; 1455 if (tdp_ratio - 1 == tar_levels) { 1456 max_pstate = tar_levels; 1457 pr_debug("max_pstate=TAC %x\n", max_pstate); 1458 } 1459 } 1460 1461 return max_pstate; 1462 } 1463 1464 static int core_get_turbo_pstate(void) 1465 { 1466 u64 value; 1467 int nont, ret; 1468 1469 rdmsrl(MSR_TURBO_RATIO_LIMIT, value); 1470 nont = core_get_max_pstate(); 1471 ret = (value) & 255; 1472 if (ret <= nont) 1473 ret = nont; 1474 return ret; 1475 } 1476 1477 static inline int core_get_scaling(void) 1478 { 1479 return 100000; 1480 } 1481 1482 static u64 core_get_val(struct cpudata *cpudata, int pstate) 1483 { 1484 u64 val; 1485 1486 val = (u64)pstate << 8; 1487 if (global.no_turbo && !global.turbo_disabled) 1488 val |= (u64)1 << 32; 1489 1490 return val; 1491 } 1492 1493 static int knl_get_turbo_pstate(void) 1494 { 1495 u64 value; 1496 int nont, ret; 1497 1498 rdmsrl(MSR_TURBO_RATIO_LIMIT, value); 1499 nont = core_get_max_pstate(); 1500 ret = (((value) >> 8) & 0xFF); 1501 if (ret <= nont) 1502 ret = nont; 1503 return ret; 1504 } 1505 1506 static int intel_pstate_get_base_pstate(struct cpudata *cpu) 1507 { 1508 return global.no_turbo || global.turbo_disabled ? 1509 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; 1510 } 1511 1512 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) 1513 { 1514 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); 1515 cpu->pstate.current_pstate = pstate; 1516 /* 1517 * Generally, there is no guarantee that this code will always run on 1518 * the CPU being updated, so force the register update to run on the 1519 * right CPU. 1520 */ 1521 wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, 1522 pstate_funcs.get_val(cpu, pstate)); 1523 } 1524 1525 static void intel_pstate_set_min_pstate(struct cpudata *cpu) 1526 { 1527 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); 1528 } 1529 1530 static void intel_pstate_max_within_limits(struct cpudata *cpu) 1531 { 1532 int pstate; 1533 1534 update_turbo_state(); 1535 pstate = intel_pstate_get_base_pstate(cpu); 1536 pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); 1537 intel_pstate_set_pstate(cpu, pstate); 1538 } 1539 1540 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) 1541 { 1542 cpu->pstate.min_pstate = pstate_funcs.get_min(); 1543 cpu->pstate.max_pstate = pstate_funcs.get_max(); 1544 cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical(); 1545 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); 1546 cpu->pstate.scaling = pstate_funcs.get_scaling(); 1547 cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling; 1548 cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; 1549 1550 if (pstate_funcs.get_vid) 1551 pstate_funcs.get_vid(cpu); 1552 1553 intel_pstate_set_min_pstate(cpu); 1554 } 1555 1556 static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu) 1557 { 1558 struct sample *sample = &cpu->sample; 1559 1560 sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf); 1561 } 1562 1563 static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) 1564 { 1565 u64 aperf, mperf; 1566 unsigned long flags; 1567 u64 tsc; 1568 1569 local_irq_save(flags); 1570 rdmsrl(MSR_IA32_APERF, aperf); 1571 rdmsrl(MSR_IA32_MPERF, mperf); 1572 tsc = rdtsc(); 1573 if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) { 1574 local_irq_restore(flags); 1575 return false; 1576 } 1577 local_irq_restore(flags); 1578 1579 cpu->last_sample_time = cpu->sample.time; 1580 cpu->sample.time = time; 1581 cpu->sample.aperf = aperf; 1582 cpu->sample.mperf = mperf; 1583 cpu->sample.tsc = tsc; 1584 cpu->sample.aperf -= cpu->prev_aperf; 1585 cpu->sample.mperf -= cpu->prev_mperf; 1586 cpu->sample.tsc -= cpu->prev_tsc; 1587 1588 cpu->prev_aperf = aperf; 1589 cpu->prev_mperf = mperf; 1590 cpu->prev_tsc = tsc; 1591 /* 1592 * First time this function is invoked in a given cycle, all of the 1593 * previous sample data fields are equal to zero or stale and they must 1594 * be populated with meaningful numbers for things to work, so assume 1595 * that sample.time will always be reset before setting the utilization 1596 * update hook and make the caller skip the sample then. 1597 */ 1598 if (cpu->last_sample_time) { 1599 intel_pstate_calc_avg_perf(cpu); 1600 return true; 1601 } 1602 return false; 1603 } 1604 1605 static inline int32_t get_avg_frequency(struct cpudata *cpu) 1606 { 1607 return mul_ext_fp(cpu->sample.core_avg_perf, 1608 cpu->pstate.max_pstate_physical * cpu->pstate.scaling); 1609 } 1610 1611 static inline int32_t get_avg_pstate(struct cpudata *cpu) 1612 { 1613 return mul_ext_fp(cpu->pstate.max_pstate_physical, 1614 cpu->sample.core_avg_perf); 1615 } 1616 1617 static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) 1618 { 1619 struct sample *sample = &cpu->sample; 1620 int32_t busy_frac, boost; 1621 int target, avg_pstate; 1622 1623 busy_frac = div_fp(sample->mperf, sample->tsc); 1624 1625 boost = cpu->iowait_boost; 1626 cpu->iowait_boost >>= 1; 1627 1628 if (busy_frac < boost) 1629 busy_frac = boost; 1630 1631 sample->busy_scaled = busy_frac * 100; 1632 1633 target = global.no_turbo || global.turbo_disabled ? 1634 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; 1635 target += target >> 2; 1636 target = mul_fp(target, busy_frac); 1637 if (target < cpu->pstate.min_pstate) 1638 target = cpu->pstate.min_pstate; 1639 1640 /* 1641 * If the average P-state during the previous cycle was higher than the 1642 * current target, add 50% of the difference to the target to reduce 1643 * possible performance oscillations and offset possible performance 1644 * loss related to moving the workload from one CPU to another within 1645 * a package/module. 1646 */ 1647 avg_pstate = get_avg_pstate(cpu); 1648 if (avg_pstate > target) 1649 target += (avg_pstate - target) >> 1; 1650 1651 return target; 1652 } 1653 1654 static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) 1655 { 1656 int32_t perf_scaled, max_pstate, current_pstate, sample_ratio; 1657 u64 duration_ns; 1658 1659 /* 1660 * perf_scaled is the ratio of the average P-state during the last 1661 * sampling period to the P-state requested last time (in percent). 1662 * 1663 * That measures the system's response to the previous P-state 1664 * selection. 1665 */ 1666 max_pstate = cpu->pstate.max_pstate_physical; 1667 current_pstate = cpu->pstate.current_pstate; 1668 perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf, 1669 div_fp(100 * max_pstate, current_pstate)); 1670 1671 /* 1672 * Since our utilization update callback will not run unless we are 1673 * in C0, check if the actual elapsed time is significantly greater (3x) 1674 * than our sample interval. If it is, then we were idle for a long 1675 * enough period of time to adjust our performance metric. 1676 */ 1677 duration_ns = cpu->sample.time - cpu->last_sample_time; 1678 if ((s64)duration_ns > pid_params.sample_rate_ns * 3) { 1679 sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns); 1680 perf_scaled = mul_fp(perf_scaled, sample_ratio); 1681 } else { 1682 sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc); 1683 if (sample_ratio < int_tofp(1)) 1684 perf_scaled = 0; 1685 } 1686 1687 cpu->sample.busy_scaled = perf_scaled; 1688 return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled); 1689 } 1690 1691 static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) 1692 { 1693 int max_pstate = intel_pstate_get_base_pstate(cpu); 1694 int min_pstate; 1695 1696 min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio); 1697 max_pstate = max(min_pstate, cpu->max_perf_ratio); 1698 return clamp_t(int, pstate, min_pstate, max_pstate); 1699 } 1700 1701 static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) 1702 { 1703 if (pstate == cpu->pstate.current_pstate) 1704 return; 1705 1706 cpu->pstate.current_pstate = pstate; 1707 wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); 1708 } 1709 1710 static void intel_pstate_adjust_pstate(struct cpudata *cpu, int target_pstate) 1711 { 1712 int from = cpu->pstate.current_pstate; 1713 struct sample *sample; 1714 1715 update_turbo_state(); 1716 1717 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); 1718 trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); 1719 intel_pstate_update_pstate(cpu, target_pstate); 1720 1721 sample = &cpu->sample; 1722 trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf), 1723 fp_toint(sample->busy_scaled), 1724 from, 1725 cpu->pstate.current_pstate, 1726 sample->mperf, 1727 sample->aperf, 1728 sample->tsc, 1729 get_avg_frequency(cpu), 1730 fp_toint(cpu->iowait_boost * 100)); 1731 } 1732 1733 static void intel_pstate_update_util_pid(struct update_util_data *data, 1734 u64 time, unsigned int flags) 1735 { 1736 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 1737 u64 delta_ns = time - cpu->sample.time; 1738 1739 if ((s64)delta_ns < pid_params.sample_rate_ns) 1740 return; 1741 1742 if (intel_pstate_sample(cpu, time)) { 1743 int target_pstate; 1744 1745 target_pstate = get_target_pstate_use_performance(cpu); 1746 intel_pstate_adjust_pstate(cpu, target_pstate); 1747 } 1748 } 1749 1750 static void intel_pstate_update_util(struct update_util_data *data, u64 time, 1751 unsigned int flags) 1752 { 1753 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 1754 u64 delta_ns; 1755 1756 if (flags & SCHED_CPUFREQ_IOWAIT) { 1757 cpu->iowait_boost = int_tofp(1); 1758 } else if (cpu->iowait_boost) { 1759 /* Clear iowait_boost if the CPU may have been idle. */ 1760 delta_ns = time - cpu->last_update; 1761 if (delta_ns > TICK_NSEC) 1762 cpu->iowait_boost = 0; 1763 } 1764 cpu->last_update = time; 1765 delta_ns = time - cpu->sample.time; 1766 if ((s64)delta_ns < INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL) 1767 return; 1768 1769 if (intel_pstate_sample(cpu, time)) { 1770 int target_pstate; 1771 1772 target_pstate = get_target_pstate_use_cpu_load(cpu); 1773 intel_pstate_adjust_pstate(cpu, target_pstate); 1774 } 1775 } 1776 1777 static struct pstate_funcs core_funcs = { 1778 .get_max = core_get_max_pstate, 1779 .get_max_physical = core_get_max_pstate_physical, 1780 .get_min = core_get_min_pstate, 1781 .get_turbo = core_get_turbo_pstate, 1782 .get_scaling = core_get_scaling, 1783 .get_val = core_get_val, 1784 .update_util = intel_pstate_update_util_pid, 1785 }; 1786 1787 static const struct pstate_funcs silvermont_funcs = { 1788 .get_max = atom_get_max_pstate, 1789 .get_max_physical = atom_get_max_pstate, 1790 .get_min = atom_get_min_pstate, 1791 .get_turbo = atom_get_turbo_pstate, 1792 .get_val = atom_get_val, 1793 .get_scaling = silvermont_get_scaling, 1794 .get_vid = atom_get_vid, 1795 .update_util = intel_pstate_update_util, 1796 }; 1797 1798 static const struct pstate_funcs airmont_funcs = { 1799 .get_max = atom_get_max_pstate, 1800 .get_max_physical = atom_get_max_pstate, 1801 .get_min = atom_get_min_pstate, 1802 .get_turbo = atom_get_turbo_pstate, 1803 .get_val = atom_get_val, 1804 .get_scaling = airmont_get_scaling, 1805 .get_vid = atom_get_vid, 1806 .update_util = intel_pstate_update_util, 1807 }; 1808 1809 static const struct pstate_funcs knl_funcs = { 1810 .get_max = core_get_max_pstate, 1811 .get_max_physical = core_get_max_pstate_physical, 1812 .get_min = core_get_min_pstate, 1813 .get_turbo = knl_get_turbo_pstate, 1814 .get_scaling = core_get_scaling, 1815 .get_val = core_get_val, 1816 .update_util = intel_pstate_update_util_pid, 1817 }; 1818 1819 static const struct pstate_funcs bxt_funcs = { 1820 .get_max = core_get_max_pstate, 1821 .get_max_physical = core_get_max_pstate_physical, 1822 .get_min = core_get_min_pstate, 1823 .get_turbo = core_get_turbo_pstate, 1824 .get_scaling = core_get_scaling, 1825 .get_val = core_get_val, 1826 .update_util = intel_pstate_update_util, 1827 }; 1828 1829 #define ICPU(model, policy) \ 1830 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\ 1831 (unsigned long)&policy } 1832 1833 static const struct x86_cpu_id intel_pstate_cpu_ids[] = { 1834 ICPU(INTEL_FAM6_SANDYBRIDGE, core_funcs), 1835 ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_funcs), 1836 ICPU(INTEL_FAM6_ATOM_SILVERMONT1, silvermont_funcs), 1837 ICPU(INTEL_FAM6_IVYBRIDGE, core_funcs), 1838 ICPU(INTEL_FAM6_HASWELL_CORE, core_funcs), 1839 ICPU(INTEL_FAM6_BROADWELL_CORE, core_funcs), 1840 ICPU(INTEL_FAM6_IVYBRIDGE_X, core_funcs), 1841 ICPU(INTEL_FAM6_HASWELL_X, core_funcs), 1842 ICPU(INTEL_FAM6_HASWELL_ULT, core_funcs), 1843 ICPU(INTEL_FAM6_HASWELL_GT3E, core_funcs), 1844 ICPU(INTEL_FAM6_BROADWELL_GT3E, core_funcs), 1845 ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_funcs), 1846 ICPU(INTEL_FAM6_SKYLAKE_MOBILE, core_funcs), 1847 ICPU(INTEL_FAM6_BROADWELL_X, core_funcs), 1848 ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_funcs), 1849 ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs), 1850 ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs), 1851 ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs), 1852 ICPU(INTEL_FAM6_ATOM_GOLDMONT, bxt_funcs), 1853 ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, bxt_funcs), 1854 {} 1855 }; 1856 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); 1857 1858 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { 1859 ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs), 1860 ICPU(INTEL_FAM6_BROADWELL_X, core_funcs), 1861 ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs), 1862 {} 1863 }; 1864 1865 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { 1866 ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_funcs), 1867 {} 1868 }; 1869 1870 static bool pid_in_use(void); 1871 1872 static int intel_pstate_init_cpu(unsigned int cpunum) 1873 { 1874 struct cpudata *cpu; 1875 1876 cpu = all_cpu_data[cpunum]; 1877 1878 if (!cpu) { 1879 cpu = kzalloc(sizeof(*cpu), GFP_KERNEL); 1880 if (!cpu) 1881 return -ENOMEM; 1882 1883 all_cpu_data[cpunum] = cpu; 1884 1885 cpu->epp_default = -EINVAL; 1886 cpu->epp_powersave = -EINVAL; 1887 cpu->epp_saved = -EINVAL; 1888 } 1889 1890 cpu = all_cpu_data[cpunum]; 1891 1892 cpu->cpu = cpunum; 1893 1894 if (hwp_active) { 1895 const struct x86_cpu_id *id; 1896 1897 id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids); 1898 if (id) 1899 intel_pstate_disable_ee(cpunum); 1900 1901 intel_pstate_hwp_enable(cpu); 1902 } else if (pid_in_use()) { 1903 intel_pstate_pid_reset(cpu); 1904 } 1905 1906 intel_pstate_get_cpu_pstates(cpu); 1907 1908 pr_debug("controlling: cpu %d\n", cpunum); 1909 1910 return 0; 1911 } 1912 1913 static unsigned int intel_pstate_get(unsigned int cpu_num) 1914 { 1915 struct cpudata *cpu = all_cpu_data[cpu_num]; 1916 1917 return cpu ? get_avg_frequency(cpu) : 0; 1918 } 1919 1920 static void intel_pstate_set_update_util_hook(unsigned int cpu_num) 1921 { 1922 struct cpudata *cpu = all_cpu_data[cpu_num]; 1923 1924 if (hwp_active) 1925 return; 1926 1927 if (cpu->update_util_set) 1928 return; 1929 1930 /* Prevent intel_pstate_update_util() from using stale data. */ 1931 cpu->sample.time = 0; 1932 cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, 1933 pstate_funcs.update_util); 1934 cpu->update_util_set = true; 1935 } 1936 1937 static void intel_pstate_clear_update_util_hook(unsigned int cpu) 1938 { 1939 struct cpudata *cpu_data = all_cpu_data[cpu]; 1940 1941 if (!cpu_data->update_util_set) 1942 return; 1943 1944 cpufreq_remove_update_util_hook(cpu); 1945 cpu_data->update_util_set = false; 1946 synchronize_sched(); 1947 } 1948 1949 static int intel_pstate_get_max_freq(struct cpudata *cpu) 1950 { 1951 return global.turbo_disabled || global.no_turbo ? 1952 cpu->pstate.max_freq : cpu->pstate.turbo_freq; 1953 } 1954 1955 static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, 1956 struct cpudata *cpu) 1957 { 1958 int max_freq = intel_pstate_get_max_freq(cpu); 1959 int32_t max_policy_perf, min_policy_perf; 1960 int max_state, turbo_max; 1961 1962 /* 1963 * HWP needs some special consideration, because on BDX the 1964 * HWP_REQUEST uses abstract value to represent performance 1965 * rather than pure ratios. 1966 */ 1967 if (hwp_active) { 1968 intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state); 1969 } else { 1970 max_state = intel_pstate_get_base_pstate(cpu); 1971 turbo_max = cpu->pstate.turbo_pstate; 1972 } 1973 1974 max_policy_perf = max_state * policy->max / max_freq; 1975 if (policy->max == policy->min) { 1976 min_policy_perf = max_policy_perf; 1977 } else { 1978 min_policy_perf = max_state * policy->min / max_freq; 1979 min_policy_perf = clamp_t(int32_t, min_policy_perf, 1980 0, max_policy_perf); 1981 } 1982 1983 pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n", 1984 policy->cpu, max_state, 1985 min_policy_perf, max_policy_perf); 1986 1987 /* Normalize user input to [min_perf, max_perf] */ 1988 if (per_cpu_limits) { 1989 cpu->min_perf_ratio = min_policy_perf; 1990 cpu->max_perf_ratio = max_policy_perf; 1991 } else { 1992 int32_t global_min, global_max; 1993 1994 /* Global limits are in percent of the maximum turbo P-state. */ 1995 global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100); 1996 global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100); 1997 global_min = clamp_t(int32_t, global_min, 0, global_max); 1998 1999 pr_debug("cpu:%d global_min:%d global_max:%d\n", policy->cpu, 2000 global_min, global_max); 2001 2002 cpu->min_perf_ratio = max(min_policy_perf, global_min); 2003 cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf); 2004 cpu->max_perf_ratio = min(max_policy_perf, global_max); 2005 cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio); 2006 2007 /* Make sure min_perf <= max_perf */ 2008 cpu->min_perf_ratio = min(cpu->min_perf_ratio, 2009 cpu->max_perf_ratio); 2010 2011 } 2012 pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", policy->cpu, 2013 cpu->max_perf_ratio, 2014 cpu->min_perf_ratio); 2015 } 2016 2017 static int intel_pstate_set_policy(struct cpufreq_policy *policy) 2018 { 2019 struct cpudata *cpu; 2020 2021 if (!policy->cpuinfo.max_freq) 2022 return -ENODEV; 2023 2024 pr_debug("set_policy cpuinfo.max %u policy->max %u\n", 2025 policy->cpuinfo.max_freq, policy->max); 2026 2027 cpu = all_cpu_data[policy->cpu]; 2028 cpu->policy = policy->policy; 2029 2030 mutex_lock(&intel_pstate_limits_lock); 2031 2032 intel_pstate_update_perf_limits(policy, cpu); 2033 2034 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { 2035 /* 2036 * NOHZ_FULL CPUs need this as the governor callback may not 2037 * be invoked on them. 2038 */ 2039 intel_pstate_clear_update_util_hook(policy->cpu); 2040 intel_pstate_max_within_limits(cpu); 2041 } else { 2042 intel_pstate_set_update_util_hook(policy->cpu); 2043 } 2044 2045 if (hwp_active) 2046 intel_pstate_hwp_set(policy->cpu); 2047 2048 mutex_unlock(&intel_pstate_limits_lock); 2049 2050 return 0; 2051 } 2052 2053 static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy, 2054 struct cpudata *cpu) 2055 { 2056 if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && 2057 policy->max < policy->cpuinfo.max_freq && 2058 policy->max > cpu->pstate.max_freq) { 2059 pr_debug("policy->max > max non turbo frequency\n"); 2060 policy->max = policy->cpuinfo.max_freq; 2061 } 2062 } 2063 2064 static int intel_pstate_verify_policy(struct cpufreq_policy *policy) 2065 { 2066 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2067 2068 update_turbo_state(); 2069 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, 2070 intel_pstate_get_max_freq(cpu)); 2071 2072 if (policy->policy != CPUFREQ_POLICY_POWERSAVE && 2073 policy->policy != CPUFREQ_POLICY_PERFORMANCE) 2074 return -EINVAL; 2075 2076 intel_pstate_adjust_policy_max(policy, cpu); 2077 2078 return 0; 2079 } 2080 2081 static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy) 2082 { 2083 intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]); 2084 } 2085 2086 static void intel_pstate_stop_cpu(struct cpufreq_policy *policy) 2087 { 2088 pr_debug("CPU %d exiting\n", policy->cpu); 2089 2090 intel_pstate_clear_update_util_hook(policy->cpu); 2091 if (hwp_active) 2092 intel_pstate_hwp_save_state(policy); 2093 else 2094 intel_cpufreq_stop_cpu(policy); 2095 } 2096 2097 static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) 2098 { 2099 intel_pstate_exit_perf_limits(policy); 2100 2101 policy->fast_switch_possible = false; 2102 2103 return 0; 2104 } 2105 2106 static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) 2107 { 2108 struct cpudata *cpu; 2109 int rc; 2110 2111 rc = intel_pstate_init_cpu(policy->cpu); 2112 if (rc) 2113 return rc; 2114 2115 cpu = all_cpu_data[policy->cpu]; 2116 2117 cpu->max_perf_ratio = 0xFF; 2118 cpu->min_perf_ratio = 0; 2119 2120 policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling; 2121 policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling; 2122 2123 /* cpuinfo and default policy values */ 2124 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; 2125 update_turbo_state(); 2126 policy->cpuinfo.max_freq = global.turbo_disabled ? 2127 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; 2128 policy->cpuinfo.max_freq *= cpu->pstate.scaling; 2129 2130 intel_pstate_init_acpi_perf_limits(policy); 2131 cpumask_set_cpu(policy->cpu, policy->cpus); 2132 2133 policy->fast_switch_possible = true; 2134 2135 return 0; 2136 } 2137 2138 static int intel_pstate_cpu_init(struct cpufreq_policy *policy) 2139 { 2140 int ret = __intel_pstate_cpu_init(policy); 2141 2142 if (ret) 2143 return ret; 2144 2145 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; 2146 if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE)) 2147 policy->policy = CPUFREQ_POLICY_PERFORMANCE; 2148 else 2149 policy->policy = CPUFREQ_POLICY_POWERSAVE; 2150 2151 return 0; 2152 } 2153 2154 static struct cpufreq_driver intel_pstate = { 2155 .flags = CPUFREQ_CONST_LOOPS, 2156 .verify = intel_pstate_verify_policy, 2157 .setpolicy = intel_pstate_set_policy, 2158 .suspend = intel_pstate_hwp_save_state, 2159 .resume = intel_pstate_resume, 2160 .get = intel_pstate_get, 2161 .init = intel_pstate_cpu_init, 2162 .exit = intel_pstate_cpu_exit, 2163 .stop_cpu = intel_pstate_stop_cpu, 2164 .name = "intel_pstate", 2165 }; 2166 2167 static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy) 2168 { 2169 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2170 2171 update_turbo_state(); 2172 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, 2173 intel_pstate_get_max_freq(cpu)); 2174 2175 intel_pstate_adjust_policy_max(policy, cpu); 2176 2177 intel_pstate_update_perf_limits(policy, cpu); 2178 2179 return 0; 2180 } 2181 2182 static int intel_cpufreq_target(struct cpufreq_policy *policy, 2183 unsigned int target_freq, 2184 unsigned int relation) 2185 { 2186 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2187 struct cpufreq_freqs freqs; 2188 int target_pstate; 2189 2190 update_turbo_state(); 2191 2192 freqs.old = policy->cur; 2193 freqs.new = target_freq; 2194 2195 cpufreq_freq_transition_begin(policy, &freqs); 2196 switch (relation) { 2197 case CPUFREQ_RELATION_L: 2198 target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling); 2199 break; 2200 case CPUFREQ_RELATION_H: 2201 target_pstate = freqs.new / cpu->pstate.scaling; 2202 break; 2203 default: 2204 target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling); 2205 break; 2206 } 2207 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); 2208 if (target_pstate != cpu->pstate.current_pstate) { 2209 cpu->pstate.current_pstate = target_pstate; 2210 wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, 2211 pstate_funcs.get_val(cpu, target_pstate)); 2212 } 2213 freqs.new = target_pstate * cpu->pstate.scaling; 2214 cpufreq_freq_transition_end(policy, &freqs, false); 2215 2216 return 0; 2217 } 2218 2219 static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, 2220 unsigned int target_freq) 2221 { 2222 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2223 int target_pstate; 2224 2225 update_turbo_state(); 2226 2227 target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); 2228 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); 2229 intel_pstate_update_pstate(cpu, target_pstate); 2230 return target_pstate * cpu->pstate.scaling; 2231 } 2232 2233 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) 2234 { 2235 int ret = __intel_pstate_cpu_init(policy); 2236 2237 if (ret) 2238 return ret; 2239 2240 policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY; 2241 policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY; 2242 /* This reflects the intel_pstate_get_cpu_pstates() setting. */ 2243 policy->cur = policy->cpuinfo.min_freq; 2244 2245 return 0; 2246 } 2247 2248 static struct cpufreq_driver intel_cpufreq = { 2249 .flags = CPUFREQ_CONST_LOOPS, 2250 .verify = intel_cpufreq_verify_policy, 2251 .target = intel_cpufreq_target, 2252 .fast_switch = intel_cpufreq_fast_switch, 2253 .init = intel_cpufreq_cpu_init, 2254 .exit = intel_pstate_cpu_exit, 2255 .stop_cpu = intel_cpufreq_stop_cpu, 2256 .name = "intel_cpufreq", 2257 }; 2258 2259 static struct cpufreq_driver *default_driver = &intel_pstate; 2260 2261 static bool pid_in_use(void) 2262 { 2263 return intel_pstate_driver == &intel_pstate && 2264 pstate_funcs.update_util == intel_pstate_update_util_pid; 2265 } 2266 2267 static void intel_pstate_driver_cleanup(void) 2268 { 2269 unsigned int cpu; 2270 2271 get_online_cpus(); 2272 for_each_online_cpu(cpu) { 2273 if (all_cpu_data[cpu]) { 2274 if (intel_pstate_driver == &intel_pstate) 2275 intel_pstate_clear_update_util_hook(cpu); 2276 2277 kfree(all_cpu_data[cpu]); 2278 all_cpu_data[cpu] = NULL; 2279 } 2280 } 2281 put_online_cpus(); 2282 intel_pstate_driver = NULL; 2283 } 2284 2285 static int intel_pstate_register_driver(struct cpufreq_driver *driver) 2286 { 2287 int ret; 2288 2289 memset(&global, 0, sizeof(global)); 2290 global.max_perf_pct = 100; 2291 2292 intel_pstate_driver = driver; 2293 ret = cpufreq_register_driver(intel_pstate_driver); 2294 if (ret) { 2295 intel_pstate_driver_cleanup(); 2296 return ret; 2297 } 2298 2299 global.min_perf_pct = min_perf_pct_min(); 2300 2301 if (pid_in_use()) 2302 intel_pstate_debug_expose_params(); 2303 2304 return 0; 2305 } 2306 2307 static int intel_pstate_unregister_driver(void) 2308 { 2309 if (hwp_active) 2310 return -EBUSY; 2311 2312 if (pid_in_use()) 2313 intel_pstate_debug_hide_params(); 2314 2315 cpufreq_unregister_driver(intel_pstate_driver); 2316 intel_pstate_driver_cleanup(); 2317 2318 return 0; 2319 } 2320 2321 static ssize_t intel_pstate_show_status(char *buf) 2322 { 2323 if (!intel_pstate_driver) 2324 return sprintf(buf, "off\n"); 2325 2326 return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ? 2327 "active" : "passive"); 2328 } 2329 2330 static int intel_pstate_update_status(const char *buf, size_t size) 2331 { 2332 int ret; 2333 2334 if (size == 3 && !strncmp(buf, "off", size)) 2335 return intel_pstate_driver ? 2336 intel_pstate_unregister_driver() : -EINVAL; 2337 2338 if (size == 6 && !strncmp(buf, "active", size)) { 2339 if (intel_pstate_driver) { 2340 if (intel_pstate_driver == &intel_pstate) 2341 return 0; 2342 2343 ret = intel_pstate_unregister_driver(); 2344 if (ret) 2345 return ret; 2346 } 2347 2348 return intel_pstate_register_driver(&intel_pstate); 2349 } 2350 2351 if (size == 7 && !strncmp(buf, "passive", size)) { 2352 if (intel_pstate_driver) { 2353 if (intel_pstate_driver == &intel_cpufreq) 2354 return 0; 2355 2356 ret = intel_pstate_unregister_driver(); 2357 if (ret) 2358 return ret; 2359 } 2360 2361 return intel_pstate_register_driver(&intel_cpufreq); 2362 } 2363 2364 return -EINVAL; 2365 } 2366 2367 static int no_load __initdata; 2368 static int no_hwp __initdata; 2369 static int hwp_only __initdata; 2370 static unsigned int force_load __initdata; 2371 2372 static int __init intel_pstate_msrs_not_valid(void) 2373 { 2374 if (!pstate_funcs.get_max() || 2375 !pstate_funcs.get_min() || 2376 !pstate_funcs.get_turbo()) 2377 return -ENODEV; 2378 2379 return 0; 2380 } 2381 2382 #ifdef CONFIG_ACPI 2383 static void intel_pstate_use_acpi_profile(void) 2384 { 2385 switch (acpi_gbl_FADT.preferred_profile) { 2386 case PM_MOBILE: 2387 case PM_TABLET: 2388 case PM_APPLIANCE_PC: 2389 case PM_DESKTOP: 2390 case PM_WORKSTATION: 2391 pstate_funcs.update_util = intel_pstate_update_util; 2392 } 2393 } 2394 #else 2395 static void intel_pstate_use_acpi_profile(void) 2396 { 2397 } 2398 #endif 2399 2400 static void __init copy_cpu_funcs(struct pstate_funcs *funcs) 2401 { 2402 pstate_funcs.get_max = funcs->get_max; 2403 pstate_funcs.get_max_physical = funcs->get_max_physical; 2404 pstate_funcs.get_min = funcs->get_min; 2405 pstate_funcs.get_turbo = funcs->get_turbo; 2406 pstate_funcs.get_scaling = funcs->get_scaling; 2407 pstate_funcs.get_val = funcs->get_val; 2408 pstate_funcs.get_vid = funcs->get_vid; 2409 pstate_funcs.update_util = funcs->update_util; 2410 2411 intel_pstate_use_acpi_profile(); 2412 } 2413 2414 #ifdef CONFIG_ACPI 2415 2416 static bool __init intel_pstate_no_acpi_pss(void) 2417 { 2418 int i; 2419 2420 for_each_possible_cpu(i) { 2421 acpi_status status; 2422 union acpi_object *pss; 2423 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 2424 struct acpi_processor *pr = per_cpu(processors, i); 2425 2426 if (!pr) 2427 continue; 2428 2429 status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); 2430 if (ACPI_FAILURE(status)) 2431 continue; 2432 2433 pss = buffer.pointer; 2434 if (pss && pss->type == ACPI_TYPE_PACKAGE) { 2435 kfree(pss); 2436 return false; 2437 } 2438 2439 kfree(pss); 2440 } 2441 2442 return true; 2443 } 2444 2445 static bool __init intel_pstate_has_acpi_ppc(void) 2446 { 2447 int i; 2448 2449 for_each_possible_cpu(i) { 2450 struct acpi_processor *pr = per_cpu(processors, i); 2451 2452 if (!pr) 2453 continue; 2454 if (acpi_has_method(pr->handle, "_PPC")) 2455 return true; 2456 } 2457 return false; 2458 } 2459 2460 enum { 2461 PSS, 2462 PPC, 2463 }; 2464 2465 struct hw_vendor_info { 2466 u16 valid; 2467 char oem_id[ACPI_OEM_ID_SIZE]; 2468 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; 2469 int oem_pwr_table; 2470 }; 2471 2472 /* Hardware vendor-specific info that has its own power management modes */ 2473 static struct hw_vendor_info vendor_info[] __initdata = { 2474 {1, "HP ", "ProLiant", PSS}, 2475 {1, "ORACLE", "X4-2 ", PPC}, 2476 {1, "ORACLE", "X4-2L ", PPC}, 2477 {1, "ORACLE", "X4-2B ", PPC}, 2478 {1, "ORACLE", "X3-2 ", PPC}, 2479 {1, "ORACLE", "X3-2L ", PPC}, 2480 {1, "ORACLE", "X3-2B ", PPC}, 2481 {1, "ORACLE", "X4470M2 ", PPC}, 2482 {1, "ORACLE", "X4270M3 ", PPC}, 2483 {1, "ORACLE", "X4270M2 ", PPC}, 2484 {1, "ORACLE", "X4170M2 ", PPC}, 2485 {1, "ORACLE", "X4170 M3", PPC}, 2486 {1, "ORACLE", "X4275 M3", PPC}, 2487 {1, "ORACLE", "X6-2 ", PPC}, 2488 {1, "ORACLE", "Sudbury ", PPC}, 2489 {0, "", ""}, 2490 }; 2491 2492 static bool __init intel_pstate_platform_pwr_mgmt_exists(void) 2493 { 2494 struct acpi_table_header hdr; 2495 struct hw_vendor_info *v_info; 2496 const struct x86_cpu_id *id; 2497 u64 misc_pwr; 2498 2499 id = x86_match_cpu(intel_pstate_cpu_oob_ids); 2500 if (id) { 2501 rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); 2502 if ( misc_pwr & (1 << 8)) 2503 return true; 2504 } 2505 2506 if (acpi_disabled || 2507 ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr))) 2508 return false; 2509 2510 for (v_info = vendor_info; v_info->valid; v_info++) { 2511 if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) && 2512 !strncmp(hdr.oem_table_id, v_info->oem_table_id, 2513 ACPI_OEM_TABLE_ID_SIZE)) 2514 switch (v_info->oem_pwr_table) { 2515 case PSS: 2516 return intel_pstate_no_acpi_pss(); 2517 case PPC: 2518 return intel_pstate_has_acpi_ppc() && 2519 (!force_load); 2520 } 2521 } 2522 2523 return false; 2524 } 2525 2526 static void intel_pstate_request_control_from_smm(void) 2527 { 2528 /* 2529 * It may be unsafe to request P-states control from SMM if _PPC support 2530 * has not been enabled. 2531 */ 2532 if (acpi_ppc) 2533 acpi_processor_pstate_control(); 2534 } 2535 #else /* CONFIG_ACPI not enabled */ 2536 static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; } 2537 static inline bool intel_pstate_has_acpi_ppc(void) { return false; } 2538 static inline void intel_pstate_request_control_from_smm(void) {} 2539 #endif /* CONFIG_ACPI */ 2540 2541 static const struct x86_cpu_id hwp_support_ids[] __initconst = { 2542 { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP }, 2543 {} 2544 }; 2545 2546 static int __init intel_pstate_init(void) 2547 { 2548 int rc; 2549 2550 if (no_load) 2551 return -ENODEV; 2552 2553 if (x86_match_cpu(hwp_support_ids)) { 2554 copy_cpu_funcs(&core_funcs); 2555 if (no_hwp) { 2556 pstate_funcs.update_util = intel_pstate_update_util; 2557 } else { 2558 hwp_active++; 2559 intel_pstate.attr = hwp_cpufreq_attrs; 2560 goto hwp_cpu_matched; 2561 } 2562 } else { 2563 const struct x86_cpu_id *id; 2564 2565 id = x86_match_cpu(intel_pstate_cpu_ids); 2566 if (!id) 2567 return -ENODEV; 2568 2569 copy_cpu_funcs((struct pstate_funcs *)id->driver_data); 2570 } 2571 2572 if (intel_pstate_msrs_not_valid()) 2573 return -ENODEV; 2574 2575 hwp_cpu_matched: 2576 /* 2577 * The Intel pstate driver will be ignored if the platform 2578 * firmware has its own power management modes. 2579 */ 2580 if (intel_pstate_platform_pwr_mgmt_exists()) 2581 return -ENODEV; 2582 2583 if (!hwp_active && hwp_only) 2584 return -ENOTSUPP; 2585 2586 pr_info("Intel P-state driver initializing\n"); 2587 2588 all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus()); 2589 if (!all_cpu_data) 2590 return -ENOMEM; 2591 2592 intel_pstate_request_control_from_smm(); 2593 2594 intel_pstate_sysfs_expose_params(); 2595 2596 mutex_lock(&intel_pstate_driver_lock); 2597 rc = intel_pstate_register_driver(default_driver); 2598 mutex_unlock(&intel_pstate_driver_lock); 2599 if (rc) 2600 return rc; 2601 2602 if (hwp_active) 2603 pr_info("HWP enabled\n"); 2604 2605 return 0; 2606 } 2607 device_initcall(intel_pstate_init); 2608 2609 static int __init intel_pstate_setup(char *str) 2610 { 2611 if (!str) 2612 return -EINVAL; 2613 2614 if (!strcmp(str, "disable")) { 2615 no_load = 1; 2616 } else if (!strcmp(str, "passive")) { 2617 pr_info("Passive mode enabled\n"); 2618 default_driver = &intel_cpufreq; 2619 no_hwp = 1; 2620 } 2621 if (!strcmp(str, "no_hwp")) { 2622 pr_info("HWP disabled\n"); 2623 no_hwp = 1; 2624 } 2625 if (!strcmp(str, "force")) 2626 force_load = 1; 2627 if (!strcmp(str, "hwp_only")) 2628 hwp_only = 1; 2629 if (!strcmp(str, "per_cpu_perf_limits")) 2630 per_cpu_limits = true; 2631 2632 #ifdef CONFIG_ACPI 2633 if (!strcmp(str, "support_acpi_ppc")) 2634 acpi_ppc = true; 2635 #endif 2636 2637 return 0; 2638 } 2639 early_param("intel_pstate", intel_pstate_setup); 2640 2641 MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>"); 2642 MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors"); 2643 MODULE_LICENSE("GPL"); 2644