1 /* 2 * intel_pstate.c: Native P state management for Intel processors 3 * 4 * (C) Copyright 2012 Intel Corporation 5 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com> 6 * 7 * This program is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU General Public License 9 * as published by the Free Software Foundation; version 2 10 * of the License. 11 */ 12 13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 15 #include <linux/kernel.h> 16 #include <linux/kernel_stat.h> 17 #include <linux/module.h> 18 #include <linux/ktime.h> 19 #include <linux/hrtimer.h> 20 #include <linux/tick.h> 21 #include <linux/slab.h> 22 #include <linux/sched/cpufreq.h> 23 #include <linux/list.h> 24 #include <linux/cpu.h> 25 #include <linux/cpufreq.h> 26 #include <linux/sysfs.h> 27 #include <linux/types.h> 28 #include <linux/fs.h> 29 #include <linux/debugfs.h> 30 #include <linux/acpi.h> 31 #include <linux/vmalloc.h> 32 #include <trace/events/power.h> 33 34 #include <asm/div64.h> 35 #include <asm/msr.h> 36 #include <asm/cpu_device_id.h> 37 #include <asm/cpufeature.h> 38 #include <asm/intel-family.h> 39 40 #define INTEL_CPUFREQ_TRANSITION_LATENCY 20000 41 42 #ifdef CONFIG_ACPI 43 #include <acpi/processor.h> 44 #include <acpi/cppc_acpi.h> 45 #endif 46 47 #define FRAC_BITS 8 48 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) 49 #define fp_toint(X) ((X) >> FRAC_BITS) 50 51 #define EXT_BITS 6 52 #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS) 53 #define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS) 54 #define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS) 55 56 static inline int32_t mul_fp(int32_t x, int32_t y) 57 { 58 return ((int64_t)x * (int64_t)y) >> FRAC_BITS; 59 } 60 61 static inline int32_t div_fp(s64 x, s64 y) 62 { 63 return div64_s64((int64_t)x << FRAC_BITS, y); 64 } 65 66 static inline int ceiling_fp(int32_t x) 67 { 68 int mask, ret; 69 70 ret = fp_toint(x); 71 mask = (1 << FRAC_BITS) - 1; 72 if (x & mask) 73 ret += 1; 74 return ret; 75 } 76 77 static inline int32_t percent_fp(int percent) 78 { 79 return div_fp(percent, 100); 80 } 81 82 static inline u64 mul_ext_fp(u64 x, u64 y) 83 { 84 return (x * y) >> EXT_FRAC_BITS; 85 } 86 87 static inline u64 div_ext_fp(u64 x, u64 y) 88 { 89 return div64_u64(x << EXT_FRAC_BITS, y); 90 } 91 92 static inline int32_t percent_ext_fp(int percent) 93 { 94 return div_ext_fp(percent, 100); 95 } 96 97 /** 98 * struct sample - Store performance sample 99 * @core_avg_perf: Ratio of APERF/MPERF which is the actual average 100 * performance during last sample period 101 * @busy_scaled: Scaled busy value which is used to calculate next 102 * P state. This can be different than core_avg_perf 103 * to account for cpu idle period 104 * @aperf: Difference of actual performance frequency clock count 105 * read from APERF MSR between last and current sample 106 * @mperf: Difference of maximum performance frequency clock count 107 * read from MPERF MSR between last and current sample 108 * @tsc: Difference of time stamp counter between last and 109 * current sample 110 * @time: Current time from scheduler 111 * 112 * This structure is used in the cpudata structure to store performance sample 113 * data for choosing next P State. 
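 *
 * For example, if the APERF delta over a sample period is 18000 and the
 * MPERF delta is 24000, @core_avg_perf holds the EXT_FRAC_BITS fixed-point
 * ratio 0.75 (12288 with a 14-bit fraction), i.e. the core ran at roughly
 * 75% of its maximum non-turbo frequency while it was not idle.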
 */
struct sample {
	int32_t core_avg_perf;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	u64 time;
};

/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical: This is the physical Max P state for a processor.
 *			This can be higher than the max_pstate which can
 *			be limited by platform thermal design power limits
 * @scaling:		Scaling factor to convert frequency to cpufreq
 *			frequency units
 * @turbo_pstate:	Max Turbo P state possible for this platform
 * @max_freq:		@max_pstate frequency in cpufreq units
 * @turbo_freq:		@turbo_pstate frequency in cpufreq units
 *
 * Stores the per cpu model P state limits and current P state.
 */
struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	max_pstate_physical;
	int	scaling;
	int	turbo_pstate;
	unsigned int max_freq;
	unsigned int turbo_freq;
};

/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P State.
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - Min P State)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
 * This data is used in Atom platforms, where in addition to the target P state,
 * the voltage data needs to be specified to select the next P state.
 */
struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

/**
 * struct _pid -	Stores PID data
 * @setpoint:		Target set point for busyness or performance
 * @integral:		Storage for accumulated error values
 * @p_gain:		PID proportional gain
 * @i_gain:		PID integral gain
 * @d_gain:		PID derivative gain
 * @deadband:		PID deadband
 * @last_err:		Last error storage for integral part of PID calculation
 *
 * Stores PID coefficients and last error for PID controller.
 */
struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

/**
 * struct global_params - Global parameters, mostly tunable via sysfs.
 * @no_turbo:		Whether or not to use turbo P-states.
 * @turbo_disabled:	Whether or not turbo P-states are available at all,
 *			based on the MSR_IA32_MISC_ENABLE value and whether or
 *			not the maximum reported turbo P-state is different from
 *			the maximum reported non-turbo one.
 * @min_perf_pct:	Minimum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 * @max_perf_pct:	Maximum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 */
struct global_params {
	bool no_turbo;
	bool turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
};

/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @policy:		CPUFreq policy value
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @iowait_boost:	iowait-related boost fraction
 * @last_update:	Time of the last update.
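 *			(@iowait_boost starts at int_tofp(1) when an update
 *			with SCHED_CPUFREQ_IOWAIT arrives, is halved on each
 *			sample in the load-based path and is cleared when more
 *			than TICK_NSEC passes between updates; see
 *			intel_pstate_update_util().)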
220 * @pstate: Stores P state limits for this CPU 221 * @vid: Stores VID limits for this CPU 222 * @pid: Stores PID parameters for this CPU 223 * @last_sample_time: Last Sample time 224 * @prev_aperf: Last APERF value read from APERF MSR 225 * @prev_mperf: Last MPERF value read from MPERF MSR 226 * @prev_tsc: Last timestamp counter (TSC) value 227 * @prev_cummulative_iowait: IO Wait time difference from last and 228 * current sample 229 * @sample: Storage for storing last Sample data 230 * @min_perf: Minimum capacity limit as a fraction of the maximum 231 * turbo P-state capacity. 232 * @max_perf: Maximum capacity limit as a fraction of the maximum 233 * turbo P-state capacity. 234 * @acpi_perf_data: Stores ACPI perf information read from _PSS 235 * @valid_pss_table: Set to true for valid ACPI _PSS entries found 236 * @epp_powersave: Last saved HWP energy performance preference 237 * (EPP) or energy performance bias (EPB), 238 * when policy switched to performance 239 * @epp_policy: Last saved policy used to set EPP/EPB 240 * @epp_default: Power on default HWP energy performance 241 * preference/bias 242 * @epp_saved: Saved EPP/EPB during system suspend or CPU offline 243 * operation 244 * 245 * This structure stores per CPU instance data for all CPUs. 246 */ 247 struct cpudata { 248 int cpu; 249 250 unsigned int policy; 251 struct update_util_data update_util; 252 bool update_util_set; 253 254 struct pstate_data pstate; 255 struct vid_data vid; 256 struct _pid pid; 257 258 u64 last_update; 259 u64 last_sample_time; 260 u64 prev_aperf; 261 u64 prev_mperf; 262 u64 prev_tsc; 263 u64 prev_cummulative_iowait; 264 struct sample sample; 265 int32_t min_perf; 266 int32_t max_perf; 267 #ifdef CONFIG_ACPI 268 struct acpi_processor_performance acpi_perf_data; 269 bool valid_pss_table; 270 #endif 271 unsigned int iowait_boost; 272 s16 epp_powersave; 273 s16 epp_policy; 274 s16 epp_default; 275 s16 epp_saved; 276 }; 277 278 static struct cpudata **all_cpu_data; 279 280 /** 281 * struct pstate_adjust_policy - Stores static PID configuration data 282 * @sample_rate_ms: PID calculation sample rate in ms 283 * @sample_rate_ns: Sample rate calculation in ns 284 * @deadband: PID deadband 285 * @setpoint: PID Setpoint 286 * @p_gain_pct: PID proportional gain 287 * @i_gain_pct: PID integral gain 288 * @d_gain_pct: PID derivative gain 289 * 290 * Stores per CPU model static PID configuration data. 291 */ 292 struct pstate_adjust_policy { 293 int sample_rate_ms; 294 s64 sample_rate_ns; 295 int deadband; 296 int setpoint; 297 int p_gain_pct; 298 int d_gain_pct; 299 int i_gain_pct; 300 }; 301 302 /** 303 * struct pstate_funcs - Per CPU model specific callbacks 304 * @get_max: Callback to get maximum non turbo effective P state 305 * @get_max_physical: Callback to get maximum non turbo physical P state 306 * @get_min: Callback to get minimum P state 307 * @get_turbo: Callback to get turbo P state 308 * @get_scaling: Callback to get frequency scaling factor 309 * @get_val: Callback to convert P state to actual MSR write value 310 * @get_vid: Callback to get VID data for Atom platforms 311 * @get_target_pstate: Callback to a function to calculate next P state to use 312 * 313 * Core and Atom CPU models have different way to get P State limits. This 314 * structure is used to store those callbacks. 
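 *
 * For example, the Core callbacks below (core_params) read
 * MSR_PLATFORM_INFO and MSR_TURBO_RATIO_LIMIT, while the Atom callbacks
 * (silvermont_params, airmont_params) read MSR_ATOM_CORE_RATIOS and also
 * provide a get_vid() hook, because Atom needs a voltage value in addition
 * to the target P state.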
315 */ 316 struct pstate_funcs { 317 int (*get_max)(void); 318 int (*get_max_physical)(void); 319 int (*get_min)(void); 320 int (*get_turbo)(void); 321 int (*get_scaling)(void); 322 u64 (*get_val)(struct cpudata*, int pstate); 323 void (*get_vid)(struct cpudata *); 324 int32_t (*get_target_pstate)(struct cpudata *); 325 }; 326 327 /** 328 * struct cpu_defaults- Per CPU model default config data 329 * @funcs: Callback function data 330 */ 331 struct cpu_defaults { 332 struct pstate_funcs funcs; 333 }; 334 335 static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu); 336 static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu); 337 338 static struct pstate_funcs pstate_funcs __read_mostly; 339 static struct pstate_adjust_policy pid_params __read_mostly = { 340 .sample_rate_ms = 10, 341 .sample_rate_ns = 10 * NSEC_PER_MSEC, 342 .deadband = 0, 343 .setpoint = 97, 344 .p_gain_pct = 20, 345 .d_gain_pct = 0, 346 .i_gain_pct = 0, 347 }; 348 349 static int hwp_active __read_mostly; 350 static bool per_cpu_limits __read_mostly; 351 352 static struct cpufreq_driver *intel_pstate_driver __read_mostly; 353 354 #ifdef CONFIG_ACPI 355 static bool acpi_ppc; 356 #endif 357 358 static struct global_params global; 359 360 static DEFINE_MUTEX(intel_pstate_driver_lock); 361 static DEFINE_MUTEX(intel_pstate_limits_lock); 362 363 #ifdef CONFIG_ACPI 364 365 static bool intel_pstate_get_ppc_enable_status(void) 366 { 367 if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER || 368 acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER) 369 return true; 370 371 return acpi_ppc; 372 } 373 374 #ifdef CONFIG_ACPI_CPPC_LIB 375 376 /* The work item is needed to avoid CPU hotplug locking issues */ 377 static void intel_pstste_sched_itmt_work_fn(struct work_struct *work) 378 { 379 sched_set_itmt_support(); 380 } 381 382 static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn); 383 384 static void intel_pstate_set_itmt_prio(int cpu) 385 { 386 struct cppc_perf_caps cppc_perf; 387 static u32 max_highest_perf = 0, min_highest_perf = U32_MAX; 388 int ret; 389 390 ret = cppc_get_perf_caps(cpu, &cppc_perf); 391 if (ret) 392 return; 393 394 /* 395 * The priorities can be set regardless of whether or not 396 * sched_set_itmt_support(true) has been called and it is valid to 397 * update them at any time after it has been called. 398 */ 399 sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu); 400 401 if (max_highest_perf <= min_highest_perf) { 402 if (cppc_perf.highest_perf > max_highest_perf) 403 max_highest_perf = cppc_perf.highest_perf; 404 405 if (cppc_perf.highest_perf < min_highest_perf) 406 min_highest_perf = cppc_perf.highest_perf; 407 408 if (max_highest_perf > min_highest_perf) { 409 /* 410 * This code can be run during CPU online under the 411 * CPU hotplug locks, so sched_set_itmt_support() 412 * cannot be called from here. Queue up a work item 413 * to invoke it. 
414 */ 415 schedule_work(&sched_itmt_work); 416 } 417 } 418 } 419 #else 420 static void intel_pstate_set_itmt_prio(int cpu) 421 { 422 } 423 #endif 424 425 static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 426 { 427 struct cpudata *cpu; 428 int ret; 429 int i; 430 431 if (hwp_active) { 432 intel_pstate_set_itmt_prio(policy->cpu); 433 return; 434 } 435 436 if (!intel_pstate_get_ppc_enable_status()) 437 return; 438 439 cpu = all_cpu_data[policy->cpu]; 440 441 ret = acpi_processor_register_performance(&cpu->acpi_perf_data, 442 policy->cpu); 443 if (ret) 444 return; 445 446 /* 447 * Check if the control value in _PSS is for PERF_CTL MSR, which should 448 * guarantee that the states returned by it map to the states in our 449 * list directly. 450 */ 451 if (cpu->acpi_perf_data.control_register.space_id != 452 ACPI_ADR_SPACE_FIXED_HARDWARE) 453 goto err; 454 455 /* 456 * If there is only one entry _PSS, simply ignore _PSS and continue as 457 * usual without taking _PSS into account 458 */ 459 if (cpu->acpi_perf_data.state_count < 2) 460 goto err; 461 462 pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu); 463 for (i = 0; i < cpu->acpi_perf_data.state_count; i++) { 464 pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n", 465 (i == cpu->acpi_perf_data.state ? '*' : ' '), i, 466 (u32) cpu->acpi_perf_data.states[i].core_frequency, 467 (u32) cpu->acpi_perf_data.states[i].power, 468 (u32) cpu->acpi_perf_data.states[i].control); 469 } 470 471 /* 472 * The _PSS table doesn't contain whole turbo frequency range. 473 * This just contains +1 MHZ above the max non turbo frequency, 474 * with control value corresponding to max turbo ratio. But 475 * when cpufreq set policy is called, it will call with this 476 * max frequency, which will cause a reduced performance as 477 * this driver uses real max turbo frequency as the max 478 * frequency. So correct this frequency in _PSS table to 479 * correct max turbo frequency based on the turbo state. 480 * Also need to convert to MHz as _PSS freq is in MHz. 481 */ 482 if (!global.turbo_disabled) 483 cpu->acpi_perf_data.states[0].core_frequency = 484 policy->cpuinfo.max_freq / 1000; 485 cpu->valid_pss_table = true; 486 pr_debug("_PPC limits will be enforced\n"); 487 488 return; 489 490 err: 491 cpu->valid_pss_table = false; 492 acpi_processor_unregister_performance(policy->cpu); 493 } 494 495 static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 496 { 497 struct cpudata *cpu; 498 499 cpu = all_cpu_data[policy->cpu]; 500 if (!cpu->valid_pss_table) 501 return; 502 503 acpi_processor_unregister_performance(policy->cpu); 504 } 505 #else 506 static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 507 { 508 } 509 510 static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 511 { 512 } 513 #endif 514 515 static signed int pid_calc(struct _pid *pid, int32_t busy) 516 { 517 signed int result; 518 int32_t pterm, dterm, fp_error; 519 int32_t integral_limit; 520 521 fp_error = pid->setpoint - busy; 522 523 if (abs(fp_error) <= pid->deadband) 524 return 0; 525 526 pterm = mul_fp(pid->p_gain, fp_error); 527 528 pid->integral += fp_error; 529 530 /* 531 * We limit the integral here so that it will never 532 * get higher than 30. This prevents it from becoming 533 * too large an input over long periods of time and allows 534 * it to get factored out sooner. 535 * 536 * The value of 30 was chosen through experimentation. 
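	 *
	 * For example, with FRAC_BITS == 8 the clamp value int_tofp(30) is
	 * 7680, so the integral contribution mul_fp(integral, i_gain) can
	 * never exceed 30 times the integral gain; with the default
	 * i_gain_pct of 0 the term is disabled unless changed via debugfs.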
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}

static inline void intel_pstate_pid_reset(struct cpudata *cpu)
{
	struct _pid *pid = &cpu->pid;

	pid->p_gain = percent_fp(pid_params.p_gain_pct);
	pid->d_gain = percent_fp(pid_params.d_gain_pct);
	pid->i_gain = percent_fp(pid_params.i_gain_pct);
	pid->setpoint = int_tofp(pid_params.setpoint);
	pid->last_err = pid->setpoint - int_tofp(100);
	pid->deadband = int_tofp(pid_params.deadband);
	pid->integral = 0;
}

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	global.turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static int min_perf_pct_min(void)
{
	struct cpudata *cpu = all_cpu_data[0];

	return DIV_ROUND_UP(cpu->pstate.min_pstate * 100,
			    cpu->pstate.turbo_pstate);
}

static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
{
	u64 epb;
	int ret;

	if (!static_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return (s16)ret;

	return (s16)(epb & 0x0f);
}

static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
	s16 epp;

	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * When hwp_req_data is 0, it means that the caller has not
		 * read MSR_HWP_REQUEST, so read it here to get the EPP.
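		 *
		 * For example, an MSR_HWP_REQUEST value of 0x80001a08 carries
		 * an EPP of 0x80 in bits 31:24, which falls into the
		 * "balance_power" range used by
		 * intel_pstate_get_energy_pref_index() below.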
		 */
		if (!hwp_req_data) {
			epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
					    &hwp_req_data);
			if (epp)
				return epp;
		}
		epp = (hwp_req_data >> 24) & 0xff;
	} else {
		/* When there is no EPP present, HWP uses EPB settings */
		epp = intel_pstate_get_epb(cpu_data);
	}

	return epp;
}

static int intel_pstate_set_epb(int cpu, s16 pref)
{
	u64 epb;
	int ret;

	if (!static_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return ret;

	epb = (epb & ~0x0f) | pref;
	wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);

	return 0;
}

/*
 * EPP/EPB display strings corresponding to EPP index in the
 * energy_perf_strings[]
 *	index		String
 *-------------------------------------
 *	0		default
 *	1		performance
 *	2		balance_performance
 *	3		balance_power
 *	4		power
 */
static const char * const energy_perf_strings[] = {
	"default",
	"performance",
	"balance_performance",
	"balance_power",
	"power",
	NULL
};

static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
{
	s16 epp;
	int index = -EINVAL;

	epp = intel_pstate_get_epp(cpu_data, 0);
	if (epp < 0)
		return epp;

	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * Range:
		 *	0x00-0x3F	:	Performance
		 *	0x40-0x7F	:	Balance performance
		 *	0x80-0xBF	:	Balance power
		 *	0xC0-0xFF	:	Power
		 * The EPP is an 8 bit value, but our ranges restrict the
		 * values which can be set; effectively only the top two
		 * bits are used here.
		 */
		index = (epp >> 6) + 1;
	} else if (static_cpu_has(X86_FEATURE_EPB)) {
		/*
		 * Range:
		 *	0x00-0x03	:	Performance
		 *	0x04-0x07	:	Balance performance
		 *	0x08-0x0B	:	Balance power
		 *	0x0C-0x0F	:	Power
		 * The EPB is a 4 bit value, but our ranges restrict the
		 * values which can be set; effectively only the top two
		 * bits are used here.
		 */
		index = (epp >> 2) + 1;
	}

	return index;
}

static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
					      int pref_index)
{
	int epp = -EINVAL;
	int ret;

	if (!pref_index)
		epp = cpu_data->epp_default;

	mutex_lock(&intel_pstate_limits_lock);

	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		u64 value;

		ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
		if (ret)
			goto return_pref;

		value &= ~GENMASK_ULL(31, 24);

		/*
		 * If epp is not the default, convert the index into
		 * energy_perf_strings to an EPP value by shifting it left
		 * by 6 bits so that only the top two bits are used.
		 * The resulting EPP then needs to be shifted left by 24
		 * bits into the EPP position in MSR_HWP_REQUEST.
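		 *
		 * For example, writing "balance_performance" (pref_index 2)
		 * with no default override gives epp = (2 - 1) << 6 = 0x40,
		 * which is then placed in bits 31:24 of MSR_HWP_REQUEST.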
726 */ 727 if (epp == -EINVAL) 728 epp = (pref_index - 1) << 6; 729 730 value |= (u64)epp << 24; 731 ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value); 732 } else { 733 if (epp == -EINVAL) 734 epp = (pref_index - 1) << 2; 735 ret = intel_pstate_set_epb(cpu_data->cpu, epp); 736 } 737 return_pref: 738 mutex_unlock(&intel_pstate_limits_lock); 739 740 return ret; 741 } 742 743 static ssize_t show_energy_performance_available_preferences( 744 struct cpufreq_policy *policy, char *buf) 745 { 746 int i = 0; 747 int ret = 0; 748 749 while (energy_perf_strings[i] != NULL) 750 ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]); 751 752 ret += sprintf(&buf[ret], "\n"); 753 754 return ret; 755 } 756 757 cpufreq_freq_attr_ro(energy_performance_available_preferences); 758 759 static ssize_t store_energy_performance_preference( 760 struct cpufreq_policy *policy, const char *buf, size_t count) 761 { 762 struct cpudata *cpu_data = all_cpu_data[policy->cpu]; 763 char str_preference[21]; 764 int ret, i = 0; 765 766 ret = sscanf(buf, "%20s", str_preference); 767 if (ret != 1) 768 return -EINVAL; 769 770 while (energy_perf_strings[i] != NULL) { 771 if (!strcmp(str_preference, energy_perf_strings[i])) { 772 intel_pstate_set_energy_pref_index(cpu_data, i); 773 return count; 774 } 775 ++i; 776 } 777 778 return -EINVAL; 779 } 780 781 static ssize_t show_energy_performance_preference( 782 struct cpufreq_policy *policy, char *buf) 783 { 784 struct cpudata *cpu_data = all_cpu_data[policy->cpu]; 785 int preference; 786 787 preference = intel_pstate_get_energy_pref_index(cpu_data); 788 if (preference < 0) 789 return preference; 790 791 return sprintf(buf, "%s\n", energy_perf_strings[preference]); 792 } 793 794 cpufreq_freq_attr_rw(energy_performance_preference); 795 796 static struct freq_attr *hwp_cpufreq_attrs[] = { 797 &energy_performance_preference, 798 &energy_performance_available_preferences, 799 NULL, 800 }; 801 802 static void intel_pstate_hwp_set(struct cpufreq_policy *policy) 803 { 804 int min, hw_min, max, hw_max, cpu; 805 u64 value, cap; 806 807 for_each_cpu(cpu, policy->cpus) { 808 struct cpudata *cpu_data = all_cpu_data[cpu]; 809 s16 epp; 810 811 rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); 812 hw_min = HWP_LOWEST_PERF(cap); 813 if (global.no_turbo) 814 hw_max = HWP_GUARANTEED_PERF(cap); 815 else 816 hw_max = HWP_HIGHEST_PERF(cap); 817 818 max = fp_ext_toint(hw_max * cpu_data->max_perf); 819 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) 820 min = max; 821 else 822 min = fp_ext_toint(hw_max * cpu_data->min_perf); 823 824 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); 825 826 value &= ~HWP_MIN_PERF(~0L); 827 value |= HWP_MIN_PERF(min); 828 829 value &= ~HWP_MAX_PERF(~0L); 830 value |= HWP_MAX_PERF(max); 831 832 if (cpu_data->epp_policy == cpu_data->policy) 833 goto skip_epp; 834 835 cpu_data->epp_policy = cpu_data->policy; 836 837 if (cpu_data->epp_saved >= 0) { 838 epp = cpu_data->epp_saved; 839 cpu_data->epp_saved = -EINVAL; 840 goto update_epp; 841 } 842 843 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) { 844 epp = intel_pstate_get_epp(cpu_data, value); 845 cpu_data->epp_powersave = epp; 846 /* If EPP read was failed, then don't try to write */ 847 if (epp < 0) 848 goto skip_epp; 849 850 851 epp = 0; 852 } else { 853 /* skip setting EPP, when saved value is invalid */ 854 if (cpu_data->epp_powersave < 0) 855 goto skip_epp; 856 857 /* 858 * No need to restore EPP when it is not zero. 
This 859 * means: 860 * - Policy is not changed 861 * - user has manually changed 862 * - Error reading EPB 863 */ 864 epp = intel_pstate_get_epp(cpu_data, value); 865 if (epp) 866 goto skip_epp; 867 868 epp = cpu_data->epp_powersave; 869 } 870 update_epp: 871 if (static_cpu_has(X86_FEATURE_HWP_EPP)) { 872 value &= ~GENMASK_ULL(31, 24); 873 value |= (u64)epp << 24; 874 } else { 875 intel_pstate_set_epb(cpu, epp); 876 } 877 skip_epp: 878 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); 879 } 880 } 881 882 static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy) 883 { 884 struct cpudata *cpu_data = all_cpu_data[policy->cpu]; 885 886 if (!hwp_active) 887 return 0; 888 889 cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0); 890 891 return 0; 892 } 893 894 static int intel_pstate_resume(struct cpufreq_policy *policy) 895 { 896 if (!hwp_active) 897 return 0; 898 899 mutex_lock(&intel_pstate_limits_lock); 900 901 all_cpu_data[policy->cpu]->epp_policy = 0; 902 intel_pstate_hwp_set(policy); 903 904 mutex_unlock(&intel_pstate_limits_lock); 905 906 return 0; 907 } 908 909 static void intel_pstate_update_policies(void) 910 { 911 int cpu; 912 913 for_each_possible_cpu(cpu) 914 cpufreq_update_policy(cpu); 915 } 916 917 /************************** debugfs begin ************************/ 918 static int pid_param_set(void *data, u64 val) 919 { 920 unsigned int cpu; 921 922 *(u32 *)data = val; 923 pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC; 924 for_each_possible_cpu(cpu) 925 if (all_cpu_data[cpu]) 926 intel_pstate_pid_reset(all_cpu_data[cpu]); 927 928 return 0; 929 } 930 931 static int pid_param_get(void *data, u64 *val) 932 { 933 *val = *(u32 *)data; 934 return 0; 935 } 936 DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n"); 937 938 static struct dentry *debugfs_parent; 939 940 struct pid_param { 941 char *name; 942 void *value; 943 struct dentry *dentry; 944 }; 945 946 static struct pid_param pid_files[] = { 947 {"sample_rate_ms", &pid_params.sample_rate_ms, }, 948 {"d_gain_pct", &pid_params.d_gain_pct, }, 949 {"i_gain_pct", &pid_params.i_gain_pct, }, 950 {"deadband", &pid_params.deadband, }, 951 {"setpoint", &pid_params.setpoint, }, 952 {"p_gain_pct", &pid_params.p_gain_pct, }, 953 {NULL, NULL, } 954 }; 955 956 static void intel_pstate_debug_expose_params(void) 957 { 958 int i; 959 960 debugfs_parent = debugfs_create_dir("pstate_snb", NULL); 961 if (IS_ERR_OR_NULL(debugfs_parent)) 962 return; 963 964 for (i = 0; pid_files[i].name; i++) { 965 struct dentry *dentry; 966 967 dentry = debugfs_create_file(pid_files[i].name, 0660, 968 debugfs_parent, pid_files[i].value, 969 &fops_pid_param); 970 if (!IS_ERR(dentry)) 971 pid_files[i].dentry = dentry; 972 } 973 } 974 975 static void intel_pstate_debug_hide_params(void) 976 { 977 int i; 978 979 if (IS_ERR_OR_NULL(debugfs_parent)) 980 return; 981 982 for (i = 0; pid_files[i].name; i++) { 983 debugfs_remove(pid_files[i].dentry); 984 pid_files[i].dentry = NULL; 985 } 986 987 debugfs_remove(debugfs_parent); 988 debugfs_parent = NULL; 989 } 990 991 /************************** debugfs end ************************/ 992 993 /************************** sysfs begin ************************/ 994 #define show_one(file_name, object) \ 995 static ssize_t show_##file_name \ 996 (struct kobject *kobj, struct attribute *attr, char *buf) \ 997 { \ 998 return sprintf(buf, "%u\n", global.object); \ 999 } 1000 1001 static ssize_t intel_pstate_show_status(char *buf); 1002 static int intel_pstate_update_status(const 
char *buf, size_t size); 1003 1004 static ssize_t show_status(struct kobject *kobj, 1005 struct attribute *attr, char *buf) 1006 { 1007 ssize_t ret; 1008 1009 mutex_lock(&intel_pstate_driver_lock); 1010 ret = intel_pstate_show_status(buf); 1011 mutex_unlock(&intel_pstate_driver_lock); 1012 1013 return ret; 1014 } 1015 1016 static ssize_t store_status(struct kobject *a, struct attribute *b, 1017 const char *buf, size_t count) 1018 { 1019 char *p = memchr(buf, '\n', count); 1020 int ret; 1021 1022 mutex_lock(&intel_pstate_driver_lock); 1023 ret = intel_pstate_update_status(buf, p ? p - buf : count); 1024 mutex_unlock(&intel_pstate_driver_lock); 1025 1026 return ret < 0 ? ret : count; 1027 } 1028 1029 static ssize_t show_turbo_pct(struct kobject *kobj, 1030 struct attribute *attr, char *buf) 1031 { 1032 struct cpudata *cpu; 1033 int total, no_turbo, turbo_pct; 1034 uint32_t turbo_fp; 1035 1036 mutex_lock(&intel_pstate_driver_lock); 1037 1038 if (!intel_pstate_driver) { 1039 mutex_unlock(&intel_pstate_driver_lock); 1040 return -EAGAIN; 1041 } 1042 1043 cpu = all_cpu_data[0]; 1044 1045 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; 1046 no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1; 1047 turbo_fp = div_fp(no_turbo, total); 1048 turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100))); 1049 1050 mutex_unlock(&intel_pstate_driver_lock); 1051 1052 return sprintf(buf, "%u\n", turbo_pct); 1053 } 1054 1055 static ssize_t show_num_pstates(struct kobject *kobj, 1056 struct attribute *attr, char *buf) 1057 { 1058 struct cpudata *cpu; 1059 int total; 1060 1061 mutex_lock(&intel_pstate_driver_lock); 1062 1063 if (!intel_pstate_driver) { 1064 mutex_unlock(&intel_pstate_driver_lock); 1065 return -EAGAIN; 1066 } 1067 1068 cpu = all_cpu_data[0]; 1069 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; 1070 1071 mutex_unlock(&intel_pstate_driver_lock); 1072 1073 return sprintf(buf, "%u\n", total); 1074 } 1075 1076 static ssize_t show_no_turbo(struct kobject *kobj, 1077 struct attribute *attr, char *buf) 1078 { 1079 ssize_t ret; 1080 1081 mutex_lock(&intel_pstate_driver_lock); 1082 1083 if (!intel_pstate_driver) { 1084 mutex_unlock(&intel_pstate_driver_lock); 1085 return -EAGAIN; 1086 } 1087 1088 update_turbo_state(); 1089 if (global.turbo_disabled) 1090 ret = sprintf(buf, "%u\n", global.turbo_disabled); 1091 else 1092 ret = sprintf(buf, "%u\n", global.no_turbo); 1093 1094 mutex_unlock(&intel_pstate_driver_lock); 1095 1096 return ret; 1097 } 1098 1099 static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, 1100 const char *buf, size_t count) 1101 { 1102 unsigned int input; 1103 int ret; 1104 1105 ret = sscanf(buf, "%u", &input); 1106 if (ret != 1) 1107 return -EINVAL; 1108 1109 mutex_lock(&intel_pstate_driver_lock); 1110 1111 if (!intel_pstate_driver) { 1112 mutex_unlock(&intel_pstate_driver_lock); 1113 return -EAGAIN; 1114 } 1115 1116 mutex_lock(&intel_pstate_limits_lock); 1117 1118 update_turbo_state(); 1119 if (global.turbo_disabled) { 1120 pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); 1121 mutex_unlock(&intel_pstate_limits_lock); 1122 mutex_unlock(&intel_pstate_driver_lock); 1123 return -EPERM; 1124 } 1125 1126 global.no_turbo = clamp_t(int, input, 0, 1); 1127 1128 if (global.no_turbo) { 1129 struct cpudata *cpu = all_cpu_data[0]; 1130 int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate; 1131 1132 /* Squash the global minimum into the permitted range. 
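		 * For example, with max_pstate 24 and turbo_pstate 32 the
		 * non-turbo ceiling is 24 * 100 / 32 = 75, so a previously
		 * configured min_perf_pct of 90 would be lowered to 75 here.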
*/ 1133 if (global.min_perf_pct > pct) 1134 global.min_perf_pct = pct; 1135 } 1136 1137 mutex_unlock(&intel_pstate_limits_lock); 1138 1139 intel_pstate_update_policies(); 1140 1141 mutex_unlock(&intel_pstate_driver_lock); 1142 1143 return count; 1144 } 1145 1146 static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, 1147 const char *buf, size_t count) 1148 { 1149 unsigned int input; 1150 int ret; 1151 1152 ret = sscanf(buf, "%u", &input); 1153 if (ret != 1) 1154 return -EINVAL; 1155 1156 mutex_lock(&intel_pstate_driver_lock); 1157 1158 if (!intel_pstate_driver) { 1159 mutex_unlock(&intel_pstate_driver_lock); 1160 return -EAGAIN; 1161 } 1162 1163 mutex_lock(&intel_pstate_limits_lock); 1164 1165 global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100); 1166 1167 mutex_unlock(&intel_pstate_limits_lock); 1168 1169 intel_pstate_update_policies(); 1170 1171 mutex_unlock(&intel_pstate_driver_lock); 1172 1173 return count; 1174 } 1175 1176 static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, 1177 const char *buf, size_t count) 1178 { 1179 unsigned int input; 1180 int ret; 1181 1182 ret = sscanf(buf, "%u", &input); 1183 if (ret != 1) 1184 return -EINVAL; 1185 1186 mutex_lock(&intel_pstate_driver_lock); 1187 1188 if (!intel_pstate_driver) { 1189 mutex_unlock(&intel_pstate_driver_lock); 1190 return -EAGAIN; 1191 } 1192 1193 mutex_lock(&intel_pstate_limits_lock); 1194 1195 global.min_perf_pct = clamp_t(int, input, 1196 min_perf_pct_min(), global.max_perf_pct); 1197 1198 mutex_unlock(&intel_pstate_limits_lock); 1199 1200 intel_pstate_update_policies(); 1201 1202 mutex_unlock(&intel_pstate_driver_lock); 1203 1204 return count; 1205 } 1206 1207 show_one(max_perf_pct, max_perf_pct); 1208 show_one(min_perf_pct, min_perf_pct); 1209 1210 define_one_global_rw(status); 1211 define_one_global_rw(no_turbo); 1212 define_one_global_rw(max_perf_pct); 1213 define_one_global_rw(min_perf_pct); 1214 define_one_global_ro(turbo_pct); 1215 define_one_global_ro(num_pstates); 1216 1217 static struct attribute *intel_pstate_attributes[] = { 1218 &status.attr, 1219 &no_turbo.attr, 1220 &turbo_pct.attr, 1221 &num_pstates.attr, 1222 NULL 1223 }; 1224 1225 static struct attribute_group intel_pstate_attr_group = { 1226 .attrs = intel_pstate_attributes, 1227 }; 1228 1229 static void __init intel_pstate_sysfs_expose_params(void) 1230 { 1231 struct kobject *intel_pstate_kobject; 1232 int rc; 1233 1234 intel_pstate_kobject = kobject_create_and_add("intel_pstate", 1235 &cpu_subsys.dev_root->kobj); 1236 if (WARN_ON(!intel_pstate_kobject)) 1237 return; 1238 1239 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group); 1240 if (WARN_ON(rc)) 1241 return; 1242 1243 /* 1244 * If per cpu limits are enforced there are no global limits, so 1245 * return without creating max/min_perf_pct attributes 1246 */ 1247 if (per_cpu_limits) 1248 return; 1249 1250 rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr); 1251 WARN_ON(rc); 1252 1253 rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr); 1254 WARN_ON(rc); 1255 1256 } 1257 /************************** sysfs end ************************/ 1258 1259 static void intel_pstate_hwp_enable(struct cpudata *cpudata) 1260 { 1261 /* First disable HWP notification interrupt as we don't process them */ 1262 if (static_cpu_has(X86_FEATURE_HWP_NOTIFY)) 1263 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); 1264 1265 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); 1266 cpudata->epp_policy = 0; 1267 if 
(cpudata->epp_default == -EINVAL) 1268 cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); 1269 } 1270 1271 #define MSR_IA32_POWER_CTL_BIT_EE 19 1272 1273 /* Disable energy efficiency optimization */ 1274 static void intel_pstate_disable_ee(int cpu) 1275 { 1276 u64 power_ctl; 1277 int ret; 1278 1279 ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl); 1280 if (ret) 1281 return; 1282 1283 if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) { 1284 pr_info("Disabling energy efficiency optimization\n"); 1285 power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE); 1286 wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl); 1287 } 1288 } 1289 1290 static int atom_get_min_pstate(void) 1291 { 1292 u64 value; 1293 1294 rdmsrl(MSR_ATOM_CORE_RATIOS, value); 1295 return (value >> 8) & 0x7F; 1296 } 1297 1298 static int atom_get_max_pstate(void) 1299 { 1300 u64 value; 1301 1302 rdmsrl(MSR_ATOM_CORE_RATIOS, value); 1303 return (value >> 16) & 0x7F; 1304 } 1305 1306 static int atom_get_turbo_pstate(void) 1307 { 1308 u64 value; 1309 1310 rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value); 1311 return value & 0x7F; 1312 } 1313 1314 static u64 atom_get_val(struct cpudata *cpudata, int pstate) 1315 { 1316 u64 val; 1317 int32_t vid_fp; 1318 u32 vid; 1319 1320 val = (u64)pstate << 8; 1321 if (global.no_turbo && !global.turbo_disabled) 1322 val |= (u64)1 << 32; 1323 1324 vid_fp = cpudata->vid.min + mul_fp( 1325 int_tofp(pstate - cpudata->pstate.min_pstate), 1326 cpudata->vid.ratio); 1327 1328 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); 1329 vid = ceiling_fp(vid_fp); 1330 1331 if (pstate > cpudata->pstate.max_pstate) 1332 vid = cpudata->vid.turbo; 1333 1334 return val | vid; 1335 } 1336 1337 static int silvermont_get_scaling(void) 1338 { 1339 u64 value; 1340 int i; 1341 /* Defined in Table 35-6 from SDM (Sept 2015) */ 1342 static int silvermont_freq_table[] = { 1343 83300, 100000, 133300, 116700, 80000}; 1344 1345 rdmsrl(MSR_FSB_FREQ, value); 1346 i = value & 0x7; 1347 WARN_ON(i > 4); 1348 1349 return silvermont_freq_table[i]; 1350 } 1351 1352 static int airmont_get_scaling(void) 1353 { 1354 u64 value; 1355 int i; 1356 /* Defined in Table 35-10 from SDM (Sept 2015) */ 1357 static int airmont_freq_table[] = { 1358 83300, 100000, 133300, 116700, 80000, 1359 93300, 90000, 88900, 87500}; 1360 1361 rdmsrl(MSR_FSB_FREQ, value); 1362 i = value & 0xF; 1363 WARN_ON(i > 8); 1364 1365 return airmont_freq_table[i]; 1366 } 1367 1368 static void atom_get_vid(struct cpudata *cpudata) 1369 { 1370 u64 value; 1371 1372 rdmsrl(MSR_ATOM_CORE_VIDS, value); 1373 cpudata->vid.min = int_tofp((value >> 8) & 0x7f); 1374 cpudata->vid.max = int_tofp((value >> 16) & 0x7f); 1375 cpudata->vid.ratio = div_fp( 1376 cpudata->vid.max - cpudata->vid.min, 1377 int_tofp(cpudata->pstate.max_pstate - 1378 cpudata->pstate.min_pstate)); 1379 1380 rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value); 1381 cpudata->vid.turbo = value & 0x7f; 1382 } 1383 1384 static int core_get_min_pstate(void) 1385 { 1386 u64 value; 1387 1388 rdmsrl(MSR_PLATFORM_INFO, value); 1389 return (value >> 40) & 0xFF; 1390 } 1391 1392 static int core_get_max_pstate_physical(void) 1393 { 1394 u64 value; 1395 1396 rdmsrl(MSR_PLATFORM_INFO, value); 1397 return (value >> 8) & 0xFF; 1398 } 1399 1400 static int core_get_tdp_ratio(u64 plat_info) 1401 { 1402 /* Check how many TDP levels present */ 1403 if (plat_info & 0x600000000) { 1404 u64 tdp_ctrl; 1405 u64 tdp_ratio; 1406 int tdp_msr; 1407 int err; 1408 1409 /* Get the TDP level (0, 1, 2) to get ratios */ 1410 err = 
rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); 1411 if (err) 1412 return err; 1413 1414 /* TDP MSR are continuous starting at 0x648 */ 1415 tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03); 1416 err = rdmsrl_safe(tdp_msr, &tdp_ratio); 1417 if (err) 1418 return err; 1419 1420 /* For level 1 and 2, bits[23:16] contain the ratio */ 1421 if (tdp_ctrl & 0x03) 1422 tdp_ratio >>= 16; 1423 1424 tdp_ratio &= 0xff; /* ratios are only 8 bits long */ 1425 pr_debug("tdp_ratio %x\n", (int)tdp_ratio); 1426 1427 return (int)tdp_ratio; 1428 } 1429 1430 return -ENXIO; 1431 } 1432 1433 static int core_get_max_pstate(void) 1434 { 1435 u64 tar; 1436 u64 plat_info; 1437 int max_pstate; 1438 int tdp_ratio; 1439 int err; 1440 1441 rdmsrl(MSR_PLATFORM_INFO, plat_info); 1442 max_pstate = (plat_info >> 8) & 0xFF; 1443 1444 tdp_ratio = core_get_tdp_ratio(plat_info); 1445 if (tdp_ratio <= 0) 1446 return max_pstate; 1447 1448 if (hwp_active) { 1449 /* Turbo activation ratio is not used on HWP platforms */ 1450 return tdp_ratio; 1451 } 1452 1453 err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar); 1454 if (!err) { 1455 int tar_levels; 1456 1457 /* Do some sanity checking for safety */ 1458 tar_levels = tar & 0xff; 1459 if (tdp_ratio - 1 == tar_levels) { 1460 max_pstate = tar_levels; 1461 pr_debug("max_pstate=TAC %x\n", max_pstate); 1462 } 1463 } 1464 1465 return max_pstate; 1466 } 1467 1468 static int core_get_turbo_pstate(void) 1469 { 1470 u64 value; 1471 int nont, ret; 1472 1473 rdmsrl(MSR_TURBO_RATIO_LIMIT, value); 1474 nont = core_get_max_pstate(); 1475 ret = (value) & 255; 1476 if (ret <= nont) 1477 ret = nont; 1478 return ret; 1479 } 1480 1481 static inline int core_get_scaling(void) 1482 { 1483 return 100000; 1484 } 1485 1486 static u64 core_get_val(struct cpudata *cpudata, int pstate) 1487 { 1488 u64 val; 1489 1490 val = (u64)pstate << 8; 1491 if (global.no_turbo && !global.turbo_disabled) 1492 val |= (u64)1 << 32; 1493 1494 return val; 1495 } 1496 1497 static int knl_get_turbo_pstate(void) 1498 { 1499 u64 value; 1500 int nont, ret; 1501 1502 rdmsrl(MSR_TURBO_RATIO_LIMIT, value); 1503 nont = core_get_max_pstate(); 1504 ret = (((value) >> 8) & 0xFF); 1505 if (ret <= nont) 1506 ret = nont; 1507 return ret; 1508 } 1509 1510 static struct cpu_defaults core_params = { 1511 .funcs = { 1512 .get_max = core_get_max_pstate, 1513 .get_max_physical = core_get_max_pstate_physical, 1514 .get_min = core_get_min_pstate, 1515 .get_turbo = core_get_turbo_pstate, 1516 .get_scaling = core_get_scaling, 1517 .get_val = core_get_val, 1518 .get_target_pstate = get_target_pstate_use_performance, 1519 }, 1520 }; 1521 1522 static const struct cpu_defaults silvermont_params = { 1523 .funcs = { 1524 .get_max = atom_get_max_pstate, 1525 .get_max_physical = atom_get_max_pstate, 1526 .get_min = atom_get_min_pstate, 1527 .get_turbo = atom_get_turbo_pstate, 1528 .get_val = atom_get_val, 1529 .get_scaling = silvermont_get_scaling, 1530 .get_vid = atom_get_vid, 1531 .get_target_pstate = get_target_pstate_use_cpu_load, 1532 }, 1533 }; 1534 1535 static const struct cpu_defaults airmont_params = { 1536 .funcs = { 1537 .get_max = atom_get_max_pstate, 1538 .get_max_physical = atom_get_max_pstate, 1539 .get_min = atom_get_min_pstate, 1540 .get_turbo = atom_get_turbo_pstate, 1541 .get_val = atom_get_val, 1542 .get_scaling = airmont_get_scaling, 1543 .get_vid = atom_get_vid, 1544 .get_target_pstate = get_target_pstate_use_cpu_load, 1545 }, 1546 }; 1547 1548 static const struct cpu_defaults knl_params = { 1549 .funcs = { 1550 .get_max = 
core_get_max_pstate, 1551 .get_max_physical = core_get_max_pstate_physical, 1552 .get_min = core_get_min_pstate, 1553 .get_turbo = knl_get_turbo_pstate, 1554 .get_scaling = core_get_scaling, 1555 .get_val = core_get_val, 1556 .get_target_pstate = get_target_pstate_use_performance, 1557 }, 1558 }; 1559 1560 static const struct cpu_defaults bxt_params = { 1561 .funcs = { 1562 .get_max = core_get_max_pstate, 1563 .get_max_physical = core_get_max_pstate_physical, 1564 .get_min = core_get_min_pstate, 1565 .get_turbo = core_get_turbo_pstate, 1566 .get_scaling = core_get_scaling, 1567 .get_val = core_get_val, 1568 .get_target_pstate = get_target_pstate_use_cpu_load, 1569 }, 1570 }; 1571 1572 static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) 1573 { 1574 int max_perf = cpu->pstate.turbo_pstate; 1575 int max_perf_adj; 1576 int min_perf; 1577 1578 if (global.no_turbo || global.turbo_disabled) 1579 max_perf = cpu->pstate.max_pstate; 1580 1581 /* 1582 * performance can be limited by user through sysfs, by cpufreq 1583 * policy, or by cpu specific default values determined through 1584 * experimentation. 1585 */ 1586 max_perf_adj = fp_ext_toint(max_perf * cpu->max_perf); 1587 *max = clamp_t(int, max_perf_adj, 1588 cpu->pstate.min_pstate, cpu->pstate.turbo_pstate); 1589 1590 min_perf = fp_ext_toint(max_perf * cpu->min_perf); 1591 *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf); 1592 } 1593 1594 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) 1595 { 1596 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); 1597 cpu->pstate.current_pstate = pstate; 1598 /* 1599 * Generally, there is no guarantee that this code will always run on 1600 * the CPU being updated, so force the register update to run on the 1601 * right CPU. 
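	 *
	 * (The fast path in intel_pstate_update_pstate() below can use a
	 * plain wrmsrl() instead, because it is always invoked from the
	 * utilization-update callback running on the CPU being updated.)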
1602 */ 1603 wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, 1604 pstate_funcs.get_val(cpu, pstate)); 1605 } 1606 1607 static void intel_pstate_set_min_pstate(struct cpudata *cpu) 1608 { 1609 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); 1610 } 1611 1612 static void intel_pstate_max_within_limits(struct cpudata *cpu) 1613 { 1614 int min_pstate, max_pstate; 1615 1616 update_turbo_state(); 1617 intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate); 1618 intel_pstate_set_pstate(cpu, max_pstate); 1619 } 1620 1621 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) 1622 { 1623 cpu->pstate.min_pstate = pstate_funcs.get_min(); 1624 cpu->pstate.max_pstate = pstate_funcs.get_max(); 1625 cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical(); 1626 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); 1627 cpu->pstate.scaling = pstate_funcs.get_scaling(); 1628 cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling; 1629 cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; 1630 1631 if (pstate_funcs.get_vid) 1632 pstate_funcs.get_vid(cpu); 1633 1634 intel_pstate_set_min_pstate(cpu); 1635 } 1636 1637 static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu) 1638 { 1639 struct sample *sample = &cpu->sample; 1640 1641 sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf); 1642 } 1643 1644 static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) 1645 { 1646 u64 aperf, mperf; 1647 unsigned long flags; 1648 u64 tsc; 1649 1650 local_irq_save(flags); 1651 rdmsrl(MSR_IA32_APERF, aperf); 1652 rdmsrl(MSR_IA32_MPERF, mperf); 1653 tsc = rdtsc(); 1654 if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) { 1655 local_irq_restore(flags); 1656 return false; 1657 } 1658 local_irq_restore(flags); 1659 1660 cpu->last_sample_time = cpu->sample.time; 1661 cpu->sample.time = time; 1662 cpu->sample.aperf = aperf; 1663 cpu->sample.mperf = mperf; 1664 cpu->sample.tsc = tsc; 1665 cpu->sample.aperf -= cpu->prev_aperf; 1666 cpu->sample.mperf -= cpu->prev_mperf; 1667 cpu->sample.tsc -= cpu->prev_tsc; 1668 1669 cpu->prev_aperf = aperf; 1670 cpu->prev_mperf = mperf; 1671 cpu->prev_tsc = tsc; 1672 /* 1673 * First time this function is invoked in a given cycle, all of the 1674 * previous sample data fields are equal to zero or stale and they must 1675 * be populated with meaningful numbers for things to work, so assume 1676 * that sample.time will always be reset before setting the utilization 1677 * update hook and make the caller skip the sample then. 1678 */ 1679 return !!cpu->last_sample_time; 1680 } 1681 1682 static inline int32_t get_avg_frequency(struct cpudata *cpu) 1683 { 1684 return mul_ext_fp(cpu->sample.core_avg_perf, 1685 cpu->pstate.max_pstate_physical * cpu->pstate.scaling); 1686 } 1687 1688 static inline int32_t get_avg_pstate(struct cpudata *cpu) 1689 { 1690 return mul_ext_fp(cpu->pstate.max_pstate_physical, 1691 cpu->sample.core_avg_perf); 1692 } 1693 1694 static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) 1695 { 1696 struct sample *sample = &cpu->sample; 1697 int32_t busy_frac, boost; 1698 int target, avg_pstate; 1699 1700 busy_frac = div_fp(sample->mperf, sample->tsc); 1701 1702 boost = cpu->iowait_boost; 1703 cpu->iowait_boost >>= 1; 1704 1705 if (busy_frac < boost) 1706 busy_frac = boost; 1707 1708 sample->busy_scaled = busy_frac * 100; 1709 1710 target = global.no_turbo || global.turbo_disabled ? 
1711 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; 1712 target += target >> 2; 1713 target = mul_fp(target, busy_frac); 1714 if (target < cpu->pstate.min_pstate) 1715 target = cpu->pstate.min_pstate; 1716 1717 /* 1718 * If the average P-state during the previous cycle was higher than the 1719 * current target, add 50% of the difference to the target to reduce 1720 * possible performance oscillations and offset possible performance 1721 * loss related to moving the workload from one CPU to another within 1722 * a package/module. 1723 */ 1724 avg_pstate = get_avg_pstate(cpu); 1725 if (avg_pstate > target) 1726 target += (avg_pstate - target) >> 1; 1727 1728 return target; 1729 } 1730 1731 static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) 1732 { 1733 int32_t perf_scaled, max_pstate, current_pstate, sample_ratio; 1734 u64 duration_ns; 1735 1736 /* 1737 * perf_scaled is the ratio of the average P-state during the last 1738 * sampling period to the P-state requested last time (in percent). 1739 * 1740 * That measures the system's response to the previous P-state 1741 * selection. 1742 */ 1743 max_pstate = cpu->pstate.max_pstate_physical; 1744 current_pstate = cpu->pstate.current_pstate; 1745 perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf, 1746 div_fp(100 * max_pstate, current_pstate)); 1747 1748 /* 1749 * Since our utilization update callback will not run unless we are 1750 * in C0, check if the actual elapsed time is significantly greater (3x) 1751 * than our sample interval. If it is, then we were idle for a long 1752 * enough period of time to adjust our performance metric. 1753 */ 1754 duration_ns = cpu->sample.time - cpu->last_sample_time; 1755 if ((s64)duration_ns > pid_params.sample_rate_ns * 3) { 1756 sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns); 1757 perf_scaled = mul_fp(perf_scaled, sample_ratio); 1758 } else { 1759 sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc); 1760 if (sample_ratio < int_tofp(1)) 1761 perf_scaled = 0; 1762 } 1763 1764 cpu->sample.busy_scaled = perf_scaled; 1765 return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled); 1766 } 1767 1768 static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) 1769 { 1770 int max_perf, min_perf; 1771 1772 intel_pstate_get_min_max(cpu, &min_perf, &max_perf); 1773 pstate = clamp_t(int, pstate, min_perf, max_perf); 1774 return pstate; 1775 } 1776 1777 static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) 1778 { 1779 if (pstate == cpu->pstate.current_pstate) 1780 return; 1781 1782 cpu->pstate.current_pstate = pstate; 1783 wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); 1784 } 1785 1786 static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) 1787 { 1788 int from, target_pstate; 1789 struct sample *sample; 1790 1791 from = cpu->pstate.current_pstate; 1792 1793 target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ? 
1794 cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu); 1795 1796 update_turbo_state(); 1797 1798 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); 1799 trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); 1800 intel_pstate_update_pstate(cpu, target_pstate); 1801 1802 sample = &cpu->sample; 1803 trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf), 1804 fp_toint(sample->busy_scaled), 1805 from, 1806 cpu->pstate.current_pstate, 1807 sample->mperf, 1808 sample->aperf, 1809 sample->tsc, 1810 get_avg_frequency(cpu), 1811 fp_toint(cpu->iowait_boost * 100)); 1812 } 1813 1814 static void intel_pstate_update_util(struct update_util_data *data, u64 time, 1815 unsigned int flags) 1816 { 1817 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 1818 u64 delta_ns; 1819 1820 if (pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load) { 1821 if (flags & SCHED_CPUFREQ_IOWAIT) { 1822 cpu->iowait_boost = int_tofp(1); 1823 } else if (cpu->iowait_boost) { 1824 /* Clear iowait_boost if the CPU may have been idle. */ 1825 delta_ns = time - cpu->last_update; 1826 if (delta_ns > TICK_NSEC) 1827 cpu->iowait_boost = 0; 1828 } 1829 cpu->last_update = time; 1830 } 1831 1832 delta_ns = time - cpu->sample.time; 1833 if ((s64)delta_ns >= pid_params.sample_rate_ns) { 1834 bool sample_taken = intel_pstate_sample(cpu, time); 1835 1836 if (sample_taken) { 1837 intel_pstate_calc_avg_perf(cpu); 1838 if (!hwp_active) 1839 intel_pstate_adjust_busy_pstate(cpu); 1840 } 1841 } 1842 } 1843 1844 #define ICPU(model, policy) \ 1845 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\ 1846 (unsigned long)&policy } 1847 1848 static const struct x86_cpu_id intel_pstate_cpu_ids[] = { 1849 ICPU(INTEL_FAM6_SANDYBRIDGE, core_params), 1850 ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_params), 1851 ICPU(INTEL_FAM6_ATOM_SILVERMONT1, silvermont_params), 1852 ICPU(INTEL_FAM6_IVYBRIDGE, core_params), 1853 ICPU(INTEL_FAM6_HASWELL_CORE, core_params), 1854 ICPU(INTEL_FAM6_BROADWELL_CORE, core_params), 1855 ICPU(INTEL_FAM6_IVYBRIDGE_X, core_params), 1856 ICPU(INTEL_FAM6_HASWELL_X, core_params), 1857 ICPU(INTEL_FAM6_HASWELL_ULT, core_params), 1858 ICPU(INTEL_FAM6_HASWELL_GT3E, core_params), 1859 ICPU(INTEL_FAM6_BROADWELL_GT3E, core_params), 1860 ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_params), 1861 ICPU(INTEL_FAM6_SKYLAKE_MOBILE, core_params), 1862 ICPU(INTEL_FAM6_BROADWELL_X, core_params), 1863 ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_params), 1864 ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params), 1865 ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_params), 1866 ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_params), 1867 ICPU(INTEL_FAM6_ATOM_GOLDMONT, bxt_params), 1868 {} 1869 }; 1870 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); 1871 1872 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { 1873 ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params), 1874 ICPU(INTEL_FAM6_BROADWELL_X, core_params), 1875 ICPU(INTEL_FAM6_SKYLAKE_X, core_params), 1876 {} 1877 }; 1878 1879 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { 1880 ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_params), 1881 {} 1882 }; 1883 1884 static int intel_pstate_init_cpu(unsigned int cpunum) 1885 { 1886 struct cpudata *cpu; 1887 1888 cpu = all_cpu_data[cpunum]; 1889 1890 if (!cpu) { 1891 cpu = kzalloc(sizeof(*cpu), GFP_KERNEL); 1892 if (!cpu) 1893 return -ENOMEM; 1894 1895 all_cpu_data[cpunum] = cpu; 1896 1897 cpu->epp_default = -EINVAL; 1898 cpu->epp_powersave = -EINVAL; 1899 cpu->epp_saved = -EINVAL; 1900 } 1901 1902 
cpu = all_cpu_data[cpunum]; 1903 1904 cpu->cpu = cpunum; 1905 1906 if (hwp_active) { 1907 const struct x86_cpu_id *id; 1908 1909 id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids); 1910 if (id) 1911 intel_pstate_disable_ee(cpunum); 1912 1913 intel_pstate_hwp_enable(cpu); 1914 } else if (pstate_funcs.get_target_pstate == get_target_pstate_use_performance) { 1915 intel_pstate_pid_reset(cpu); 1916 } 1917 1918 intel_pstate_get_cpu_pstates(cpu); 1919 1920 pr_debug("controlling: cpu %d\n", cpunum); 1921 1922 return 0; 1923 } 1924 1925 static unsigned int intel_pstate_get(unsigned int cpu_num) 1926 { 1927 struct cpudata *cpu = all_cpu_data[cpu_num]; 1928 1929 return cpu ? get_avg_frequency(cpu) : 0; 1930 } 1931 1932 static void intel_pstate_set_update_util_hook(unsigned int cpu_num) 1933 { 1934 struct cpudata *cpu = all_cpu_data[cpu_num]; 1935 1936 if (cpu->update_util_set) 1937 return; 1938 1939 /* Prevent intel_pstate_update_util() from using stale data. */ 1940 cpu->sample.time = 0; 1941 cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, 1942 intel_pstate_update_util); 1943 cpu->update_util_set = true; 1944 } 1945 1946 static void intel_pstate_clear_update_util_hook(unsigned int cpu) 1947 { 1948 struct cpudata *cpu_data = all_cpu_data[cpu]; 1949 1950 if (!cpu_data->update_util_set) 1951 return; 1952 1953 cpufreq_remove_update_util_hook(cpu); 1954 cpu_data->update_util_set = false; 1955 synchronize_sched(); 1956 } 1957 1958 static int intel_pstate_get_max_freq(struct cpudata *cpu) 1959 { 1960 return global.turbo_disabled || global.no_turbo ? 1961 cpu->pstate.max_freq : cpu->pstate.turbo_freq; 1962 } 1963 1964 static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, 1965 struct cpudata *cpu) 1966 { 1967 int max_freq = intel_pstate_get_max_freq(cpu); 1968 int32_t max_policy_perf, min_policy_perf; 1969 1970 max_policy_perf = div_ext_fp(policy->max, max_freq); 1971 max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1)); 1972 if (policy->max == policy->min) { 1973 min_policy_perf = max_policy_perf; 1974 } else { 1975 min_policy_perf = div_ext_fp(policy->min, max_freq); 1976 min_policy_perf = clamp_t(int32_t, min_policy_perf, 1977 0, max_policy_perf); 1978 } 1979 1980 /* Normalize user input to [min_perf, max_perf] */ 1981 if (per_cpu_limits) { 1982 cpu->min_perf = min_policy_perf; 1983 cpu->max_perf = max_policy_perf; 1984 } else { 1985 int32_t global_min, global_max; 1986 1987 /* Global limits are in percent of the maximum turbo P-state. 
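		 * For example, percent_ext_fp(75) is 0.75 in EXT_FRAC_BITS
		 * fixed point (12288 with a 14-bit fraction); when turbo is
		 * currently unavailable (max_freq == pstate.max_freq), the
		 * turbo_pstate / max_pstate factor below rescales that
		 * turbo-relative fraction so it is comparable with the
		 * policy fractions, which are relative to max_freq.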
*/ 1988 global_max = percent_ext_fp(global.max_perf_pct); 1989 global_min = percent_ext_fp(global.min_perf_pct); 1990 if (max_freq != cpu->pstate.turbo_freq) { 1991 int32_t turbo_factor; 1992 1993 turbo_factor = div_ext_fp(cpu->pstate.turbo_pstate, 1994 cpu->pstate.max_pstate); 1995 global_min = mul_ext_fp(global_min, turbo_factor); 1996 global_max = mul_ext_fp(global_max, turbo_factor); 1997 } 1998 global_min = clamp_t(int32_t, global_min, 0, global_max); 1999 2000 cpu->min_perf = max(min_policy_perf, global_min); 2001 cpu->min_perf = min(cpu->min_perf, max_policy_perf); 2002 cpu->max_perf = min(max_policy_perf, global_max); 2003 cpu->max_perf = max(min_policy_perf, cpu->max_perf); 2004 2005 /* Make sure min_perf <= max_perf */ 2006 cpu->min_perf = min(cpu->min_perf, cpu->max_perf); 2007 } 2008 2009 cpu->max_perf = round_up(cpu->max_perf, EXT_FRAC_BITS); 2010 cpu->min_perf = round_up(cpu->min_perf, EXT_FRAC_BITS); 2011 2012 pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu, 2013 fp_ext_toint(cpu->max_perf * 100), 2014 fp_ext_toint(cpu->min_perf * 100)); 2015 } 2016 2017 static int intel_pstate_set_policy(struct cpufreq_policy *policy) 2018 { 2019 struct cpudata *cpu; 2020 2021 if (!policy->cpuinfo.max_freq) 2022 return -ENODEV; 2023 2024 pr_debug("set_policy cpuinfo.max %u policy->max %u\n", 2025 policy->cpuinfo.max_freq, policy->max); 2026 2027 cpu = all_cpu_data[policy->cpu]; 2028 cpu->policy = policy->policy; 2029 2030 mutex_lock(&intel_pstate_limits_lock); 2031 2032 intel_pstate_update_perf_limits(policy, cpu); 2033 2034 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { 2035 /* 2036 * NOHZ_FULL CPUs need this as the governor callback may not 2037 * be invoked on them. 2038 */ 2039 intel_pstate_clear_update_util_hook(policy->cpu); 2040 intel_pstate_max_within_limits(cpu); 2041 } 2042 2043 intel_pstate_set_update_util_hook(policy->cpu); 2044 2045 if (hwp_active) 2046 intel_pstate_hwp_set(policy); 2047 2048 mutex_unlock(&intel_pstate_limits_lock); 2049 2050 return 0; 2051 } 2052 2053 static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy, 2054 struct cpudata *cpu) 2055 { 2056 if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && 2057 policy->max < policy->cpuinfo.max_freq && 2058 policy->max > cpu->pstate.max_freq) { 2059 pr_debug("policy->max > max non turbo frequency\n"); 2060 policy->max = policy->cpuinfo.max_freq; 2061 } 2062 } 2063 2064 static int intel_pstate_verify_policy(struct cpufreq_policy *policy) 2065 { 2066 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2067 2068 update_turbo_state(); 2069 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, 2070 intel_pstate_get_max_freq(cpu)); 2071 2072 if (policy->policy != CPUFREQ_POLICY_POWERSAVE && 2073 policy->policy != CPUFREQ_POLICY_PERFORMANCE) 2074 return -EINVAL; 2075 2076 intel_pstate_adjust_policy_max(policy, cpu); 2077 2078 return 0; 2079 } 2080 2081 static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy) 2082 { 2083 intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]); 2084 } 2085 2086 static void intel_pstate_stop_cpu(struct cpufreq_policy *policy) 2087 { 2088 pr_debug("CPU %d exiting\n", policy->cpu); 2089 2090 intel_pstate_clear_update_util_hook(policy->cpu); 2091 if (hwp_active) 2092 intel_pstate_hwp_save_state(policy); 2093 else 2094 intel_cpufreq_stop_cpu(policy); 2095 } 2096 2097 static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) 2098 { 2099 intel_pstate_exit_perf_limits(policy); 2100 2101 policy->fast_switch_possible = false; 2102 2103 
static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	cpu->max_perf = int_ext_tofp(1);
	cpu->min_perf = 0;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	update_turbo_state();
	policy->cpuinfo.max_freq = global.turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	policy->cpuinfo.max_freq *= cpu->pstate.scaling;

	intel_pstate_init_acpi_perf_limits(policy);
	cpumask_set_cpu(policy->cpu, policy->cpus);

	policy->fast_switch_possible = true;

	return 0;
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE))
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	return 0;
}

static struct cpufreq_driver intel_pstate = {
	.flags = CPUFREQ_CONST_LOOPS,
	.verify = intel_pstate_verify_policy,
	.setpolicy = intel_pstate_set_policy,
	.suspend = intel_pstate_hwp_save_state,
	.resume = intel_pstate_resume,
	.get = intel_pstate_get,
	.init = intel_pstate_cpu_init,
	.exit = intel_pstate_cpu_exit,
	.stop_cpu = intel_pstate_stop_cpu,
	.name = "intel_pstate",
};

static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	update_turbo_state();
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     intel_pstate_get_max_freq(cpu));

	intel_pstate_adjust_policy_max(policy, cpu);

	intel_pstate_update_perf_limits(policy, cpu);

	return 0;
}

static int intel_cpufreq_target(struct cpufreq_policy *policy,
				unsigned int target_freq,
				unsigned int relation)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	struct cpufreq_freqs freqs;
	int target_pstate;

	update_turbo_state();

	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	switch (relation) {
	case CPUFREQ_RELATION_L:
		target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
		break;
	case CPUFREQ_RELATION_H:
		target_pstate = freqs.new / cpu->pstate.scaling;
		break;
	default:
		target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
		break;
	}
	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	if (target_pstate != cpu->pstate.current_pstate) {
		cpu->pstate.current_pstate = target_pstate;
		wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
			      pstate_funcs.get_val(cpu, target_pstate));
	}
	freqs.new = target_pstate * cpu->pstate.scaling;
	cpufreq_freq_transition_end(policy, &freqs, false);

	return 0;
}
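/*
 * Illustrative example (hypothetical numbers): with a scaling factor of
 * 100000 (kHz per P-state step), a 2350000 kHz request maps to P-state 24
 * under CPUFREQ_RELATION_L (rounded up, so the result is not below the
 * request) and to P-state 23 under CPUFREQ_RELATION_H (rounded down, so
 * it is not above it).  The fast-switch path below always rounds up, as
 * with CPUFREQ_RELATION_L.
 */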
static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int target_pstate;

	update_turbo_state();

	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	intel_pstate_update_pstate(cpu, target_pstate);
	return target_pstate * cpu->pstate.scaling;
}

static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
	/* This reflects the intel_pstate_get_cpu_pstates() setting. */
	policy->cur = policy->cpuinfo.min_freq;

	return 0;
}

static struct cpufreq_driver intel_cpufreq = {
	.flags = CPUFREQ_CONST_LOOPS,
	.verify = intel_cpufreq_verify_policy,
	.target = intel_cpufreq_target,
	.fast_switch = intel_cpufreq_fast_switch,
	.init = intel_cpufreq_cpu_init,
	.exit = intel_pstate_cpu_exit,
	.stop_cpu = intel_cpufreq_stop_cpu,
	.name = "intel_cpufreq",
};

static struct cpufreq_driver *default_driver = &intel_pstate;

static void intel_pstate_driver_cleanup(void)
{
	unsigned int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			if (intel_pstate_driver == &intel_pstate)
				intel_pstate_clear_update_util_hook(cpu);

			kfree(all_cpu_data[cpu]);
			all_cpu_data[cpu] = NULL;
		}
	}
	put_online_cpus();
	intel_pstate_driver = NULL;
}

static int intel_pstate_register_driver(struct cpufreq_driver *driver)
{
	int ret;

	memset(&global, 0, sizeof(global));
	global.max_perf_pct = 100;

	intel_pstate_driver = driver;
	ret = cpufreq_register_driver(intel_pstate_driver);
	if (ret) {
		intel_pstate_driver_cleanup();
		return ret;
	}

	global.min_perf_pct = min_perf_pct_min();

	if (intel_pstate_driver == &intel_pstate && !hwp_active &&
	    pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
		intel_pstate_debug_expose_params();

	return 0;
}

static int intel_pstate_unregister_driver(void)
{
	if (hwp_active)
		return -EBUSY;

	if (intel_pstate_driver == &intel_pstate && !hwp_active &&
	    pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
		intel_pstate_debug_hide_params();

	cpufreq_unregister_driver(intel_pstate_driver);
	intel_pstate_driver_cleanup();

	return 0;
}

static ssize_t intel_pstate_show_status(char *buf)
{
	if (!intel_pstate_driver)
		return sprintf(buf, "off\n");

	return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
					"active" : "passive");
}
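/*
 * Parses the driver status string ("off" / "active" / "passive"): "off"
 * unregisters the current driver, while "active" and "passive" switch to
 * the intel_pstate and intel_cpufreq drivers respectively, unregistering
 * the other one first if needed.  Switching away from the current driver
 * is refused with -EBUSY while HWP is in use, see
 * intel_pstate_unregister_driver() above.
 */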
static int intel_pstate_update_status(const char *buf, size_t size)
{
	int ret;

	if (size == 3 && !strncmp(buf, "off", size))
		return intel_pstate_driver ?
			intel_pstate_unregister_driver() : -EINVAL;

	if (size == 6 && !strncmp(buf, "active", size)) {
		if (intel_pstate_driver) {
			if (intel_pstate_driver == &intel_pstate)
				return 0;

			ret = intel_pstate_unregister_driver();
			if (ret)
				return ret;
		}

		return intel_pstate_register_driver(&intel_pstate);
	}

	if (size == 7 && !strncmp(buf, "passive", size)) {
		if (intel_pstate_driver) {
			if (intel_pstate_driver == &intel_cpufreq)
				return 0;

			ret = intel_pstate_unregister_driver();
			if (ret)
				return ret;
		}

		return intel_pstate_register_driver(&intel_cpufreq);
	}

	return -EINVAL;
}

static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
static unsigned int force_load __initdata;

static int __init intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

#ifdef CONFIG_ACPI
static void intel_pstate_use_acpi_profile(void)
{
	switch (acpi_gbl_FADT.preferred_profile) {
	case PM_MOBILE:
	case PM_TABLET:
	case PM_APPLIANCE_PC:
	case PM_DESKTOP:
	case PM_WORKSTATION:
		pstate_funcs.get_target_pstate =
			get_target_pstate_use_cpu_load;
	}
}
#else
static void intel_pstate_use_acpi_profile(void)
{
}
#endif

static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.get_val = funcs->get_val;
	pstate_funcs.get_vid = funcs->get_vid;
	pstate_funcs.get_target_pstate = funcs->get_target_pstate;

	intel_pstate_use_acpi_profile();
}

#ifdef CONFIG_ACPI

static bool __init intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool __init intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}
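/*
 * The vendor table below lists systems whose firmware provides its own
 * power management.  On the listed HP ProLiant machines (PSS) the driver
 * bows out when the firmware exposes no _PSS objects; on the listed
 * Oracle machines (PPC) it bows out whenever _PPC is present, unless
 * loading is forced with the "force" command line option.
 */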
enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16 valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int oem_pwr_table;
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] __initdata = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};

static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
			     ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					(!force_load);
			}
	}

	return false;
}

static void intel_pstate_request_control_from_smm(void)
{
	/*
	 * It may be unsafe to request P-states control from SMM if _PPC support
	 * has not been enabled.
	 */
	if (acpi_ppc)
		acpi_processor_pstate_control();
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
	{}
};

static int __init intel_pstate_init(void)
{
	int rc;

	if (no_load)
		return -ENODEV;

	if (x86_match_cpu(hwp_support_ids)) {
		copy_cpu_funcs(&core_params.funcs);
		if (no_hwp) {
			pstate_funcs.get_target_pstate = get_target_pstate_use_cpu_load;
		} else {
			hwp_active++;
			intel_pstate.attr = hwp_cpufreq_attrs;
			pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
			goto hwp_cpu_matched;
		}
	} else {
		const struct x86_cpu_id *id;
		struct cpu_defaults *cpu_def;

		id = x86_match_cpu(intel_pstate_cpu_ids);
		if (!id)
			return -ENODEV;

		cpu_def = (struct cpu_defaults *)id->driver_data;
		copy_cpu_funcs(&cpu_def->funcs);
	}

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

hwp_cpu_matched:
	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	if (!hwp_active && hwp_only)
		return -ENOTSUPP;

	pr_info("Intel P-state driver initializing\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	intel_pstate_request_control_from_smm();

	intel_pstate_sysfs_expose_params();

	mutex_lock(&intel_pstate_driver_lock);
	rc = intel_pstate_register_driver(default_driver);
	mutex_unlock(&intel_pstate_driver_lock);
	if (rc)
		return rc;

	if (hwp_active)
		pr_info("HWP enabled\n");

	return 0;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable")) {
		no_load = 1;
	} else if (!strcmp(str, "passive")) {
		pr_info("Passive mode enabled\n");
		default_driver = &intel_cpufreq;
		no_hwp = 1;
	}
	if (!strcmp(str, "no_hwp")) {
		pr_info("HWP disabled\n");
		no_hwp = 1;
	}
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	if (!strcmp(str, "per_cpu_perf_limits"))
		per_cpu_limits = true;

#ifdef CONFIG_ACPI
	if (!strcmp(str, "support_acpi_ppc"))
		acpi_ppc = true;
#endif

	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");