1 /* 2 * intel_pstate.c: Native P state management for Intel processors 3 * 4 * (C) Copyright 2012 Intel Corporation 5 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com> 6 * 7 * This program is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU General Public License 9 * as published by the Free Software Foundation; version 2 10 * of the License. 11 */ 12 13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 15 #include <linux/kernel.h> 16 #include <linux/kernel_stat.h> 17 #include <linux/module.h> 18 #include <linux/ktime.h> 19 #include <linux/hrtimer.h> 20 #include <linux/tick.h> 21 #include <linux/slab.h> 22 #include <linux/sched/cpufreq.h> 23 #include <linux/list.h> 24 #include <linux/cpu.h> 25 #include <linux/cpufreq.h> 26 #include <linux/sysfs.h> 27 #include <linux/types.h> 28 #include <linux/fs.h> 29 #include <linux/debugfs.h> 30 #include <linux/acpi.h> 31 #include <linux/vmalloc.h> 32 #include <trace/events/power.h> 33 34 #include <asm/div64.h> 35 #include <asm/msr.h> 36 #include <asm/cpu_device_id.h> 37 #include <asm/cpufeature.h> 38 #include <asm/intel-family.h> 39 40 #define INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC) 41 #define INTEL_PSTATE_HWP_SAMPLING_INTERVAL (50 * NSEC_PER_MSEC) 42 43 #define INTEL_CPUFREQ_TRANSITION_LATENCY 20000 44 45 #ifdef CONFIG_ACPI 46 #include <acpi/processor.h> 47 #include <acpi/cppc_acpi.h> 48 #endif 49 50 #define FRAC_BITS 8 51 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) 52 #define fp_toint(X) ((X) >> FRAC_BITS) 53 54 #define EXT_BITS 6 55 #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS) 56 #define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS) 57 #define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS) 58 59 static inline int32_t mul_fp(int32_t x, int32_t y) 60 { 61 return ((int64_t)x * (int64_t)y) >> FRAC_BITS; 62 } 63 64 static inline int32_t div_fp(s64 x, s64 y) 65 { 66 return div64_s64((int64_t)x << FRAC_BITS, y); 67 } 68 69 static inline int ceiling_fp(int32_t x) 70 { 71 int mask, ret; 72 73 ret = fp_toint(x); 74 mask = (1 << FRAC_BITS) - 1; 75 if (x & mask) 76 ret += 1; 77 return ret; 78 } 79 80 static inline int32_t percent_fp(int percent) 81 { 82 return div_fp(percent, 100); 83 } 84 85 static inline u64 mul_ext_fp(u64 x, u64 y) 86 { 87 return (x * y) >> EXT_FRAC_BITS; 88 } 89 90 static inline u64 div_ext_fp(u64 x, u64 y) 91 { 92 return div64_u64(x << EXT_FRAC_BITS, y); 93 } 94 95 static inline int32_t percent_ext_fp(int percent) 96 { 97 return div_ext_fp(percent, 100); 98 } 99 100 /** 101 * struct sample - Store performance sample 102 * @core_avg_perf: Ratio of APERF/MPERF which is the actual average 103 * performance during last sample period 104 * @busy_scaled: Scaled busy value which is used to calculate next 105 * P state. This can be different than core_avg_perf 106 * to account for cpu idle period 107 * @aperf: Difference of actual performance frequency clock count 108 * read from APERF MSR between last and current sample 109 * @mperf: Difference of maximum performance frequency clock count 110 * read from MPERF MSR between last and current sample 111 * @tsc: Difference of time stamp counter between last and 112 * current sample 113 * @time: Current time from scheduler 114 * 115 * This structure is used in the cpudata structure to store performance sample 116 * data for choosing next P State. 117 */ 118 struct sample { 119 int32_t core_avg_perf; 120 int32_t busy_scaled; 121 u64 aperf; 122 u64 mperf; 123 u64 tsc; 124 u64 time; 125 }; 126 127 /** 128 * struct pstate_data - Store P state data 129 * @current_pstate: Current requested P state 130 * @min_pstate: Min P state possible for this platform 131 * @max_pstate: Max P state possible for this platform 132 * @max_pstate_physical:This is physical Max P state for a processor 133 * This can be higher than the max_pstate which can 134 * be limited by platform thermal design power limits 135 * @scaling: Scaling factor to convert frequency to cpufreq 136 * frequency units 137 * @turbo_pstate: Max Turbo P state possible for this platform 138 * @max_freq: @max_pstate frequency in cpufreq units 139 * @turbo_freq: @turbo_pstate frequency in cpufreq units 140 * 141 * Stores the per cpu model P state limits and current P state. 142 */ 143 struct pstate_data { 144 int current_pstate; 145 int min_pstate; 146 int max_pstate; 147 int max_pstate_physical; 148 int scaling; 149 int turbo_pstate; 150 unsigned int max_freq; 151 unsigned int turbo_freq; 152 }; 153 154 /** 155 * struct vid_data - Stores voltage information data 156 * @min: VID data for this platform corresponding to 157 * the lowest P state 158 * @max: VID data corresponding to the highest P State. 159 * @turbo: VID data for turbo P state 160 * @ratio: Ratio of (vid max - vid min) / 161 * (max P state - Min P State) 162 * 163 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling) 164 * This data is used in Atom platforms, where in addition to target P state, 165 * the voltage data needs to be specified to select next P State. 166 */ 167 struct vid_data { 168 int min; 169 int max; 170 int turbo; 171 int32_t ratio; 172 }; 173 174 /** 175 * struct _pid - Stores PID data 176 * @setpoint: Target set point for busyness or performance 177 * @integral: Storage for accumulated error values 178 * @p_gain: PID proportional gain 179 * @i_gain: PID integral gain 180 * @d_gain: PID derivative gain 181 * @deadband: PID deadband 182 * @last_err: Last error storage for integral part of PID calculation 183 * 184 * Stores PID coefficients and last error for PID controller. 185 */ 186 struct _pid { 187 int setpoint; 188 int32_t integral; 189 int32_t p_gain; 190 int32_t i_gain; 191 int32_t d_gain; 192 int deadband; 193 int32_t last_err; 194 }; 195 196 /** 197 * struct global_params - Global parameters, mostly tunable via sysfs. 198 * @no_turbo: Whether or not to use turbo P-states. 199 * @turbo_disabled: Whethet or not turbo P-states are available at all, 200 * based on the MSR_IA32_MISC_ENABLE value and whether or 201 * not the maximum reported turbo P-state is different from 202 * the maximum reported non-turbo one. 203 * @min_perf_pct: Minimum capacity limit in percent of the maximum turbo 204 * P-state capacity. 205 * @max_perf_pct: Maximum capacity limit in percent of the maximum turbo 206 * P-state capacity. 207 */ 208 struct global_params { 209 bool no_turbo; 210 bool turbo_disabled; 211 int max_perf_pct; 212 int min_perf_pct; 213 }; 214 215 /** 216 * struct cpudata - Per CPU instance data storage 217 * @cpu: CPU number for this instance data 218 * @policy: CPUFreq policy value 219 * @update_util: CPUFreq utility callback information 220 * @update_util_set: CPUFreq utility callback is set 221 * @iowait_boost: iowait-related boost fraction 222 * @last_update: Time of the last update. 223 * @pstate: Stores P state limits for this CPU 224 * @vid: Stores VID limits for this CPU 225 * @pid: Stores PID parameters for this CPU 226 * @last_sample_time: Last Sample time 227 * @prev_aperf: Last APERF value read from APERF MSR 228 * @prev_mperf: Last MPERF value read from MPERF MSR 229 * @prev_tsc: Last timestamp counter (TSC) value 230 * @prev_cummulative_iowait: IO Wait time difference from last and 231 * current sample 232 * @sample: Storage for storing last Sample data 233 * @min_perf: Minimum capacity limit as a fraction of the maximum 234 * turbo P-state capacity. 235 * @max_perf: Maximum capacity limit as a fraction of the maximum 236 * turbo P-state capacity. 237 * @acpi_perf_data: Stores ACPI perf information read from _PSS 238 * @valid_pss_table: Set to true for valid ACPI _PSS entries found 239 * @epp_powersave: Last saved HWP energy performance preference 240 * (EPP) or energy performance bias (EPB), 241 * when policy switched to performance 242 * @epp_policy: Last saved policy used to set EPP/EPB 243 * @epp_default: Power on default HWP energy performance 244 * preference/bias 245 * @epp_saved: Saved EPP/EPB during system suspend or CPU offline 246 * operation 247 * 248 * This structure stores per CPU instance data for all CPUs. 249 */ 250 struct cpudata { 251 int cpu; 252 253 unsigned int policy; 254 struct update_util_data update_util; 255 bool update_util_set; 256 257 struct pstate_data pstate; 258 struct vid_data vid; 259 struct _pid pid; 260 261 u64 last_update; 262 u64 last_sample_time; 263 u64 prev_aperf; 264 u64 prev_mperf; 265 u64 prev_tsc; 266 u64 prev_cummulative_iowait; 267 struct sample sample; 268 int32_t min_perf; 269 int32_t max_perf; 270 #ifdef CONFIG_ACPI 271 struct acpi_processor_performance acpi_perf_data; 272 bool valid_pss_table; 273 #endif 274 unsigned int iowait_boost; 275 s16 epp_powersave; 276 s16 epp_policy; 277 s16 epp_default; 278 s16 epp_saved; 279 }; 280 281 static struct cpudata **all_cpu_data; 282 283 /** 284 * struct pstate_adjust_policy - Stores static PID configuration data 285 * @sample_rate_ms: PID calculation sample rate in ms 286 * @sample_rate_ns: Sample rate calculation in ns 287 * @deadband: PID deadband 288 * @setpoint: PID Setpoint 289 * @p_gain_pct: PID proportional gain 290 * @i_gain_pct: PID integral gain 291 * @d_gain_pct: PID derivative gain 292 * 293 * Stores per CPU model static PID configuration data. 294 */ 295 struct pstate_adjust_policy { 296 int sample_rate_ms; 297 s64 sample_rate_ns; 298 int deadband; 299 int setpoint; 300 int p_gain_pct; 301 int d_gain_pct; 302 int i_gain_pct; 303 }; 304 305 /** 306 * struct pstate_funcs - Per CPU model specific callbacks 307 * @get_max: Callback to get maximum non turbo effective P state 308 * @get_max_physical: Callback to get maximum non turbo physical P state 309 * @get_min: Callback to get minimum P state 310 * @get_turbo: Callback to get turbo P state 311 * @get_scaling: Callback to get frequency scaling factor 312 * @get_val: Callback to convert P state to actual MSR write value 313 * @get_vid: Callback to get VID data for Atom platforms 314 * @update_util: Active mode utilization update callback. 315 * 316 * Core and Atom CPU models have different way to get P State limits. This 317 * structure is used to store those callbacks. 318 */ 319 struct pstate_funcs { 320 int (*get_max)(void); 321 int (*get_max_physical)(void); 322 int (*get_min)(void); 323 int (*get_turbo)(void); 324 int (*get_scaling)(void); 325 u64 (*get_val)(struct cpudata*, int pstate); 326 void (*get_vid)(struct cpudata *); 327 void (*update_util)(struct update_util_data *data, u64 time, 328 unsigned int flags); 329 }; 330 331 static struct pstate_funcs pstate_funcs __read_mostly; 332 static struct pstate_adjust_policy pid_params __read_mostly = { 333 .sample_rate_ms = 10, 334 .sample_rate_ns = 10 * NSEC_PER_MSEC, 335 .deadband = 0, 336 .setpoint = 97, 337 .p_gain_pct = 20, 338 .d_gain_pct = 0, 339 .i_gain_pct = 0, 340 }; 341 342 static int hwp_active __read_mostly; 343 static bool per_cpu_limits __read_mostly; 344 345 static struct cpufreq_driver *intel_pstate_driver __read_mostly; 346 347 #ifdef CONFIG_ACPI 348 static bool acpi_ppc; 349 #endif 350 351 static struct global_params global; 352 353 static DEFINE_MUTEX(intel_pstate_driver_lock); 354 static DEFINE_MUTEX(intel_pstate_limits_lock); 355 356 #ifdef CONFIG_ACPI 357 358 static bool intel_pstate_get_ppc_enable_status(void) 359 { 360 if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER || 361 acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER) 362 return true; 363 364 return acpi_ppc; 365 } 366 367 #ifdef CONFIG_ACPI_CPPC_LIB 368 369 /* The work item is needed to avoid CPU hotplug locking issues */ 370 static void intel_pstste_sched_itmt_work_fn(struct work_struct *work) 371 { 372 sched_set_itmt_support(); 373 } 374 375 static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn); 376 377 static void intel_pstate_set_itmt_prio(int cpu) 378 { 379 struct cppc_perf_caps cppc_perf; 380 static u32 max_highest_perf = 0, min_highest_perf = U32_MAX; 381 int ret; 382 383 ret = cppc_get_perf_caps(cpu, &cppc_perf); 384 if (ret) 385 return; 386 387 /* 388 * The priorities can be set regardless of whether or not 389 * sched_set_itmt_support(true) has been called and it is valid to 390 * update them at any time after it has been called. 391 */ 392 sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu); 393 394 if (max_highest_perf <= min_highest_perf) { 395 if (cppc_perf.highest_perf > max_highest_perf) 396 max_highest_perf = cppc_perf.highest_perf; 397 398 if (cppc_perf.highest_perf < min_highest_perf) 399 min_highest_perf = cppc_perf.highest_perf; 400 401 if (max_highest_perf > min_highest_perf) { 402 /* 403 * This code can be run during CPU online under the 404 * CPU hotplug locks, so sched_set_itmt_support() 405 * cannot be called from here. Queue up a work item 406 * to invoke it. 407 */ 408 schedule_work(&sched_itmt_work); 409 } 410 } 411 } 412 #else 413 static void intel_pstate_set_itmt_prio(int cpu) 414 { 415 } 416 #endif 417 418 static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 419 { 420 struct cpudata *cpu; 421 int ret; 422 int i; 423 424 if (hwp_active) { 425 intel_pstate_set_itmt_prio(policy->cpu); 426 return; 427 } 428 429 if (!intel_pstate_get_ppc_enable_status()) 430 return; 431 432 cpu = all_cpu_data[policy->cpu]; 433 434 ret = acpi_processor_register_performance(&cpu->acpi_perf_data, 435 policy->cpu); 436 if (ret) 437 return; 438 439 /* 440 * Check if the control value in _PSS is for PERF_CTL MSR, which should 441 * guarantee that the states returned by it map to the states in our 442 * list directly. 443 */ 444 if (cpu->acpi_perf_data.control_register.space_id != 445 ACPI_ADR_SPACE_FIXED_HARDWARE) 446 goto err; 447 448 /* 449 * If there is only one entry _PSS, simply ignore _PSS and continue as 450 * usual without taking _PSS into account 451 */ 452 if (cpu->acpi_perf_data.state_count < 2) 453 goto err; 454 455 pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu); 456 for (i = 0; i < cpu->acpi_perf_data.state_count; i++) { 457 pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n", 458 (i == cpu->acpi_perf_data.state ? '*' : ' '), i, 459 (u32) cpu->acpi_perf_data.states[i].core_frequency, 460 (u32) cpu->acpi_perf_data.states[i].power, 461 (u32) cpu->acpi_perf_data.states[i].control); 462 } 463 464 /* 465 * The _PSS table doesn't contain whole turbo frequency range. 466 * This just contains +1 MHZ above the max non turbo frequency, 467 * with control value corresponding to max turbo ratio. But 468 * when cpufreq set policy is called, it will call with this 469 * max frequency, which will cause a reduced performance as 470 * this driver uses real max turbo frequency as the max 471 * frequency. So correct this frequency in _PSS table to 472 * correct max turbo frequency based on the turbo state. 473 * Also need to convert to MHz as _PSS freq is in MHz. 474 */ 475 if (!global.turbo_disabled) 476 cpu->acpi_perf_data.states[0].core_frequency = 477 policy->cpuinfo.max_freq / 1000; 478 cpu->valid_pss_table = true; 479 pr_debug("_PPC limits will be enforced\n"); 480 481 return; 482 483 err: 484 cpu->valid_pss_table = false; 485 acpi_processor_unregister_performance(policy->cpu); 486 } 487 488 static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 489 { 490 struct cpudata *cpu; 491 492 cpu = all_cpu_data[policy->cpu]; 493 if (!cpu->valid_pss_table) 494 return; 495 496 acpi_processor_unregister_performance(policy->cpu); 497 } 498 #else 499 static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 500 { 501 } 502 503 static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 504 { 505 } 506 #endif 507 508 static signed int pid_calc(struct _pid *pid, int32_t busy) 509 { 510 signed int result; 511 int32_t pterm, dterm, fp_error; 512 int32_t integral_limit; 513 514 fp_error = pid->setpoint - busy; 515 516 if (abs(fp_error) <= pid->deadband) 517 return 0; 518 519 pterm = mul_fp(pid->p_gain, fp_error); 520 521 pid->integral += fp_error; 522 523 /* 524 * We limit the integral here so that it will never 525 * get higher than 30. This prevents it from becoming 526 * too large an input over long periods of time and allows 527 * it to get factored out sooner. 528 * 529 * The value of 30 was chosen through experimentation. 530 */ 531 integral_limit = int_tofp(30); 532 if (pid->integral > integral_limit) 533 pid->integral = integral_limit; 534 if (pid->integral < -integral_limit) 535 pid->integral = -integral_limit; 536 537 dterm = mul_fp(pid->d_gain, fp_error - pid->last_err); 538 pid->last_err = fp_error; 539 540 result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; 541 result = result + (1 << (FRAC_BITS-1)); 542 return (signed int)fp_toint(result); 543 } 544 545 static inline void intel_pstate_pid_reset(struct cpudata *cpu) 546 { 547 struct _pid *pid = &cpu->pid; 548 549 pid->p_gain = percent_fp(pid_params.p_gain_pct); 550 pid->d_gain = percent_fp(pid_params.d_gain_pct); 551 pid->i_gain = percent_fp(pid_params.i_gain_pct); 552 pid->setpoint = int_tofp(pid_params.setpoint); 553 pid->last_err = pid->setpoint - int_tofp(100); 554 pid->deadband = int_tofp(pid_params.deadband); 555 pid->integral = 0; 556 } 557 558 static inline void update_turbo_state(void) 559 { 560 u64 misc_en; 561 struct cpudata *cpu; 562 563 cpu = all_cpu_data[0]; 564 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); 565 global.turbo_disabled = 566 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || 567 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); 568 } 569 570 static int min_perf_pct_min(void) 571 { 572 struct cpudata *cpu = all_cpu_data[0]; 573 574 return DIV_ROUND_UP(cpu->pstate.min_pstate * 100, 575 cpu->pstate.turbo_pstate); 576 } 577 578 static s16 intel_pstate_get_epb(struct cpudata *cpu_data) 579 { 580 u64 epb; 581 int ret; 582 583 if (!static_cpu_has(X86_FEATURE_EPB)) 584 return -ENXIO; 585 586 ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); 587 if (ret) 588 return (s16)ret; 589 590 return (s16)(epb & 0x0f); 591 } 592 593 static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data) 594 { 595 s16 epp; 596 597 if (static_cpu_has(X86_FEATURE_HWP_EPP)) { 598 /* 599 * When hwp_req_data is 0, means that caller didn't read 600 * MSR_HWP_REQUEST, so need to read and get EPP. 601 */ 602 if (!hwp_req_data) { 603 epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, 604 &hwp_req_data); 605 if (epp) 606 return epp; 607 } 608 epp = (hwp_req_data >> 24) & 0xff; 609 } else { 610 /* When there is no EPP present, HWP uses EPB settings */ 611 epp = intel_pstate_get_epb(cpu_data); 612 } 613 614 return epp; 615 } 616 617 static int intel_pstate_set_epb(int cpu, s16 pref) 618 { 619 u64 epb; 620 int ret; 621 622 if (!static_cpu_has(X86_FEATURE_EPB)) 623 return -ENXIO; 624 625 ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); 626 if (ret) 627 return ret; 628 629 epb = (epb & ~0x0f) | pref; 630 wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb); 631 632 return 0; 633 } 634 635 /* 636 * EPP/EPB display strings corresponding to EPP index in the 637 * energy_perf_strings[] 638 * index String 639 *------------------------------------- 640 * 0 default 641 * 1 performance 642 * 2 balance_performance 643 * 3 balance_power 644 * 4 power 645 */ 646 static const char * const energy_perf_strings[] = { 647 "default", 648 "performance", 649 "balance_performance", 650 "balance_power", 651 "power", 652 NULL 653 }; 654 655 static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data) 656 { 657 s16 epp; 658 int index = -EINVAL; 659 660 epp = intel_pstate_get_epp(cpu_data, 0); 661 if (epp < 0) 662 return epp; 663 664 if (static_cpu_has(X86_FEATURE_HWP_EPP)) { 665 /* 666 * Range: 667 * 0x00-0x3F : Performance 668 * 0x40-0x7F : Balance performance 669 * 0x80-0xBF : Balance power 670 * 0xC0-0xFF : Power 671 * The EPP is a 8 bit value, but our ranges restrict the 672 * value which can be set. Here only using top two bits 673 * effectively. 674 */ 675 index = (epp >> 6) + 1; 676 } else if (static_cpu_has(X86_FEATURE_EPB)) { 677 /* 678 * Range: 679 * 0x00-0x03 : Performance 680 * 0x04-0x07 : Balance performance 681 * 0x08-0x0B : Balance power 682 * 0x0C-0x0F : Power 683 * The EPB is a 4 bit value, but our ranges restrict the 684 * value which can be set. Here only using top two bits 685 * effectively. 686 */ 687 index = (epp >> 2) + 1; 688 } 689 690 return index; 691 } 692 693 static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data, 694 int pref_index) 695 { 696 int epp = -EINVAL; 697 int ret; 698 699 if (!pref_index) 700 epp = cpu_data->epp_default; 701 702 mutex_lock(&intel_pstate_limits_lock); 703 704 if (static_cpu_has(X86_FEATURE_HWP_EPP)) { 705 u64 value; 706 707 ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value); 708 if (ret) 709 goto return_pref; 710 711 value &= ~GENMASK_ULL(31, 24); 712 713 /* 714 * If epp is not default, convert from index into 715 * energy_perf_strings to epp value, by shifting 6 716 * bits left to use only top two bits in epp. 717 * The resultant epp need to shifted by 24 bits to 718 * epp position in MSR_HWP_REQUEST. 719 */ 720 if (epp == -EINVAL) 721 epp = (pref_index - 1) << 6; 722 723 value |= (u64)epp << 24; 724 ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value); 725 } else { 726 if (epp == -EINVAL) 727 epp = (pref_index - 1) << 2; 728 ret = intel_pstate_set_epb(cpu_data->cpu, epp); 729 } 730 return_pref: 731 mutex_unlock(&intel_pstate_limits_lock); 732 733 return ret; 734 } 735 736 static ssize_t show_energy_performance_available_preferences( 737 struct cpufreq_policy *policy, char *buf) 738 { 739 int i = 0; 740 int ret = 0; 741 742 while (energy_perf_strings[i] != NULL) 743 ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]); 744 745 ret += sprintf(&buf[ret], "\n"); 746 747 return ret; 748 } 749 750 cpufreq_freq_attr_ro(energy_performance_available_preferences); 751 752 static ssize_t store_energy_performance_preference( 753 struct cpufreq_policy *policy, const char *buf, size_t count) 754 { 755 struct cpudata *cpu_data = all_cpu_data[policy->cpu]; 756 char str_preference[21]; 757 int ret, i = 0; 758 759 ret = sscanf(buf, "%20s", str_preference); 760 if (ret != 1) 761 return -EINVAL; 762 763 while (energy_perf_strings[i] != NULL) { 764 if (!strcmp(str_preference, energy_perf_strings[i])) { 765 intel_pstate_set_energy_pref_index(cpu_data, i); 766 return count; 767 } 768 ++i; 769 } 770 771 return -EINVAL; 772 } 773 774 static ssize_t show_energy_performance_preference( 775 struct cpufreq_policy *policy, char *buf) 776 { 777 struct cpudata *cpu_data = all_cpu_data[policy->cpu]; 778 int preference; 779 780 preference = intel_pstate_get_energy_pref_index(cpu_data); 781 if (preference < 0) 782 return preference; 783 784 return sprintf(buf, "%s\n", energy_perf_strings[preference]); 785 } 786 787 cpufreq_freq_attr_rw(energy_performance_preference); 788 789 static struct freq_attr *hwp_cpufreq_attrs[] = { 790 &energy_performance_preference, 791 &energy_performance_available_preferences, 792 NULL, 793 }; 794 795 static void intel_pstate_hwp_set(unsigned int cpu) 796 { 797 struct cpudata *cpu_data = all_cpu_data[cpu]; 798 int min, hw_min, max, hw_max; 799 u64 value, cap; 800 s16 epp; 801 802 rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); 803 hw_min = HWP_LOWEST_PERF(cap); 804 if (global.no_turbo) 805 hw_max = HWP_GUARANTEED_PERF(cap); 806 else 807 hw_max = HWP_HIGHEST_PERF(cap); 808 809 max = fp_ext_toint(hw_max * cpu_data->max_perf); 810 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) 811 min = max; 812 else 813 min = fp_ext_toint(hw_max * cpu_data->min_perf); 814 815 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); 816 817 value &= ~HWP_MIN_PERF(~0L); 818 value |= HWP_MIN_PERF(min); 819 820 value &= ~HWP_MAX_PERF(~0L); 821 value |= HWP_MAX_PERF(max); 822 823 if (cpu_data->epp_policy == cpu_data->policy) 824 goto skip_epp; 825 826 cpu_data->epp_policy = cpu_data->policy; 827 828 if (cpu_data->epp_saved >= 0) { 829 epp = cpu_data->epp_saved; 830 cpu_data->epp_saved = -EINVAL; 831 goto update_epp; 832 } 833 834 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) { 835 epp = intel_pstate_get_epp(cpu_data, value); 836 cpu_data->epp_powersave = epp; 837 /* If EPP read was failed, then don't try to write */ 838 if (epp < 0) 839 goto skip_epp; 840 841 epp = 0; 842 } else { 843 /* skip setting EPP, when saved value is invalid */ 844 if (cpu_data->epp_powersave < 0) 845 goto skip_epp; 846 847 /* 848 * No need to restore EPP when it is not zero. This 849 * means: 850 * - Policy is not changed 851 * - user has manually changed 852 * - Error reading EPB 853 */ 854 epp = intel_pstate_get_epp(cpu_data, value); 855 if (epp) 856 goto skip_epp; 857 858 epp = cpu_data->epp_powersave; 859 } 860 update_epp: 861 if (static_cpu_has(X86_FEATURE_HWP_EPP)) { 862 value &= ~GENMASK_ULL(31, 24); 863 value |= (u64)epp << 24; 864 } else { 865 intel_pstate_set_epb(cpu, epp); 866 } 867 skip_epp: 868 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); 869 } 870 871 static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy) 872 { 873 struct cpudata *cpu_data = all_cpu_data[policy->cpu]; 874 875 if (!hwp_active) 876 return 0; 877 878 cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0); 879 880 return 0; 881 } 882 883 static int intel_pstate_resume(struct cpufreq_policy *policy) 884 { 885 if (!hwp_active) 886 return 0; 887 888 mutex_lock(&intel_pstate_limits_lock); 889 890 all_cpu_data[policy->cpu]->epp_policy = 0; 891 intel_pstate_hwp_set(policy->cpu); 892 893 mutex_unlock(&intel_pstate_limits_lock); 894 895 return 0; 896 } 897 898 static void intel_pstate_update_policies(void) 899 { 900 int cpu; 901 902 for_each_possible_cpu(cpu) 903 cpufreq_update_policy(cpu); 904 } 905 906 /************************** debugfs begin ************************/ 907 static int pid_param_set(void *data, u64 val) 908 { 909 unsigned int cpu; 910 911 *(u32 *)data = val; 912 pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC; 913 for_each_possible_cpu(cpu) 914 if (all_cpu_data[cpu]) 915 intel_pstate_pid_reset(all_cpu_data[cpu]); 916 917 return 0; 918 } 919 920 static int pid_param_get(void *data, u64 *val) 921 { 922 *val = *(u32 *)data; 923 return 0; 924 } 925 DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n"); 926 927 static struct dentry *debugfs_parent; 928 929 struct pid_param { 930 char *name; 931 void *value; 932 struct dentry *dentry; 933 }; 934 935 static struct pid_param pid_files[] = { 936 {"sample_rate_ms", &pid_params.sample_rate_ms, }, 937 {"d_gain_pct", &pid_params.d_gain_pct, }, 938 {"i_gain_pct", &pid_params.i_gain_pct, }, 939 {"deadband", &pid_params.deadband, }, 940 {"setpoint", &pid_params.setpoint, }, 941 {"p_gain_pct", &pid_params.p_gain_pct, }, 942 {NULL, NULL, } 943 }; 944 945 static void intel_pstate_debug_expose_params(void) 946 { 947 int i; 948 949 debugfs_parent = debugfs_create_dir("pstate_snb", NULL); 950 if (IS_ERR_OR_NULL(debugfs_parent)) 951 return; 952 953 for (i = 0; pid_files[i].name; i++) { 954 struct dentry *dentry; 955 956 dentry = debugfs_create_file(pid_files[i].name, 0660, 957 debugfs_parent, pid_files[i].value, 958 &fops_pid_param); 959 if (!IS_ERR(dentry)) 960 pid_files[i].dentry = dentry; 961 } 962 } 963 964 static void intel_pstate_debug_hide_params(void) 965 { 966 int i; 967 968 if (IS_ERR_OR_NULL(debugfs_parent)) 969 return; 970 971 for (i = 0; pid_files[i].name; i++) { 972 debugfs_remove(pid_files[i].dentry); 973 pid_files[i].dentry = NULL; 974 } 975 976 debugfs_remove(debugfs_parent); 977 debugfs_parent = NULL; 978 } 979 980 /************************** debugfs end ************************/ 981 982 /************************** sysfs begin ************************/ 983 #define show_one(file_name, object) \ 984 static ssize_t show_##file_name \ 985 (struct kobject *kobj, struct attribute *attr, char *buf) \ 986 { \ 987 return sprintf(buf, "%u\n", global.object); \ 988 } 989 990 static ssize_t intel_pstate_show_status(char *buf); 991 static int intel_pstate_update_status(const char *buf, size_t size); 992 993 static ssize_t show_status(struct kobject *kobj, 994 struct attribute *attr, char *buf) 995 { 996 ssize_t ret; 997 998 mutex_lock(&intel_pstate_driver_lock); 999 ret = intel_pstate_show_status(buf); 1000 mutex_unlock(&intel_pstate_driver_lock); 1001 1002 return ret; 1003 } 1004 1005 static ssize_t store_status(struct kobject *a, struct attribute *b, 1006 const char *buf, size_t count) 1007 { 1008 char *p = memchr(buf, '\n', count); 1009 int ret; 1010 1011 mutex_lock(&intel_pstate_driver_lock); 1012 ret = intel_pstate_update_status(buf, p ? p - buf : count); 1013 mutex_unlock(&intel_pstate_driver_lock); 1014 1015 return ret < 0 ? ret : count; 1016 } 1017 1018 static ssize_t show_turbo_pct(struct kobject *kobj, 1019 struct attribute *attr, char *buf) 1020 { 1021 struct cpudata *cpu; 1022 int total, no_turbo, turbo_pct; 1023 uint32_t turbo_fp; 1024 1025 mutex_lock(&intel_pstate_driver_lock); 1026 1027 if (!intel_pstate_driver) { 1028 mutex_unlock(&intel_pstate_driver_lock); 1029 return -EAGAIN; 1030 } 1031 1032 cpu = all_cpu_data[0]; 1033 1034 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; 1035 no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1; 1036 turbo_fp = div_fp(no_turbo, total); 1037 turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100))); 1038 1039 mutex_unlock(&intel_pstate_driver_lock); 1040 1041 return sprintf(buf, "%u\n", turbo_pct); 1042 } 1043 1044 static ssize_t show_num_pstates(struct kobject *kobj, 1045 struct attribute *attr, char *buf) 1046 { 1047 struct cpudata *cpu; 1048 int total; 1049 1050 mutex_lock(&intel_pstate_driver_lock); 1051 1052 if (!intel_pstate_driver) { 1053 mutex_unlock(&intel_pstate_driver_lock); 1054 return -EAGAIN; 1055 } 1056 1057 cpu = all_cpu_data[0]; 1058 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; 1059 1060 mutex_unlock(&intel_pstate_driver_lock); 1061 1062 return sprintf(buf, "%u\n", total); 1063 } 1064 1065 static ssize_t show_no_turbo(struct kobject *kobj, 1066 struct attribute *attr, char *buf) 1067 { 1068 ssize_t ret; 1069 1070 mutex_lock(&intel_pstate_driver_lock); 1071 1072 if (!intel_pstate_driver) { 1073 mutex_unlock(&intel_pstate_driver_lock); 1074 return -EAGAIN; 1075 } 1076 1077 update_turbo_state(); 1078 if (global.turbo_disabled) 1079 ret = sprintf(buf, "%u\n", global.turbo_disabled); 1080 else 1081 ret = sprintf(buf, "%u\n", global.no_turbo); 1082 1083 mutex_unlock(&intel_pstate_driver_lock); 1084 1085 return ret; 1086 } 1087 1088 static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, 1089 const char *buf, size_t count) 1090 { 1091 unsigned int input; 1092 int ret; 1093 1094 ret = sscanf(buf, "%u", &input); 1095 if (ret != 1) 1096 return -EINVAL; 1097 1098 mutex_lock(&intel_pstate_driver_lock); 1099 1100 if (!intel_pstate_driver) { 1101 mutex_unlock(&intel_pstate_driver_lock); 1102 return -EAGAIN; 1103 } 1104 1105 mutex_lock(&intel_pstate_limits_lock); 1106 1107 update_turbo_state(); 1108 if (global.turbo_disabled) { 1109 pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); 1110 mutex_unlock(&intel_pstate_limits_lock); 1111 mutex_unlock(&intel_pstate_driver_lock); 1112 return -EPERM; 1113 } 1114 1115 global.no_turbo = clamp_t(int, input, 0, 1); 1116 1117 if (global.no_turbo) { 1118 struct cpudata *cpu = all_cpu_data[0]; 1119 int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate; 1120 1121 /* Squash the global minimum into the permitted range. */ 1122 if (global.min_perf_pct > pct) 1123 global.min_perf_pct = pct; 1124 } 1125 1126 mutex_unlock(&intel_pstate_limits_lock); 1127 1128 intel_pstate_update_policies(); 1129 1130 mutex_unlock(&intel_pstate_driver_lock); 1131 1132 return count; 1133 } 1134 1135 static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, 1136 const char *buf, size_t count) 1137 { 1138 unsigned int input; 1139 int ret; 1140 1141 ret = sscanf(buf, "%u", &input); 1142 if (ret != 1) 1143 return -EINVAL; 1144 1145 mutex_lock(&intel_pstate_driver_lock); 1146 1147 if (!intel_pstate_driver) { 1148 mutex_unlock(&intel_pstate_driver_lock); 1149 return -EAGAIN; 1150 } 1151 1152 mutex_lock(&intel_pstate_limits_lock); 1153 1154 global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100); 1155 1156 mutex_unlock(&intel_pstate_limits_lock); 1157 1158 intel_pstate_update_policies(); 1159 1160 mutex_unlock(&intel_pstate_driver_lock); 1161 1162 return count; 1163 } 1164 1165 static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, 1166 const char *buf, size_t count) 1167 { 1168 unsigned int input; 1169 int ret; 1170 1171 ret = sscanf(buf, "%u", &input); 1172 if (ret != 1) 1173 return -EINVAL; 1174 1175 mutex_lock(&intel_pstate_driver_lock); 1176 1177 if (!intel_pstate_driver) { 1178 mutex_unlock(&intel_pstate_driver_lock); 1179 return -EAGAIN; 1180 } 1181 1182 mutex_lock(&intel_pstate_limits_lock); 1183 1184 global.min_perf_pct = clamp_t(int, input, 1185 min_perf_pct_min(), global.max_perf_pct); 1186 1187 mutex_unlock(&intel_pstate_limits_lock); 1188 1189 intel_pstate_update_policies(); 1190 1191 mutex_unlock(&intel_pstate_driver_lock); 1192 1193 return count; 1194 } 1195 1196 show_one(max_perf_pct, max_perf_pct); 1197 show_one(min_perf_pct, min_perf_pct); 1198 1199 define_one_global_rw(status); 1200 define_one_global_rw(no_turbo); 1201 define_one_global_rw(max_perf_pct); 1202 define_one_global_rw(min_perf_pct); 1203 define_one_global_ro(turbo_pct); 1204 define_one_global_ro(num_pstates); 1205 1206 static struct attribute *intel_pstate_attributes[] = { 1207 &status.attr, 1208 &no_turbo.attr, 1209 &turbo_pct.attr, 1210 &num_pstates.attr, 1211 NULL 1212 }; 1213 1214 static struct attribute_group intel_pstate_attr_group = { 1215 .attrs = intel_pstate_attributes, 1216 }; 1217 1218 static void __init intel_pstate_sysfs_expose_params(void) 1219 { 1220 struct kobject *intel_pstate_kobject; 1221 int rc; 1222 1223 intel_pstate_kobject = kobject_create_and_add("intel_pstate", 1224 &cpu_subsys.dev_root->kobj); 1225 if (WARN_ON(!intel_pstate_kobject)) 1226 return; 1227 1228 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group); 1229 if (WARN_ON(rc)) 1230 return; 1231 1232 /* 1233 * If per cpu limits are enforced there are no global limits, so 1234 * return without creating max/min_perf_pct attributes 1235 */ 1236 if (per_cpu_limits) 1237 return; 1238 1239 rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr); 1240 WARN_ON(rc); 1241 1242 rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr); 1243 WARN_ON(rc); 1244 1245 } 1246 /************************** sysfs end ************************/ 1247 1248 static void intel_pstate_hwp_enable(struct cpudata *cpudata) 1249 { 1250 /* First disable HWP notification interrupt as we don't process them */ 1251 if (static_cpu_has(X86_FEATURE_HWP_NOTIFY)) 1252 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); 1253 1254 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); 1255 cpudata->epp_policy = 0; 1256 if (cpudata->epp_default == -EINVAL) 1257 cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); 1258 } 1259 1260 #define MSR_IA32_POWER_CTL_BIT_EE 19 1261 1262 /* Disable energy efficiency optimization */ 1263 static void intel_pstate_disable_ee(int cpu) 1264 { 1265 u64 power_ctl; 1266 int ret; 1267 1268 ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl); 1269 if (ret) 1270 return; 1271 1272 if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) { 1273 pr_info("Disabling energy efficiency optimization\n"); 1274 power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE); 1275 wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl); 1276 } 1277 } 1278 1279 static int atom_get_min_pstate(void) 1280 { 1281 u64 value; 1282 1283 rdmsrl(MSR_ATOM_CORE_RATIOS, value); 1284 return (value >> 8) & 0x7F; 1285 } 1286 1287 static int atom_get_max_pstate(void) 1288 { 1289 u64 value; 1290 1291 rdmsrl(MSR_ATOM_CORE_RATIOS, value); 1292 return (value >> 16) & 0x7F; 1293 } 1294 1295 static int atom_get_turbo_pstate(void) 1296 { 1297 u64 value; 1298 1299 rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value); 1300 return value & 0x7F; 1301 } 1302 1303 static u64 atom_get_val(struct cpudata *cpudata, int pstate) 1304 { 1305 u64 val; 1306 int32_t vid_fp; 1307 u32 vid; 1308 1309 val = (u64)pstate << 8; 1310 if (global.no_turbo && !global.turbo_disabled) 1311 val |= (u64)1 << 32; 1312 1313 vid_fp = cpudata->vid.min + mul_fp( 1314 int_tofp(pstate - cpudata->pstate.min_pstate), 1315 cpudata->vid.ratio); 1316 1317 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); 1318 vid = ceiling_fp(vid_fp); 1319 1320 if (pstate > cpudata->pstate.max_pstate) 1321 vid = cpudata->vid.turbo; 1322 1323 return val | vid; 1324 } 1325 1326 static int silvermont_get_scaling(void) 1327 { 1328 u64 value; 1329 int i; 1330 /* Defined in Table 35-6 from SDM (Sept 2015) */ 1331 static int silvermont_freq_table[] = { 1332 83300, 100000, 133300, 116700, 80000}; 1333 1334 rdmsrl(MSR_FSB_FREQ, value); 1335 i = value & 0x7; 1336 WARN_ON(i > 4); 1337 1338 return silvermont_freq_table[i]; 1339 } 1340 1341 static int airmont_get_scaling(void) 1342 { 1343 u64 value; 1344 int i; 1345 /* Defined in Table 35-10 from SDM (Sept 2015) */ 1346 static int airmont_freq_table[] = { 1347 83300, 100000, 133300, 116700, 80000, 1348 93300, 90000, 88900, 87500}; 1349 1350 rdmsrl(MSR_FSB_FREQ, value); 1351 i = value & 0xF; 1352 WARN_ON(i > 8); 1353 1354 return airmont_freq_table[i]; 1355 } 1356 1357 static void atom_get_vid(struct cpudata *cpudata) 1358 { 1359 u64 value; 1360 1361 rdmsrl(MSR_ATOM_CORE_VIDS, value); 1362 cpudata->vid.min = int_tofp((value >> 8) & 0x7f); 1363 cpudata->vid.max = int_tofp((value >> 16) & 0x7f); 1364 cpudata->vid.ratio = div_fp( 1365 cpudata->vid.max - cpudata->vid.min, 1366 int_tofp(cpudata->pstate.max_pstate - 1367 cpudata->pstate.min_pstate)); 1368 1369 rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value); 1370 cpudata->vid.turbo = value & 0x7f; 1371 } 1372 1373 static int core_get_min_pstate(void) 1374 { 1375 u64 value; 1376 1377 rdmsrl(MSR_PLATFORM_INFO, value); 1378 return (value >> 40) & 0xFF; 1379 } 1380 1381 static int core_get_max_pstate_physical(void) 1382 { 1383 u64 value; 1384 1385 rdmsrl(MSR_PLATFORM_INFO, value); 1386 return (value >> 8) & 0xFF; 1387 } 1388 1389 static int core_get_tdp_ratio(u64 plat_info) 1390 { 1391 /* Check how many TDP levels present */ 1392 if (plat_info & 0x600000000) { 1393 u64 tdp_ctrl; 1394 u64 tdp_ratio; 1395 int tdp_msr; 1396 int err; 1397 1398 /* Get the TDP level (0, 1, 2) to get ratios */ 1399 err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); 1400 if (err) 1401 return err; 1402 1403 /* TDP MSR are continuous starting at 0x648 */ 1404 tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03); 1405 err = rdmsrl_safe(tdp_msr, &tdp_ratio); 1406 if (err) 1407 return err; 1408 1409 /* For level 1 and 2, bits[23:16] contain the ratio */ 1410 if (tdp_ctrl & 0x03) 1411 tdp_ratio >>= 16; 1412 1413 tdp_ratio &= 0xff; /* ratios are only 8 bits long */ 1414 pr_debug("tdp_ratio %x\n", (int)tdp_ratio); 1415 1416 return (int)tdp_ratio; 1417 } 1418 1419 return -ENXIO; 1420 } 1421 1422 static int core_get_max_pstate(void) 1423 { 1424 u64 tar; 1425 u64 plat_info; 1426 int max_pstate; 1427 int tdp_ratio; 1428 int err; 1429 1430 rdmsrl(MSR_PLATFORM_INFO, plat_info); 1431 max_pstate = (plat_info >> 8) & 0xFF; 1432 1433 tdp_ratio = core_get_tdp_ratio(plat_info); 1434 if (tdp_ratio <= 0) 1435 return max_pstate; 1436 1437 if (hwp_active) { 1438 /* Turbo activation ratio is not used on HWP platforms */ 1439 return tdp_ratio; 1440 } 1441 1442 err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar); 1443 if (!err) { 1444 int tar_levels; 1445 1446 /* Do some sanity checking for safety */ 1447 tar_levels = tar & 0xff; 1448 if (tdp_ratio - 1 == tar_levels) { 1449 max_pstate = tar_levels; 1450 pr_debug("max_pstate=TAC %x\n", max_pstate); 1451 } 1452 } 1453 1454 return max_pstate; 1455 } 1456 1457 static int core_get_turbo_pstate(void) 1458 { 1459 u64 value; 1460 int nont, ret; 1461 1462 rdmsrl(MSR_TURBO_RATIO_LIMIT, value); 1463 nont = core_get_max_pstate(); 1464 ret = (value) & 255; 1465 if (ret <= nont) 1466 ret = nont; 1467 return ret; 1468 } 1469 1470 static inline int core_get_scaling(void) 1471 { 1472 return 100000; 1473 } 1474 1475 static u64 core_get_val(struct cpudata *cpudata, int pstate) 1476 { 1477 u64 val; 1478 1479 val = (u64)pstate << 8; 1480 if (global.no_turbo && !global.turbo_disabled) 1481 val |= (u64)1 << 32; 1482 1483 return val; 1484 } 1485 1486 static int knl_get_turbo_pstate(void) 1487 { 1488 u64 value; 1489 int nont, ret; 1490 1491 rdmsrl(MSR_TURBO_RATIO_LIMIT, value); 1492 nont = core_get_max_pstate(); 1493 ret = (((value) >> 8) & 0xFF); 1494 if (ret <= nont) 1495 ret = nont; 1496 return ret; 1497 } 1498 1499 static int intel_pstate_get_base_pstate(struct cpudata *cpu) 1500 { 1501 return global.no_turbo || global.turbo_disabled ? 1502 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; 1503 } 1504 1505 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) 1506 { 1507 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); 1508 cpu->pstate.current_pstate = pstate; 1509 /* 1510 * Generally, there is no guarantee that this code will always run on 1511 * the CPU being updated, so force the register update to run on the 1512 * right CPU. 1513 */ 1514 wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, 1515 pstate_funcs.get_val(cpu, pstate)); 1516 } 1517 1518 static void intel_pstate_set_min_pstate(struct cpudata *cpu) 1519 { 1520 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); 1521 } 1522 1523 static void intel_pstate_max_within_limits(struct cpudata *cpu) 1524 { 1525 int pstate; 1526 1527 update_turbo_state(); 1528 pstate = intel_pstate_get_base_pstate(cpu); 1529 pstate = max(cpu->pstate.min_pstate, 1530 fp_ext_toint(pstate * cpu->max_perf)); 1531 intel_pstate_set_pstate(cpu, pstate); 1532 } 1533 1534 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) 1535 { 1536 cpu->pstate.min_pstate = pstate_funcs.get_min(); 1537 cpu->pstate.max_pstate = pstate_funcs.get_max(); 1538 cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical(); 1539 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); 1540 cpu->pstate.scaling = pstate_funcs.get_scaling(); 1541 cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling; 1542 cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; 1543 1544 if (pstate_funcs.get_vid) 1545 pstate_funcs.get_vid(cpu); 1546 1547 intel_pstate_set_min_pstate(cpu); 1548 } 1549 1550 static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu) 1551 { 1552 struct sample *sample = &cpu->sample; 1553 1554 sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf); 1555 } 1556 1557 static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) 1558 { 1559 u64 aperf, mperf; 1560 unsigned long flags; 1561 u64 tsc; 1562 1563 local_irq_save(flags); 1564 rdmsrl(MSR_IA32_APERF, aperf); 1565 rdmsrl(MSR_IA32_MPERF, mperf); 1566 tsc = rdtsc(); 1567 if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) { 1568 local_irq_restore(flags); 1569 return false; 1570 } 1571 local_irq_restore(flags); 1572 1573 cpu->last_sample_time = cpu->sample.time; 1574 cpu->sample.time = time; 1575 cpu->sample.aperf = aperf; 1576 cpu->sample.mperf = mperf; 1577 cpu->sample.tsc = tsc; 1578 cpu->sample.aperf -= cpu->prev_aperf; 1579 cpu->sample.mperf -= cpu->prev_mperf; 1580 cpu->sample.tsc -= cpu->prev_tsc; 1581 1582 cpu->prev_aperf = aperf; 1583 cpu->prev_mperf = mperf; 1584 cpu->prev_tsc = tsc; 1585 /* 1586 * First time this function is invoked in a given cycle, all of the 1587 * previous sample data fields are equal to zero or stale and they must 1588 * be populated with meaningful numbers for things to work, so assume 1589 * that sample.time will always be reset before setting the utilization 1590 * update hook and make the caller skip the sample then. 1591 */ 1592 if (cpu->last_sample_time) { 1593 intel_pstate_calc_avg_perf(cpu); 1594 return true; 1595 } 1596 return false; 1597 } 1598 1599 static inline int32_t get_avg_frequency(struct cpudata *cpu) 1600 { 1601 return mul_ext_fp(cpu->sample.core_avg_perf, 1602 cpu->pstate.max_pstate_physical * cpu->pstate.scaling); 1603 } 1604 1605 static inline int32_t get_avg_pstate(struct cpudata *cpu) 1606 { 1607 return mul_ext_fp(cpu->pstate.max_pstate_physical, 1608 cpu->sample.core_avg_perf); 1609 } 1610 1611 static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) 1612 { 1613 struct sample *sample = &cpu->sample; 1614 int32_t busy_frac, boost; 1615 int target, avg_pstate; 1616 1617 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) 1618 return cpu->pstate.turbo_pstate; 1619 1620 busy_frac = div_fp(sample->mperf, sample->tsc); 1621 1622 boost = cpu->iowait_boost; 1623 cpu->iowait_boost >>= 1; 1624 1625 if (busy_frac < boost) 1626 busy_frac = boost; 1627 1628 sample->busy_scaled = busy_frac * 100; 1629 1630 target = global.no_turbo || global.turbo_disabled ? 1631 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; 1632 target += target >> 2; 1633 target = mul_fp(target, busy_frac); 1634 if (target < cpu->pstate.min_pstate) 1635 target = cpu->pstate.min_pstate; 1636 1637 /* 1638 * If the average P-state during the previous cycle was higher than the 1639 * current target, add 50% of the difference to the target to reduce 1640 * possible performance oscillations and offset possible performance 1641 * loss related to moving the workload from one CPU to another within 1642 * a package/module. 1643 */ 1644 avg_pstate = get_avg_pstate(cpu); 1645 if (avg_pstate > target) 1646 target += (avg_pstate - target) >> 1; 1647 1648 return target; 1649 } 1650 1651 static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) 1652 { 1653 int32_t perf_scaled, max_pstate, current_pstate, sample_ratio; 1654 u64 duration_ns; 1655 1656 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) 1657 return cpu->pstate.turbo_pstate; 1658 1659 /* 1660 * perf_scaled is the ratio of the average P-state during the last 1661 * sampling period to the P-state requested last time (in percent). 1662 * 1663 * That measures the system's response to the previous P-state 1664 * selection. 1665 */ 1666 max_pstate = cpu->pstate.max_pstate_physical; 1667 current_pstate = cpu->pstate.current_pstate; 1668 perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf, 1669 div_fp(100 * max_pstate, current_pstate)); 1670 1671 /* 1672 * Since our utilization update callback will not run unless we are 1673 * in C0, check if the actual elapsed time is significantly greater (3x) 1674 * than our sample interval. If it is, then we were idle for a long 1675 * enough period of time to adjust our performance metric. 1676 */ 1677 duration_ns = cpu->sample.time - cpu->last_sample_time; 1678 if ((s64)duration_ns > pid_params.sample_rate_ns * 3) { 1679 sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns); 1680 perf_scaled = mul_fp(perf_scaled, sample_ratio); 1681 } else { 1682 sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc); 1683 if (sample_ratio < int_tofp(1)) 1684 perf_scaled = 0; 1685 } 1686 1687 cpu->sample.busy_scaled = perf_scaled; 1688 return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled); 1689 } 1690 1691 static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) 1692 { 1693 int max_pstate = intel_pstate_get_base_pstate(cpu); 1694 int min_pstate; 1695 1696 min_pstate = max(cpu->pstate.min_pstate, 1697 fp_ext_toint(max_pstate * cpu->min_perf)); 1698 max_pstate = max(min_pstate, fp_ext_toint(max_pstate * cpu->max_perf)); 1699 return clamp_t(int, pstate, min_pstate, max_pstate); 1700 } 1701 1702 static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) 1703 { 1704 if (pstate == cpu->pstate.current_pstate) 1705 return; 1706 1707 cpu->pstate.current_pstate = pstate; 1708 wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); 1709 } 1710 1711 static void intel_pstate_adjust_pstate(struct cpudata *cpu, int target_pstate) 1712 { 1713 int from = cpu->pstate.current_pstate; 1714 struct sample *sample; 1715 1716 update_turbo_state(); 1717 1718 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); 1719 trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); 1720 intel_pstate_update_pstate(cpu, target_pstate); 1721 1722 sample = &cpu->sample; 1723 trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf), 1724 fp_toint(sample->busy_scaled), 1725 from, 1726 cpu->pstate.current_pstate, 1727 sample->mperf, 1728 sample->aperf, 1729 sample->tsc, 1730 get_avg_frequency(cpu), 1731 fp_toint(cpu->iowait_boost * 100)); 1732 } 1733 1734 static void intel_pstate_update_util_hwp(struct update_util_data *data, 1735 u64 time, unsigned int flags) 1736 { 1737 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 1738 u64 delta_ns = time - cpu->sample.time; 1739 1740 if ((s64)delta_ns >= INTEL_PSTATE_HWP_SAMPLING_INTERVAL) 1741 intel_pstate_sample(cpu, time); 1742 } 1743 1744 static void intel_pstate_update_util_pid(struct update_util_data *data, 1745 u64 time, unsigned int flags) 1746 { 1747 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 1748 u64 delta_ns = time - cpu->sample.time; 1749 1750 if ((s64)delta_ns < pid_params.sample_rate_ns) 1751 return; 1752 1753 if (intel_pstate_sample(cpu, time)) { 1754 int target_pstate; 1755 1756 target_pstate = get_target_pstate_use_performance(cpu); 1757 intel_pstate_adjust_pstate(cpu, target_pstate); 1758 } 1759 } 1760 1761 static void intel_pstate_update_util(struct update_util_data *data, u64 time, 1762 unsigned int flags) 1763 { 1764 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 1765 u64 delta_ns; 1766 1767 if (flags & SCHED_CPUFREQ_IOWAIT) { 1768 cpu->iowait_boost = int_tofp(1); 1769 } else if (cpu->iowait_boost) { 1770 /* Clear iowait_boost if the CPU may have been idle. */ 1771 delta_ns = time - cpu->last_update; 1772 if (delta_ns > TICK_NSEC) 1773 cpu->iowait_boost = 0; 1774 } 1775 cpu->last_update = time; 1776 delta_ns = time - cpu->sample.time; 1777 if ((s64)delta_ns < INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL) 1778 return; 1779 1780 if (intel_pstate_sample(cpu, time)) { 1781 int target_pstate; 1782 1783 target_pstate = get_target_pstate_use_cpu_load(cpu); 1784 intel_pstate_adjust_pstate(cpu, target_pstate); 1785 } 1786 } 1787 1788 static struct pstate_funcs core_funcs = { 1789 .get_max = core_get_max_pstate, 1790 .get_max_physical = core_get_max_pstate_physical, 1791 .get_min = core_get_min_pstate, 1792 .get_turbo = core_get_turbo_pstate, 1793 .get_scaling = core_get_scaling, 1794 .get_val = core_get_val, 1795 .update_util = intel_pstate_update_util_pid, 1796 }; 1797 1798 static const struct pstate_funcs silvermont_funcs = { 1799 .get_max = atom_get_max_pstate, 1800 .get_max_physical = atom_get_max_pstate, 1801 .get_min = atom_get_min_pstate, 1802 .get_turbo = atom_get_turbo_pstate, 1803 .get_val = atom_get_val, 1804 .get_scaling = silvermont_get_scaling, 1805 .get_vid = atom_get_vid, 1806 .update_util = intel_pstate_update_util, 1807 }; 1808 1809 static const struct pstate_funcs airmont_funcs = { 1810 .get_max = atom_get_max_pstate, 1811 .get_max_physical = atom_get_max_pstate, 1812 .get_min = atom_get_min_pstate, 1813 .get_turbo = atom_get_turbo_pstate, 1814 .get_val = atom_get_val, 1815 .get_scaling = airmont_get_scaling, 1816 .get_vid = atom_get_vid, 1817 .update_util = intel_pstate_update_util, 1818 }; 1819 1820 static const struct pstate_funcs knl_funcs = { 1821 .get_max = core_get_max_pstate, 1822 .get_max_physical = core_get_max_pstate_physical, 1823 .get_min = core_get_min_pstate, 1824 .get_turbo = knl_get_turbo_pstate, 1825 .get_scaling = core_get_scaling, 1826 .get_val = core_get_val, 1827 .update_util = intel_pstate_update_util_pid, 1828 }; 1829 1830 static const struct pstate_funcs bxt_funcs = { 1831 .get_max = core_get_max_pstate, 1832 .get_max_physical = core_get_max_pstate_physical, 1833 .get_min = core_get_min_pstate, 1834 .get_turbo = core_get_turbo_pstate, 1835 .get_scaling = core_get_scaling, 1836 .get_val = core_get_val, 1837 .update_util = intel_pstate_update_util, 1838 }; 1839 1840 #define ICPU(model, policy) \ 1841 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\ 1842 (unsigned long)&policy } 1843 1844 static const struct x86_cpu_id intel_pstate_cpu_ids[] = { 1845 ICPU(INTEL_FAM6_SANDYBRIDGE, core_funcs), 1846 ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_funcs), 1847 ICPU(INTEL_FAM6_ATOM_SILVERMONT1, silvermont_funcs), 1848 ICPU(INTEL_FAM6_IVYBRIDGE, core_funcs), 1849 ICPU(INTEL_FAM6_HASWELL_CORE, core_funcs), 1850 ICPU(INTEL_FAM6_BROADWELL_CORE, core_funcs), 1851 ICPU(INTEL_FAM6_IVYBRIDGE_X, core_funcs), 1852 ICPU(INTEL_FAM6_HASWELL_X, core_funcs), 1853 ICPU(INTEL_FAM6_HASWELL_ULT, core_funcs), 1854 ICPU(INTEL_FAM6_HASWELL_GT3E, core_funcs), 1855 ICPU(INTEL_FAM6_BROADWELL_GT3E, core_funcs), 1856 ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_funcs), 1857 ICPU(INTEL_FAM6_SKYLAKE_MOBILE, core_funcs), 1858 ICPU(INTEL_FAM6_BROADWELL_X, core_funcs), 1859 ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_funcs), 1860 ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs), 1861 ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs), 1862 ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs), 1863 ICPU(INTEL_FAM6_ATOM_GOLDMONT, bxt_funcs), 1864 {} 1865 }; 1866 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); 1867 1868 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { 1869 ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs), 1870 ICPU(INTEL_FAM6_BROADWELL_X, core_funcs), 1871 ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs), 1872 {} 1873 }; 1874 1875 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { 1876 ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_funcs), 1877 {} 1878 }; 1879 1880 static bool pid_in_use(void); 1881 1882 static int intel_pstate_init_cpu(unsigned int cpunum) 1883 { 1884 struct cpudata *cpu; 1885 1886 cpu = all_cpu_data[cpunum]; 1887 1888 if (!cpu) { 1889 cpu = kzalloc(sizeof(*cpu), GFP_KERNEL); 1890 if (!cpu) 1891 return -ENOMEM; 1892 1893 all_cpu_data[cpunum] = cpu; 1894 1895 cpu->epp_default = -EINVAL; 1896 cpu->epp_powersave = -EINVAL; 1897 cpu->epp_saved = -EINVAL; 1898 } 1899 1900 cpu = all_cpu_data[cpunum]; 1901 1902 cpu->cpu = cpunum; 1903 1904 if (hwp_active) { 1905 const struct x86_cpu_id *id; 1906 1907 id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids); 1908 if (id) 1909 intel_pstate_disable_ee(cpunum); 1910 1911 intel_pstate_hwp_enable(cpu); 1912 } else if (pid_in_use()) { 1913 intel_pstate_pid_reset(cpu); 1914 } 1915 1916 intel_pstate_get_cpu_pstates(cpu); 1917 1918 pr_debug("controlling: cpu %d\n", cpunum); 1919 1920 return 0; 1921 } 1922 1923 static unsigned int intel_pstate_get(unsigned int cpu_num) 1924 { 1925 struct cpudata *cpu = all_cpu_data[cpu_num]; 1926 1927 return cpu ? get_avg_frequency(cpu) : 0; 1928 } 1929 1930 static void intel_pstate_set_update_util_hook(unsigned int cpu_num) 1931 { 1932 struct cpudata *cpu = all_cpu_data[cpu_num]; 1933 1934 if (cpu->update_util_set) 1935 return; 1936 1937 /* Prevent intel_pstate_update_util() from using stale data. */ 1938 cpu->sample.time = 0; 1939 cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, 1940 pstate_funcs.update_util); 1941 cpu->update_util_set = true; 1942 } 1943 1944 static void intel_pstate_clear_update_util_hook(unsigned int cpu) 1945 { 1946 struct cpudata *cpu_data = all_cpu_data[cpu]; 1947 1948 if (!cpu_data->update_util_set) 1949 return; 1950 1951 cpufreq_remove_update_util_hook(cpu); 1952 cpu_data->update_util_set = false; 1953 synchronize_sched(); 1954 } 1955 1956 static int intel_pstate_get_max_freq(struct cpudata *cpu) 1957 { 1958 return global.turbo_disabled || global.no_turbo ? 1959 cpu->pstate.max_freq : cpu->pstate.turbo_freq; 1960 } 1961 1962 static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, 1963 struct cpudata *cpu) 1964 { 1965 int max_freq = intel_pstate_get_max_freq(cpu); 1966 int32_t max_policy_perf, min_policy_perf; 1967 1968 max_policy_perf = div_ext_fp(policy->max, max_freq); 1969 max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1)); 1970 if (policy->max == policy->min) { 1971 min_policy_perf = max_policy_perf; 1972 } else { 1973 min_policy_perf = div_ext_fp(policy->min, max_freq); 1974 min_policy_perf = clamp_t(int32_t, min_policy_perf, 1975 0, max_policy_perf); 1976 } 1977 1978 /* Normalize user input to [min_perf, max_perf] */ 1979 if (per_cpu_limits) { 1980 cpu->min_perf = min_policy_perf; 1981 cpu->max_perf = max_policy_perf; 1982 } else { 1983 int32_t global_min, global_max; 1984 1985 /* Global limits are in percent of the maximum turbo P-state. */ 1986 global_max = percent_ext_fp(global.max_perf_pct); 1987 global_min = percent_ext_fp(global.min_perf_pct); 1988 if (max_freq != cpu->pstate.turbo_freq) { 1989 int32_t turbo_factor; 1990 1991 turbo_factor = div_ext_fp(cpu->pstate.turbo_pstate, 1992 cpu->pstate.max_pstate); 1993 global_min = mul_ext_fp(global_min, turbo_factor); 1994 global_max = mul_ext_fp(global_max, turbo_factor); 1995 } 1996 global_min = clamp_t(int32_t, global_min, 0, global_max); 1997 1998 cpu->min_perf = max(min_policy_perf, global_min); 1999 cpu->min_perf = min(cpu->min_perf, max_policy_perf); 2000 cpu->max_perf = min(max_policy_perf, global_max); 2001 cpu->max_perf = max(min_policy_perf, cpu->max_perf); 2002 2003 /* Make sure min_perf <= max_perf */ 2004 cpu->min_perf = min(cpu->min_perf, cpu->max_perf); 2005 } 2006 2007 cpu->max_perf = round_up(cpu->max_perf, EXT_FRAC_BITS); 2008 cpu->min_perf = round_up(cpu->min_perf, EXT_FRAC_BITS); 2009 2010 pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu, 2011 fp_ext_toint(cpu->max_perf * 100), 2012 fp_ext_toint(cpu->min_perf * 100)); 2013 } 2014 2015 static int intel_pstate_set_policy(struct cpufreq_policy *policy) 2016 { 2017 struct cpudata *cpu; 2018 2019 if (!policy->cpuinfo.max_freq) 2020 return -ENODEV; 2021 2022 pr_debug("set_policy cpuinfo.max %u policy->max %u\n", 2023 policy->cpuinfo.max_freq, policy->max); 2024 2025 cpu = all_cpu_data[policy->cpu]; 2026 cpu->policy = policy->policy; 2027 2028 mutex_lock(&intel_pstate_limits_lock); 2029 2030 intel_pstate_update_perf_limits(policy, cpu); 2031 2032 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { 2033 /* 2034 * NOHZ_FULL CPUs need this as the governor callback may not 2035 * be invoked on them. 2036 */ 2037 intel_pstate_clear_update_util_hook(policy->cpu); 2038 intel_pstate_max_within_limits(cpu); 2039 } 2040 2041 intel_pstate_set_update_util_hook(policy->cpu); 2042 2043 if (hwp_active) 2044 intel_pstate_hwp_set(policy->cpu); 2045 2046 mutex_unlock(&intel_pstate_limits_lock); 2047 2048 return 0; 2049 } 2050 2051 static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy, 2052 struct cpudata *cpu) 2053 { 2054 if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && 2055 policy->max < policy->cpuinfo.max_freq && 2056 policy->max > cpu->pstate.max_freq) { 2057 pr_debug("policy->max > max non turbo frequency\n"); 2058 policy->max = policy->cpuinfo.max_freq; 2059 } 2060 } 2061 2062 static int intel_pstate_verify_policy(struct cpufreq_policy *policy) 2063 { 2064 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2065 2066 update_turbo_state(); 2067 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, 2068 intel_pstate_get_max_freq(cpu)); 2069 2070 if (policy->policy != CPUFREQ_POLICY_POWERSAVE && 2071 policy->policy != CPUFREQ_POLICY_PERFORMANCE) 2072 return -EINVAL; 2073 2074 intel_pstate_adjust_policy_max(policy, cpu); 2075 2076 return 0; 2077 } 2078 2079 static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy) 2080 { 2081 intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]); 2082 } 2083 2084 static void intel_pstate_stop_cpu(struct cpufreq_policy *policy) 2085 { 2086 pr_debug("CPU %d exiting\n", policy->cpu); 2087 2088 intel_pstate_clear_update_util_hook(policy->cpu); 2089 if (hwp_active) 2090 intel_pstate_hwp_save_state(policy); 2091 else 2092 intel_cpufreq_stop_cpu(policy); 2093 } 2094 2095 static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) 2096 { 2097 intel_pstate_exit_perf_limits(policy); 2098 2099 policy->fast_switch_possible = false; 2100 2101 return 0; 2102 } 2103 2104 static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) 2105 { 2106 struct cpudata *cpu; 2107 int rc; 2108 2109 rc = intel_pstate_init_cpu(policy->cpu); 2110 if (rc) 2111 return rc; 2112 2113 cpu = all_cpu_data[policy->cpu]; 2114 2115 cpu->max_perf = int_ext_tofp(1); 2116 cpu->min_perf = 0; 2117 2118 policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling; 2119 policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling; 2120 2121 /* cpuinfo and default policy values */ 2122 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; 2123 update_turbo_state(); 2124 policy->cpuinfo.max_freq = global.turbo_disabled ? 2125 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; 2126 policy->cpuinfo.max_freq *= cpu->pstate.scaling; 2127 2128 intel_pstate_init_acpi_perf_limits(policy); 2129 cpumask_set_cpu(policy->cpu, policy->cpus); 2130 2131 policy->fast_switch_possible = true; 2132 2133 return 0; 2134 } 2135 2136 static int intel_pstate_cpu_init(struct cpufreq_policy *policy) 2137 { 2138 int ret = __intel_pstate_cpu_init(policy); 2139 2140 if (ret) 2141 return ret; 2142 2143 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; 2144 if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE)) 2145 policy->policy = CPUFREQ_POLICY_PERFORMANCE; 2146 else 2147 policy->policy = CPUFREQ_POLICY_POWERSAVE; 2148 2149 return 0; 2150 } 2151 2152 static struct cpufreq_driver intel_pstate = { 2153 .flags = CPUFREQ_CONST_LOOPS, 2154 .verify = intel_pstate_verify_policy, 2155 .setpolicy = intel_pstate_set_policy, 2156 .suspend = intel_pstate_hwp_save_state, 2157 .resume = intel_pstate_resume, 2158 .get = intel_pstate_get, 2159 .init = intel_pstate_cpu_init, 2160 .exit = intel_pstate_cpu_exit, 2161 .stop_cpu = intel_pstate_stop_cpu, 2162 .name = "intel_pstate", 2163 }; 2164 2165 static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy) 2166 { 2167 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2168 2169 update_turbo_state(); 2170 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, 2171 intel_pstate_get_max_freq(cpu)); 2172 2173 intel_pstate_adjust_policy_max(policy, cpu); 2174 2175 intel_pstate_update_perf_limits(policy, cpu); 2176 2177 return 0; 2178 } 2179 2180 static int intel_cpufreq_target(struct cpufreq_policy *policy, 2181 unsigned int target_freq, 2182 unsigned int relation) 2183 { 2184 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2185 struct cpufreq_freqs freqs; 2186 int target_pstate; 2187 2188 update_turbo_state(); 2189 2190 freqs.old = policy->cur; 2191 freqs.new = target_freq; 2192 2193 cpufreq_freq_transition_begin(policy, &freqs); 2194 switch (relation) { 2195 case CPUFREQ_RELATION_L: 2196 target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling); 2197 break; 2198 case CPUFREQ_RELATION_H: 2199 target_pstate = freqs.new / cpu->pstate.scaling; 2200 break; 2201 default: 2202 target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling); 2203 break; 2204 } 2205 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); 2206 if (target_pstate != cpu->pstate.current_pstate) { 2207 cpu->pstate.current_pstate = target_pstate; 2208 wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, 2209 pstate_funcs.get_val(cpu, target_pstate)); 2210 } 2211 freqs.new = target_pstate * cpu->pstate.scaling; 2212 cpufreq_freq_transition_end(policy, &freqs, false); 2213 2214 return 0; 2215 } 2216 2217 static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, 2218 unsigned int target_freq) 2219 { 2220 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2221 int target_pstate; 2222 2223 update_turbo_state(); 2224 2225 target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); 2226 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); 2227 intel_pstate_update_pstate(cpu, target_pstate); 2228 return target_pstate * cpu->pstate.scaling; 2229 } 2230 2231 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) 2232 { 2233 int ret = __intel_pstate_cpu_init(policy); 2234 2235 if (ret) 2236 return ret; 2237 2238 policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY; 2239 /* This reflects the intel_pstate_get_cpu_pstates() setting. */ 2240 policy->cur = policy->cpuinfo.min_freq; 2241 2242 return 0; 2243 } 2244 2245 static struct cpufreq_driver intel_cpufreq = { 2246 .flags = CPUFREQ_CONST_LOOPS, 2247 .verify = intel_cpufreq_verify_policy, 2248 .target = intel_cpufreq_target, 2249 .fast_switch = intel_cpufreq_fast_switch, 2250 .init = intel_cpufreq_cpu_init, 2251 .exit = intel_pstate_cpu_exit, 2252 .stop_cpu = intel_cpufreq_stop_cpu, 2253 .name = "intel_cpufreq", 2254 }; 2255 2256 static struct cpufreq_driver *default_driver = &intel_pstate; 2257 2258 static bool pid_in_use(void) 2259 { 2260 return intel_pstate_driver == &intel_pstate && 2261 pstate_funcs.update_util == intel_pstate_update_util_pid; 2262 } 2263 2264 static void intel_pstate_driver_cleanup(void) 2265 { 2266 unsigned int cpu; 2267 2268 get_online_cpus(); 2269 for_each_online_cpu(cpu) { 2270 if (all_cpu_data[cpu]) { 2271 if (intel_pstate_driver == &intel_pstate) 2272 intel_pstate_clear_update_util_hook(cpu); 2273 2274 kfree(all_cpu_data[cpu]); 2275 all_cpu_data[cpu] = NULL; 2276 } 2277 } 2278 put_online_cpus(); 2279 intel_pstate_driver = NULL; 2280 } 2281 2282 static int intel_pstate_register_driver(struct cpufreq_driver *driver) 2283 { 2284 int ret; 2285 2286 memset(&global, 0, sizeof(global)); 2287 global.max_perf_pct = 100; 2288 2289 intel_pstate_driver = driver; 2290 ret = cpufreq_register_driver(intel_pstate_driver); 2291 if (ret) { 2292 intel_pstate_driver_cleanup(); 2293 return ret; 2294 } 2295 2296 global.min_perf_pct = min_perf_pct_min(); 2297 2298 if (pid_in_use()) 2299 intel_pstate_debug_expose_params(); 2300 2301 return 0; 2302 } 2303 2304 static int intel_pstate_unregister_driver(void) 2305 { 2306 if (hwp_active) 2307 return -EBUSY; 2308 2309 if (pid_in_use()) 2310 intel_pstate_debug_hide_params(); 2311 2312 cpufreq_unregister_driver(intel_pstate_driver); 2313 intel_pstate_driver_cleanup(); 2314 2315 return 0; 2316 } 2317 2318 static ssize_t intel_pstate_show_status(char *buf) 2319 { 2320 if (!intel_pstate_driver) 2321 return sprintf(buf, "off\n"); 2322 2323 return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ? 2324 "active" : "passive"); 2325 } 2326 2327 static int intel_pstate_update_status(const char *buf, size_t size) 2328 { 2329 int ret; 2330 2331 if (size == 3 && !strncmp(buf, "off", size)) 2332 return intel_pstate_driver ? 2333 intel_pstate_unregister_driver() : -EINVAL; 2334 2335 if (size == 6 && !strncmp(buf, "active", size)) { 2336 if (intel_pstate_driver) { 2337 if (intel_pstate_driver == &intel_pstate) 2338 return 0; 2339 2340 ret = intel_pstate_unregister_driver(); 2341 if (ret) 2342 return ret; 2343 } 2344 2345 return intel_pstate_register_driver(&intel_pstate); 2346 } 2347 2348 if (size == 7 && !strncmp(buf, "passive", size)) { 2349 if (intel_pstate_driver) { 2350 if (intel_pstate_driver == &intel_cpufreq) 2351 return 0; 2352 2353 ret = intel_pstate_unregister_driver(); 2354 if (ret) 2355 return ret; 2356 } 2357 2358 return intel_pstate_register_driver(&intel_cpufreq); 2359 } 2360 2361 return -EINVAL; 2362 } 2363 2364 static int no_load __initdata; 2365 static int no_hwp __initdata; 2366 static int hwp_only __initdata; 2367 static unsigned int force_load __initdata; 2368 2369 static int __init intel_pstate_msrs_not_valid(void) 2370 { 2371 if (!pstate_funcs.get_max() || 2372 !pstate_funcs.get_min() || 2373 !pstate_funcs.get_turbo()) 2374 return -ENODEV; 2375 2376 return 0; 2377 } 2378 2379 #ifdef CONFIG_ACPI 2380 static void intel_pstate_use_acpi_profile(void) 2381 { 2382 switch (acpi_gbl_FADT.preferred_profile) { 2383 case PM_MOBILE: 2384 case PM_TABLET: 2385 case PM_APPLIANCE_PC: 2386 case PM_DESKTOP: 2387 case PM_WORKSTATION: 2388 pstate_funcs.update_util = intel_pstate_update_util; 2389 } 2390 } 2391 #else 2392 static void intel_pstate_use_acpi_profile(void) 2393 { 2394 } 2395 #endif 2396 2397 static void __init copy_cpu_funcs(struct pstate_funcs *funcs) 2398 { 2399 pstate_funcs.get_max = funcs->get_max; 2400 pstate_funcs.get_max_physical = funcs->get_max_physical; 2401 pstate_funcs.get_min = funcs->get_min; 2402 pstate_funcs.get_turbo = funcs->get_turbo; 2403 pstate_funcs.get_scaling = funcs->get_scaling; 2404 pstate_funcs.get_val = funcs->get_val; 2405 pstate_funcs.get_vid = funcs->get_vid; 2406 pstate_funcs.update_util = funcs->update_util; 2407 2408 intel_pstate_use_acpi_profile(); 2409 } 2410 2411 #ifdef CONFIG_ACPI 2412 2413 static bool __init intel_pstate_no_acpi_pss(void) 2414 { 2415 int i; 2416 2417 for_each_possible_cpu(i) { 2418 acpi_status status; 2419 union acpi_object *pss; 2420 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 2421 struct acpi_processor *pr = per_cpu(processors, i); 2422 2423 if (!pr) 2424 continue; 2425 2426 status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); 2427 if (ACPI_FAILURE(status)) 2428 continue; 2429 2430 pss = buffer.pointer; 2431 if (pss && pss->type == ACPI_TYPE_PACKAGE) { 2432 kfree(pss); 2433 return false; 2434 } 2435 2436 kfree(pss); 2437 } 2438 2439 return true; 2440 } 2441 2442 static bool __init intel_pstate_has_acpi_ppc(void) 2443 { 2444 int i; 2445 2446 for_each_possible_cpu(i) { 2447 struct acpi_processor *pr = per_cpu(processors, i); 2448 2449 if (!pr) 2450 continue; 2451 if (acpi_has_method(pr->handle, "_PPC")) 2452 return true; 2453 } 2454 return false; 2455 } 2456 2457 enum { 2458 PSS, 2459 PPC, 2460 }; 2461 2462 struct hw_vendor_info { 2463 u16 valid; 2464 char oem_id[ACPI_OEM_ID_SIZE]; 2465 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; 2466 int oem_pwr_table; 2467 }; 2468 2469 /* Hardware vendor-specific info that has its own power management modes */ 2470 static struct hw_vendor_info vendor_info[] __initdata = { 2471 {1, "HP ", "ProLiant", PSS}, 2472 {1, "ORACLE", "X4-2 ", PPC}, 2473 {1, "ORACLE", "X4-2L ", PPC}, 2474 {1, "ORACLE", "X4-2B ", PPC}, 2475 {1, "ORACLE", "X3-2 ", PPC}, 2476 {1, "ORACLE", "X3-2L ", PPC}, 2477 {1, "ORACLE", "X3-2B ", PPC}, 2478 {1, "ORACLE", "X4470M2 ", PPC}, 2479 {1, "ORACLE", "X4270M3 ", PPC}, 2480 {1, "ORACLE", "X4270M2 ", PPC}, 2481 {1, "ORACLE", "X4170M2 ", PPC}, 2482 {1, "ORACLE", "X4170 M3", PPC}, 2483 {1, "ORACLE", "X4275 M3", PPC}, 2484 {1, "ORACLE", "X6-2 ", PPC}, 2485 {1, "ORACLE", "Sudbury ", PPC}, 2486 {0, "", ""}, 2487 }; 2488 2489 static bool __init intel_pstate_platform_pwr_mgmt_exists(void) 2490 { 2491 struct acpi_table_header hdr; 2492 struct hw_vendor_info *v_info; 2493 const struct x86_cpu_id *id; 2494 u64 misc_pwr; 2495 2496 id = x86_match_cpu(intel_pstate_cpu_oob_ids); 2497 if (id) { 2498 rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); 2499 if ( misc_pwr & (1 << 8)) 2500 return true; 2501 } 2502 2503 if (acpi_disabled || 2504 ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr))) 2505 return false; 2506 2507 for (v_info = vendor_info; v_info->valid; v_info++) { 2508 if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) && 2509 !strncmp(hdr.oem_table_id, v_info->oem_table_id, 2510 ACPI_OEM_TABLE_ID_SIZE)) 2511 switch (v_info->oem_pwr_table) { 2512 case PSS: 2513 return intel_pstate_no_acpi_pss(); 2514 case PPC: 2515 return intel_pstate_has_acpi_ppc() && 2516 (!force_load); 2517 } 2518 } 2519 2520 return false; 2521 } 2522 2523 static void intel_pstate_request_control_from_smm(void) 2524 { 2525 /* 2526 * It may be unsafe to request P-states control from SMM if _PPC support 2527 * has not been enabled. 2528 */ 2529 if (acpi_ppc) 2530 acpi_processor_pstate_control(); 2531 } 2532 #else /* CONFIG_ACPI not enabled */ 2533 static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; } 2534 static inline bool intel_pstate_has_acpi_ppc(void) { return false; } 2535 static inline void intel_pstate_request_control_from_smm(void) {} 2536 #endif /* CONFIG_ACPI */ 2537 2538 static const struct x86_cpu_id hwp_support_ids[] __initconst = { 2539 { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP }, 2540 {} 2541 }; 2542 2543 static int __init intel_pstate_init(void) 2544 { 2545 int rc; 2546 2547 if (no_load) 2548 return -ENODEV; 2549 2550 if (x86_match_cpu(hwp_support_ids)) { 2551 copy_cpu_funcs(&core_funcs); 2552 if (no_hwp) { 2553 pstate_funcs.update_util = intel_pstate_update_util; 2554 } else { 2555 hwp_active++; 2556 intel_pstate.attr = hwp_cpufreq_attrs; 2557 pstate_funcs.update_util = intel_pstate_update_util_hwp; 2558 goto hwp_cpu_matched; 2559 } 2560 } else { 2561 const struct x86_cpu_id *id; 2562 2563 id = x86_match_cpu(intel_pstate_cpu_ids); 2564 if (!id) 2565 return -ENODEV; 2566 2567 copy_cpu_funcs((struct pstate_funcs *)id->driver_data); 2568 } 2569 2570 if (intel_pstate_msrs_not_valid()) 2571 return -ENODEV; 2572 2573 hwp_cpu_matched: 2574 /* 2575 * The Intel pstate driver will be ignored if the platform 2576 * firmware has its own power management modes. 2577 */ 2578 if (intel_pstate_platform_pwr_mgmt_exists()) 2579 return -ENODEV; 2580 2581 if (!hwp_active && hwp_only) 2582 return -ENOTSUPP; 2583 2584 pr_info("Intel P-state driver initializing\n"); 2585 2586 all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus()); 2587 if (!all_cpu_data) 2588 return -ENOMEM; 2589 2590 intel_pstate_request_control_from_smm(); 2591 2592 intel_pstate_sysfs_expose_params(); 2593 2594 mutex_lock(&intel_pstate_driver_lock); 2595 rc = intel_pstate_register_driver(default_driver); 2596 mutex_unlock(&intel_pstate_driver_lock); 2597 if (rc) 2598 return rc; 2599 2600 if (hwp_active) 2601 pr_info("HWP enabled\n"); 2602 2603 return 0; 2604 } 2605 device_initcall(intel_pstate_init); 2606 2607 static int __init intel_pstate_setup(char *str) 2608 { 2609 if (!str) 2610 return -EINVAL; 2611 2612 if (!strcmp(str, "disable")) { 2613 no_load = 1; 2614 } else if (!strcmp(str, "passive")) { 2615 pr_info("Passive mode enabled\n"); 2616 default_driver = &intel_cpufreq; 2617 no_hwp = 1; 2618 } 2619 if (!strcmp(str, "no_hwp")) { 2620 pr_info("HWP disabled\n"); 2621 no_hwp = 1; 2622 } 2623 if (!strcmp(str, "force")) 2624 force_load = 1; 2625 if (!strcmp(str, "hwp_only")) 2626 hwp_only = 1; 2627 if (!strcmp(str, "per_cpu_perf_limits")) 2628 per_cpu_limits = true; 2629 2630 #ifdef CONFIG_ACPI 2631 if (!strcmp(str, "support_acpi_ppc")) 2632 acpi_ppc = true; 2633 #endif 2634 2635 return 0; 2636 } 2637 early_param("intel_pstate", intel_pstate_setup); 2638 2639 MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>"); 2640 MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors"); 2641 MODULE_LICENSE("GPL"); 2642