/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>

#define INTEL_CPUFREQ_TRANSITION_LATENCY	20000

#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#endif

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

static inline u64 mul_ext_fp(u64 x, u64 y)
{
	return (x * y) >> EXT_FRAC_BITS;
}

static inline u64 div_ext_fp(u64 x, u64 y)
{
	return div64_u64(x << EXT_FRAC_BITS, y);
}
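/*
 * Illustrative only: with FRAC_BITS = 8 the helpers above work in a 24.8
 * fixed-point format, and the "ext" variants use 8 + 6 = 14 fractional
 * bits.  For example (values chosen purely for illustration):
 *
 *	int_tofp(3)                      == 768  (3 << 8)
 *	fp_toint(768)                    == 3
 *	mul_fp(int_tofp(3), int_tofp(2)) == int_tofp(6)
 *	div_fp(1, 4)                     == 64   (0.25 in 24.8)
 *	ceiling_fp(div_fp(1, 4))         == 1
 *	int_ext_tofp(1)                  == 16384 (1 << 14)
 */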
/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate next
 *			P state. This can be different than core_avg_perf
 *			to account for cpu idle period
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing next P State.
 */
struct sample {
	int32_t core_avg_perf;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	u64 time;
};

/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical: This is physical Max P state for a processor
 *			This can be higher than the max_pstate which can
 *			be limited by platform thermal design power limits
 * @scaling:		Scaling factor to convert frequency to cpufreq
 *			frequency units
 * @turbo_pstate:	Max Turbo P state possible for this platform
 * @max_freq:		@max_pstate frequency in cpufreq units
 * @turbo_freq:		@turbo_pstate frequency in cpufreq units
 *
 * Stores the per cpu model P state limits and current P state.
 */
struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	max_pstate_physical;
	int	scaling;
	int	turbo_pstate;
	unsigned int max_freq;
	unsigned int turbo_freq;
};

/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P State.
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - Min P State)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling)
 * This data is used in Atom platforms, where in addition to target P state,
 * the voltage data needs to be specified to select next P State.
 */
struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};
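/*
 * For illustration only (hypothetical register values): if the raw min
 * VID is 0x20, the raw max VID is 0x40, min_pstate is 6 and max_pstate
 * is 22, then
 *
 *	ratio  = div_fp(int_tofp(0x40) - int_tofp(0x20), int_tofp(22 - 6))
 *	       = int_tofp(2)
 *
 * and the VID requested for pstate 14 becomes
 *
 *	vid_fp = int_tofp(0x20) + mul_fp(int_tofp(14 - 6), ratio)
 *	       = int_tofp(0x30)
 *
 * which ceiling_fp() turns into 0x30.  See atom_get_vid() and
 * atom_get_val() below for the code that does this.
 */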
/**
 * struct _pid -	Stores PID data
 * @setpoint:		Target set point for busyness or performance
 * @integral:		Storage for accumulated error values
 * @p_gain:		PID proportional gain
 * @i_gain:		PID integral gain
 * @d_gain:		PID derivative gain
 * @deadband:		PID deadband
 * @last_err:		Last error storage for integral part of PID calculation
 *
 * Stores PID coefficients and last error for PID controller.
 */
struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

/**
 * struct perf_limits - Store user and policy limits
 * @no_turbo:		User requested turbo state from intel_pstate sysfs
 * @turbo_disabled:	Platform turbo status either from msr
 *			MSR_IA32_MISC_ENABLE or when maximum available pstate
 *			matches the maximum turbo pstate
 * @max_perf_pct:	Effective maximum performance limit in percentage, this
 *			is minimum of either limits enforced by cpufreq policy
 *			or limits from user set limits via intel_pstate sysfs
 * @min_perf_pct:	Effective minimum performance limit in percentage, this
 *			is maximum of either limits enforced by cpufreq policy
 *			or limits from user set limits via intel_pstate sysfs
 * @max_perf:		This is a scaled value between 0 to 255 for max_perf_pct
 *			This value is used to limit max pstate
 * @min_perf:		This is a scaled value between 0 to 255 for min_perf_pct
 *			This value is used to limit min pstate
 * @max_policy_pct:	The maximum performance in percentage enforced by
 *			cpufreq setpolicy interface
 * @max_sysfs_pct:	The maximum performance in percentage enforced by
 *			intel pstate sysfs interface, unused when per cpu
 *			controls are enforced
 * @min_policy_pct:	The minimum performance in percentage enforced by
 *			cpufreq setpolicy interface
 * @min_sysfs_pct:	The minimum performance in percentage enforced by
 *			intel pstate sysfs interface, unused when per cpu
 *			controls are enforced
 *
 * Storage for user and policy defined limits.
 */
struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};
/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @policy:		CPUFreq policy value
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @iowait_boost:	iowait-related boost fraction
 * @last_update:	Time of the last update.
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @pid:		Stores PID parameters for this CPU
 * @last_sample_time:	Last Sample time
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: IO Wait time difference from last and
 *			current sample
 * @sample:		Storage for storing last Sample data
 * @perf_limits:	Pointer to perf_limit unique to this CPU
 *			Not all field in the structure are applicable
 *			when per cpu controls are enforced
 * @acpi_perf_data:	Stores ACPI perf information read from _PSS
 * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
 * @epp_saved:		Last saved HWP energy performance preference
 *			(EPP) or energy performance bias (EPB)
 * @epp_policy:		Last saved policy used to set EPP/EPB
 *
 * This structure stores per CPU instance data for all CPUs.
 */
struct cpudata {
	int cpu;

	unsigned int policy;
	struct update_util_data update_util;
	bool update_util_set;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	u64	last_update;
	u64	last_sample_time;
	u64	prev_aperf;
	u64	prev_mperf;
	u64	prev_tsc;
	u64	prev_cummulative_iowait;
	struct sample sample;
	struct perf_limits *perf_limits;
#ifdef CONFIG_ACPI
	struct acpi_processor_performance acpi_perf_data;
	bool valid_pss_table;
#endif
	unsigned int iowait_boost;
	s16 epp_saved;
	s16 epp_policy;
};

static struct cpudata **all_cpu_data;

/**
 * struct pstate_adjust_policy - Stores static PID configuration data
 * @sample_rate_ms:	PID calculation sample rate in ms
 * @sample_rate_ns:	Sample rate calculation in ns
 * @deadband:		PID deadband
 * @setpoint:		PID Setpoint
 * @p_gain_pct:		PID proportional gain
 * @i_gain_pct:		PID integral gain
 * @d_gain_pct:		PID derivative gain
 *
 * Stores per CPU model static PID configuration data.
 */
struct pstate_adjust_policy {
	int sample_rate_ms;
	s64 sample_rate_ns;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non turbo effective P state
 * @get_max_physical:	Callback to get maximum non turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 * @get_target_pstate:	Callback to a function to calculate next P state to use
 *
 * Core and Atom CPU models have different way to get P State limits. This
 * structure is used to store those callbacks.
 */
struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	u64 (*get_val)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
	int32_t (*get_target_pstate)(struct cpudata *);
};

/**
 * struct cpu_defaults - Per CPU model default config data
 * @pid_policy:	PID config data
 * @funcs:		Callback function data
 */
struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);

static struct pstate_adjust_policy pid_params __read_mostly;
static struct pstate_funcs pstate_funcs __read_mostly;
static int hwp_active __read_mostly;
static bool per_cpu_limits __read_mostly;

#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif

static struct perf_limits performance_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_ext_tofp(1),
	.min_perf_pct = 100,
	.min_perf = int_ext_tofp(1),
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

static struct perf_limits powersave_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_ext_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif

static DEFINE_MUTEX(intel_pstate_limits_lock);

#ifdef CONFIG_ACPI

static bool intel_pstate_get_ppc_enable_status(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
	    acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
		return true;

	return acpi_ppc;
}

static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int ret;
	int i;

	if (hwp_active)
		return;

	if (!intel_pstate_get_ppc_enable_status())
		return;

	cpu = all_cpu_data[policy->cpu];

	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
						  policy->cpu);
	if (ret)
		return;

	/*
	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
	 * guarantee that the states returned by it map to the states in our
	 * list directly.
	 */
	if (cpu->acpi_perf_data.control_register.space_id !=
						ACPI_ADR_SPACE_FIXED_HARDWARE)
		goto err;

	/*
	 * If there is only one entry _PSS, simply ignore _PSS and continue as
	 * usual without taking _PSS into account
	 */
	if (cpu->acpi_perf_data.state_count < 2)
		goto err;

	pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
			 (u32) cpu->acpi_perf_data.states[i].power,
			 (u32) cpu->acpi_perf_data.states[i].control);
	}

	/*
	 * The _PSS table doesn't contain whole turbo frequency range.
	 * This just contains +1 MHZ above the max non turbo frequency,
	 * with control value corresponding to max turbo ratio. But
	 * when cpufreq set policy is called, it will call with this
	 * max frequency, which will cause a reduced performance as
	 * this driver uses real max turbo frequency as the max
	 * frequency. So correct this frequency in _PSS table to
	 * correct max turbo frequency based on the turbo state.
	 * Also need to convert to MHz as _PSS freq is in MHz.
	 */
	if (!limits->turbo_disabled)
		cpu->acpi_perf_data.states[0].core_frequency =
					policy->cpuinfo.max_freq / 1000;
	cpu->valid_pss_table = true;
	pr_debug("_PPC limits will be enforced\n");

	return;

 err:
	cpu->valid_pss_table = false;
	acpi_processor_unregister_performance(policy->cpu);
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[policy->cpu];
	if (!cpu->valid_pss_table)
		return;

	acpi_processor_unregister_performance(policy->cpu);
}

#else
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}

static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}
#endif

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral) {
	pid->setpoint = int_tofp(setpoint);
	pid->deadband = int_tofp(deadband);
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(percent, 100);
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(percent, 100);
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(percent, 100);
}

static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = pid->setpoint - busy;

	if (abs(fp_error) <= pid->deadband)
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30. This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}
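/*
 * Worked example (illustrative numbers only): with the core_params gains
 * defined below (setpoint = 97, deadband = 0, p_gain_pct = 20,
 * i_gain_pct = 0, d_gain_pct = 0) and a scaled busy value of int_tofp(90):
 *
 *	fp_error = int_tofp(97) - int_tofp(90) = int_tofp(7)
 *	pterm    = mul_fp(div_fp(20, 100), fp_error) ~= int_tofp(1.4)
 *	result   = pterm + 0 + 0 + (1 << (FRAC_BITS - 1))
 *	pid_calc() returns fp_toint(result) = 1
 *
 * so the caller in get_target_pstate_use_performance() steps the
 * requested P-state down by one.
 */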
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits->turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
{
	u64 epb;
	int ret;

	if (!static_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return (s16)ret;

	return (s16)(epb & 0x0f);
}

static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
	s16 epp;

	if (static_cpu_has(X86_FEATURE_HWP_EPP))
		epp = (hwp_req_data >> 24) & 0xff;
	else
		/* When there is no EPP present, HWP uses EPB settings */
		epp = intel_pstate_get_epb(cpu_data);

	return epp;
}

static void intel_pstate_set_epb(int cpu, s16 pref)
{
	u64 epb;

	if (!static_cpu_has(X86_FEATURE_EPB))
		return;

	if (rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb))
		return;

	epb = (epb & ~0x0f) | pref;
	wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
}

static void intel_pstate_hwp_set(const struct cpumask *cpumask)
{
	int min, hw_min, max, hw_max, cpu, range, adj_range;
	struct perf_limits *perf_limits = limits;
	u64 value, cap;

	for_each_cpu(cpu, cpumask) {
		int max_perf_pct, min_perf_pct;
		struct cpudata *cpu_data = all_cpu_data[cpu];
		s16 epp;

		if (per_cpu_limits)
			perf_limits = all_cpu_data[cpu]->perf_limits;

		rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
		hw_min = HWP_LOWEST_PERF(cap);
		hw_max = HWP_HIGHEST_PERF(cap);
		range = hw_max - hw_min;

		max_perf_pct = perf_limits->max_perf_pct;
		min_perf_pct = perf_limits->min_perf_pct;

		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		adj_range = min_perf_pct * range / 100;
		min = hw_min + adj_range;
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		adj_range = max_perf_pct * range / 100;
		max = hw_min + adj_range;
		if (limits->no_turbo) {
			hw_max = HWP_GUARANTEED_PERF(cap);
			if (hw_max < max)
				max = hw_max;
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);

		if (cpu_data->epp_policy == cpu_data->policy)
			goto skip_epp;

		cpu_data->epp_policy = cpu_data->policy;

		if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
			epp = intel_pstate_get_epp(cpu_data, value);
			/* If the EPP read failed, don't try to write it */
			if (epp < 0) {
				cpu_data->epp_saved = epp;
				goto skip_epp;
			}

			cpu_data->epp_saved = epp;

			epp = 0;
		} else {
			/* skip setting EPP, when saved value is invalid */
			if (cpu_data->epp_saved < 0)
				goto skip_epp;

			/*
			 * No need to restore EPP when it is not zero. This
			 * means:
			 *  - Policy is not changed
			 *  - user has manually changed
			 *  - Error reading EPB
			 */
			epp = intel_pstate_get_epp(cpu_data, value);
			if (epp)
				goto skip_epp;

			epp = cpu_data->epp_saved;
		}
		if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
			value &= ~GENMASK_ULL(31, 24);
			value |= (u64)epp << 24;
		} else {
			intel_pstate_set_epb(cpu, epp);
		}
skip_epp:
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}
}
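/*
 * Example of the percent-to-HWP mapping above, with made-up capability
 * values: if MSR_HWP_CAPABILITIES reports lowest_perf = 8 and
 * highest_perf = 36, then range = 28 and a min_perf_pct of 25 gives
 *
 *	min = 8 + (25 * 28) / 100 = 15
 *
 * while max_perf_pct = 100 maps to max = 8 + 28 = 36.  When no_turbo is
 * set, max is additionally capped at the guaranteed performance level.
 */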
static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
{
	if (hwp_active)
		intel_pstate_hwp_set(policy->cpus);

	return 0;
}

static int intel_pstate_resume(struct cpufreq_policy *policy)
{
	if (!hwp_active)
		return 0;

	all_cpu_data[policy->cpu]->epp_policy = 0;
	all_cpu_data[policy->cpu]->epp_saved = -EINVAL;

	return intel_pstate_hwp_set_policy(policy);
}

static void intel_pstate_hwp_set_online_cpus(void)
{
	get_online_cpus();
	intel_pstate_hwp_set(cpu_online_mask);
	put_online_cpus();
}

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active ||
	    pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load)
		return;

	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/
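/*
 * With debugfs mounted, the loop above exposes the PID tunables as
 * writable files, e.g. (paths assume the default debugfs mount point):
 *
 *	/sys/kernel/debug/pstate_snb/sample_rate_ms
 *	/sys/kernel/debug/pstate_snb/setpoint
 *	/sys/kernel/debug/pstate_snb/deadband
 *	/sys/kernel/debug/pstate_snb/p_gain_pct
 *	/sys/kernel/debug/pstate_snb/i_gain_pct
 *	/sys/kernel/debug/pstate_snb/d_gain_pct
 *
 * Writing any of them resets the per-CPU PID state via
 * intel_pstate_reset_all_pid().  None of these files are created when
 * HWP is active or the load-based target algorithm is in use.
 */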
/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits->object);		\
	}

static ssize_t show_turbo_pct(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(no_turbo, total);
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}

static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits->turbo_disabled)
		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits->no_turbo);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_limits_lock);

	update_turbo_state();
	if (limits->turbo_disabled) {
		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
		mutex_unlock(&intel_pstate_limits_lock);
		return -EPERM;
	}

	limits->no_turbo = clamp_t(int, input, 0, 1);

	mutex_unlock(&intel_pstate_limits_lock);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();

	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_limits_lock);

	limits->max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf_pct = max(limits->min_perf_pct,
				   limits->max_perf_pct);
	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);

	mutex_unlock(&intel_pstate_limits_lock);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();
	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_limits_lock);

	limits->min_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->min_perf_pct = min(limits->max_perf_pct,
				   limits->min_perf_pct);
	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);

	mutex_unlock(&intel_pstate_limits_lock);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();
	return count;
}
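/*
 * Illustration of the clamping above, with made-up numbers: if the
 * cpufreq policy currently allows 20..80% (min_policy_pct = 20,
 * max_policy_pct = 80) and "90" is written to max_perf_pct, then
 * max_sysfs_pct becomes 90 but the effective limit is
 *
 *	max_perf_pct = min(80, 90) = 80
 *	max_perf     = div_ext_fp(80, 100)
 *
 * so the sysfs request can never widen the range allowed by the policy,
 * and min_perf_pct/max_perf_pct are always kept consistent with each
 * other.
 */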
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	if (WARN_ON(!intel_pstate_kobject))
		return;

	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	if (WARN_ON(rc))
		return;

	/*
	 * If per cpu limits are enforced there are no global limits, so
	 * return without creating max/min_perf_pct attributes
	 */
	if (per_cpu_limits)
		return;

	rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
	WARN_ON(rc);

	rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
	WARN_ON(rc);

}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable HWP notification interrupt as we don't process them */
	if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
	cpudata->epp_policy = 0;
	cpudata->epp_saved = -EINVAL;
}

static int atom_get_min_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_TURBO_RATIOS, value);
	return value & 0x7F;
}

static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	return val | vid;
}
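/*
 * Layout of the value written to MSR_IA32_PERF_CTL, as built by
 * atom_get_val() above (core_get_val() below is the same without the
 * VID field):
 *
 *	bits  [7:0] - target VID (Atom only)
 *	bits [15:8] - target P-state ratio
 *	bit   32    - set to request that turbo be disengaged, only when
 *		      the user asked for no_turbo and the BIOS has not
 *		      already disabled turbo
 */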
static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 from SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	WARN_ON(i > 4);

	return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 from SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}

static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(ATOM_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			cpudata->pstate.min_pstate));

	rdmsrl(ATOM_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		/* Do some sanity checking for safety */
		if (plat_info & 0x600000000) {
			u64 tdp_ctrl;
			u64 tdp_ratio;
			int tdp_msr;

			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
			if (err)
				goto skip_tar;

			tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
			if (err)
				goto skip_tar;

			/* For level 1 and 2, bits[23:16] contain the ratio */
			if (tdp_ctrl)
				tdp_ratio >>= 16;

			tdp_ratio &= 0xff; /* ratios are only 8 bits long */
			if (tdp_ratio - 1 == tar) {
				max_pstate = tar;
				pr_debug("max_pstate=TAC %x\n", max_pstate);
			} else {
				goto skip_tar;
			}
		}
	}

skip_tar:
	return max_pstate;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value) & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}

static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	return val;
}

static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static const struct cpu_defaults silvermont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = silvermont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static const struct cpu_defaults airmont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = airmont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};
static const struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static const struct cpu_defaults bxt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;
	struct perf_limits *perf_limits = limits;

	if (limits->no_turbo || limits->turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	if (per_cpu_limits)
		perf_limits = cpu->perf_limits;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
	max_perf_adj = fp_ext_toint(max_perf * perf_limits->max_perf);
	*max = clamp_t(int, max_perf_adj,
		       cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_ext_toint(max_perf * perf_limits->min_perf);
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
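/*
 * Illustration with made-up numbers: for turbo_pstate = 32 and
 * min_pstate = 8, a min_perf of div_ext_fp(50, 100) (i.e. 50%) and a
 * max_perf of int_ext_tofp(1) (100%) give
 *
 *	*max = fp_ext_toint(32 * int_ext_tofp(1))     = 32
 *	*min = fp_ext_toint(32 * div_ext_fp(50, 100)) = 16
 *
 * both clamped to the [min_pstate, turbo_pstate] range.  Note that the
 * percentages scale the maximum available P-state, not the span between
 * min and max.
 */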
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	cpu->pstate.current_pstate = pstate;
	/*
	 * Generally, there is no guarantee that this code will always run on
	 * the CPU being updated, so force the register update to run on the
	 * right CPU.
	 */
	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
		      pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static void intel_pstate_max_within_limits(struct cpudata *cpu)
{
	int min_pstate, max_pstate;

	update_turbo_state();
	intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
	intel_pstate_set_pstate(cpu, max_pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();
	cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	intel_pstate_set_min_pstate(cpu);
}

static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;

	sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
}

static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();
	if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = time;
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
	/*
	 * First time this function is invoked in a given cycle, all of the
	 * previous sample data fields are equal to zero or stale and they must
	 * be populated with meaningful numbers for things to work, so assume
	 * that sample.time will always be reset before setting the utilization
	 * update hook and make the caller skip the sample then.
	 */
	return !!cpu->last_sample_time;
}

static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->sample.core_avg_perf,
			  cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
}

static inline int32_t get_avg_pstate(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->pstate.max_pstate_physical,
			  cpu->sample.core_avg_perf);
}

static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int32_t busy_frac, boost;
	int target, avg_pstate;

	busy_frac = div_fp(sample->mperf, sample->tsc);

	boost = cpu->iowait_boost;
	cpu->iowait_boost >>= 1;

	if (busy_frac < boost)
		busy_frac = boost;

	sample->busy_scaled = busy_frac * 100;

	target = limits->no_turbo || limits->turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	target += target >> 2;
	target = mul_fp(target, busy_frac);
	if (target < cpu->pstate.min_pstate)
		target = cpu->pstate.min_pstate;

	/*
	 * If the average P-state during the previous cycle was higher than the
	 * current target, add 50% of the difference to the target to reduce
	 * possible performance oscillations and offset possible performance
	 * loss related to moving the workload from one CPU to another within
	 * a package/module.
	 */
	avg_pstate = get_avg_pstate(cpu);
	if (avg_pstate > target)
		target += (avg_pstate - target) >> 1;

	return target;
}
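/*
 * Worked example of the load-based algorithm, with made-up sample data:
 * assume turbo is available with turbo_pstate = 32, no iowait boost, and
 * mperf/tsc = 0.4 for the last sample (the CPU was unhalted 40% of the
 * time).  Then
 *
 *	busy_frac = div_fp(mperf, tsc)    ~= int_tofp(0.4)
 *	target    = 32 + (32 >> 2)         = 40
 *	target    = mul_fp(40, busy_frac) ~= 16
 *
 * i.e. the headroom added by "target >> 2" lets a fully busy CPU reach
 * the turbo range.  If the average P-state of the previous cycle was,
 * say, 20, the result is bumped to 16 + (20 - 16) / 2 = 18.
 */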
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
	int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
	u64 duration_ns;

	/*
	 * perf_scaled is the ratio of the average P-state during the last
	 * sampling period to the P-state requested last time (in percent).
	 *
	 * That measures the system's response to the previous P-state
	 * selection.
	 */
	max_pstate = cpu->pstate.max_pstate_physical;
	current_pstate = cpu->pstate.current_pstate;
	perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf,
			       div_fp(100 * max_pstate, current_pstate));

	/*
	 * Since our utilization update callback will not run unless we are
	 * in C0, check if the actual elapsed time is significantly greater (3x)
	 * than our sample interval.  If it is, then we were idle for a long
	 * enough period of time to adjust our performance metric.
	 */
	duration_ns = cpu->sample.time - cpu->last_sample_time;
	if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
		sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
		perf_scaled = mul_fp(perf_scaled, sample_ratio);
	} else {
		sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
		if (sample_ratio < int_tofp(1))
			perf_scaled = 0;
	}

	cpu->sample.busy_scaled = perf_scaled;
	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
}
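/*
 * Example of the perf_scaled computation, using made-up numbers: with
 * max_pstate_physical = 32, current_pstate = 16 and an APERF/MPERF ratio
 * of 0.45 for the last period,
 *
 *	perf_scaled = mul_ext_fp(0.45 in ext fp,
 *				 div_fp(100 * 32, 16)) ~= int_tofp(90)
 *
 * i.e. the core delivered about 90% of the requested performance.  With
 * the setpoint of 97 used by core_params, the PID then nudges the
 * request down (see the pid_calc() example above).
 */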
static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
	pstate = clamp_t(int, pstate, min_perf, max_perf);
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	return pstate;
}

static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
{
	pstate = intel_pstate_prepare_request(cpu, pstate);
	if (pstate == cpu->pstate.current_pstate)
		return;

	cpu->pstate.current_pstate = pstate;
	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int from, target_pstate;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
		cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);

	update_turbo_state();

	intel_pstate_update_pstate(cpu, target_pstate);

	sample = &cpu->sample;
	trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
		fp_toint(sample->busy_scaled),
		from,
		cpu->pstate.current_pstate,
		sample->mperf,
		sample->aperf,
		sample->tsc,
		get_avg_frequency(cpu),
		fp_toint(cpu->iowait_boost * 100));
}

static void intel_pstate_update_util(struct update_util_data *data, u64 time,
				     unsigned int flags)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	u64 delta_ns;

	if (pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load) {
		if (flags & SCHED_CPUFREQ_IOWAIT) {
			cpu->iowait_boost = int_tofp(1);
		} else if (cpu->iowait_boost) {
			/* Clear iowait_boost if the CPU may have been idle. */
			delta_ns = time - cpu->last_update;
			if (delta_ns > TICK_NSEC)
				cpu->iowait_boost = 0;
		}
		cpu->last_update = time;
	}

	delta_ns = time - cpu->sample.time;
	if ((s64)delta_ns >= pid_params.sample_rate_ns) {
		bool sample_taken = intel_pstate_sample(cpu, time);

		if (sample_taken) {
			intel_pstate_calc_avg_perf(cpu);
			if (!hwp_active)
				intel_pstate_adjust_busy_pstate(cpu);
		}
	}
}

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(INTEL_FAM6_SANDYBRIDGE,		core_params),
	ICPU(INTEL_FAM6_SANDYBRIDGE_X,		core_params),
	ICPU(INTEL_FAM6_ATOM_SILVERMONT1,	silvermont_params),
	ICPU(INTEL_FAM6_IVYBRIDGE,		core_params),
	ICPU(INTEL_FAM6_HASWELL_CORE,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_CORE,		core_params),
	ICPU(INTEL_FAM6_IVYBRIDGE_X,		core_params),
	ICPU(INTEL_FAM6_HASWELL_X,		core_params),
	ICPU(INTEL_FAM6_HASWELL_ULT,		core_params),
	ICPU(INTEL_FAM6_HASWELL_GT3E,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_GT3E,		core_params),
	ICPU(INTEL_FAM6_ATOM_AIRMONT,		airmont_params),
	ICPU(INTEL_FAM6_SKYLAKE_MOBILE,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_X,		core_params),
	ICPU(INTEL_FAM6_SKYLAKE_DESKTOP,	core_params),
	ICPU(INTEL_FAM6_BROADWELL_XEON_D,	core_params),
	ICPU(INTEL_FAM6_XEON_PHI_KNL,		knl_params),
	ICPU(INTEL_FAM6_ATOM_GOLDMONT,		bxt_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
	ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
	ICPU(INTEL_FAM6_BROADWELL_X, core_params),
	ICPU(INTEL_FAM6_SKYLAKE_X, core_params),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[cpunum];

	if (!cpu) {
		unsigned int size = sizeof(struct cpudata);

		if (per_cpu_limits)
			size += sizeof(struct perf_limits);

		cpu = kzalloc(size, GFP_KERNEL);
		if (!cpu)
			return -ENOMEM;

		all_cpu_data[cpunum] = cpu;
		if (per_cpu_limits)
			cpu->perf_limits = (struct perf_limits *)(cpu + 1);

	}

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active) {
		intel_pstate_hwp_enable(cpu);
		pid_params.sample_rate_ms = 50;
		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
	}

	intel_pstate_get_cpu_pstates(cpu);

	intel_pstate_busy_pid_reset(cpu);

	pr_debug("controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	return cpu ? get_avg_frequency(cpu) : 0;
}

static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	if (cpu->update_util_set)
		return;

	/* Prevent intel_pstate_update_util() from using stale data. */
	cpu->sample.time = 0;
	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
				     intel_pstate_update_util);
	cpu->update_util_set = true;
}

static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
	struct cpudata *cpu_data = all_cpu_data[cpu];

	if (!cpu_data->update_util_set)
		return;

	cpufreq_remove_update_util_hook(cpu);
	cpu_data->update_util_set = false;
	synchronize_sched();
}

static void intel_pstate_set_performance_limits(struct perf_limits *limits)
{
	mutex_lock(&intel_pstate_limits_lock);
	limits->no_turbo = 0;
	limits->turbo_disabled = 0;
	limits->max_perf_pct = 100;
	limits->max_perf = int_ext_tofp(1);
	limits->min_perf_pct = 100;
	limits->min_perf = int_ext_tofp(1);
	limits->max_policy_pct = 100;
	limits->max_sysfs_pct = 100;
	limits->min_policy_pct = 0;
	limits->min_sysfs_pct = 0;
	mutex_unlock(&intel_pstate_limits_lock);
}

static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
					    struct perf_limits *limits)
{

	mutex_lock(&intel_pstate_limits_lock);

	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
					      policy->cpuinfo.max_freq);
	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
	if (policy->max == policy->min) {
		limits->min_policy_pct = limits->max_policy_pct;
	} else {
		limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100,
						      policy->cpuinfo.max_freq);
		limits->min_policy_pct = clamp_t(int, limits->min_policy_pct,
						 0, 100);
	}

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
	limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
	limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);

	mutex_unlock(&intel_pstate_limits_lock);

	pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
		 limits->max_perf_pct, limits->min_perf_pct);
}
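/*
 * Example of the percent conversion above, with hypothetical policy
 * values: if policy->min is 1,600,000 kHz, policy->max is 2,400,000 kHz
 * and cpuinfo.max_freq is 3,200,000 kHz, then
 *
 *	max_policy_pct = DIV_ROUND_UP(2400000 * 100, 3200000) = 75
 *	min_policy_pct = DIV_ROUND_UP(1600000 * 100, 3200000) = 50
 *
 * and the sysfs percentages are then clipped into that [50, 75] window
 * before being converted to fixed-point min_perf/max_perf values.
 */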
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	struct perf_limits *perf_limits = NULL;

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
		 policy->cpuinfo.max_freq, policy->max);

	cpu = all_cpu_data[policy->cpu];
	cpu->policy = policy->policy;

	if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
	    policy->max < policy->cpuinfo.max_freq &&
	    policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
		pr_debug("policy->max > max non turbo frequency\n");
		policy->max = policy->cpuinfo.max_freq;
	}

	if (per_cpu_limits)
		perf_limits = cpu->perf_limits;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
		if (!perf_limits) {
			limits = &performance_limits;
			perf_limits = limits;
		}
		if (policy->max >= policy->cpuinfo.max_freq) {
			pr_debug("set performance\n");
			intel_pstate_set_performance_limits(perf_limits);
			goto out;
		}
	} else {
		pr_debug("set powersave\n");
		if (!perf_limits) {
			limits = &powersave_limits;
			perf_limits = limits;
		}

	}

	intel_pstate_update_perf_limits(policy, perf_limits);
 out:
	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
		/*
		 * NOHZ_FULL CPUs need this as the governor callback may not
		 * be invoked on them.
		 */
		intel_pstate_clear_update_util_hook(policy->cpu);
		intel_pstate_max_within_limits(cpu);
	}

	intel_pstate_set_update_util_hook(policy->cpu);

	intel_pstate_hwp_set_policy(policy);

	return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}

static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
	intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	pr_debug("CPU %d exiting\n", policy->cpu);

	intel_pstate_clear_update_util_hook(policy->cpu);
	if (!hwp_active)
		intel_cpufreq_stop_cpu(policy);
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	intel_pstate_exit_perf_limits(policy);

	policy->fast_switch_possible = false;

	return 0;
}

static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	/*
	 * We need sane value in the cpu->perf_limits, so inherit from global
	 * perf_limits limits, which are seeded with values based on the
	 * CONFIG_CPU_FREQ_DEFAULT_GOV_*, during boot up.
	 */
	if (per_cpu_limits)
		memcpy(cpu->perf_limits, limits, sizeof(struct perf_limits));

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	update_turbo_state();
	policy->cpuinfo.max_freq = limits->turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	policy->cpuinfo.max_freq *= cpu->pstate.scaling;

	intel_pstate_init_acpi_perf_limits(policy);
	cpumask_set_cpu(policy->cpu, policy->cpus);

	policy->fast_switch_possible = true;

	return 0;
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	return 0;
}

static struct cpufreq_driver intel_pstate = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.resume		= intel_pstate_resume,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};

static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	struct perf_limits *perf_limits = limits;

	update_turbo_state();
	policy->cpuinfo.max_freq = limits->turbo_disabled ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;

	cpufreq_verify_within_cpu_limits(policy);

	if (per_cpu_limits)
		perf_limits = cpu->perf_limits;

	intel_pstate_update_perf_limits(policy, perf_limits);

	return 0;
}
static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu,
					       struct cpufreq_policy *policy,
					       unsigned int target_freq)
{
	unsigned int max_freq;

	update_turbo_state();

	max_freq = limits->no_turbo || limits->turbo_disabled ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
	policy->cpuinfo.max_freq = max_freq;
	if (policy->max > max_freq)
		policy->max = max_freq;

	if (target_freq > max_freq)
		target_freq = max_freq;

	return target_freq;
}

static int intel_cpufreq_target(struct cpufreq_policy *policy,
				unsigned int target_freq,
				unsigned int relation)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	struct cpufreq_freqs freqs;
	int target_pstate;

	freqs.old = policy->cur;
	freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq);

	cpufreq_freq_transition_begin(policy, &freqs);
	switch (relation) {
	case CPUFREQ_RELATION_L:
		target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
		break;
	case CPUFREQ_RELATION_H:
		target_pstate = freqs.new / cpu->pstate.scaling;
		break;
	default:
		target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
		break;
	}
	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	if (target_pstate != cpu->pstate.current_pstate) {
		cpu->pstate.current_pstate = target_pstate;
		wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
			      pstate_funcs.get_val(cpu, target_pstate));
	}
	cpufreq_freq_transition_end(policy, &freqs, false);

	return 0;
}

static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int target_pstate;

	target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
	intel_pstate_update_pstate(cpu, target_pstate);
	return target_freq;
}
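/*
 * How a target frequency maps to a P-state above, for a hypothetical
 * scaling factor of 100000 (i.e. 100 MHz per ratio step) and a request
 * of 2,160,000 kHz:
 *
 *	CPUFREQ_RELATION_L: DIV_ROUND_UP(2160000, 100000)      = 22
 *	CPUFREQ_RELATION_H: 2160000 / 100000                   = 21
 *	default:            DIV_ROUND_CLOSEST(2160000, 100000) = 22
 *
 * i.e. RELATION_L picks the lowest P-state that still delivers at least
 * the requested frequency, RELATION_H the highest one that does not
 * exceed it.  intel_cpufreq_fast_switch() always rounds up.
 */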
static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
	/* This reflects the intel_pstate_get_cpu_pstates() setting. */
	policy->cur = policy->cpuinfo.min_freq;

	return 0;
}

static struct cpufreq_driver intel_cpufreq = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_cpufreq_verify_policy,
	.target		= intel_cpufreq_target,
	.fast_switch	= intel_cpufreq_fast_switch,
	.init		= intel_cpufreq_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.stop_cpu	= intel_cpufreq_stop_cpu,
	.name		= "intel_cpufreq",
};

static struct cpufreq_driver *intel_pstate_driver = &intel_pstate;

static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
static unsigned int force_load __initdata;

static int __init intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void __init copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

#ifdef CONFIG_ACPI
static void intel_pstate_use_acpi_profile(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_MOBILE)
		pstate_funcs.get_target_pstate =
			get_target_pstate_use_cpu_load;
}
#else
static void intel_pstate_use_acpi_profile(void)
{
}
#endif

static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.get_val = funcs->get_val;
	pstate_funcs.get_vid = funcs->get_vid;
	pstate_funcs.get_target_pstate = funcs->get_target_pstate;

	intel_pstate_use_acpi_profile();
}

#ifdef CONFIG_ACPI

static bool __init intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool __init intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16 valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int oem_pwr_table;
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] __initdata = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};

static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
			     ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					(!force_load);
			}
	}

	return false;
}

static void intel_pstate_request_control_from_smm(void)
{
	/*
	 * It may be unsafe to request P-states control from SMM if _PPC support
	 * has not been enabled.
	 */
	if (acpi_ppc)
		acpi_processor_pstate_control();
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
	{}
};
2210 */ 2211 if (intel_pstate_platform_pwr_mgmt_exists()) 2212 return -ENODEV; 2213 2214 pr_info("Intel P-state driver initializing\n"); 2215 2216 all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus()); 2217 if (!all_cpu_data) 2218 return -ENOMEM; 2219 2220 if (!hwp_active && hwp_only) 2221 goto out; 2222 2223 intel_pstate_request_control_from_smm(); 2224 2225 rc = cpufreq_register_driver(intel_pstate_driver); 2226 if (rc) 2227 goto out; 2228 2229 intel_pstate_debug_expose_params(); 2230 intel_pstate_sysfs_expose_params(); 2231 2232 if (hwp_active) 2233 pr_info("HWP enabled\n"); 2234 2235 return rc; 2236 out: 2237 get_online_cpus(); 2238 for_each_online_cpu(cpu) { 2239 if (all_cpu_data[cpu]) { 2240 if (intel_pstate_driver == &intel_pstate) 2241 intel_pstate_clear_update_util_hook(cpu); 2242 2243 kfree(all_cpu_data[cpu]); 2244 } 2245 } 2246 2247 put_online_cpus(); 2248 vfree(all_cpu_data); 2249 return -ENODEV; 2250 } 2251 device_initcall(intel_pstate_init); 2252 2253 static int __init intel_pstate_setup(char *str) 2254 { 2255 if (!str) 2256 return -EINVAL; 2257 2258 if (!strcmp(str, "disable")) { 2259 no_load = 1; 2260 } else if (!strcmp(str, "passive")) { 2261 pr_info("Passive mode enabled\n"); 2262 intel_pstate_driver = &intel_cpufreq; 2263 no_hwp = 1; 2264 } 2265 if (!strcmp(str, "no_hwp")) { 2266 pr_info("HWP disabled\n"); 2267 no_hwp = 1; 2268 } 2269 if (!strcmp(str, "force")) 2270 force_load = 1; 2271 if (!strcmp(str, "hwp_only")) 2272 hwp_only = 1; 2273 if (!strcmp(str, "per_cpu_perf_limits")) 2274 per_cpu_limits = true; 2275 2276 #ifdef CONFIG_ACPI 2277 if (!strcmp(str, "support_acpi_ppc")) 2278 acpi_ppc = true; 2279 #endif 2280 2281 return 0; 2282 } 2283 early_param("intel_pstate", intel_pstate_setup); 2284 2285 MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>"); 2286 MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors"); 2287 MODULE_LICENSE("GPL"); 2288