/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>

#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#endif

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

static inline u64 mul_ext_fp(u64 x, u64 y)
{
	return (x * y) >> EXT_FRAC_BITS;
}

static inline u64 div_ext_fp(u64 x, u64 y)
{
	return div64_u64(x << EXT_FRAC_BITS, y);
}
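
/*
 * Illustrative example (not used by the driver): with FRAC_BITS == 8,
 * one unit is represented as int_tofp(1) == 256.  Converting 75% to a
 * fixed-point fraction and applying it to a P-state of 24:
 *
 *	int32_t frac = div_fp(75, 100);		// (75 << 8) / 100 == 192
 *	int pstate = fp_toint(24 * frac);	// (24 * 192) >> 8  == 18
 *
 * i.e. 0.75 is stored as 192 and 24 * 0.75 truncates to 18.  The
 * mul_ext_fp()/div_ext_fp() variants work the same way but carry
 * EXT_BITS == 6 extra fraction bits for the APERF/MPERF ratios.
 */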

/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate next
 *			P state. This can be different from core_avg_perf
 *			to account for cpu idle period
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing next P State.
 */
struct sample {
	int32_t core_avg_perf;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	u64 time;
};

/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical: This is physical Max P state for a processor
 *			This can be higher than the max_pstate which can
 *			be limited by platform thermal design power limits
 * @scaling:		Scaling factor to convert frequency to cpufreq
 *			frequency units
 * @turbo_pstate:	Max Turbo P state possible for this platform
 *
 * Stores the per cpu model P state limits and current P state.
 */
struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	max_pstate_physical;
	int	scaling;
	int	turbo_pstate;
};

/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P State.
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - Min P State)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
 * This data is used in Atom platforms, where in addition to target P state,
 * the voltage data needs to be specified to select next P State.
 */
struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

/**
 * struct _pid -	Stores PID data
 * @setpoint:		Target set point for busyness or performance
 * @integral:		Storage for accumulated error values
 * @p_gain:		PID proportional gain
 * @i_gain:		PID integral gain
 * @d_gain:		PID derivative gain
 * @deadband:		PID deadband
 * @last_err:		Last error storage for integral part of PID calculation
 *
 * Stores PID coefficients and last error for PID controller.
 */
struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};
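
/*
 * For reference, pid_calc() below implements the discrete PID law
 *
 *	error    = setpoint - busy
 *	integral = clamp(integral + error, -30, 30)
 *	output   = p_gain * error + i_gain * integral +
 *		   d_gain * (error - last_err)
 *
 * in FRAC_BITS fixed point.  A positive output asks for a lower P-state
 * and a negative one for a higher P-state, since the caller subtracts
 * the output from the currently requested P-state.
 */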

/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @iowait_boost:	iowait-related boost fraction
 * @last_update:	Time of the last update.
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @pid:		Stores PID parameters for this CPU
 * @last_sample_time:	Last Sample time
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: IO Wait time difference from last and
 *			current sample
 * @sample:		Storage for storing last Sample data
 * @acpi_perf_data:	Stores ACPI perf information read from _PSS
 * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
 *
 * This structure stores per CPU instance data for all CPUs.
 */
struct cpudata {
	int cpu;

	struct update_util_data update_util;
	bool   update_util_set;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	u64	last_update;
	u64	last_sample_time;
	u64	prev_aperf;
	u64	prev_mperf;
	u64	prev_tsc;
	u64	prev_cummulative_iowait;
	struct sample sample;
#ifdef CONFIG_ACPI
	struct acpi_processor_performance acpi_perf_data;
	bool valid_pss_table;
#endif
	unsigned int iowait_boost;
};

static struct cpudata **all_cpu_data;

/**
 * struct pstate_adjust_policy - Stores static PID configuration data
 * @sample_rate_ms:	PID calculation sample rate in ms
 * @sample_rate_ns:	Sample rate calculation in ns
 * @deadband:		PID deadband
 * @setpoint:		PID Setpoint
 * @p_gain_pct:		PID proportional gain
 * @i_gain_pct:		PID integral gain
 * @d_gain_pct:		PID derivative gain
 *
 * Stores per CPU model static PID configuration data.
 */
struct pstate_adjust_policy {
	int sample_rate_ms;
	s64 sample_rate_ns;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};
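
/*
 * Illustrative note: the Core defaults below (core_params) use a 10 ms
 * sample rate, a setpoint of 97 and a deadband of 0, i.e. the PID tries
 * to hold the scaled busy value at 97% and reacts to any deviation,
 * while the Atom and Goldmont tables use a setpoint of 60 together with
 * a small integral gain.
 */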

/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non turbo effective P state
 * @get_max_physical:	Callback to get maximum non turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 * @get_target_pstate:	Callback to a function to calculate next P state to use
 *
 * Core and Atom CPU models have different ways to get P State limits. This
 * structure is used to store those callbacks.
 */
struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	u64 (*get_val)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
	int32_t (*get_target_pstate)(struct cpudata *);
};

/**
 * struct cpu_defaults - Per CPU model default config data
 * @pid_policy:	PID config data
 * @funcs:	Callback function data
 */
struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);

static struct pstate_adjust_policy pid_params __read_mostly;
static struct pstate_funcs pstate_funcs __read_mostly;
static int hwp_active __read_mostly;

#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif
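
/*
 * The driver always dispatches through pstate_funcs; for example, a
 * P-state request ends up as (see intel_pstate_update_pstate() below)
 *
 *	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
 *
 * so only the per-model get_*() callbacks differ between Core, Atom
 * and KNL parts.
 */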

/**
 * struct perf_limits - Store user and policy limits
 * @no_turbo:		User requested turbo state from intel_pstate sysfs
 * @turbo_disabled:	Platform turbo status either from msr
 *			MSR_IA32_MISC_ENABLE or when maximum available pstate
 *			matches the maximum turbo pstate
 * @max_perf_pct:	Effective maximum performance limit in percentage, this
 *			is minimum of either limits enforced by cpufreq policy
 *			or limits from user set limits via intel_pstate sysfs
 * @min_perf_pct:	Effective minimum performance limit in percentage, this
 *			is maximum of either limits enforced by cpufreq policy
 *			or limits from user set limits via intel_pstate sysfs
 * @max_perf:		This is a scaled value between 0 to 255 for max_perf_pct
 *			This value is used to limit max pstate
 * @min_perf:		This is a scaled value between 0 to 255 for min_perf_pct
 *			This value is used to limit min pstate
 * @max_policy_pct:	The maximum performance in percentage enforced by
 *			cpufreq setpolicy interface
 * @max_sysfs_pct:	The maximum performance in percentage enforced by
 *			intel pstate sysfs interface
 * @min_policy_pct:	The minimum performance in percentage enforced by
 *			cpufreq setpolicy interface
 * @min_sysfs_pct:	The minimum performance in percentage enforced by
 *			intel pstate sysfs interface
 *
 * Storage for user and policy defined limits.
 */
struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};

static struct perf_limits performance_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 100,
	.min_perf = int_tofp(1),
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

static struct perf_limits powersave_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif

#ifdef CONFIG_ACPI

static bool intel_pstate_get_ppc_enable_status(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
	    acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
		return true;

	return acpi_ppc;
}

static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int ret;
	int i;

	if (hwp_active)
		return;

	if (!intel_pstate_get_ppc_enable_status())
		return;

	cpu = all_cpu_data[policy->cpu];

	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
						  policy->cpu);
	if (ret)
		return;

	/*
	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
	 * guarantee that the states returned by it map to the states in our
	 * list directly.
	 */
	if (cpu->acpi_perf_data.control_register.space_id !=
						ACPI_ADR_SPACE_FIXED_HARDWARE)
		goto err;

	/*
	 * If there is only one entry in _PSS, simply ignore _PSS and continue
	 * as usual without taking _PSS into account.
	 */
	if (cpu->acpi_perf_data.state_count < 2)
		goto err;

	pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
		pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n",
			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
			 (u32) cpu->acpi_perf_data.states[i].power,
			 (u32) cpu->acpi_perf_data.states[i].control);
	}

	/*
	 * The _PSS table doesn't contain the whole turbo frequency range.
	 * It just contains +1 MHz above the max non turbo frequency, with
	 * the control value corresponding to the max turbo ratio. But when
	 * cpufreq set_policy is called, it will be called with this max
	 * frequency, which will cause reduced performance, since this
	 * driver uses the real max turbo frequency as the max frequency.
	 * So fix up this frequency in the _PSS table to the correct max
	 * turbo frequency based on the turbo state.
	 * Also need to convert to MHz as _PSS freq is in MHz.
	 */
	if (!limits->turbo_disabled)
		cpu->acpi_perf_data.states[0].core_frequency =
					policy->cpuinfo.max_freq / 1000;
	cpu->valid_pss_table = true;
	pr_debug("_PPC limits will be enforced\n");

	return;

err:
	cpu->valid_pss_table = false;
	acpi_processor_unregister_performance(policy->cpu);
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[policy->cpu];
	if (!cpu->valid_pss_table)
		return;

	acpi_processor_unregister_performance(policy->cpu);
}

#else
static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}
#endif

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = int_tofp(setpoint);
	pid->deadband = int_tofp(deadband);
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(percent, 100);
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(percent, 100);
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(percent, 100);
}

static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = pid->setpoint - busy;

	if (abs(fp_error) <= pid->deadband)
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30.  This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}
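
/*
 * Worked example (illustrative): with the Core gains (p_gain_pct = 20,
 * i_gain_pct = d_gain_pct = 0) and setpoint 97, a scaled busy reading
 * of 92 gives fp_error = int_tofp(5), so
 *
 *	result = mul_fp(div_fp(20, 100), int_tofp(5));	// 255, ~int_tofp(1)
 *
 * and after adding the rounding half (1 << 7) and truncating, pid_calc()
 * returns 1, i.e. request one P-state below the current one (the caller
 * subtracts the result).
 */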

static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits->turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static void intel_pstate_hwp_set(const struct cpumask *cpumask)
{
	int min, hw_min, max, hw_max, cpu, range, adj_range;
	u64 value, cap;

	for_each_cpu(cpu, cpumask) {
		rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
		hw_min = HWP_LOWEST_PERF(cap);
		hw_max = HWP_HIGHEST_PERF(cap);
		range = hw_max - hw_min;

		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		adj_range = limits->min_perf_pct * range / 100;
		min = hw_min + adj_range;
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		adj_range = limits->max_perf_pct * range / 100;
		max = hw_min + adj_range;
		if (limits->no_turbo) {
			hw_max = HWP_GUARANTEED_PERF(cap);
			if (hw_max < max)
				max = hw_max;
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}
}

static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
{
	if (hwp_active)
		intel_pstate_hwp_set(policy->cpus);

	return 0;
}

static void intel_pstate_hwp_set_online_cpus(void)
{
	get_online_cpus();
	intel_pstate_hwp_set(cpu_online_mask);
	put_online_cpus();
}
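
/*
 * Worked example (illustrative): if MSR_HWP_CAPABILITIES reports
 * hw_min = 8 and hw_max = 36, the range is 28, so min_perf_pct = 25
 * maps to min = 8 + 28 * 25 / 100 = 15 while max_perf_pct = 100 keeps
 * max = 36; both are then merged into the MSR_HWP_REQUEST value via
 * the HWP_MIN_PERF()/HWP_MAX_PERF() field macros.
 */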

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active)
		return;
	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits->object);		\
	}

static ssize_t show_turbo_pct(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(no_turbo, total);
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}

static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits->turbo_disabled)
		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits->no_turbo);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_turbo_state();
	if (limits->turbo_disabled) {
		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
		return -EPERM;
	}

	limits->no_turbo = clamp_t(int, input, 0, 1);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();

	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf_pct = max(limits->min_perf_pct,
				   limits->max_perf_pct);
	limits->max_perf = div_fp(limits->max_perf_pct, 100);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();
	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->min_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->min_perf_pct = min(limits->max_perf_pct,
				   limits->min_perf_pct);
	limits->min_perf = div_fp(limits->min_perf_pct, 100);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();
	return count;
}
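
/*
 * Illustrative interplay of the two stores above: with default policy
 * limits and min_perf_pct currently 40, writing 30 to max_perf_pct
 * yields max_sysfs_pct = 30 but max_perf_pct = max(40, 30) = 40, i.e.
 * the effective maximum can never be pushed below the active minimum.
 */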

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	BUG_ON(rc);
}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable HWP notification interrupt as we don't process them */
	if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}

static int atom_get_min_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_TURBO_RATIOS, value);
	return value & 0x7F;
}

static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	return val | vid;
}

static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 from SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	WARN_ON(i > 4);

	return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 from SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}

static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(ATOM_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			 cpudata->pstate.min_pstate));

	rdmsrl(ATOM_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}
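
/*
 * Illustrative encoding (not executed): for an Atom part with
 * vid.min = int_tofp(0x20), vid.max = int_tofp(0x40) and P-states
 * 10..20, atom_get_val() for pstate 15 interpolates the VID halfway:
 * vid = ceiling_fp(vid.min + mul_fp(int_tofp(5), vid.ratio)) == 0x30,
 * and the PERF_CTL write value becomes (15 << 8) | 0x30.
 */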

static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		/* Do some sanity checking for safety */
		if (plat_info & 0x600000000) {
			u64 tdp_ctrl;
			u64 tdp_ratio;
			int tdp_msr;

			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
			if (err)
				goto skip_tar;

			tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
			if (err)
				goto skip_tar;

			/* For level 1 and 2, bits[23:16] contain the ratio */
			if (tdp_ctrl)
				tdp_ratio >>= 16;

			tdp_ratio &= 0xff; /* ratios are only 8 bits long */
			if (tdp_ratio - 1 == tar) {
				max_pstate = tar;
				pr_debug("max_pstate=TAC %x\n", max_pstate);
			} else {
				goto skip_tar;
			}
		}
	}

skip_tar:
	return max_pstate;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value) & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}

static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	return val;
}

static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}
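
/*
 * Illustrative decode (not executed): an MSR_PLATFORM_INFO value with
 * bits 15:8 == 0x24 and bits 47:40 == 0x08 yields
 * core_get_max_pstate_physical() == 36 and core_get_min_pstate() == 8,
 * while bits 7:0 of MSR_TURBO_RATIO_LIMIT supply the turbo ratio used
 * by core_get_turbo_pstate() (bits 15:8 on KNL).
 */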

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static const struct cpu_defaults silvermont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = silvermont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static const struct cpu_defaults airmont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = airmont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static const struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static const struct cpu_defaults bxt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits->no_turbo || limits->turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
	max_perf_adj = fp_toint(max_perf * limits->max_perf);
	*max = clamp_t(int, max_perf_adj,
		       cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(max_perf * limits->min_perf);
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
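
/*
 * Worked example (illustrative): with turbo_pstate = 35, min_pstate = 8
 * and limits->max_perf = div_fp(75, 100) == 192, the effective maximum
 * is fp_toint(35 * 192) == 26, clamped to [8, 35]; a limits->min_perf
 * of int_tofp(1) would instead pin both bounds at 35.
 */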

static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
	int pstate = cpu->pstate.min_pstate;

	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	cpu->pstate.current_pstate = pstate;
	/*
	 * Generally, there is no guarantee that this code will always run on
	 * the CPU being updated, so force the register update to run on the
	 * right CPU.
	 */
	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
		      pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	intel_pstate_set_min_pstate(cpu);
}

static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;

	sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
}

static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();
	if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = time;
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
	/*
	 * First time this function is invoked in a given cycle, all of the
	 * previous sample data fields are equal to zero or stale and they must
	 * be populated with meaningful numbers for things to work, so assume
	 * that sample.time will always be reset before setting the utilization
	 * update hook and make the caller skip the sample then.
	 */
	return !!cpu->last_sample_time;
}

static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->sample.core_avg_perf,
			  cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
}

static inline int32_t get_avg_pstate(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->pstate.max_pstate_physical,
			  cpu->sample.core_avg_perf);
}
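
/*
 * Worked example (illustrative): if a sample period shows aperf = 18M
 * and mperf = 24M cycles, core_avg_perf == div_ext_fp(18M, 24M), i.e.
 * 0.75 in EXT_FRAC_BITS fixed point, and with max_pstate_physical = 32
 * and scaling = 100000 the average frequency reported by
 * get_avg_frequency() is 0.75 * 3200000 kHz = 2400000 kHz (2.4 GHz).
 */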

static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int32_t busy_frac, boost;
	int target, avg_pstate;

	busy_frac = div_fp(sample->mperf, sample->tsc);

	boost = cpu->iowait_boost;
	cpu->iowait_boost >>= 1;

	if (busy_frac < boost)
		busy_frac = boost;

	sample->busy_scaled = busy_frac * 100;

	target = limits->no_turbo || limits->turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	target += target >> 2;
	target = mul_fp(target, busy_frac);
	if (target < cpu->pstate.min_pstate)
		target = cpu->pstate.min_pstate;

	/*
	 * If the average P-state during the previous cycle was higher than the
	 * current target, add 50% of the difference to the target to reduce
	 * possible performance oscillations and offset possible performance
	 * loss related to moving the workload from one CPU to another within
	 * a package/module.
	 */
	avg_pstate = get_avg_pstate(cpu);
	if (avg_pstate > target)
		target += (avg_pstate - target) >> 1;

	return target;
}

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
	int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
	u64 duration_ns;

	/*
	 * perf_scaled is the ratio of the average P-state during the last
	 * sampling period to the P-state requested last time (in percent).
	 *
	 * That measures the system's response to the previous P-state
	 * selection.
	 */
	max_pstate = cpu->pstate.max_pstate_physical;
	current_pstate = cpu->pstate.current_pstate;
	perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf,
				 div_fp(100 * max_pstate, current_pstate));

	/*
	 * Since our utilization update callback will not run unless we are
	 * in C0, check if the actual elapsed time is significantly greater (3x)
	 * than our sample interval.  If it is, then we were idle for a long
	 * enough period of time to adjust our performance metric.
	 */
	duration_ns = cpu->sample.time - cpu->last_sample_time;
	if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
		sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
		perf_scaled = mul_fp(perf_scaled, sample_ratio);
	} else {
		sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
		if (sample_ratio < int_tofp(1))
			perf_scaled = 0;
	}

	cpu->sample.busy_scaled = perf_scaled;
	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
}

static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	update_turbo_state();

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
	pstate = clamp_t(int, pstate, min_perf, max_perf);
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	if (pstate == cpu->pstate.current_pstate)
		return;

	cpu->pstate.current_pstate = pstate;
	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int from, target_pstate;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	target_pstate = pstate_funcs.get_target_pstate(cpu);

	intel_pstate_update_pstate(cpu, target_pstate);

	sample = &cpu->sample;
	trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
		fp_toint(sample->busy_scaled),
		from,
		cpu->pstate.current_pstate,
		sample->mperf,
		sample->aperf,
		sample->tsc,
		get_avg_frequency(cpu),
		fp_toint(cpu->iowait_boost * 100));
}
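
/*
 * Worked example (illustrative) for the load-based path above: with
 * turbo_pstate = 30 and turbo available, the headroom bump gives
 * target = 30 + (30 >> 2) = 37; a busy_frac of 0.5 then yields
 * mul_fp(37, div_fp(1, 2)) == 18, floored at min_pstate, and 50% of
 * any positive (avg_pstate - target) gap is added back.
 */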

static void intel_pstate_update_util(struct update_util_data *data, u64 time,
				     unsigned int flags)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	u64 delta_ns;

	if (pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load) {
		if (flags & SCHED_CPUFREQ_IOWAIT) {
			cpu->iowait_boost = int_tofp(1);
		} else if (cpu->iowait_boost) {
			/* Clear iowait_boost if the CPU may have been idle. */
			delta_ns = time - cpu->last_update;
			if (delta_ns > TICK_NSEC)
				cpu->iowait_boost = 0;
		}
		cpu->last_update = time;
	}

	delta_ns = time - cpu->sample.time;
	if ((s64)delta_ns >= pid_params.sample_rate_ns) {
		bool sample_taken = intel_pstate_sample(cpu, time);

		if (sample_taken) {
			intel_pstate_calc_avg_perf(cpu);
			if (!hwp_active)
				intel_pstate_adjust_busy_pstate(cpu);
		}
	}
}

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(INTEL_FAM6_SANDYBRIDGE,		core_params),
	ICPU(INTEL_FAM6_SANDYBRIDGE_X,		core_params),
	ICPU(INTEL_FAM6_ATOM_SILVERMONT1,	silvermont_params),
	ICPU(INTEL_FAM6_IVYBRIDGE,		core_params),
	ICPU(INTEL_FAM6_HASWELL_CORE,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_CORE,		core_params),
	ICPU(INTEL_FAM6_IVYBRIDGE_X,		core_params),
	ICPU(INTEL_FAM6_HASWELL_X,		core_params),
	ICPU(INTEL_FAM6_HASWELL_ULT,		core_params),
	ICPU(INTEL_FAM6_HASWELL_GT3E,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_GT3E,		core_params),
	ICPU(INTEL_FAM6_ATOM_AIRMONT,		airmont_params),
	ICPU(INTEL_FAM6_SKYLAKE_MOBILE,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_X,		core_params),
	ICPU(INTEL_FAM6_SKYLAKE_DESKTOP,	core_params),
	ICPU(INTEL_FAM6_BROADWELL_XEON_D,	core_params),
	ICPU(INTEL_FAM6_XEON_PHI_KNL,		knl_params),
	ICPU(INTEL_FAM6_ATOM_GOLDMONT,		bxt_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
	ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
	ICPU(INTEL_FAM6_BROADWELL_X, core_params),
	ICPU(INTEL_FAM6_SKYLAKE_X, core_params),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	if (!all_cpu_data[cpunum])
		all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
					       GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active) {
		intel_pstate_hwp_enable(cpu);
		pid_params.sample_rate_ms = 50;
		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
	}

	intel_pstate_get_cpu_pstates(cpu);

	intel_pstate_busy_pid_reset(cpu);

	pr_debug("controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	return cpu ? get_avg_frequency(cpu) : 0;
}
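
/*
 * Illustrative timeline (not executed): a SCHED_CPUFREQ_IOWAIT flag
 * sets iowait_boost to int_tofp(1) == 256, which acts as a 100%
 * busy_frac floor in get_target_pstate_use_cpu_load() and then halves
 * on every sample (256, 128, 64, ...) until real utilization wins, or
 * is cleared outright after a tick-long idle gap.
 */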

static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	if (cpu->update_util_set)
		return;

	/* Prevent intel_pstate_update_util() from using stale data. */
	cpu->sample.time = 0;
	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
				     intel_pstate_update_util);
	cpu->update_util_set = true;
}

static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
	struct cpudata *cpu_data = all_cpu_data[cpu];

	if (!cpu_data->update_util_set)
		return;

	cpufreq_remove_update_util_hook(cpu);
	cpu_data->update_util_set = false;
	synchronize_sched();
}

static void intel_pstate_set_performance_limits(struct perf_limits *limits)
{
	limits->no_turbo = 0;
	limits->turbo_disabled = 0;
	limits->max_perf_pct = 100;
	limits->max_perf = int_tofp(1);
	limits->min_perf_pct = 100;
	limits->min_perf = int_tofp(1);
	limits->max_policy_pct = 100;
	limits->max_sysfs_pct = 100;
	limits->min_policy_pct = 0;
	limits->min_sysfs_pct = 0;
}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
		 policy->cpuinfo.max_freq, policy->max);

	cpu = all_cpu_data[0];
	if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
	    policy->max < policy->cpuinfo.max_freq &&
	    policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
		pr_debug("policy->max > max non turbo frequency\n");
		policy->max = policy->cpuinfo.max_freq;
	}

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
		limits = &performance_limits;
		if (policy->max >= policy->cpuinfo.max_freq) {
			pr_debug("set performance\n");
			intel_pstate_set_performance_limits(limits);
			goto out;
		}
	} else {
		pr_debug("set powersave\n");
		limits = &powersave_limits;
	}

	limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0, 100);
	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
					      policy->cpuinfo.max_freq);
	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

	limits->min_perf = div_fp(limits->min_perf_pct, 100);
	limits->max_perf = div_fp(limits->max_perf_pct, 100);
	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);

out:
	intel_pstate_set_update_util_hook(policy->cpu);

	intel_pstate_hwp_set_policy(policy);

	return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}
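
/*
 * Worked example (illustrative) for the limit normalization above:
 * with cpuinfo.max_freq = 3200000 kHz, policy->min = 800000 and
 * policy->max = 1600000 give min_policy_pct = 25 and max_policy_pct =
 * DIV_ROUND_UP(1600000 * 100, 3200000) = 50; the sysfs percentages are
 * then folded in and min_perf/max_perf stored as FRAC_BITS fractions.
 */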

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_debug("CPU %d exiting\n", cpu_num);

	intel_pstate_clear_update_util_hook(cpu_num);

	if (hwp_active)
		return;

	intel_pstate_set_min_pstate(cpu);
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	update_turbo_state();
	policy->cpuinfo.max_freq = limits->turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	policy->cpuinfo.max_freq *= cpu->pstate.scaling;

	intel_pstate_init_acpi_perf_limits(policy);
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	intel_pstate_exit_perf_limits(policy);

	return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.resume		= intel_pstate_hwp_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};

static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
static unsigned int force_load __initdata;

static int __init intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void __init copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.get_val = funcs->get_val;
	pstate_funcs.get_vid = funcs->get_vid;
	pstate_funcs.get_target_pstate = funcs->get_target_pstate;
}

#ifdef CONFIG_ACPI

static bool __init intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL,
					      &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool __init intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int  oem_pwr_table;
};

/* Hardware vendors whose platform firmware provides its own power management */
static struct hw_vendor_info vendor_info[] __initdata = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};

static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
						ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					(!force_load);
			}
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
	{}
};

static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;

	if (no_load)
		return -ENODEV;

	if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
		copy_cpu_funcs(&core_params.funcs);
		hwp_active++;
		goto hwp_cpu_matched;
	}

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

hwp_cpu_matched:
	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	pr_info("Intel P-state driver initializing\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (!hwp_active && hwp_only)
		goto out;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	if (hwp_active)
		pr_info("HWP enabled\n");

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			intel_pstate_clear_update_util_hook(cpu);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	if (!strcmp(str, "no_hwp")) {
		pr_info("HWP disabled\n");
		no_hwp = 1;
	}
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;

#ifdef CONFIG_ACPI
	if (!strcmp(str, "support_acpi_ppc"))
		acpi_ppc = true;
#endif

	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");