/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>

#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#endif

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

static inline u64 mul_ext_fp(u64 x, u64 y)
{
	return (x * y) >> EXT_FRAC_BITS;
}

static inline u64 div_ext_fp(u64 x, u64 y)
{
	return div64_u64(x << EXT_FRAC_BITS, y);
}

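/*
 * A worked example of the fixed point helpers above (illustrative only,
 * not part of the driver): with FRAC_BITS == 8, values carry 8
 * fractional bits, so int_tofp(1) == 256. Multiplying 1.5 by 2.0:
 *
 *	int32_t a = int_tofp(3) / 2;	// 1.5 -> 384
 *	int32_t b = int_tofp(2);	// 2.0 -> 512
 *	int32_t c = mul_fp(a, b);	// (384 * 512) >> 8 == 768 == 3.0
 *
 * mul_ext_fp()/div_ext_fp() work the same way but carry
 * EXT_FRAC_BITS == 14 fractional bits, for extra precision when the
 * APERF/MPERF ratio is close to 1.
 */
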
/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate next
 *			P state. This can differ from core_avg_perf to
 *			account for cpu idle periods
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @freq:		Effective frequency calculated from APERF/MPERF
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing the next P state.
 */
struct sample {
	int32_t core_avg_perf;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	int freq;
	u64 time;
};

/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical: Physical max P state for the processor. This can be
 *			higher than max_pstate, which can be limited by the
 *			platform's thermal design power limit
 * @scaling:		Scaling factor to convert frequency to cpufreq
 *			frequency units
 * @turbo_pstate:	Max turbo P state possible for this platform
 *
 * Stores the per cpu model P state limits and current P state.
 */
struct pstate_data {
	int current_pstate;
	int min_pstate;
	int max_pstate;
	int max_pstate_physical;
	int scaling;
	int turbo_pstate;
};

/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P state.
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - min P state)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
 * This data is used on Atom platforms, where in addition to the target P
 * state, the voltage data needs to be specified to select the next P state.
 */
struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

/**
 * struct _pid -	Stores PID data
 * @setpoint:		Target set point for busyness or performance
 * @integral:		Storage for accumulated error values
 * @p_gain:		PID proportional gain
 * @i_gain:		PID integral gain
 * @d_gain:		PID derivative gain
 * @deadband:		PID deadband
 * @last_err:		Last error storage for integral part of PID calculation
 *
 * Stores PID coefficients and last error for PID controller.
 */
struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @pid:		Stores PID parameters for this CPU
 * @last_sample_time:	Last sample time
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: IO wait time difference between the last and
 *			current sample
 * @sample:		Storage for last sample data
 * @acpi_perf_data:	Stores ACPI perf information read from _PSS
 * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
 *
 * This structure stores per CPU instance data for all CPUs.
 */
struct cpudata {
	int cpu;

	struct update_util_data update_util;
	bool update_util_set;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	u64 last_sample_time;
	u64 prev_aperf;
	u64 prev_mperf;
	u64 prev_tsc;
	u64 prev_cummulative_iowait;
	struct sample sample;
#ifdef CONFIG_ACPI
	struct acpi_processor_performance acpi_perf_data;
	bool valid_pss_table;
#endif
};

static struct cpudata **all_cpu_data;

/**
 * struct pstate_adjust_policy - Stores static PID configuration data
 * @sample_rate_ms:	PID calculation sample rate in ms
 * @sample_rate_ns:	Sample rate in ns, derived from @sample_rate_ms
 * @deadband:		PID deadband
 * @setpoint:		PID setpoint
 * @p_gain_pct:		PID proportional gain
 * @i_gain_pct:		PID integral gain
 * @d_gain_pct:		PID derivative gain
 *
 * Stores per CPU model static PID configuration data.
 */
struct pstate_adjust_policy {
	int sample_rate_ms;
	s64 sample_rate_ns;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non turbo effective P state
 * @get_max_physical:	Callback to get maximum non turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 * @get_target_pstate:	Callback to a function to calculate next P state to use
 *
 * Core and Atom CPU models have different ways to get P state limits. This
 * structure is used to store those callbacks.
 */
struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	u64 (*get_val)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
	int32_t (*get_target_pstate)(struct cpudata *);
};

/**
 * struct cpu_defaults - Per CPU model default config data
 * @pid_policy:	PID config data
 * @funcs:	Callback function data
 */
struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;

#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif

/**
 * struct perf_limits - Store user and policy limits
 * @no_turbo:		User requested turbo state from intel_pstate sysfs
 * @turbo_disabled:	Platform turbo status either from msr
 *			MSR_IA32_MISC_ENABLE or when maximum available pstate
 *			matches the maximum turbo pstate
 * @max_perf_pct:	Effective maximum performance limit in percentage; this
 *			is the minimum of the limit enforced by the cpufreq
 *			policy and the limit set via the intel_pstate sysfs
 *			interface
 * @min_perf_pct:	Effective minimum performance limit in percentage; this
 *			is the maximum of the limit enforced by the cpufreq
 *			policy and the limit set via the intel_pstate sysfs
 *			interface
 * @max_perf:		This is a scaled value between 0 and 255 for
 *			max_perf_pct. This value is used to limit max pstate
 * @min_perf:		This is a scaled value between 0 and 255 for
 *			min_perf_pct. This value is used to limit min pstate
 * @max_policy_pct:	The maximum performance in percentage enforced by
 *			cpufreq setpolicy interface
 * @max_sysfs_pct:	The maximum performance in percentage enforced by
 *			intel pstate sysfs interface
 * @min_policy_pct:	The minimum performance in percentage enforced by
 *			cpufreq setpolicy interface
 * @min_sysfs_pct:	The minimum performance in percentage enforced by
 *			intel pstate sysfs interface
 *
 * Storage for user and policy defined limits.
 */
struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};

static struct perf_limits performance_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 100,
	.min_perf = int_tofp(1),
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

static struct perf_limits powersave_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif

#ifdef CONFIG_ACPI

static bool intel_pstate_get_ppc_enable_status(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
	    acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
		return true;

	return acpi_ppc;
}

/*
 * The max target pstate ratio is an 8 bit value in both the PLATFORM_INFO MSR
 * and the TURBO_RATIO_LIMIT MSR, which the pstate driver stores in the
 * max_pstate and turbo_pstate fields. The PERF_CTL MSR contains a 16 bit
 * value for the P state ratio, of which only the high 8 bits are used; for
 * example, 0x1700 requests target ratio 0x17. The _PSS control value is
 * stored in a format which can be directly written to the PERF_CTL MSR, but
 * in the intel_pstate driver this shift occurs while writing PERF_CTL (e.g.
 * via the value produced by the get_val() callbacks).
 * This function converts the _PSS control value to the intel_pstate driver
 * format for comparison and assignment.
 */
static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
{
	return cpu->acpi_perf_data.states[index].control >> 8;
}

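/*
 * Illustrative example (not driver code): if a _PSS entry carries the
 * control value 0x1700, convert_to_native_pstate_format() returns
 * 0x1700 >> 8 == 0x17, i.e. ratio 23, which matches the format used in
 * cpu->pstate.max_pstate and friends. The shift back into PERF_CTL
 * format happens in the get_val() callbacks.
 */
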
static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int turbo_pss_ctl;
	int ret;
	int i;

	if (hwp_active)
		return;

	if (!intel_pstate_get_ppc_enable_status())
		return;

	cpu = all_cpu_data[policy->cpu];

	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
						  policy->cpu);
	if (ret)
		return;

	/*
	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
	 * guarantee that the states returned by it map to the states in our
	 * list directly.
	 */
	if (cpu->acpi_perf_data.control_register.space_id !=
						ACPI_ADR_SPACE_FIXED_HARDWARE)
		goto err;

	/*
	 * If there is only one entry in _PSS, simply ignore _PSS and continue
	 * as usual without taking _PSS into account.
	 */
	if (cpu->acpi_perf_data.state_count < 2)
		goto err;

	pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
			 (u32) cpu->acpi_perf_data.states[i].power,
			 (u32) cpu->acpi_perf_data.states[i].control);
	}

	/*
	 * The _PSS table doesn't contain the whole turbo frequency range;
	 * it just contains +1 MHz above the max non turbo frequency, with a
	 * control value corresponding to the max turbo ratio. But when the
	 * cpufreq set_policy callback is invoked with that max frequency it
	 * causes reduced performance, because this driver uses the real max
	 * turbo frequency as the max frequency. So correct this frequency in
	 * the _PSS table to the max turbo frequency based on the turbo ratio.
	 * The value also needs to be in MHz, as all _PSS frequencies are.
	 */
	turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
	if (turbo_pss_ctl > cpu->pstate.max_pstate)
		cpu->acpi_perf_data.states[0].core_frequency =
					policy->cpuinfo.max_freq / 1000;
	cpu->valid_pss_table = true;
	pr_debug("_PPC limits will be enforced\n");

	return;

 err:
	cpu->valid_pss_table = false;
	acpi_processor_unregister_performance(policy->cpu);
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[policy->cpu];
	if (!cpu->valid_pss_table)
		return;

	acpi_processor_unregister_performance(policy->cpu);
}

#else
static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}
#endif

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = int_tofp(setpoint);
	pid->deadband = int_tofp(deadband);
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(percent, 100);
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(percent, 100);
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(percent, 100);
}

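/*
 * Worked example for the gain setters above (illustrative only): with
 * FRAC_BITS == 8, pid_p_gain_set(pid, 20) stores
 * div_fp(20, 100) == (20 << 8) / 100 == 51, i.e. roughly 0.199 in
 * fixed point, so a proportional gain of 20 percent.
 */
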
static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = pid->setpoint - busy;

	if (abs(fp_error) <= pid->deadband)
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30.  This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}

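/*
 * Worked example for pid_calc() (illustrative numbers only): with the
 * core defaults (setpoint 97, p_gain_pct 20, deadband 0, i and d gains
 * 0) and a busy value of 90.0, fp_error is int_tofp(97 - 90) == 1792,
 * pterm is mul_fp(51, 1792) == 357, the i and d terms are 0, and after
 * adding the rounding constant fp_toint(357 + 128) == 1. The caller
 * then steps the target P state down by 1 from the one requested last.
 */
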
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits->turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static void intel_pstate_hwp_set(const struct cpumask *cpumask)
{
	int min, hw_min, max, hw_max, cpu, range, adj_range;
	u64 value, cap;

	rdmsrl(MSR_HWP_CAPABILITIES, cap);
	hw_min = HWP_LOWEST_PERF(cap);
	hw_max = HWP_HIGHEST_PERF(cap);
	range = hw_max - hw_min;

	for_each_cpu(cpu, cpumask) {
		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		adj_range = limits->min_perf_pct * range / 100;
		min = hw_min + adj_range;
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		adj_range = limits->max_perf_pct * range / 100;
		max = hw_min + adj_range;
		if (limits->no_turbo) {
			hw_max = HWP_GUARANTEED_PERF(cap);
			if (hw_max < max)
				max = hw_max;
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}
}

static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
{
	if (hwp_active)
		intel_pstate_hwp_set(policy->cpus);

	return 0;
}

static void intel_pstate_hwp_set_online_cpus(void)
{
	get_online_cpus();
	intel_pstate_hwp_set(cpu_online_mask);
	put_online_cpus();
}

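/*
 * Worked example for intel_pstate_hwp_set() (illustrative numbers
 * only): with HWP capabilities reporting hw_min == 8 and hw_max == 36
 * the range is 28, so min_perf_pct == 50 maps to a minimum performance
 * request of 8 + (50 * 28 / 100) == 22. The max limit is mapped the
 * same way and both are written into the MSR_HWP_REQUEST min/max
 * performance fields of every CPU in the mask.
 */
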
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active)
		return;
	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits->object);		\
	}

static ssize_t show_turbo_pct(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(no_turbo, total);
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}

static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits->turbo_disabled)
		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits->no_turbo);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_turbo_state();
	if (limits->turbo_disabled) {
		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
		return -EPERM;
	}

	limits->no_turbo = clamp_t(int, input, 0, 1);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();

	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf_pct = max(limits->min_perf_pct,
				   limits->max_perf_pct);
	limits->max_perf = div_fp(limits->max_perf_pct, 100);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();
	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->min_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->min_perf_pct = min(limits->max_perf_pct,
				   limits->min_perf_pct);
	limits->min_perf = div_fp(limits->min_perf_pct, 100);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();
	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	BUG_ON(rc);
}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable HWP notification interrupts, as we don't process them */
	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}

static int atom_get_min_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_TURBO_RATIOS, value);
	return value & 0x7F;
}

static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	return val | vid;
}

static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 of the SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	WARN_ON(i > 4);

	return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 of the SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}

static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(ATOM_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			 cpudata->pstate.min_pstate));

	rdmsrl(ATOM_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

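/*
 * Worked VID example (illustrative numbers only): if ATOM_VIDS reports
 * raw vid min 20 and max 54 while the P state range is 10..27,
 * vid.ratio is div_fp(int_tofp(34), int_tofp(17)) == int_tofp(2), so
 * atom_get_val() at pstate 15 interpolates
 * vid = ceiling_fp(int_tofp(20) + mul_fp(int_tofp(5), int_tofp(2)))
 * == 30 and ORs it into the low byte of the PERF_CTL value.
 */
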
static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		/* Do some sanity checking for safety */
		if (plat_info & 0x600000000) {
			u64 tdp_ctrl;
			u64 tdp_ratio;
			int tdp_msr;

			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
			if (err)
				goto skip_tar;

			tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
			if (err)
				goto skip_tar;

			/* For level 1 and 2, bits[23:16] contain the ratio */
			if (tdp_ctrl)
				tdp_ratio >>= 16;

			tdp_ratio &= 0xff; /* ratios are only 8 bits long */
			if (tdp_ratio - 1 == tar) {
				max_pstate = tar;
				pr_debug("max_pstate=TAC %x\n", max_pstate);
			} else {
				goto skip_tar;
			}
		}
	}

skip_tar:
	return max_pstate;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value) & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}

static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	return val;
}

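/*
 * Illustrative example (not driver code): core_get_val(cpu, 0x17)
 * returns 0x1700, the PERF_CTL image requesting ratio 0x17, i.e.
 * 2.3 GHz with the 100000 kHz scaling factor from core_get_scaling().
 * If turbo has been requested off in software while the platform still
 * allows it, bit 32 (turbo disengage) is set as well.
 */
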
static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static struct cpu_defaults silvermont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = silvermont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static struct cpu_defaults airmont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = airmont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static struct cpu_defaults bxt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits->no_turbo || limits->turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
	max_perf_adj = fp_toint(max_perf * limits->max_perf);
	*max = clamp_t(int, max_perf_adj,
		       cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(max_perf * limits->min_perf);
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}

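/*
 * Worked example for intel_pstate_get_min_max() (illustrative numbers
 * only): with min_pstate == 8, max_pstate == 24, turbo_pstate == 32,
 * turbo enabled and limits->max_perf == div_fp(75, 100) == 192,
 * max_perf_adj is fp_toint(32 * 192) == 24, clamped to [8, 32]. With
 * limits->min_perf == div_fp(25, 100) == 64, *min becomes
 * fp_toint(32 * 64) == 8.
 */
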
static inline void intel_pstate_record_pstate(struct cpudata *cpu, int pstate)
{
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	cpu->pstate.current_pstate = pstate;
}

static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
	int pstate = cpu->pstate.min_pstate;

	intel_pstate_record_pstate(cpu, pstate);
	/*
	 * Generally, there is no guarantee that this code will always run on
	 * the CPU being updated, so force the register update to run on the
	 * right CPU.
	 */
	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
		      pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	intel_pstate_set_min_pstate(cpu);
}

static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;

	sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
}

static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();
	if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = time;
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
	/*
	 * First time this function is invoked in a given cycle, all of the
	 * previous sample data fields are equal to zero or stale and they must
	 * be populated with meaningful numbers for things to work, so assume
	 * that sample.time will always be reset before setting the utilization
	 * update hook and make the caller skip the sample then.
	 */
	return !!cpu->last_sample_time;
}

static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->sample.core_avg_perf,
			  cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
}

static inline int32_t get_avg_pstate(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->pstate.max_pstate_physical,
			  cpu->sample.core_avg_perf);
}

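/*
 * Worked example for the helpers above (illustrative numbers only): if
 * aperf advanced by 18000 and mperf by 24000 between samples,
 * core_avg_perf is div_ext_fp(18000, 24000), i.e. 0.75 in EXT_FRAC_BITS
 * fixed point. With max_pstate_physical == 32 and scaling == 100000,
 * get_avg_frequency() yields mul_ext_fp(core_avg_perf, 3200000)
 * == 2400000 kHz and get_avg_pstate() yields 24.
 */
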
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	u64 cummulative_iowait, delta_iowait_us;
	u64 delta_iowait_mperf;
	u64 mperf, now;
	int32_t cpu_load;

	cummulative_iowait = get_cpu_iowait_time_us(cpu->cpu, &now);

	/*
	 * Convert iowait time into number of IO cycles spent at max_freq.
	 * IO is considered busy only for the cpu_load algorithm. For the
	 * performance algorithm this is not needed, since we always try to
	 * reach the maximum P-state, so we are already boosting the IOs.
	 */
	delta_iowait_us = cummulative_iowait - cpu->prev_cummulative_iowait;
	delta_iowait_mperf = div64_u64(delta_iowait_us * cpu->pstate.scaling *
		cpu->pstate.max_pstate, MSEC_PER_SEC);

	mperf = cpu->sample.mperf + delta_iowait_mperf;
	cpu->prev_cummulative_iowait = cummulative_iowait;

	/*
	 * The load can be estimated as the ratio of the mperf counter,
	 * which runs at a constant frequency only during active periods
	 * (C0), to the time stamp counter, which runs at the same frequency
	 * but also counts during C-states.
	 */
	cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc);
	cpu->sample.busy_scaled = cpu_load;

	return get_avg_pstate(cpu) - pid_calc(&cpu->pid, cpu_load);
}

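/*
 * Worked example for get_target_pstate_use_cpu_load() (illustrative
 * numbers only): with scaling == 100000, max_pstate == 24 and 2 ms of
 * iowait since the last sample, delta_iowait_mperf is
 * 2000 * 100000 * 24 / 1000 == 4800000 extra "busy" cycles credited to
 * mperf. If the adjusted mperf is then half of the TSC delta, cpu_load
 * is int_tofp(50) and the PID steps the target away from the average
 * P state accordingly.
 */
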
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
	int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
	u64 duration_ns;

	/*
	 * perf_scaled is the average performance during the last sampling
	 * period scaled by the ratio of the maximum P-state to the P-state
	 * requested last time (in percent). That measures the system's
	 * response to the previous P-state selection.
	 */
	max_pstate = cpu->pstate.max_pstate_physical;
	current_pstate = cpu->pstate.current_pstate;
	perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf,
			       div_fp(100 * max_pstate, current_pstate));

	/*
	 * Since our utilization update callback will not run unless we are
	 * in C0, check if the actual elapsed time is significantly greater (3x)
	 * than our sample interval.  If it is, then we were idle for a long
	 * enough period of time to adjust our performance metric.
	 */
	duration_ns = cpu->sample.time - cpu->last_sample_time;
	if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
		sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
		perf_scaled = mul_fp(perf_scaled, sample_ratio);
	} else {
		sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
		if (sample_ratio < int_tofp(1))
			perf_scaled = 0;
	}

	cpu->sample.busy_scaled = perf_scaled;
	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
}

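/*
 * Worked example for get_target_pstate_use_performance() (illustrative
 * numbers only): with max_pstate_physical == 32 and a previous request
 * of pstate 24, div_fp(100 * 32, 24) is about 133.3 percent, so a
 * core_avg_perf of 0.75 gives perf_scaled close to int_tofp(100), i.e.
 * the core delivered what was asked for. If instead the sample window
 * was more than three sample periods long, perf_scaled is scaled down
 * by sample_rate_ns / duration_ns to account for the idle time.
 */
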
static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	update_turbo_state();

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
	pstate = clamp_t(int, pstate, min_perf, max_perf);
	if (pstate == cpu->pstate.current_pstate)
		return;

	intel_pstate_record_pstate(cpu, pstate);
	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int from, target_pstate;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	target_pstate = pstate_funcs.get_target_pstate(cpu);

	intel_pstate_update_pstate(cpu, target_pstate);

	sample = &cpu->sample;
	trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
			    fp_toint(sample->busy_scaled),
			    from,
			    cpu->pstate.current_pstate,
			    sample->mperf,
			    sample->aperf,
			    sample->tsc,
			    get_avg_frequency(cpu));
}

static void intel_pstate_update_util(struct update_util_data *data, u64 time,
				     unsigned long util, unsigned long max)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	u64 delta_ns = time - cpu->sample.time;

	if ((s64)delta_ns >= pid_params.sample_rate_ns) {
		bool sample_taken = intel_pstate_sample(cpu, time);

		if (sample_taken) {
			intel_pstate_calc_avg_perf(cpu);
			if (!hwp_active)
				intel_pstate_adjust_busy_pstate(cpu);
		}
	}
}

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(INTEL_FAM6_SANDYBRIDGE,		core_params),
	ICPU(INTEL_FAM6_SANDYBRIDGE_X,		core_params),
	ICPU(INTEL_FAM6_ATOM_SILVERMONT1,	silvermont_params),
	ICPU(INTEL_FAM6_IVYBRIDGE,		core_params),
	ICPU(INTEL_FAM6_HASWELL_CORE,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_CORE,		core_params),
	ICPU(INTEL_FAM6_IVYBRIDGE_X,		core_params),
	ICPU(INTEL_FAM6_HASWELL_X,		core_params),
	ICPU(INTEL_FAM6_HASWELL_ULT,		core_params),
	ICPU(INTEL_FAM6_HASWELL_GT3E,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_GT3E,		core_params),
	ICPU(INTEL_FAM6_ATOM_AIRMONT,		airmont_params),
	ICPU(INTEL_FAM6_SKYLAKE_MOBILE,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_X,		core_params),
	ICPU(INTEL_FAM6_SKYLAKE_DESKTOP,	core_params),
	ICPU(INTEL_FAM6_BROADWELL_XEON_D,	core_params),
	ICPU(INTEL_FAM6_XEON_PHI_KNL,		knl_params),
	ICPU(INTEL_FAM6_ATOM_GOLDMONT,		bxt_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
	ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	if (!all_cpu_data[cpunum])
		all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
					       GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active) {
		intel_pstate_hwp_enable(cpu);
		pid_params.sample_rate_ms = 50;
		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
	}

	intel_pstate_get_cpu_pstates(cpu);

	intel_pstate_busy_pid_reset(cpu);

	pr_debug("controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	return cpu ? get_avg_frequency(cpu) : 0;
}

static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	/* Prevent intel_pstate_update_util() from using stale data. */
	cpu->sample.time = 0;
	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
				     intel_pstate_update_util);
	cpu->update_util_set = true;
}

static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
	struct cpudata *cpu_data = all_cpu_data[cpu];

	if (!cpu_data->update_util_set)
		return;

	cpufreq_remove_update_util_hook(cpu);
	cpu_data->update_util_set = false;
	synchronize_sched();
}

static void intel_pstate_set_performance_limits(struct perf_limits *limits)
{
	limits->no_turbo = 0;
	limits->turbo_disabled = 0;
	limits->max_perf_pct = 100;
	limits->max_perf = int_tofp(1);
	limits->min_perf_pct = 100;
	limits->min_perf = int_tofp(1);
	limits->max_policy_pct = 100;
	limits->max_sysfs_pct = 100;
	limits->min_policy_pct = 0;
	limits->min_sysfs_pct = 0;
}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	intel_pstate_clear_update_util_hook(policy->cpu);

	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
		 policy->cpuinfo.max_freq, policy->max);

	cpu = all_cpu_data[0];
	if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
	    policy->max < policy->cpuinfo.max_freq &&
	    policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
		pr_debug("policy->max > max non turbo frequency\n");
		policy->max = policy->cpuinfo.max_freq;
	}

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
		limits = &performance_limits;
		if (policy->max >= policy->cpuinfo.max_freq) {
			pr_debug("set performance\n");
			intel_pstate_set_performance_limits(limits);
			goto out;
		}
	} else {
		pr_debug("set powersave\n");
		limits = &powersave_limits;
	}

	limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0, 100);
	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
					      policy->cpuinfo.max_freq);
	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

	limits->min_perf = div_fp(limits->min_perf_pct, 100);
	limits->max_perf = div_fp(limits->max_perf_pct, 100);
	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);

 out:
	intel_pstate_set_update_util_hook(policy->cpu);

	intel_pstate_hwp_set_policy(policy);

	return 0;
}

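/*
 * Worked example for the limit normalization above (illustrative
 * numbers only): with cpuinfo.max_freq == 3200000 and a policy of
 * min 800000 / max 2400000, min_policy_pct becomes 25 and
 * max_policy_pct becomes DIV_ROUND_UP(240000000, 3200000) == 75. A
 * sysfs min_perf_pct of 40 then yields an effective range of
 * 40..75 percent before the fixed point min_perf/max_perf values are
 * recomputed.
 */
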
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_debug("CPU %d exiting\n", cpu_num);

	intel_pstate_clear_update_util_hook(cpu_num);

	if (hwp_active)
		return;

	intel_pstate_set_min_pstate(cpu);
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	update_turbo_state();
	policy->cpuinfo.max_freq = limits->turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	policy->cpuinfo.max_freq *= cpu->pstate.scaling;

	intel_pstate_init_acpi_perf_limits(policy);
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	intel_pstate_exit_perf_limits(policy);

	return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.resume		= intel_pstate_hwp_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};

static int __initdata no_load;
static int __initdata no_hwp;
static int __initdata hwp_only;
static unsigned int force_load;

static int intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.get_val = funcs->get_val;
	pstate_funcs.get_vid = funcs->get_vid;
	pstate_funcs.get_target_pstate = funcs->get_target_pstate;
}

#ifdef CONFIG_ACPI

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int  oem_pwr_table;
};

/* Hardware vendor and model combinations with their own power management */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};

static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
			     ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					(!force_load);
			}
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
	{}
};

static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;

	if (no_load)
		return -ENODEV;

	if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
		copy_cpu_funcs(&core_params.funcs);
		hwp_active++;
		goto hwp_cpu_matched;
	}

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

hwp_cpu_matched:
	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	pr_info("Intel P-state driver initializing\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (!hwp_active && hwp_only)
		goto out;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	if (hwp_active)
		pr_info("HWP enabled\n");

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			intel_pstate_clear_update_util_hook(cpu);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	if (!strcmp(str, "no_hwp")) {
		pr_info("HWP disabled\n");
		no_hwp = 1;
	}
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;

#ifdef CONFIG_ACPI
	if (!strcmp(str, "support_acpi_ppc"))
		acpi_ppc = true;
#endif

	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");