/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>

#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#endif

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
        return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
        return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
        int mask, ret;

        ret = fp_toint(x);
        mask = (1 << FRAC_BITS) - 1;
        if (x & mask)
                ret += 1;
        return ret;
}

static inline u64 mul_ext_fp(u64 x, u64 y)
{
        return (x * y) >> EXT_FRAC_BITS;
}

static inline u64 div_ext_fp(u64 x, u64 y)
{
        return div64_u64(x << EXT_FRAC_BITS, y);
}

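/*
 * Worked example (hypothetical numbers): with FRAC_BITS == 8 the
 * helpers above implement signed 24.8 fixed point, so int_tofp(1) is
 * 256 and each fraction unit is 1/256.  For instance:
 *
 *	mul_fp(int_tofp(3), div_fp(1, 2)) == (768 * 128) >> 8 == 384
 *
 * which is 1.5 in 24.8 format.  The "ext" variants shift by
 * EXT_FRAC_BITS == 14 instead, trading range for extra precision when
 * averaging APERF/MPERF deltas below.
 */
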
/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate next
 *			P state.  This can differ from core_avg_perf
 *			to account for cpu idle periods.
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing next P State.
 */
struct sample {
        int32_t core_avg_perf;
        int32_t busy_scaled;
        u64 aperf;
        u64 mperf;
        u64 tsc;
        u64 time;
};

/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical: Maximum physical P state for the processor.
 *			This can be higher than max_pstate, which may be
 *			limited by the platform's thermal design power.
 * @scaling:		Scaling factor to convert frequency to cpufreq
 *			frequency units
 * @turbo_pstate:	Max Turbo P state possible for this platform
 *
 * Stores the per cpu model P state limits and current P state.
 */
struct pstate_data {
        int current_pstate;
        int min_pstate;
        int max_pstate;
        int max_pstate_physical;
        int scaling;
        int turbo_pstate;
};

/**
 * struct vid_data - Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P state
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - min P state)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
 * This data is used on Atom platforms, where in addition to the target P
 * state, voltage data must be specified to select the next P state.
 */
struct vid_data {
        int min;
        int max;
        int turbo;
        int32_t ratio;
};

/**
 * struct _pid - Stores PID data
 * @setpoint:		Target set point for busyness or performance
 * @integral:		Storage for accumulated error values
 * @p_gain:		PID proportional gain
 * @i_gain:		PID integral gain
 * @d_gain:		PID derivative gain
 * @deadband:		PID deadband
 * @last_err:		Last error storage for integral part of PID calculation
 *
 * Stores PID coefficients and last error for the PID controller.
 */
struct _pid {
        int setpoint;
        int32_t integral;
        int32_t p_gain;
        int32_t i_gain;
        int32_t d_gain;
        int deadband;
        int32_t last_err;
};

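/*
 * Illustration (hypothetical numbers): the PID gains are stored in
 * the same 24.8 fixed-point format as above.  pid_p_gain_set(pid, 20)
 * below, for example, stores div_fp(20, 100) == 51, i.e. roughly
 * 0.199 for a proportional gain of 20 percent; the setpoint and
 * deadband are kept as int_tofp() values for the same reason.
 */
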
/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @iowait_boost:	iowait-related boost fraction
 * @last_update:	Time of the last update.
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @pid:		Stores PID parameters for this CPU
 * @last_sample_time:	Time of the last sample
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: Cumulative IO wait time read at the last
 *			sample, used to compute the IO wait time difference
 * @sample:		Storage for storing last Sample data
 * @acpi_perf_data:	Stores ACPI perf information read from _PSS
 * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
 *
 * This structure stores per CPU instance data for all CPUs.
 */
struct cpudata {
        int cpu;

        struct update_util_data update_util;
        bool update_util_set;

        struct pstate_data pstate;
        struct vid_data vid;
        struct _pid pid;

        u64 last_update;
        u64 last_sample_time;
        u64 prev_aperf;
        u64 prev_mperf;
        u64 prev_tsc;
        u64 prev_cummulative_iowait;
        struct sample sample;
#ifdef CONFIG_ACPI
        struct acpi_processor_performance acpi_perf_data;
        bool valid_pss_table;
#endif
        unsigned int iowait_boost;
};

static struct cpudata **all_cpu_data;

/**
 * struct pstate_adjust_policy - Stores static PID configuration data
 * @sample_rate_ms:	PID calculation sample rate in ms
 * @sample_rate_ns:	Sample rate in ns, derived from @sample_rate_ms
 * @deadband:		PID deadband
 * @setpoint:		PID Setpoint
 * @p_gain_pct:		PID proportional gain
 * @i_gain_pct:		PID integral gain
 * @d_gain_pct:		PID derivative gain
 * @boost_iowait:	Whether or not to use iowait boosting.
 *
 * Stores per CPU model static PID configuration data.
 */
struct pstate_adjust_policy {
        int sample_rate_ms;
        s64 sample_rate_ns;
        int deadband;
        int setpoint;
        int p_gain_pct;
        int d_gain_pct;
        int i_gain_pct;
        bool boost_iowait;
};

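/*
 * For illustration, using the values from core_params below:
 * sample_rate_ms == 10 means the PID is re-evaluated at most once
 * every 10 ms, and setpoint == 97 steers the scaled busyness metric
 * toward 97 percent, so the controller only backs off the P state
 * once the core is measurably less than fully busy.
 */
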
/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non turbo effective P state
 * @get_max_physical:	Callback to get maximum non turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 * @get_target_pstate:	Callback to a function to calculate next P state to use
 *
 * Core and Atom CPU models have different ways to get P state limits.  This
 * structure is used to store those callbacks.
 */
struct pstate_funcs {
        int (*get_max)(void);
        int (*get_max_physical)(void);
        int (*get_min)(void);
        int (*get_turbo)(void);
        int (*get_scaling)(void);
        u64 (*get_val)(struct cpudata*, int pstate);
        void (*get_vid)(struct cpudata *);
        int32_t (*get_target_pstate)(struct cpudata *);
};

/**
 * struct cpu_defaults - Per CPU model default config data
 * @pid_policy:	PID config data
 * @funcs:	Callback function data
 */
struct cpu_defaults {
        struct pstate_adjust_policy pid_policy;
        struct pstate_funcs funcs;
};

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);

static struct pstate_adjust_policy pid_params __read_mostly;
static struct pstate_funcs pstate_funcs __read_mostly;
static int hwp_active __read_mostly;

#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif

/**
 * struct perf_limits - Store user and policy limits
 * @no_turbo:		User requested turbo state from intel_pstate sysfs
 * @turbo_disabled:	Platform turbo status either from msr
 *			MSR_IA32_MISC_ENABLE or when maximum available pstate
 *			matches the maximum turbo pstate
 * @max_perf_pct:	Effective maximum performance limit in percentage; this
 *			is the minimum of the limit enforced by the cpufreq
 *			policy and the user limit set via intel_pstate sysfs
 * @min_perf_pct:	Effective minimum performance limit in percentage; this
 *			is the maximum of the limit enforced by the cpufreq
 *			policy and the user limit set via intel_pstate sysfs
 * @max_perf:		max_perf_pct as a scaled fixed-point fraction (up to
 *			int_tofp(1), i.e. 256); used to limit the max pstate
 * @min_perf:		min_perf_pct as a scaled fixed-point fraction (up to
 *			int_tofp(1), i.e. 256); used to limit the min pstate
 * @max_policy_pct:	The maximum performance in percentage enforced by
 *			cpufreq setpolicy interface
 * @max_sysfs_pct:	The maximum performance in percentage enforced by
 *			intel pstate sysfs interface
 * @min_policy_pct:	The minimum performance in percentage enforced by
 *			cpufreq setpolicy interface
 * @min_sysfs_pct:	The minimum performance in percentage enforced by
 *			intel pstate sysfs interface
 *
 * Storage for user and policy defined limits.
 */
struct perf_limits {
        int no_turbo;
        int turbo_disabled;
        int max_perf_pct;
        int min_perf_pct;
        int32_t max_perf;
        int32_t min_perf;
        int max_policy_pct;
        int max_sysfs_pct;
        int min_policy_pct;
        int min_sysfs_pct;
};

static struct perf_limits performance_limits = {
        .no_turbo = 0,
        .turbo_disabled = 0,
        .max_perf_pct = 100,
        .max_perf = int_tofp(1),
        .min_perf_pct = 100,
        .min_perf = int_tofp(1),
        .max_policy_pct = 100,
        .max_sysfs_pct = 100,
        .min_policy_pct = 0,
        .min_sysfs_pct = 0,
};

static struct perf_limits powersave_limits = {
        .no_turbo = 0,
        .turbo_disabled = 0,
        .max_perf_pct = 100,
        .max_perf = int_tofp(1),
        .min_perf_pct = 0,
        .min_perf = 0,
        .max_policy_pct = 100,
        .max_sysfs_pct = 100,
        .min_policy_pct = 0,
        .min_sysfs_pct = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif

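/*
 * Illustration of the scaled fields above (hypothetical numbers):
 * with max_perf_pct == 75, max_perf == div_fp(75, 100) == 192, i.e.
 * 0.75 in 24.8 fixed point.  Multiplying a pstate by this value and
 * taking fp_toint() of the result yields the capped pstate, e.g.
 * fp_toint(32 * 192) == 24 (see intel_pstate_get_min_max() below).
 */
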
#ifdef CONFIG_ACPI

static bool intel_pstate_get_ppc_enable_status(void)
{
        if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
            acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
                return true;

        return acpi_ppc;
}

static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
        struct cpudata *cpu;
        int ret;
        int i;

        if (hwp_active)
                return;

        if (!intel_pstate_get_ppc_enable_status())
                return;

        cpu = all_cpu_data[policy->cpu];

        ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
                                                  policy->cpu);
        if (ret)
                return;

        /*
         * Check if the control value in _PSS is for PERF_CTL MSR, which should
         * guarantee that the states returned by it map to the states in our
         * list directly.
         */
        if (cpu->acpi_perf_data.control_register.space_id !=
                                                ACPI_ADR_SPACE_FIXED_HARDWARE)
                goto err;

        /*
         * If there is only one entry in _PSS, simply ignore _PSS and continue
         * as usual without taking it into account.
         */
        if (cpu->acpi_perf_data.state_count < 2)
                goto err;

        pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
        for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
                pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
                         (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
                         (u32) cpu->acpi_perf_data.states[i].core_frequency,
                         (u32) cpu->acpi_perf_data.states[i].power,
                         (u32) cpu->acpi_perf_data.states[i].control);
        }

        /*
         * The _PSS table doesn't contain the whole turbo frequency range.
         * It just contains +1 MHz above the max non-turbo frequency, with a
         * control value corresponding to the max turbo ratio.  But when
         * cpufreq set policy is called, it will call with this max frequency,
         * which will cause reduced performance, as this driver uses the real
         * max turbo frequency as the max frequency.  So correct this
         * frequency in the _PSS table to the max turbo frequency based on
         * the turbo state.  Also convert from kHz to MHz, as the _PSS
         * frequencies are in MHz.
         */
        if (!limits->turbo_disabled)
                cpu->acpi_perf_data.states[0].core_frequency =
                                        policy->cpuinfo.max_freq / 1000;
        cpu->valid_pss_table = true;
        pr_debug("_PPC limits will be enforced\n");

        return;

 err:
        cpu->valid_pss_table = false;
        acpi_processor_unregister_performance(policy->cpu);
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
        struct cpudata *cpu;

        cpu = all_cpu_data[policy->cpu];
        if (!cpu->valid_pss_table)
                return;

        acpi_processor_unregister_performance(policy->cpu);
}

#else
static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}
#endif

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
                             int deadband, int integral)
{
        pid->setpoint = int_tofp(setpoint);
        pid->deadband = int_tofp(deadband);
        pid->integral = int_tofp(integral);
        pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
        pid->p_gain = div_fp(percent, 100);
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
        pid->i_gain = div_fp(percent, 100);
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
        pid->d_gain = div_fp(percent, 100);
}

static signed int pid_calc(struct _pid *pid, int32_t busy)
{
        signed int result;
        int32_t pterm, dterm, fp_error;
        int32_t integral_limit;

        fp_error = pid->setpoint - busy;

        if (abs(fp_error) <= pid->deadband)
                return 0;

        pterm = mul_fp(pid->p_gain, fp_error);

        pid->integral += fp_error;

        /*
         * We limit the integral here so that it will never
         * get higher than 30.  This prevents it from becoming
         * too large an input over long periods of time and allows
         * it to get factored out sooner.
         *
         * The value of 30 was chosen through experimentation.
         */
        integral_limit = int_tofp(30);
        if (pid->integral > integral_limit)
                pid->integral = integral_limit;
        if (pid->integral < -integral_limit)
                pid->integral = -integral_limit;

        dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
        pid->last_err = fp_error;

        result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
        result = result + (1 << (FRAC_BITS-1));
        return (signed int)fp_toint(result);
}

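/*
 * Worked example for pid_calc() (hypothetical numbers, using the
 * core_params gains below: p_gain_pct == 20, i_gain_pct == d_gain_pct
 * == 0, setpoint == 97): for a scaled busyness of 92,
 *
 *	fp_error = int_tofp(97) - int_tofp(92) == 1280	(5.0)
 *	pterm    = mul_fp(51, 1280) == 255		(~1.0)
 *
 * so after adding half a unit for rounding the function returns 1 and
 * the caller (get_target_pstate_use_performance()) lowers the
 * requested P state by one step.
 */
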
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
        pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
        pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
        pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

        pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
        unsigned int cpu;

        for_each_online_cpu(cpu) {
                if (all_cpu_data[cpu])
                        intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
        }
}

static inline void update_turbo_state(void)
{
        u64 misc_en;
        struct cpudata *cpu;

        cpu = all_cpu_data[0];
        rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
        limits->turbo_disabled =
                (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
                 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static void intel_pstate_hwp_set(const struct cpumask *cpumask)
{
        int min, hw_min, max, hw_max, cpu, range, adj_range;
        u64 value, cap;

        for_each_cpu(cpu, cpumask) {
                rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
                hw_min = HWP_LOWEST_PERF(cap);
                hw_max = HWP_HIGHEST_PERF(cap);
                range = hw_max - hw_min;

                rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
                adj_range = limits->min_perf_pct * range / 100;
                min = hw_min + adj_range;
                value &= ~HWP_MIN_PERF(~0L);
                value |= HWP_MIN_PERF(min);

                adj_range = limits->max_perf_pct * range / 100;
                max = hw_min + adj_range;
                if (limits->no_turbo) {
                        hw_max = HWP_GUARANTEED_PERF(cap);
                        if (hw_max < max)
                                max = hw_max;
                }

                value &= ~HWP_MAX_PERF(~0L);
                value |= HWP_MAX_PERF(max);
                wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
        }
}

static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
{
        if (hwp_active)
                intel_pstate_hwp_set(policy->cpus);

        return 0;
}

static void intel_pstate_hwp_set_online_cpus(void)
{
        get_online_cpus();
        intel_pstate_hwp_set(cpu_online_mask);
        put_online_cpus();
}

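/*
 * Illustration of the percent-to-HWP mapping above (hypothetical
 * numbers): with HWP_LOWEST_PERF(cap) == 8 and HWP_HIGHEST_PERF(cap)
 * == 36, range == 28, so min_perf_pct == 50 yields
 *
 *	min = 8 + (50 * 28) / 100 == 22
 *
 * i.e. the user percentages span the interval between the lowest and
 * highest hardware performance levels, not absolute pstate numbers.
 */
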
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
        *(u32 *)data = val;
        intel_pstate_reset_all_pid();
        return 0;
}

static int pid_param_get(void *data, u64 *val)
{
        *val = *(u32 *)data;
        return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
        char *name;
        void *value;
};

static struct pid_param pid_files[] = {
        {"sample_rate_ms", &pid_params.sample_rate_ms},
        {"d_gain_pct", &pid_params.d_gain_pct},
        {"i_gain_pct", &pid_params.i_gain_pct},
        {"deadband", &pid_params.deadband},
        {"setpoint", &pid_params.setpoint},
        {"p_gain_pct", &pid_params.p_gain_pct},
        {NULL, NULL}
};

static void __init intel_pstate_debug_expose_params(void)
{
        struct dentry *debugfs_parent;
        int i = 0;

        if (hwp_active)
                return;
        debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
        if (IS_ERR_OR_NULL(debugfs_parent))
                return;
        while (pid_files[i].name) {
                debugfs_create_file(pid_files[i].name, 0660,
                                    debugfs_parent, pid_files[i].value,
                                    &fops_pid_param);
                i++;
        }
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits->object);		\
	}

static ssize_t show_turbo_pct(struct kobject *kobj,
                              struct attribute *attr, char *buf)
{
        struct cpudata *cpu;
        int total, no_turbo, turbo_pct;
        uint32_t turbo_fp;

        cpu = all_cpu_data[0];

        total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
        no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
        turbo_fp = div_fp(no_turbo, total);
        turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
        return sprintf(buf, "%u\n", turbo_pct);
}

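/*
 * Illustration (hypothetical numbers): with min_pstate == 8,
 * max_pstate == 32 and turbo_pstate == 36 there are 29 pstates in
 * total, 25 of them non-turbo, so turbo_fp == div_fp(25, 29) (~0.86)
 * and turbo_pct == 100 - 85 == 15, i.e. about 15 percent of the
 * available pstates are turbo states.
 */
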
static ssize_t show_num_pstates(struct kobject *kobj,
                                struct attribute *attr, char *buf)
{
        struct cpudata *cpu;
        int total;

        cpu = all_cpu_data[0];
        total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
        return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
                             struct attribute *attr, char *buf)
{
        ssize_t ret;

        update_turbo_state();
        if (limits->turbo_disabled)
                ret = sprintf(buf, "%u\n", limits->turbo_disabled);
        else
                ret = sprintf(buf, "%u\n", limits->no_turbo);

        return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
                              const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        update_turbo_state();
        if (limits->turbo_disabled) {
                pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
                return -EPERM;
        }

        limits->no_turbo = clamp_t(int, input, 0, 1);

        if (hwp_active)
                intel_pstate_hwp_set_online_cpus();

        return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
                                  const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        limits->max_sysfs_pct = clamp_t(int, input, 0, 100);
        limits->max_perf_pct = min(limits->max_policy_pct,
                                   limits->max_sysfs_pct);
        limits->max_perf_pct = max(limits->min_policy_pct,
                                   limits->max_perf_pct);
        limits->max_perf_pct = max(limits->min_perf_pct,
                                   limits->max_perf_pct);
        limits->max_perf = div_fp(limits->max_perf_pct, 100);

        if (hwp_active)
                intel_pstate_hwp_set_online_cpus();
        return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
                                  const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        limits->min_sysfs_pct = clamp_t(int, input, 0, 100);
        limits->min_perf_pct = max(limits->min_policy_pct,
                                   limits->min_sysfs_pct);
        limits->min_perf_pct = min(limits->max_policy_pct,
                                   limits->min_perf_pct);
        limits->min_perf_pct = min(limits->max_perf_pct,
                                   limits->min_perf_pct);
        limits->min_perf = div_fp(limits->min_perf_pct, 100);

        if (hwp_active)
                intel_pstate_hwp_set_online_cpus();
        return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
        &no_turbo.attr,
        &max_perf_pct.attr,
        &min_perf_pct.attr,
        &turbo_pct.attr,
        &num_pstates.attr,
        NULL
};

static struct attribute_group intel_pstate_attr_group = {
        .attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
        struct kobject *intel_pstate_kobject;
        int rc;

        intel_pstate_kobject = kobject_create_and_add("intel_pstate",
                                                &cpu_subsys.dev_root->kobj);
        BUG_ON(!intel_pstate_kobject);
        rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
        BUG_ON(rc);
}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
        /* First disable HWP notification interrupt as we don't process them */
        if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
                wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

        wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}

static int atom_get_min_pstate(void)
{
        u64 value;

        rdmsrl(ATOM_RATIOS, value);
        return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
        u64 value;

        rdmsrl(ATOM_RATIOS, value);
        return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
        u64 value;

        rdmsrl(ATOM_TURBO_RATIOS, value);
        return value & 0x7F;
}

static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
        u64 val;
        int32_t vid_fp;
        u32 vid;

        val = (u64)pstate << 8;
        if (limits->no_turbo && !limits->turbo_disabled)
                val |= (u64)1 << 32;

        vid_fp = cpudata->vid.min + mul_fp(
                int_tofp(pstate - cpudata->pstate.min_pstate),
                cpudata->vid.ratio);

        vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
        vid = ceiling_fp(vid_fp);

        if (pstate > cpudata->pstate.max_pstate)
                vid = cpudata->vid.turbo;

        return val | vid;
}

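/*
 * Illustration of the VID interpolation above (hypothetical numbers):
 * the voltage ID scales linearly between vid.min and vid.max.  With
 * vid.min == int_tofp(20), vid.max == int_tofp(40), min_pstate == 8
 * and max_pstate == 28, atom_get_vid() below computes a ratio of
 * int_tofp(1), so requesting pstate 18 gives vid_fp == int_tofp(30)
 * and a VID of 30; turbo pstates use the dedicated vid.turbo value
 * instead.
 */
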
static int silvermont_get_scaling(void)
{
        u64 value;
        int i;
        /* Defined in Table 35-6 of the SDM (Sept 2015) */
        static int silvermont_freq_table[] = {
                83300, 100000, 133300, 116700, 80000};

        rdmsrl(MSR_FSB_FREQ, value);
        i = value & 0x7;
        WARN_ON(i > 4);

        return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
        u64 value;
        int i;
        /* Defined in Table 35-10 of the SDM (Sept 2015) */
        static int airmont_freq_table[] = {
                83300, 100000, 133300, 116700, 80000,
                93300, 90000, 88900, 87500};

        rdmsrl(MSR_FSB_FREQ, value);
        i = value & 0xF;
        WARN_ON(i > 8);

        return airmont_freq_table[i];
}

static void atom_get_vid(struct cpudata *cpudata)
{
        u64 value;

        rdmsrl(ATOM_VIDS, value);
        cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
        cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
        cpudata->vid.ratio = div_fp(
                cpudata->vid.max - cpudata->vid.min,
                int_tofp(cpudata->pstate.max_pstate -
                         cpudata->pstate.min_pstate));

        rdmsrl(ATOM_TURBO_VIDS, value);
        cpudata->vid.turbo = value & 0x7f;
}

static int core_get_min_pstate(void)
{
        u64 value;

        rdmsrl(MSR_PLATFORM_INFO, value);
        return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
        u64 value;

        rdmsrl(MSR_PLATFORM_INFO, value);
        return (value >> 8) & 0xFF;
}

static int core_get_max_pstate(void)
{
        u64 tar;
        u64 plat_info;
        int max_pstate;
        int err;

        rdmsrl(MSR_PLATFORM_INFO, plat_info);
        max_pstate = (plat_info >> 8) & 0xFF;

        err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
        if (!err) {
                /* Do some sanity checking for safety */
                if (plat_info & 0x600000000) {
                        u64 tdp_ctrl;
                        u64 tdp_ratio;
                        int tdp_msr;

                        err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
                        if (err)
                                goto skip_tar;

                        tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
                        err = rdmsrl_safe(tdp_msr, &tdp_ratio);
                        if (err)
                                goto skip_tar;

                        /* For level 1 and 2, bits[23:16] contain the ratio */
                        if (tdp_ctrl)
                                tdp_ratio >>= 16;

                        tdp_ratio &= 0xff; /* ratios are only 8 bits long */
                        if (tdp_ratio - 1 == tar) {
                                max_pstate = tar;
                                pr_debug("max_pstate=TAC %x\n", max_pstate);
                        } else {
                                goto skip_tar;
                        }
                }
        }

skip_tar:
        return max_pstate;
}

static int core_get_turbo_pstate(void)
{
        u64 value;
        int nont, ret;

        rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
        nont = core_get_max_pstate();
        ret = (value) & 255;
        if (ret <= nont)
                ret = nont;
        return ret;
}

static inline int core_get_scaling(void)
{
        return 100000;
}

static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
        u64 val;

        val = (u64)pstate << 8;
        if (limits->no_turbo && !limits->turbo_disabled)
                val |= (u64)1 << 32;

        return val;
}

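/*
 * Illustration of the PERF_CTL encoding built by core_get_val()
 * (hypothetical numbers): the requested ratio lives in bits 15:8 and
 * bit 32 is the turbo disengage bit, so for pstate 28 (0x1C) with
 * turbo administratively off but not disabled by the platform, the
 * value written to MSR_IA32_PERF_CTL is
 *
 *	((u64)0x1C << 8) | ((u64)1 << 32) == 0x100001C00
 */
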
static int knl_get_turbo_pstate(void)
{
        u64 value;
        int nont, ret;

        rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
        nont = core_get_max_pstate();
        ret = (((value) >> 8) & 0xFF);
        if (ret <= nont)
                ret = nont;
        return ret;
}

static struct cpu_defaults core_params = {
        .pid_policy = {
                .sample_rate_ms = 10,
                .deadband = 0,
                .setpoint = 97,
                .p_gain_pct = 20,
                .d_gain_pct = 0,
                .i_gain_pct = 0,
        },
        .funcs = {
                .get_max = core_get_max_pstate,
                .get_max_physical = core_get_max_pstate_physical,
                .get_min = core_get_min_pstate,
                .get_turbo = core_get_turbo_pstate,
                .get_scaling = core_get_scaling,
                .get_val = core_get_val,
                .get_target_pstate = get_target_pstate_use_performance,
        },
};

static const struct cpu_defaults silvermont_params = {
        .pid_policy = {
                .sample_rate_ms = 10,
                .deadband = 0,
                .setpoint = 60,
                .p_gain_pct = 14,
                .d_gain_pct = 0,
                .i_gain_pct = 4,
                .boost_iowait = true,
        },
        .funcs = {
                .get_max = atom_get_max_pstate,
                .get_max_physical = atom_get_max_pstate,
                .get_min = atom_get_min_pstate,
                .get_turbo = atom_get_turbo_pstate,
                .get_val = atom_get_val,
                .get_scaling = silvermont_get_scaling,
                .get_vid = atom_get_vid,
                .get_target_pstate = get_target_pstate_use_cpu_load,
        },
};

static const struct cpu_defaults airmont_params = {
        .pid_policy = {
                .sample_rate_ms = 10,
                .deadband = 0,
                .setpoint = 60,
                .p_gain_pct = 14,
                .d_gain_pct = 0,
                .i_gain_pct = 4,
                .boost_iowait = true,
        },
        .funcs = {
                .get_max = atom_get_max_pstate,
                .get_max_physical = atom_get_max_pstate,
                .get_min = atom_get_min_pstate,
                .get_turbo = atom_get_turbo_pstate,
                .get_val = atom_get_val,
                .get_scaling = airmont_get_scaling,
                .get_vid = atom_get_vid,
                .get_target_pstate = get_target_pstate_use_cpu_load,
        },
};

static const struct cpu_defaults knl_params = {
        .pid_policy = {
                .sample_rate_ms = 10,
                .deadband = 0,
                .setpoint = 97,
                .p_gain_pct = 20,
                .d_gain_pct = 0,
                .i_gain_pct = 0,
        },
        .funcs = {
                .get_max = core_get_max_pstate,
                .get_max_physical = core_get_max_pstate_physical,
                .get_min = core_get_min_pstate,
                .get_turbo = knl_get_turbo_pstate,
                .get_scaling = core_get_scaling,
                .get_val = core_get_val,
                .get_target_pstate = get_target_pstate_use_performance,
        },
};

static const struct cpu_defaults bxt_params = {
        .pid_policy = {
                .sample_rate_ms = 10,
                .deadband = 0,
                .setpoint = 60,
                .p_gain_pct = 14,
                .d_gain_pct = 0,
                .i_gain_pct = 4,
                .boost_iowait = true,
        },
        .funcs = {
                .get_max = core_get_max_pstate,
                .get_max_physical = core_get_max_pstate_physical,
                .get_min = core_get_min_pstate,
                .get_turbo = core_get_turbo_pstate,
                .get_scaling = core_get_scaling,
                .get_val = core_get_val,
                .get_target_pstate = get_target_pstate_use_cpu_load,
        },
};

static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
        int max_perf = cpu->pstate.turbo_pstate;
        int max_perf_adj;
        int min_perf;

        if (limits->no_turbo || limits->turbo_disabled)
                max_perf = cpu->pstate.max_pstate;

        /*
         * Performance can be limited by the user through sysfs, by the
         * cpufreq policy, or by cpu specific default values determined
         * through experimentation.
         */
        max_perf_adj = fp_toint(max_perf * limits->max_perf);
        *max = clamp_t(int, max_perf_adj,
                       cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

        min_perf = fp_toint(max_perf * limits->min_perf);
        *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}

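/*
 * Worked example for intel_pstate_get_min_max() (hypothetical
 * numbers): with min_pstate == 8, turbo_pstate == 36, turbo enabled,
 * limits->max_perf == 192 (75%) and limits->min_perf == 64 (25%):
 *
 *	max_perf_adj = fp_toint(36 * 192) == 27
 *	min_perf     = fp_toint(36 * 64)  == 9
 *
 * both already inside [min_pstate, turbo_pstate], so *max == 27 and
 * *min == 9.
 */
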
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
        trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
        cpu->pstate.current_pstate = pstate;
        /*
         * Generally, there is no guarantee that this code will always run on
         * the CPU being updated, so force the register update to run on the
         * right CPU.
         */
        wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
                      pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
        intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static void intel_pstate_max_within_limits(struct cpudata *cpu)
{
        int min_pstate, max_pstate;

        update_turbo_state();
        intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
        intel_pstate_set_pstate(cpu, max_pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
        cpu->pstate.min_pstate = pstate_funcs.get_min();
        cpu->pstate.max_pstate = pstate_funcs.get_max();
        cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
        cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
        cpu->pstate.scaling = pstate_funcs.get_scaling();

        if (pstate_funcs.get_vid)
                pstate_funcs.get_vid(cpu);

        intel_pstate_set_min_pstate(cpu);
}

static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
{
        struct sample *sample = &cpu->sample;

        sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
}

static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{
        u64 aperf, mperf;
        unsigned long flags;
        u64 tsc;

        local_irq_save(flags);
        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);
        tsc = rdtsc();
        if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
                local_irq_restore(flags);
                return false;
        }
        local_irq_restore(flags);

        cpu->last_sample_time = cpu->sample.time;
        cpu->sample.time = time;
        cpu->sample.aperf = aperf;
        cpu->sample.mperf = mperf;
        cpu->sample.tsc = tsc;
        cpu->sample.aperf -= cpu->prev_aperf;
        cpu->sample.mperf -= cpu->prev_mperf;
        cpu->sample.tsc -= cpu->prev_tsc;

        cpu->prev_aperf = aperf;
        cpu->prev_mperf = mperf;
        cpu->prev_tsc = tsc;
        /*
         * First time this function is invoked in a given cycle, all of the
         * previous sample data fields are equal to zero or stale and they must
         * be populated with meaningful numbers for things to work, so assume
         * that sample.time will always be reset before setting the utilization
         * update hook and make the caller skip the sample then.
         */
        return !!cpu->last_sample_time;
}

static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
        return mul_ext_fp(cpu->sample.core_avg_perf,
                          cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
}

static inline int32_t get_avg_pstate(struct cpudata *cpu)
{
        return mul_ext_fp(cpu->pstate.max_pstate_physical,
                          cpu->sample.core_avg_perf);
}

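/*
 * Illustration (hypothetical numbers): core_avg_perf is the
 * APERF/MPERF delta ratio in "ext" fixed point, so a core whose
 * effective clock averaged 80 percent of the reference clock has
 * core_avg_perf == div_ext_fp(4, 5) == 13107.  With
 * max_pstate_physical == 32 and scaling == 100000, get_avg_frequency()
 * reports mul_ext_fp(13107, 3200000), which is roughly 2560000 kHz,
 * i.e. about 2.56 GHz.
 */
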
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
        struct sample *sample = &cpu->sample;
        int32_t busy_frac, boost;
        int target, avg_pstate;

        busy_frac = div_fp(sample->mperf, sample->tsc);

        boost = cpu->iowait_boost;
        cpu->iowait_boost >>= 1;

        if (busy_frac < boost)
                busy_frac = boost;

        sample->busy_scaled = busy_frac * 100;

        target = limits->no_turbo || limits->turbo_disabled ?
                        cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
        target += target >> 2;
        target = mul_fp(target, busy_frac);
        if (target < cpu->pstate.min_pstate)
                target = cpu->pstate.min_pstate;

        /*
         * If the average P-state during the previous cycle was higher than the
         * current target, add 50% of the difference to the target to reduce
         * possible performance oscillations and offset possible performance
         * loss related to moving the workload from one CPU to another within
         * a package/module.
         */
        avg_pstate = get_avg_pstate(cpu);
        if (avg_pstate > target)
                target += (avg_pstate - target) >> 1;

        return target;
}

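/*
 * Worked example for the load-based algorithm above (hypothetical
 * numbers): suppose turbo is available with turbo_pstate == 36 and
 * the CPU was in C0 for 60 percent of the TSC window, so
 * busy_frac == div_fp(3, 5) == 153.  Then target == 36 + (36 >> 2)
 * == 45 thanks to the 25 percent headroom, and mul_fp(45, 153) == 26,
 * noticeably above 0.6 * 36 (~22), deliberately leaving room for the
 * load to grow before the next sample.
 */
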
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
        int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
        u64 duration_ns;

        /*
         * perf_scaled is the ratio of the average P-state during the last
         * sampling period to the P-state requested last time (in percent).
         *
         * That measures the system's response to the previous P-state
         * selection.
         */
        max_pstate = cpu->pstate.max_pstate_physical;
        current_pstate = cpu->pstate.current_pstate;
        perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf,
                                 div_fp(100 * max_pstate, current_pstate));

        /*
         * Since our utilization update callback will not run unless we are
         * in C0, check if the actual elapsed time is significantly greater (3x)
         * than our sample interval.  If it is, then we were idle for a long
         * enough period of time to adjust our performance metric.
         */
        duration_ns = cpu->sample.time - cpu->last_sample_time;
        if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
                sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
                perf_scaled = mul_fp(perf_scaled, sample_ratio);
        } else {
                sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
                if (sample_ratio < int_tofp(1))
                        perf_scaled = 0;
        }

        cpu->sample.busy_scaled = perf_scaled;
        return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
}

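/*
 * Worked example for the algorithm above (hypothetical numbers): with
 * max_pstate_physical == 32, current_pstate == 24 and
 * core_avg_perf ~= 0.7 in ext fixed point (11469),
 *
 *	div_fp(100 * 32, 24) == 34133		(~133.3)
 *	mul_ext_fp(11469, 34133) == 23893	(~93.3)
 *
 * i.e. the core delivered about 93 percent of the requested
 * performance, and that is the value the PID compares against its
 * setpoint of int_tofp(97).
 */
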
static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
{
        int max_perf, min_perf;

        update_turbo_state();

        intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
        pstate = clamp_t(int, pstate, min_perf, max_perf);
        trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
        if (pstate == cpu->pstate.current_pstate)
                return;

        cpu->pstate.current_pstate = pstate;
        wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
        int from, target_pstate;
        struct sample *sample;

        from = cpu->pstate.current_pstate;

        target_pstate = pstate_funcs.get_target_pstate(cpu);

        intel_pstate_update_pstate(cpu, target_pstate);

        sample = &cpu->sample;
        trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
                fp_toint(sample->busy_scaled),
                from,
                cpu->pstate.current_pstate,
                sample->mperf,
                sample->aperf,
                sample->tsc,
                get_avg_frequency(cpu),
                fp_toint(cpu->iowait_boost * 100));
}

static void intel_pstate_update_util(struct update_util_data *data, u64 time,
                                     unsigned int flags)
{
        struct cpudata *cpu = container_of(data, struct cpudata, update_util);
        u64 delta_ns;

        if (pid_params.boost_iowait) {
                if (flags & SCHED_CPUFREQ_IOWAIT) {
                        cpu->iowait_boost = int_tofp(1);
                } else if (cpu->iowait_boost) {
                        /* Clear iowait_boost if the CPU may have been idle. */
                        delta_ns = time - cpu->last_update;
                        if (delta_ns > TICK_NSEC)
                                cpu->iowait_boost = 0;
                }
                cpu->last_update = time;
        }

        delta_ns = time - cpu->sample.time;
        if ((s64)delta_ns >= pid_params.sample_rate_ns) {
                bool sample_taken = intel_pstate_sample(cpu, time);

                if (sample_taken) {
                        intel_pstate_calc_avg_perf(cpu);
                        if (!hwp_active)
                                intel_pstate_adjust_busy_pstate(cpu);
                }
        }
}

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
        ICPU(INTEL_FAM6_SANDYBRIDGE,		core_params),
        ICPU(INTEL_FAM6_SANDYBRIDGE_X,		core_params),
        ICPU(INTEL_FAM6_ATOM_SILVERMONT1,	silvermont_params),
        ICPU(INTEL_FAM6_IVYBRIDGE,		core_params),
        ICPU(INTEL_FAM6_HASWELL_CORE,		core_params),
        ICPU(INTEL_FAM6_BROADWELL_CORE,		core_params),
        ICPU(INTEL_FAM6_IVYBRIDGE_X,		core_params),
        ICPU(INTEL_FAM6_HASWELL_X,		core_params),
        ICPU(INTEL_FAM6_HASWELL_ULT,		core_params),
        ICPU(INTEL_FAM6_HASWELL_GT3E,		core_params),
        ICPU(INTEL_FAM6_BROADWELL_GT3E,		core_params),
        ICPU(INTEL_FAM6_ATOM_AIRMONT,		airmont_params),
        ICPU(INTEL_FAM6_SKYLAKE_MOBILE,		core_params),
        ICPU(INTEL_FAM6_BROADWELL_X,		core_params),
        ICPU(INTEL_FAM6_SKYLAKE_DESKTOP,	core_params),
        ICPU(INTEL_FAM6_BROADWELL_XEON_D,	core_params),
        ICPU(INTEL_FAM6_XEON_PHI_KNL,		knl_params),
        ICPU(INTEL_FAM6_ATOM_GOLDMONT,		bxt_params),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
        ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
        ICPU(INTEL_FAM6_BROADWELL_X, core_params),
        ICPU(INTEL_FAM6_SKYLAKE_X, core_params),
        {}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
        struct cpudata *cpu;

        if (!all_cpu_data[cpunum])
                all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
                                               GFP_KERNEL);
        if (!all_cpu_data[cpunum])
                return -ENOMEM;

        cpu = all_cpu_data[cpunum];

        cpu->cpu = cpunum;

        if (hwp_active) {
                intel_pstate_hwp_enable(cpu);
                pid_params.sample_rate_ms = 50;
                pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
        }

        intel_pstate_get_cpu_pstates(cpu);

        intel_pstate_busy_pid_reset(cpu);

        pr_debug("controlling: cpu %d\n", cpunum);

        return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
        struct cpudata *cpu = all_cpu_data[cpu_num];

        return cpu ? get_avg_frequency(cpu) : 0;
}

static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
        struct cpudata *cpu = all_cpu_data[cpu_num];

        if (cpu->update_util_set)
                return;

        /* Prevent intel_pstate_update_util() from using stale data. */
        cpu->sample.time = 0;
        cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
                                     intel_pstate_update_util);
        cpu->update_util_set = true;
}

static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
        struct cpudata *cpu_data = all_cpu_data[cpu];

        if (!cpu_data->update_util_set)
                return;

        cpufreq_remove_update_util_hook(cpu);
        cpu_data->update_util_set = false;
        synchronize_sched();
}

static void intel_pstate_set_performance_limits(struct perf_limits *limits)
{
        limits->no_turbo = 0;
        limits->turbo_disabled = 0;
        limits->max_perf_pct = 100;
        limits->max_perf = int_tofp(1);
        limits->min_perf_pct = 100;
        limits->min_perf = int_tofp(1);
        limits->max_policy_pct = 100;
        limits->max_sysfs_pct = 100;
        limits->min_policy_pct = 0;
        limits->min_sysfs_pct = 0;
}

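/*
 * Illustration of the percentage bookkeeping in
 * intel_pstate_set_policy() below (hypothetical numbers): with
 * cpuinfo.max_freq == 3600000 and a policy range of
 * [1200000, 2700000],
 *
 *	min_policy_pct = (1200000 * 100) / 3600000 == 33
 *	max_policy_pct = DIV_ROUND_UP(2700000 * 100, 3600000) == 75
 *
 * and the effective min/max_perf_pct are then clamped between these
 * policy bounds and the sysfs bounds before being converted to the
 * fixed-point min_perf/max_perf.
 */
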
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
        struct cpudata *cpu;

        if (!policy->cpuinfo.max_freq)
                return -ENODEV;

        pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
                 policy->cpuinfo.max_freq, policy->max);

        cpu = all_cpu_data[policy->cpu];
        if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
            policy->max < policy->cpuinfo.max_freq &&
            policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
                pr_debug("policy->max > max non turbo frequency\n");
                policy->max = policy->cpuinfo.max_freq;
        }

        if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
                limits = &performance_limits;
                if (policy->max >= policy->cpuinfo.max_freq) {
                        pr_debug("set performance\n");
                        intel_pstate_set_performance_limits(limits);
                        goto out;
                }
        } else {
                pr_debug("set powersave\n");
                limits = &powersave_limits;
        }

        limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
        limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0, 100);
        limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
                                              policy->cpuinfo.max_freq);
        limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);

        /* Normalize user input to [min_policy_pct, max_policy_pct] */
        limits->min_perf_pct = max(limits->min_policy_pct,
                                   limits->min_sysfs_pct);
        limits->min_perf_pct = min(limits->max_policy_pct,
                                   limits->min_perf_pct);
        limits->max_perf_pct = min(limits->max_policy_pct,
                                   limits->max_sysfs_pct);
        limits->max_perf_pct = max(limits->min_policy_pct,
                                   limits->max_perf_pct);

        /* Make sure min_perf_pct <= max_perf_pct */
        limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

        limits->min_perf = div_fp(limits->min_perf_pct, 100);
        limits->max_perf = div_fp(limits->max_perf_pct, 100);
        limits->max_perf = round_up(limits->max_perf, FRAC_BITS);

 out:
        if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
                /*
                 * NOHZ_FULL CPUs need this as the governor callback may not
                 * be invoked on them.
                 */
                intel_pstate_clear_update_util_hook(policy->cpu);
                intel_pstate_max_within_limits(cpu);
        }

        intel_pstate_set_update_util_hook(policy->cpu);

        intel_pstate_hwp_set_policy(policy);

        return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
        cpufreq_verify_within_cpu_limits(policy);

        if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
            policy->policy != CPUFREQ_POLICY_PERFORMANCE)
                return -EINVAL;

        return 0;
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
        int cpu_num = policy->cpu;
        struct cpudata *cpu = all_cpu_data[cpu_num];

        pr_debug("CPU %d exiting\n", cpu_num);

        intel_pstate_clear_update_util_hook(cpu_num);

        if (hwp_active)
                return;

        intel_pstate_set_min_pstate(cpu);
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
        struct cpudata *cpu;
        int rc;

        rc = intel_pstate_init_cpu(policy->cpu);
        if (rc)
                return rc;

        cpu = all_cpu_data[policy->cpu];

        if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
                policy->policy = CPUFREQ_POLICY_PERFORMANCE;
        else
                policy->policy = CPUFREQ_POLICY_POWERSAVE;

        policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
        policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
        update_turbo_state();
        policy->cpuinfo.max_freq = limits->turbo_disabled ?
                        cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
        policy->cpuinfo.max_freq *= cpu->pstate.scaling;

        intel_pstate_init_acpi_perf_limits(policy);
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
        cpumask_set_cpu(policy->cpu, policy->cpus);

        return 0;
}

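/*
 * Note on frequency units (illustrative): pstate values are bus
 * ratios and scaling converts them to kHz, so on a Core part
 * (scaling == 100000) a turbo_pstate of 36 yields
 * policy->cpuinfo.max_freq == 36 * 100000 == 3600000 kHz, i.e.
 * 3.6 GHz.
 */
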
static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
        intel_pstate_exit_perf_limits(policy);

        return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
        .flags		= CPUFREQ_CONST_LOOPS,
        .verify		= intel_pstate_verify_policy,
        .setpolicy	= intel_pstate_set_policy,
        .resume		= intel_pstate_hwp_set_policy,
        .get		= intel_pstate_get,
        .init		= intel_pstate_cpu_init,
        .exit		= intel_pstate_cpu_exit,
        .stop_cpu	= intel_pstate_stop_cpu,
        .name		= "intel_pstate",
};

static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
static unsigned int force_load __initdata;

static int __init intel_pstate_msrs_not_valid(void)
{
        if (!pstate_funcs.get_max() ||
            !pstate_funcs.get_min() ||
            !pstate_funcs.get_turbo())
                return -ENODEV;

        return 0;
}

static void __init copy_pid_params(struct pstate_adjust_policy *policy)
{
        pid_params.sample_rate_ms = policy->sample_rate_ms;
        pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
        pid_params.p_gain_pct = policy->p_gain_pct;
        pid_params.i_gain_pct = policy->i_gain_pct;
        pid_params.d_gain_pct = policy->d_gain_pct;
        pid_params.deadband = policy->deadband;
        pid_params.setpoint = policy->setpoint;
        /*
         * Copy boost_iowait as well; without this the flag set in the Atom
         * and Broxton pid_policy definitions would never reach pid_params
         * and iowait boosting would stay disabled.
         */
        pid_params.boost_iowait = policy->boost_iowait;
}

static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
        pstate_funcs.get_max = funcs->get_max;
        pstate_funcs.get_max_physical = funcs->get_max_physical;
        pstate_funcs.get_min = funcs->get_min;
        pstate_funcs.get_turbo = funcs->get_turbo;
        pstate_funcs.get_scaling = funcs->get_scaling;
        pstate_funcs.get_val = funcs->get_val;
        pstate_funcs.get_vid = funcs->get_vid;
        pstate_funcs.get_target_pstate = funcs->get_target_pstate;
}

#ifdef CONFIG_ACPI

static bool __init intel_pstate_no_acpi_pss(void)
{
        int i;

        for_each_possible_cpu(i) {
                acpi_status status;
                union acpi_object *pss;
                struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
                struct acpi_processor *pr = per_cpu(processors, i);

                if (!pr)
                        continue;

                status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
                if (ACPI_FAILURE(status))
                        continue;

                pss = buffer.pointer;
                if (pss && pss->type == ACPI_TYPE_PACKAGE) {
                        kfree(pss);
                        return false;
                }

                kfree(pss);
        }

        return true;
}

static bool __init intel_pstate_has_acpi_ppc(void)
{
        int i;

        for_each_possible_cpu(i) {
                struct acpi_processor *pr = per_cpu(processors, i);

                if (!pr)
                        continue;
                if (acpi_has_method(pr->handle, "_PPC"))
                        return true;
        }
        return false;
}

enum {
        PSS,
        PPC,
};

struct hw_vendor_info {
        u16 valid;
        char oem_id[ACPI_OEM_ID_SIZE];
        char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
        int oem_pwr_table;
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] __initdata = {
        {1, "HP    ", "ProLiant", PSS},
        {1, "ORACLE", "X4-2    ", PPC},
        {1, "ORACLE", "X4-2L   ", PPC},
        {1, "ORACLE", "X4-2B   ", PPC},
        {1, "ORACLE", "X3-2    ", PPC},
        {1, "ORACLE", "X3-2L   ", PPC},
        {1, "ORACLE", "X3-2B   ", PPC},
        {1, "ORACLE", "X4470M2 ", PPC},
        {1, "ORACLE", "X4270M3 ", PPC},
        {1, "ORACLE", "X4270M2 ", PPC},
        {1, "ORACLE", "X4170M2 ", PPC},
        {1, "ORACLE", "X4170 M3", PPC},
        {1, "ORACLE", "X4275 M3", PPC},
        {1, "ORACLE", "X6-2    ", PPC},
        {1, "ORACLE", "Sudbury ", PPC},
        {0, "", ""},
};

static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
        struct acpi_table_header hdr;
        struct hw_vendor_info *v_info;
        const struct x86_cpu_id *id;
        u64 misc_pwr;

        id = x86_match_cpu(intel_pstate_cpu_oob_ids);
        if (id) {
                rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
                if (misc_pwr & (1 << 8))
                        return true;
        }

        if (acpi_disabled ||
            ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
                return false;

        for (v_info = vendor_info; v_info->valid; v_info++) {
                if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
                    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
                             ACPI_OEM_TABLE_ID_SIZE))
                        switch (v_info->oem_pwr_table) {
                        case PSS:
                                return intel_pstate_no_acpi_pss();
                        case PPC:
                                return intel_pstate_has_acpi_ppc() &&
                                        (!force_load);
                        }
        }

        return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
        { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
        {}
};

static int __init intel_pstate_init(void)
{
        int cpu, rc = 0;
        const struct x86_cpu_id *id;
        struct cpu_defaults *cpu_def;

        if (no_load)
                return -ENODEV;

        if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
                copy_cpu_funcs(&core_params.funcs);
                hwp_active++;
                goto hwp_cpu_matched;
        }

        id = x86_match_cpu(intel_pstate_cpu_ids);
        if (!id)
                return -ENODEV;

        cpu_def = (struct cpu_defaults *)id->driver_data;

        copy_pid_params(&cpu_def->pid_policy);
        copy_cpu_funcs(&cpu_def->funcs);

        if (intel_pstate_msrs_not_valid())
                return -ENODEV;

hwp_cpu_matched:
        /*
         * The Intel pstate driver will be ignored if the platform
         * firmware has its own power management modes.
         */
        if (intel_pstate_platform_pwr_mgmt_exists())
                return -ENODEV;

        pr_info("Intel P-state driver initializing\n");

        all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
        if (!all_cpu_data)
                return -ENOMEM;

        if (!hwp_active && hwp_only)
                goto out;

        rc = cpufreq_register_driver(&intel_pstate_driver);
        if (rc)
                goto out;

        intel_pstate_debug_expose_params();
        intel_pstate_sysfs_expose_params();

        if (hwp_active)
                pr_info("HWP enabled\n");

        return rc;
out:
        get_online_cpus();
        for_each_online_cpu(cpu) {
                if (all_cpu_data[cpu]) {
                        intel_pstate_clear_update_util_hook(cpu);
                        kfree(all_cpu_data[cpu]);
                }
        }

        put_online_cpus();
        vfree(all_cpu_data);
        return -ENODEV;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "disable"))
                no_load = 1;
        if (!strcmp(str, "no_hwp")) {
                pr_info("HWP disabled\n");
                no_hwp = 1;
        }
        if (!strcmp(str, "force"))
                force_load = 1;
        if (!strcmp(str, "hwp_only"))
                hwp_only = 1;

#ifdef CONFIG_ACPI
        if (!strcmp(str, "support_acpi_ppc"))
                acpi_ppc = true;
#endif

        return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");