1 /* 2 * intel_pstate.c: Native P state management for Intel processors 3 * 4 * (C) Copyright 2012 Intel Corporation 5 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com> 6 * 7 * This program is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU General Public License 9 * as published by the Free Software Foundation; version 2 10 * of the License. 11 */ 12 13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 15 #include <linux/kernel.h> 16 #include <linux/kernel_stat.h> 17 #include <linux/module.h> 18 #include <linux/ktime.h> 19 #include <linux/hrtimer.h> 20 #include <linux/tick.h> 21 #include <linux/slab.h> 22 #include <linux/sched.h> 23 #include <linux/list.h> 24 #include <linux/cpu.h> 25 #include <linux/cpufreq.h> 26 #include <linux/sysfs.h> 27 #include <linux/types.h> 28 #include <linux/fs.h> 29 #include <linux/debugfs.h> 30 #include <linux/acpi.h> 31 #include <linux/vmalloc.h> 32 #include <trace/events/power.h> 33 34 #include <asm/div64.h> 35 #include <asm/msr.h> 36 #include <asm/cpu_device_id.h> 37 #include <asm/cpufeature.h> 38 39 #define ATOM_RATIOS 0x66a 40 #define ATOM_VIDS 0x66b 41 #define ATOM_TURBO_RATIOS 0x66c 42 #define ATOM_TURBO_VIDS 0x66d 43 44 #ifdef CONFIG_ACPI 45 #include <acpi/processor.h> 46 #endif 47 48 #define FRAC_BITS 8 49 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) 50 #define fp_toint(X) ((X) >> FRAC_BITS) 51 52 static inline int32_t mul_fp(int32_t x, int32_t y) 53 { 54 return ((int64_t)x * (int64_t)y) >> FRAC_BITS; 55 } 56 57 static inline int32_t div_fp(s64 x, s64 y) 58 { 59 return div64_s64((int64_t)x << FRAC_BITS, y); 60 } 61 62 static inline int ceiling_fp(int32_t x) 63 { 64 int mask, ret; 65 66 ret = fp_toint(x); 67 mask = (1 << FRAC_BITS) - 1; 68 if (x & mask) 69 ret += 1; 70 return ret; 71 } 72 73 /** 74 * struct sample - Store performance sample 75 * @core_pct_busy: Ratio of APERF/MPERF in percent, which is actual 76 * performance during last sample period 77 * @busy_scaled: Scaled busy value which is used to calculate next 78 * P state. This can be different than core_pct_busy 79 * to account for cpu idle period 80 * @aperf: Difference of actual performance frequency clock count 81 * read from APERF MSR between last and current sample 82 * @mperf: Difference of maximum performance frequency clock count 83 * read from MPERF MSR between last and current sample 84 * @tsc: Difference of time stamp counter between last and 85 * current sample 86 * @freq: Effective frequency calculated from APERF/MPERF 87 * @time: Current time from scheduler 88 * 89 * This structure is used in the cpudata structure to store performance sample 90 * data for choosing next P State. 91 */ 92 struct sample { 93 int32_t core_pct_busy; 94 int32_t busy_scaled; 95 u64 aperf; 96 u64 mperf; 97 u64 tsc; 98 int freq; 99 u64 time; 100 }; 101 102 /** 103 * struct pstate_data - Store P state data 104 * @current_pstate: Current requested P state 105 * @min_pstate: Min P state possible for this platform 106 * @max_pstate: Max P state possible for this platform 107 * @max_pstate_physical:This is physical Max P state for a processor 108 * This can be higher than the max_pstate which can 109 * be limited by platform thermal design power limits 110 * @scaling: Scaling factor to convert frequency to cpufreq 111 * frequency units 112 * @turbo_pstate: Max Turbo P state possible for this platform 113 * 114 * Stores the per cpu model P state limits and current P state. 115 */ 116 struct pstate_data { 117 int current_pstate; 118 int min_pstate; 119 int max_pstate; 120 int max_pstate_physical; 121 int scaling; 122 int turbo_pstate; 123 }; 124 125 /** 126 * struct vid_data - Stores voltage information data 127 * @min: VID data for this platform corresponding to 128 * the lowest P state 129 * @max: VID data corresponding to the highest P State. 130 * @turbo: VID data for turbo P state 131 * @ratio: Ratio of (vid max - vid min) / 132 * (max P state - Min P State) 133 * 134 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling) 135 * This data is used in Atom platforms, where in addition to target P state, 136 * the voltage data needs to be specified to select next P State. 137 */ 138 struct vid_data { 139 int min; 140 int max; 141 int turbo; 142 int32_t ratio; 143 }; 144 145 /** 146 * struct _pid - Stores PID data 147 * @setpoint: Target set point for busyness or performance 148 * @integral: Storage for accumulated error values 149 * @p_gain: PID proportional gain 150 * @i_gain: PID integral gain 151 * @d_gain: PID derivative gain 152 * @deadband: PID deadband 153 * @last_err: Last error storage for integral part of PID calculation 154 * 155 * Stores PID coefficients and last error for PID controller. 156 */ 157 struct _pid { 158 int setpoint; 159 int32_t integral; 160 int32_t p_gain; 161 int32_t i_gain; 162 int32_t d_gain; 163 int deadband; 164 int32_t last_err; 165 }; 166 167 /** 168 * struct cpudata - Per CPU instance data storage 169 * @cpu: CPU number for this instance data 170 * @update_util: CPUFreq utility callback information 171 * @pstate: Stores P state limits for this CPU 172 * @vid: Stores VID limits for this CPU 173 * @pid: Stores PID parameters for this CPU 174 * @last_sample_time: Last Sample time 175 * @prev_aperf: Last APERF value read from APERF MSR 176 * @prev_mperf: Last MPERF value read from MPERF MSR 177 * @prev_tsc: Last timestamp counter (TSC) value 178 * @prev_cummulative_iowait: IO Wait time difference from last and 179 * current sample 180 * @sample: Storage for storing last Sample data 181 * @acpi_perf_data: Stores ACPI perf information read from _PSS 182 * @valid_pss_table: Set to true for valid ACPI _PSS entries found 183 * 184 * This structure stores per CPU instance data for all CPUs. 185 */ 186 struct cpudata { 187 int cpu; 188 189 struct update_util_data update_util; 190 191 struct pstate_data pstate; 192 struct vid_data vid; 193 struct _pid pid; 194 195 u64 last_sample_time; 196 u64 prev_aperf; 197 u64 prev_mperf; 198 u64 prev_tsc; 199 u64 prev_cummulative_iowait; 200 struct sample sample; 201 #ifdef CONFIG_ACPI 202 struct acpi_processor_performance acpi_perf_data; 203 bool valid_pss_table; 204 #endif 205 }; 206 207 static struct cpudata **all_cpu_data; 208 209 /** 210 * struct pid_adjust_policy - Stores static PID configuration data 211 * @sample_rate_ms: PID calculation sample rate in ms 212 * @sample_rate_ns: Sample rate calculation in ns 213 * @deadband: PID deadband 214 * @setpoint: PID Setpoint 215 * @p_gain_pct: PID proportional gain 216 * @i_gain_pct: PID integral gain 217 * @d_gain_pct: PID derivative gain 218 * 219 * Stores per CPU model static PID configuration data. 220 */ 221 struct pstate_adjust_policy { 222 int sample_rate_ms; 223 s64 sample_rate_ns; 224 int deadband; 225 int setpoint; 226 int p_gain_pct; 227 int d_gain_pct; 228 int i_gain_pct; 229 }; 230 231 /** 232 * struct pstate_funcs - Per CPU model specific callbacks 233 * @get_max: Callback to get maximum non turbo effective P state 234 * @get_max_physical: Callback to get maximum non turbo physical P state 235 * @get_min: Callback to get minimum P state 236 * @get_turbo: Callback to get turbo P state 237 * @get_scaling: Callback to get frequency scaling factor 238 * @get_val: Callback to convert P state to actual MSR write value 239 * @get_vid: Callback to get VID data for Atom platforms 240 * @get_target_pstate: Callback to a function to calculate next P state to use 241 * 242 * Core and Atom CPU models have different way to get P State limits. This 243 * structure is used to store those callbacks. 244 */ 245 struct pstate_funcs { 246 int (*get_max)(void); 247 int (*get_max_physical)(void); 248 int (*get_min)(void); 249 int (*get_turbo)(void); 250 int (*get_scaling)(void); 251 u64 (*get_val)(struct cpudata*, int pstate); 252 void (*get_vid)(struct cpudata *); 253 int32_t (*get_target_pstate)(struct cpudata *); 254 }; 255 256 /** 257 * struct cpu_defaults- Per CPU model default config data 258 * @pid_policy: PID config data 259 * @funcs: Callback function data 260 */ 261 struct cpu_defaults { 262 struct pstate_adjust_policy pid_policy; 263 struct pstate_funcs funcs; 264 }; 265 266 static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu); 267 static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu); 268 269 static struct pstate_adjust_policy pid_params; 270 static struct pstate_funcs pstate_funcs; 271 static int hwp_active; 272 273 #ifdef CONFIG_ACPI 274 static bool acpi_ppc; 275 #endif 276 277 /** 278 * struct perf_limits - Store user and policy limits 279 * @no_turbo: User requested turbo state from intel_pstate sysfs 280 * @turbo_disabled: Platform turbo status either from msr 281 * MSR_IA32_MISC_ENABLE or when maximum available pstate 282 * matches the maximum turbo pstate 283 * @max_perf_pct: Effective maximum performance limit in percentage, this 284 * is minimum of either limits enforced by cpufreq policy 285 * or limits from user set limits via intel_pstate sysfs 286 * @min_perf_pct: Effective minimum performance limit in percentage, this 287 * is maximum of either limits enforced by cpufreq policy 288 * or limits from user set limits via intel_pstate sysfs 289 * @max_perf: This is a scaled value between 0 to 255 for max_perf_pct 290 * This value is used to limit max pstate 291 * @min_perf: This is a scaled value between 0 to 255 for min_perf_pct 292 * This value is used to limit min pstate 293 * @max_policy_pct: The maximum performance in percentage enforced by 294 * cpufreq setpolicy interface 295 * @max_sysfs_pct: The maximum performance in percentage enforced by 296 * intel pstate sysfs interface 297 * @min_policy_pct: The minimum performance in percentage enforced by 298 * cpufreq setpolicy interface 299 * @min_sysfs_pct: The minimum performance in percentage enforced by 300 * intel pstate sysfs interface 301 * 302 * Storage for user and policy defined limits. 303 */ 304 struct perf_limits { 305 int no_turbo; 306 int turbo_disabled; 307 int max_perf_pct; 308 int min_perf_pct; 309 int32_t max_perf; 310 int32_t min_perf; 311 int max_policy_pct; 312 int max_sysfs_pct; 313 int min_policy_pct; 314 int min_sysfs_pct; 315 }; 316 317 static struct perf_limits performance_limits = { 318 .no_turbo = 0, 319 .turbo_disabled = 0, 320 .max_perf_pct = 100, 321 .max_perf = int_tofp(1), 322 .min_perf_pct = 100, 323 .min_perf = int_tofp(1), 324 .max_policy_pct = 100, 325 .max_sysfs_pct = 100, 326 .min_policy_pct = 0, 327 .min_sysfs_pct = 0, 328 }; 329 330 static struct perf_limits powersave_limits = { 331 .no_turbo = 0, 332 .turbo_disabled = 0, 333 .max_perf_pct = 100, 334 .max_perf = int_tofp(1), 335 .min_perf_pct = 0, 336 .min_perf = 0, 337 .max_policy_pct = 100, 338 .max_sysfs_pct = 100, 339 .min_policy_pct = 0, 340 .min_sysfs_pct = 0, 341 }; 342 343 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE 344 static struct perf_limits *limits = &performance_limits; 345 #else 346 static struct perf_limits *limits = &powersave_limits; 347 #endif 348 349 #ifdef CONFIG_ACPI 350 351 static bool intel_pstate_get_ppc_enable_status(void) 352 { 353 if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER || 354 acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER) 355 return true; 356 357 return acpi_ppc; 358 } 359 360 /* 361 * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and 362 * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and 363 * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state 364 * ratio, out of it only high 8 bits are used. For example 0x1700 is setting 365 * target ratio 0x17. The _PSS control value stores in a format which can be 366 * directly written to PERF_CTL MSR. But in intel_pstate driver this shift 367 * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()). 368 * This function converts the _PSS control value to intel pstate driver format 369 * for comparison and assignment. 370 */ 371 static int convert_to_native_pstate_format(struct cpudata *cpu, int index) 372 { 373 return cpu->acpi_perf_data.states[index].control >> 8; 374 } 375 376 static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 377 { 378 struct cpudata *cpu; 379 int turbo_pss_ctl; 380 int ret; 381 int i; 382 383 if (hwp_active) 384 return; 385 386 if (!intel_pstate_get_ppc_enable_status()) 387 return; 388 389 cpu = all_cpu_data[policy->cpu]; 390 391 ret = acpi_processor_register_performance(&cpu->acpi_perf_data, 392 policy->cpu); 393 if (ret) 394 return; 395 396 /* 397 * Check if the control value in _PSS is for PERF_CTL MSR, which should 398 * guarantee that the states returned by it map to the states in our 399 * list directly. 400 */ 401 if (cpu->acpi_perf_data.control_register.space_id != 402 ACPI_ADR_SPACE_FIXED_HARDWARE) 403 goto err; 404 405 /* 406 * If there is only one entry _PSS, simply ignore _PSS and continue as 407 * usual without taking _PSS into account 408 */ 409 if (cpu->acpi_perf_data.state_count < 2) 410 goto err; 411 412 pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu); 413 for (i = 0; i < cpu->acpi_perf_data.state_count; i++) { 414 pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n", 415 (i == cpu->acpi_perf_data.state ? '*' : ' '), i, 416 (u32) cpu->acpi_perf_data.states[i].core_frequency, 417 (u32) cpu->acpi_perf_data.states[i].power, 418 (u32) cpu->acpi_perf_data.states[i].control); 419 } 420 421 /* 422 * The _PSS table doesn't contain whole turbo frequency range. 423 * This just contains +1 MHZ above the max non turbo frequency, 424 * with control value corresponding to max turbo ratio. But 425 * when cpufreq set policy is called, it will call with this 426 * max frequency, which will cause a reduced performance as 427 * this driver uses real max turbo frequency as the max 428 * frequency. So correct this frequency in _PSS table to 429 * correct max turbo frequency based on the turbo ratio. 430 * Also need to convert to MHz as _PSS freq is in MHz. 431 */ 432 turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0); 433 if (turbo_pss_ctl > cpu->pstate.max_pstate) 434 cpu->acpi_perf_data.states[0].core_frequency = 435 policy->cpuinfo.max_freq / 1000; 436 cpu->valid_pss_table = true; 437 pr_info("_PPC limits will be enforced\n"); 438 439 return; 440 441 err: 442 cpu->valid_pss_table = false; 443 acpi_processor_unregister_performance(policy->cpu); 444 } 445 446 static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 447 { 448 struct cpudata *cpu; 449 450 cpu = all_cpu_data[policy->cpu]; 451 if (!cpu->valid_pss_table) 452 return; 453 454 acpi_processor_unregister_performance(policy->cpu); 455 } 456 457 #else 458 static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 459 { 460 } 461 462 static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 463 { 464 } 465 #endif 466 467 static inline void pid_reset(struct _pid *pid, int setpoint, int busy, 468 int deadband, int integral) { 469 pid->setpoint = int_tofp(setpoint); 470 pid->deadband = int_tofp(deadband); 471 pid->integral = int_tofp(integral); 472 pid->last_err = int_tofp(setpoint) - int_tofp(busy); 473 } 474 475 static inline void pid_p_gain_set(struct _pid *pid, int percent) 476 { 477 pid->p_gain = div_fp(percent, 100); 478 } 479 480 static inline void pid_i_gain_set(struct _pid *pid, int percent) 481 { 482 pid->i_gain = div_fp(percent, 100); 483 } 484 485 static inline void pid_d_gain_set(struct _pid *pid, int percent) 486 { 487 pid->d_gain = div_fp(percent, 100); 488 } 489 490 static signed int pid_calc(struct _pid *pid, int32_t busy) 491 { 492 signed int result; 493 int32_t pterm, dterm, fp_error; 494 int32_t integral_limit; 495 496 fp_error = pid->setpoint - busy; 497 498 if (abs(fp_error) <= pid->deadband) 499 return 0; 500 501 pterm = mul_fp(pid->p_gain, fp_error); 502 503 pid->integral += fp_error; 504 505 /* 506 * We limit the integral here so that it will never 507 * get higher than 30. This prevents it from becoming 508 * too large an input over long periods of time and allows 509 * it to get factored out sooner. 510 * 511 * The value of 30 was chosen through experimentation. 512 */ 513 integral_limit = int_tofp(30); 514 if (pid->integral > integral_limit) 515 pid->integral = integral_limit; 516 if (pid->integral < -integral_limit) 517 pid->integral = -integral_limit; 518 519 dterm = mul_fp(pid->d_gain, fp_error - pid->last_err); 520 pid->last_err = fp_error; 521 522 result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; 523 result = result + (1 << (FRAC_BITS-1)); 524 return (signed int)fp_toint(result); 525 } 526 527 static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu) 528 { 529 pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct); 530 pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct); 531 pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct); 532 533 pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0); 534 } 535 536 static inline void intel_pstate_reset_all_pid(void) 537 { 538 unsigned int cpu; 539 540 for_each_online_cpu(cpu) { 541 if (all_cpu_data[cpu]) 542 intel_pstate_busy_pid_reset(all_cpu_data[cpu]); 543 } 544 } 545 546 static inline void update_turbo_state(void) 547 { 548 u64 misc_en; 549 struct cpudata *cpu; 550 551 cpu = all_cpu_data[0]; 552 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); 553 limits->turbo_disabled = 554 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || 555 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); 556 } 557 558 static void intel_pstate_hwp_set(const struct cpumask *cpumask) 559 { 560 int min, hw_min, max, hw_max, cpu, range, adj_range; 561 u64 value, cap; 562 563 rdmsrl(MSR_HWP_CAPABILITIES, cap); 564 hw_min = HWP_LOWEST_PERF(cap); 565 hw_max = HWP_HIGHEST_PERF(cap); 566 range = hw_max - hw_min; 567 568 for_each_cpu(cpu, cpumask) { 569 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); 570 adj_range = limits->min_perf_pct * range / 100; 571 min = hw_min + adj_range; 572 value &= ~HWP_MIN_PERF(~0L); 573 value |= HWP_MIN_PERF(min); 574 575 adj_range = limits->max_perf_pct * range / 100; 576 max = hw_min + adj_range; 577 if (limits->no_turbo) { 578 hw_max = HWP_GUARANTEED_PERF(cap); 579 if (hw_max < max) 580 max = hw_max; 581 } 582 583 value &= ~HWP_MAX_PERF(~0L); 584 value |= HWP_MAX_PERF(max); 585 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); 586 } 587 } 588 589 static void intel_pstate_hwp_set_online_cpus(void) 590 { 591 get_online_cpus(); 592 intel_pstate_hwp_set(cpu_online_mask); 593 put_online_cpus(); 594 } 595 596 /************************** debugfs begin ************************/ 597 static int pid_param_set(void *data, u64 val) 598 { 599 *(u32 *)data = val; 600 intel_pstate_reset_all_pid(); 601 return 0; 602 } 603 604 static int pid_param_get(void *data, u64 *val) 605 { 606 *val = *(u32 *)data; 607 return 0; 608 } 609 DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n"); 610 611 struct pid_param { 612 char *name; 613 void *value; 614 }; 615 616 static struct pid_param pid_files[] = { 617 {"sample_rate_ms", &pid_params.sample_rate_ms}, 618 {"d_gain_pct", &pid_params.d_gain_pct}, 619 {"i_gain_pct", &pid_params.i_gain_pct}, 620 {"deadband", &pid_params.deadband}, 621 {"setpoint", &pid_params.setpoint}, 622 {"p_gain_pct", &pid_params.p_gain_pct}, 623 {NULL, NULL} 624 }; 625 626 static void __init intel_pstate_debug_expose_params(void) 627 { 628 struct dentry *debugfs_parent; 629 int i = 0; 630 631 if (hwp_active) 632 return; 633 debugfs_parent = debugfs_create_dir("pstate_snb", NULL); 634 if (IS_ERR_OR_NULL(debugfs_parent)) 635 return; 636 while (pid_files[i].name) { 637 debugfs_create_file(pid_files[i].name, 0660, 638 debugfs_parent, pid_files[i].value, 639 &fops_pid_param); 640 i++; 641 } 642 } 643 644 /************************** debugfs end ************************/ 645 646 /************************** sysfs begin ************************/ 647 #define show_one(file_name, object) \ 648 static ssize_t show_##file_name \ 649 (struct kobject *kobj, struct attribute *attr, char *buf) \ 650 { \ 651 return sprintf(buf, "%u\n", limits->object); \ 652 } 653 654 static ssize_t show_turbo_pct(struct kobject *kobj, 655 struct attribute *attr, char *buf) 656 { 657 struct cpudata *cpu; 658 int total, no_turbo, turbo_pct; 659 uint32_t turbo_fp; 660 661 cpu = all_cpu_data[0]; 662 663 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; 664 no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1; 665 turbo_fp = div_fp(no_turbo, total); 666 turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100))); 667 return sprintf(buf, "%u\n", turbo_pct); 668 } 669 670 static ssize_t show_num_pstates(struct kobject *kobj, 671 struct attribute *attr, char *buf) 672 { 673 struct cpudata *cpu; 674 int total; 675 676 cpu = all_cpu_data[0]; 677 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; 678 return sprintf(buf, "%u\n", total); 679 } 680 681 static ssize_t show_no_turbo(struct kobject *kobj, 682 struct attribute *attr, char *buf) 683 { 684 ssize_t ret; 685 686 update_turbo_state(); 687 if (limits->turbo_disabled) 688 ret = sprintf(buf, "%u\n", limits->turbo_disabled); 689 else 690 ret = sprintf(buf, "%u\n", limits->no_turbo); 691 692 return ret; 693 } 694 695 static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, 696 const char *buf, size_t count) 697 { 698 unsigned int input; 699 int ret; 700 701 ret = sscanf(buf, "%u", &input); 702 if (ret != 1) 703 return -EINVAL; 704 705 update_turbo_state(); 706 if (limits->turbo_disabled) { 707 pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); 708 return -EPERM; 709 } 710 711 limits->no_turbo = clamp_t(int, input, 0, 1); 712 713 if (hwp_active) 714 intel_pstate_hwp_set_online_cpus(); 715 716 return count; 717 } 718 719 static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, 720 const char *buf, size_t count) 721 { 722 unsigned int input; 723 int ret; 724 725 ret = sscanf(buf, "%u", &input); 726 if (ret != 1) 727 return -EINVAL; 728 729 limits->max_sysfs_pct = clamp_t(int, input, 0 , 100); 730 limits->max_perf_pct = min(limits->max_policy_pct, 731 limits->max_sysfs_pct); 732 limits->max_perf_pct = max(limits->min_policy_pct, 733 limits->max_perf_pct); 734 limits->max_perf_pct = max(limits->min_perf_pct, 735 limits->max_perf_pct); 736 limits->max_perf = div_fp(limits->max_perf_pct, 100); 737 738 if (hwp_active) 739 intel_pstate_hwp_set_online_cpus(); 740 return count; 741 } 742 743 static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, 744 const char *buf, size_t count) 745 { 746 unsigned int input; 747 int ret; 748 749 ret = sscanf(buf, "%u", &input); 750 if (ret != 1) 751 return -EINVAL; 752 753 limits->min_sysfs_pct = clamp_t(int, input, 0 , 100); 754 limits->min_perf_pct = max(limits->min_policy_pct, 755 limits->min_sysfs_pct); 756 limits->min_perf_pct = min(limits->max_policy_pct, 757 limits->min_perf_pct); 758 limits->min_perf_pct = min(limits->max_perf_pct, 759 limits->min_perf_pct); 760 limits->min_perf = div_fp(limits->min_perf_pct, 100); 761 762 if (hwp_active) 763 intel_pstate_hwp_set_online_cpus(); 764 return count; 765 } 766 767 show_one(max_perf_pct, max_perf_pct); 768 show_one(min_perf_pct, min_perf_pct); 769 770 define_one_global_rw(no_turbo); 771 define_one_global_rw(max_perf_pct); 772 define_one_global_rw(min_perf_pct); 773 define_one_global_ro(turbo_pct); 774 define_one_global_ro(num_pstates); 775 776 static struct attribute *intel_pstate_attributes[] = { 777 &no_turbo.attr, 778 &max_perf_pct.attr, 779 &min_perf_pct.attr, 780 &turbo_pct.attr, 781 &num_pstates.attr, 782 NULL 783 }; 784 785 static struct attribute_group intel_pstate_attr_group = { 786 .attrs = intel_pstate_attributes, 787 }; 788 789 static void __init intel_pstate_sysfs_expose_params(void) 790 { 791 struct kobject *intel_pstate_kobject; 792 int rc; 793 794 intel_pstate_kobject = kobject_create_and_add("intel_pstate", 795 &cpu_subsys.dev_root->kobj); 796 BUG_ON(!intel_pstate_kobject); 797 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group); 798 BUG_ON(rc); 799 } 800 /************************** sysfs end ************************/ 801 802 static void intel_pstate_hwp_enable(struct cpudata *cpudata) 803 { 804 /* First disable HWP notification interrupt as we don't process them */ 805 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); 806 807 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); 808 } 809 810 static int atom_get_min_pstate(void) 811 { 812 u64 value; 813 814 rdmsrl(ATOM_RATIOS, value); 815 return (value >> 8) & 0x7F; 816 } 817 818 static int atom_get_max_pstate(void) 819 { 820 u64 value; 821 822 rdmsrl(ATOM_RATIOS, value); 823 return (value >> 16) & 0x7F; 824 } 825 826 static int atom_get_turbo_pstate(void) 827 { 828 u64 value; 829 830 rdmsrl(ATOM_TURBO_RATIOS, value); 831 return value & 0x7F; 832 } 833 834 static u64 atom_get_val(struct cpudata *cpudata, int pstate) 835 { 836 u64 val; 837 int32_t vid_fp; 838 u32 vid; 839 840 val = (u64)pstate << 8; 841 if (limits->no_turbo && !limits->turbo_disabled) 842 val |= (u64)1 << 32; 843 844 vid_fp = cpudata->vid.min + mul_fp( 845 int_tofp(pstate - cpudata->pstate.min_pstate), 846 cpudata->vid.ratio); 847 848 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); 849 vid = ceiling_fp(vid_fp); 850 851 if (pstate > cpudata->pstate.max_pstate) 852 vid = cpudata->vid.turbo; 853 854 return val | vid; 855 } 856 857 static int silvermont_get_scaling(void) 858 { 859 u64 value; 860 int i; 861 /* Defined in Table 35-6 from SDM (Sept 2015) */ 862 static int silvermont_freq_table[] = { 863 83300, 100000, 133300, 116700, 80000}; 864 865 rdmsrl(MSR_FSB_FREQ, value); 866 i = value & 0x7; 867 WARN_ON(i > 4); 868 869 return silvermont_freq_table[i]; 870 } 871 872 static int airmont_get_scaling(void) 873 { 874 u64 value; 875 int i; 876 /* Defined in Table 35-10 from SDM (Sept 2015) */ 877 static int airmont_freq_table[] = { 878 83300, 100000, 133300, 116700, 80000, 879 93300, 90000, 88900, 87500}; 880 881 rdmsrl(MSR_FSB_FREQ, value); 882 i = value & 0xF; 883 WARN_ON(i > 8); 884 885 return airmont_freq_table[i]; 886 } 887 888 static void atom_get_vid(struct cpudata *cpudata) 889 { 890 u64 value; 891 892 rdmsrl(ATOM_VIDS, value); 893 cpudata->vid.min = int_tofp((value >> 8) & 0x7f); 894 cpudata->vid.max = int_tofp((value >> 16) & 0x7f); 895 cpudata->vid.ratio = div_fp( 896 cpudata->vid.max - cpudata->vid.min, 897 int_tofp(cpudata->pstate.max_pstate - 898 cpudata->pstate.min_pstate)); 899 900 rdmsrl(ATOM_TURBO_VIDS, value); 901 cpudata->vid.turbo = value & 0x7f; 902 } 903 904 static int core_get_min_pstate(void) 905 { 906 u64 value; 907 908 rdmsrl(MSR_PLATFORM_INFO, value); 909 return (value >> 40) & 0xFF; 910 } 911 912 static int core_get_max_pstate_physical(void) 913 { 914 u64 value; 915 916 rdmsrl(MSR_PLATFORM_INFO, value); 917 return (value >> 8) & 0xFF; 918 } 919 920 static int core_get_max_pstate(void) 921 { 922 u64 tar; 923 u64 plat_info; 924 int max_pstate; 925 int err; 926 927 rdmsrl(MSR_PLATFORM_INFO, plat_info); 928 max_pstate = (plat_info >> 8) & 0xFF; 929 930 err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar); 931 if (!err) { 932 /* Do some sanity checking for safety */ 933 if (plat_info & 0x600000000) { 934 u64 tdp_ctrl; 935 u64 tdp_ratio; 936 int tdp_msr; 937 938 err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); 939 if (err) 940 goto skip_tar; 941 942 tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl; 943 err = rdmsrl_safe(tdp_msr, &tdp_ratio); 944 if (err) 945 goto skip_tar; 946 947 if (tdp_ratio - 1 == tar) { 948 max_pstate = tar; 949 pr_debug("max_pstate=TAC %x\n", max_pstate); 950 } else { 951 goto skip_tar; 952 } 953 } 954 } 955 956 skip_tar: 957 return max_pstate; 958 } 959 960 static int core_get_turbo_pstate(void) 961 { 962 u64 value; 963 int nont, ret; 964 965 rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value); 966 nont = core_get_max_pstate(); 967 ret = (value) & 255; 968 if (ret <= nont) 969 ret = nont; 970 return ret; 971 } 972 973 static inline int core_get_scaling(void) 974 { 975 return 100000; 976 } 977 978 static u64 core_get_val(struct cpudata *cpudata, int pstate) 979 { 980 u64 val; 981 982 val = (u64)pstate << 8; 983 if (limits->no_turbo && !limits->turbo_disabled) 984 val |= (u64)1 << 32; 985 986 return val; 987 } 988 989 static int knl_get_turbo_pstate(void) 990 { 991 u64 value; 992 int nont, ret; 993 994 rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value); 995 nont = core_get_max_pstate(); 996 ret = (((value) >> 8) & 0xFF); 997 if (ret <= nont) 998 ret = nont; 999 return ret; 1000 } 1001 1002 static struct cpu_defaults core_params = { 1003 .pid_policy = { 1004 .sample_rate_ms = 10, 1005 .deadband = 0, 1006 .setpoint = 97, 1007 .p_gain_pct = 20, 1008 .d_gain_pct = 0, 1009 .i_gain_pct = 0, 1010 }, 1011 .funcs = { 1012 .get_max = core_get_max_pstate, 1013 .get_max_physical = core_get_max_pstate_physical, 1014 .get_min = core_get_min_pstate, 1015 .get_turbo = core_get_turbo_pstate, 1016 .get_scaling = core_get_scaling, 1017 .get_val = core_get_val, 1018 .get_target_pstate = get_target_pstate_use_performance, 1019 }, 1020 }; 1021 1022 static struct cpu_defaults silvermont_params = { 1023 .pid_policy = { 1024 .sample_rate_ms = 10, 1025 .deadband = 0, 1026 .setpoint = 60, 1027 .p_gain_pct = 14, 1028 .d_gain_pct = 0, 1029 .i_gain_pct = 4, 1030 }, 1031 .funcs = { 1032 .get_max = atom_get_max_pstate, 1033 .get_max_physical = atom_get_max_pstate, 1034 .get_min = atom_get_min_pstate, 1035 .get_turbo = atom_get_turbo_pstate, 1036 .get_val = atom_get_val, 1037 .get_scaling = silvermont_get_scaling, 1038 .get_vid = atom_get_vid, 1039 .get_target_pstate = get_target_pstate_use_cpu_load, 1040 }, 1041 }; 1042 1043 static struct cpu_defaults airmont_params = { 1044 .pid_policy = { 1045 .sample_rate_ms = 10, 1046 .deadband = 0, 1047 .setpoint = 60, 1048 .p_gain_pct = 14, 1049 .d_gain_pct = 0, 1050 .i_gain_pct = 4, 1051 }, 1052 .funcs = { 1053 .get_max = atom_get_max_pstate, 1054 .get_max_physical = atom_get_max_pstate, 1055 .get_min = atom_get_min_pstate, 1056 .get_turbo = atom_get_turbo_pstate, 1057 .get_val = atom_get_val, 1058 .get_scaling = airmont_get_scaling, 1059 .get_vid = atom_get_vid, 1060 .get_target_pstate = get_target_pstate_use_cpu_load, 1061 }, 1062 }; 1063 1064 static struct cpu_defaults knl_params = { 1065 .pid_policy = { 1066 .sample_rate_ms = 10, 1067 .deadband = 0, 1068 .setpoint = 97, 1069 .p_gain_pct = 20, 1070 .d_gain_pct = 0, 1071 .i_gain_pct = 0, 1072 }, 1073 .funcs = { 1074 .get_max = core_get_max_pstate, 1075 .get_max_physical = core_get_max_pstate_physical, 1076 .get_min = core_get_min_pstate, 1077 .get_turbo = knl_get_turbo_pstate, 1078 .get_scaling = core_get_scaling, 1079 .get_val = core_get_val, 1080 .get_target_pstate = get_target_pstate_use_performance, 1081 }, 1082 }; 1083 1084 static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) 1085 { 1086 int max_perf = cpu->pstate.turbo_pstate; 1087 int max_perf_adj; 1088 int min_perf; 1089 1090 if (limits->no_turbo || limits->turbo_disabled) 1091 max_perf = cpu->pstate.max_pstate; 1092 1093 /* 1094 * performance can be limited by user through sysfs, by cpufreq 1095 * policy, or by cpu specific default values determined through 1096 * experimentation. 1097 */ 1098 max_perf_adj = fp_toint(max_perf * limits->max_perf); 1099 *max = clamp_t(int, max_perf_adj, 1100 cpu->pstate.min_pstate, cpu->pstate.turbo_pstate); 1101 1102 min_perf = fp_toint(max_perf * limits->min_perf); 1103 *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf); 1104 } 1105 1106 static inline void intel_pstate_record_pstate(struct cpudata *cpu, int pstate) 1107 { 1108 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); 1109 cpu->pstate.current_pstate = pstate; 1110 } 1111 1112 static void intel_pstate_set_min_pstate(struct cpudata *cpu) 1113 { 1114 int pstate = cpu->pstate.min_pstate; 1115 1116 intel_pstate_record_pstate(cpu, pstate); 1117 /* 1118 * Generally, there is no guarantee that this code will always run on 1119 * the CPU being updated, so force the register update to run on the 1120 * right CPU. 1121 */ 1122 wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, 1123 pstate_funcs.get_val(cpu, pstate)); 1124 } 1125 1126 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) 1127 { 1128 cpu->pstate.min_pstate = pstate_funcs.get_min(); 1129 cpu->pstate.max_pstate = pstate_funcs.get_max(); 1130 cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical(); 1131 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); 1132 cpu->pstate.scaling = pstate_funcs.get_scaling(); 1133 1134 if (pstate_funcs.get_vid) 1135 pstate_funcs.get_vid(cpu); 1136 1137 intel_pstate_set_min_pstate(cpu); 1138 } 1139 1140 static inline void intel_pstate_calc_busy(struct cpudata *cpu) 1141 { 1142 struct sample *sample = &cpu->sample; 1143 int64_t core_pct; 1144 1145 core_pct = sample->aperf * int_tofp(100); 1146 core_pct = div64_u64(core_pct, sample->mperf); 1147 1148 sample->core_pct_busy = (int32_t)core_pct; 1149 } 1150 1151 static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) 1152 { 1153 u64 aperf, mperf; 1154 unsigned long flags; 1155 u64 tsc; 1156 1157 local_irq_save(flags); 1158 rdmsrl(MSR_IA32_APERF, aperf); 1159 rdmsrl(MSR_IA32_MPERF, mperf); 1160 tsc = rdtsc(); 1161 if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) { 1162 local_irq_restore(flags); 1163 return false; 1164 } 1165 local_irq_restore(flags); 1166 1167 cpu->last_sample_time = cpu->sample.time; 1168 cpu->sample.time = time; 1169 cpu->sample.aperf = aperf; 1170 cpu->sample.mperf = mperf; 1171 cpu->sample.tsc = tsc; 1172 cpu->sample.aperf -= cpu->prev_aperf; 1173 cpu->sample.mperf -= cpu->prev_mperf; 1174 cpu->sample.tsc -= cpu->prev_tsc; 1175 1176 cpu->prev_aperf = aperf; 1177 cpu->prev_mperf = mperf; 1178 cpu->prev_tsc = tsc; 1179 /* 1180 * First time this function is invoked in a given cycle, all of the 1181 * previous sample data fields are equal to zero or stale and they must 1182 * be populated with meaningful numbers for things to work, so assume 1183 * that sample.time will always be reset before setting the utilization 1184 * update hook and make the caller skip the sample then. 1185 */ 1186 return !!cpu->last_sample_time; 1187 } 1188 1189 static inline int32_t get_avg_frequency(struct cpudata *cpu) 1190 { 1191 return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf * 1192 cpu->pstate.scaling, cpu->sample.mperf); 1193 } 1194 1195 static inline int32_t get_avg_pstate(struct cpudata *cpu) 1196 { 1197 return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf, 1198 cpu->sample.mperf); 1199 } 1200 1201 static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) 1202 { 1203 struct sample *sample = &cpu->sample; 1204 u64 cummulative_iowait, delta_iowait_us; 1205 u64 delta_iowait_mperf; 1206 u64 mperf, now; 1207 int32_t cpu_load; 1208 1209 cummulative_iowait = get_cpu_iowait_time_us(cpu->cpu, &now); 1210 1211 /* 1212 * Convert iowait time into number of IO cycles spent at max_freq. 1213 * IO is considered as busy only for the cpu_load algorithm. For 1214 * performance this is not needed since we always try to reach the 1215 * maximum P-State, so we are already boosting the IOs. 1216 */ 1217 delta_iowait_us = cummulative_iowait - cpu->prev_cummulative_iowait; 1218 delta_iowait_mperf = div64_u64(delta_iowait_us * cpu->pstate.scaling * 1219 cpu->pstate.max_pstate, MSEC_PER_SEC); 1220 1221 mperf = cpu->sample.mperf + delta_iowait_mperf; 1222 cpu->prev_cummulative_iowait = cummulative_iowait; 1223 1224 /* 1225 * The load can be estimated as the ratio of the mperf counter 1226 * running at a constant frequency during active periods 1227 * (C0) and the time stamp counter running at the same frequency 1228 * also during C-states. 1229 */ 1230 cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc); 1231 cpu->sample.busy_scaled = cpu_load; 1232 1233 return get_avg_pstate(cpu) - pid_calc(&cpu->pid, cpu_load); 1234 } 1235 1236 static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) 1237 { 1238 int32_t core_busy, max_pstate, current_pstate, sample_ratio; 1239 u64 duration_ns; 1240 1241 intel_pstate_calc_busy(cpu); 1242 1243 /* 1244 * core_busy is the ratio of actual performance to max 1245 * max_pstate is the max non turbo pstate available 1246 * current_pstate was the pstate that was requested during 1247 * the last sample period. 1248 * 1249 * We normalize core_busy, which was our actual percent 1250 * performance to what we requested during the last sample 1251 * period. The result will be a percentage of busy at a 1252 * specified pstate. 1253 */ 1254 core_busy = cpu->sample.core_pct_busy; 1255 max_pstate = cpu->pstate.max_pstate_physical; 1256 current_pstate = cpu->pstate.current_pstate; 1257 core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); 1258 1259 /* 1260 * Since our utilization update callback will not run unless we are 1261 * in C0, check if the actual elapsed time is significantly greater (3x) 1262 * than our sample interval. If it is, then we were idle for a long 1263 * enough period of time to adjust our busyness. 1264 */ 1265 duration_ns = cpu->sample.time - cpu->last_sample_time; 1266 if ((s64)duration_ns > pid_params.sample_rate_ns * 3) { 1267 sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns); 1268 core_busy = mul_fp(core_busy, sample_ratio); 1269 } else { 1270 sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc); 1271 if (sample_ratio < int_tofp(1)) 1272 core_busy = 0; 1273 } 1274 1275 cpu->sample.busy_scaled = core_busy; 1276 return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy); 1277 } 1278 1279 static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) 1280 { 1281 int max_perf, min_perf; 1282 1283 update_turbo_state(); 1284 1285 intel_pstate_get_min_max(cpu, &min_perf, &max_perf); 1286 pstate = clamp_t(int, pstate, min_perf, max_perf); 1287 if (pstate == cpu->pstate.current_pstate) 1288 return; 1289 1290 intel_pstate_record_pstate(cpu, pstate); 1291 wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); 1292 } 1293 1294 static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) 1295 { 1296 int from, target_pstate; 1297 struct sample *sample; 1298 1299 from = cpu->pstate.current_pstate; 1300 1301 target_pstate = pstate_funcs.get_target_pstate(cpu); 1302 1303 intel_pstate_update_pstate(cpu, target_pstate); 1304 1305 sample = &cpu->sample; 1306 trace_pstate_sample(fp_toint(sample->core_pct_busy), 1307 fp_toint(sample->busy_scaled), 1308 from, 1309 cpu->pstate.current_pstate, 1310 sample->mperf, 1311 sample->aperf, 1312 sample->tsc, 1313 get_avg_frequency(cpu)); 1314 } 1315 1316 static void intel_pstate_update_util(struct update_util_data *data, u64 time, 1317 unsigned long util, unsigned long max) 1318 { 1319 struct cpudata *cpu = container_of(data, struct cpudata, update_util); 1320 u64 delta_ns = time - cpu->sample.time; 1321 1322 if ((s64)delta_ns >= pid_params.sample_rate_ns) { 1323 bool sample_taken = intel_pstate_sample(cpu, time); 1324 1325 if (sample_taken && !hwp_active) 1326 intel_pstate_adjust_busy_pstate(cpu); 1327 } 1328 } 1329 1330 #define ICPU(model, policy) \ 1331 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\ 1332 (unsigned long)&policy } 1333 1334 static const struct x86_cpu_id intel_pstate_cpu_ids[] = { 1335 ICPU(0x2a, core_params), 1336 ICPU(0x2d, core_params), 1337 ICPU(0x37, silvermont_params), 1338 ICPU(0x3a, core_params), 1339 ICPU(0x3c, core_params), 1340 ICPU(0x3d, core_params), 1341 ICPU(0x3e, core_params), 1342 ICPU(0x3f, core_params), 1343 ICPU(0x45, core_params), 1344 ICPU(0x46, core_params), 1345 ICPU(0x47, core_params), 1346 ICPU(0x4c, airmont_params), 1347 ICPU(0x4e, core_params), 1348 ICPU(0x4f, core_params), 1349 ICPU(0x5e, core_params), 1350 ICPU(0x56, core_params), 1351 ICPU(0x57, knl_params), 1352 {} 1353 }; 1354 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); 1355 1356 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = { 1357 ICPU(0x56, core_params), 1358 {} 1359 }; 1360 1361 static int intel_pstate_init_cpu(unsigned int cpunum) 1362 { 1363 struct cpudata *cpu; 1364 1365 if (!all_cpu_data[cpunum]) 1366 all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), 1367 GFP_KERNEL); 1368 if (!all_cpu_data[cpunum]) 1369 return -ENOMEM; 1370 1371 cpu = all_cpu_data[cpunum]; 1372 1373 cpu->cpu = cpunum; 1374 1375 if (hwp_active) { 1376 intel_pstate_hwp_enable(cpu); 1377 pid_params.sample_rate_ms = 50; 1378 pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC; 1379 } 1380 1381 intel_pstate_get_cpu_pstates(cpu); 1382 1383 intel_pstate_busy_pid_reset(cpu); 1384 1385 pr_debug("controlling: cpu %d\n", cpunum); 1386 1387 return 0; 1388 } 1389 1390 static unsigned int intel_pstate_get(unsigned int cpu_num) 1391 { 1392 struct sample *sample; 1393 struct cpudata *cpu; 1394 1395 cpu = all_cpu_data[cpu_num]; 1396 if (!cpu) 1397 return 0; 1398 sample = &cpu->sample; 1399 return get_avg_frequency(cpu); 1400 } 1401 1402 static void intel_pstate_set_update_util_hook(unsigned int cpu_num) 1403 { 1404 struct cpudata *cpu = all_cpu_data[cpu_num]; 1405 1406 /* Prevent intel_pstate_update_util() from using stale data. */ 1407 cpu->sample.time = 0; 1408 cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, 1409 intel_pstate_update_util); 1410 } 1411 1412 static void intel_pstate_clear_update_util_hook(unsigned int cpu) 1413 { 1414 cpufreq_remove_update_util_hook(cpu); 1415 synchronize_sched(); 1416 } 1417 1418 static void intel_pstate_set_performance_limits(struct perf_limits *limits) 1419 { 1420 limits->no_turbo = 0; 1421 limits->turbo_disabled = 0; 1422 limits->max_perf_pct = 100; 1423 limits->max_perf = int_tofp(1); 1424 limits->min_perf_pct = 100; 1425 limits->min_perf = int_tofp(1); 1426 limits->max_policy_pct = 100; 1427 limits->max_sysfs_pct = 100; 1428 limits->min_policy_pct = 0; 1429 limits->min_sysfs_pct = 0; 1430 } 1431 1432 static int intel_pstate_set_policy(struct cpufreq_policy *policy) 1433 { 1434 struct cpudata *cpu; 1435 1436 if (!policy->cpuinfo.max_freq) 1437 return -ENODEV; 1438 1439 intel_pstate_clear_update_util_hook(policy->cpu); 1440 1441 cpu = all_cpu_data[0]; 1442 if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate) { 1443 if (policy->max < policy->cpuinfo.max_freq && 1444 policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) { 1445 pr_debug("policy->max > max non turbo frequency\n"); 1446 policy->max = policy->cpuinfo.max_freq; 1447 } 1448 } 1449 1450 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { 1451 limits = &performance_limits; 1452 if (policy->max >= policy->cpuinfo.max_freq) { 1453 pr_debug("set performance\n"); 1454 intel_pstate_set_performance_limits(limits); 1455 goto out; 1456 } 1457 } else { 1458 pr_debug("set powersave\n"); 1459 limits = &powersave_limits; 1460 } 1461 1462 limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 1463 limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100); 1464 limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100, 1465 policy->cpuinfo.max_freq); 1466 limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0 , 100); 1467 1468 /* Normalize user input to [min_policy_pct, max_policy_pct] */ 1469 limits->min_perf_pct = max(limits->min_policy_pct, 1470 limits->min_sysfs_pct); 1471 limits->min_perf_pct = min(limits->max_policy_pct, 1472 limits->min_perf_pct); 1473 limits->max_perf_pct = min(limits->max_policy_pct, 1474 limits->max_sysfs_pct); 1475 limits->max_perf_pct = max(limits->min_policy_pct, 1476 limits->max_perf_pct); 1477 limits->max_perf = round_up(limits->max_perf, FRAC_BITS); 1478 1479 /* Make sure min_perf_pct <= max_perf_pct */ 1480 limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); 1481 1482 limits->min_perf = div_fp(limits->min_perf_pct, 100); 1483 limits->max_perf = div_fp(limits->max_perf_pct, 100); 1484 1485 out: 1486 intel_pstate_set_update_util_hook(policy->cpu); 1487 1488 if (hwp_active) 1489 intel_pstate_hwp_set(policy->cpus); 1490 1491 return 0; 1492 } 1493 1494 static int intel_pstate_verify_policy(struct cpufreq_policy *policy) 1495 { 1496 cpufreq_verify_within_cpu_limits(policy); 1497 1498 if (policy->policy != CPUFREQ_POLICY_POWERSAVE && 1499 policy->policy != CPUFREQ_POLICY_PERFORMANCE) 1500 return -EINVAL; 1501 1502 return 0; 1503 } 1504 1505 static void intel_pstate_stop_cpu(struct cpufreq_policy *policy) 1506 { 1507 int cpu_num = policy->cpu; 1508 struct cpudata *cpu = all_cpu_data[cpu_num]; 1509 1510 pr_debug("CPU %d exiting\n", cpu_num); 1511 1512 intel_pstate_clear_update_util_hook(cpu_num); 1513 1514 if (hwp_active) 1515 return; 1516 1517 intel_pstate_set_min_pstate(cpu); 1518 } 1519 1520 static int intel_pstate_cpu_init(struct cpufreq_policy *policy) 1521 { 1522 struct cpudata *cpu; 1523 int rc; 1524 1525 rc = intel_pstate_init_cpu(policy->cpu); 1526 if (rc) 1527 return rc; 1528 1529 cpu = all_cpu_data[policy->cpu]; 1530 1531 if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100) 1532 policy->policy = CPUFREQ_POLICY_PERFORMANCE; 1533 else 1534 policy->policy = CPUFREQ_POLICY_POWERSAVE; 1535 1536 policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling; 1537 policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling; 1538 1539 /* cpuinfo and default policy values */ 1540 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; 1541 policy->cpuinfo.max_freq = 1542 cpu->pstate.turbo_pstate * cpu->pstate.scaling; 1543 intel_pstate_init_acpi_perf_limits(policy); 1544 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; 1545 cpumask_set_cpu(policy->cpu, policy->cpus); 1546 1547 return 0; 1548 } 1549 1550 static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) 1551 { 1552 intel_pstate_exit_perf_limits(policy); 1553 1554 return 0; 1555 } 1556 1557 static struct cpufreq_driver intel_pstate_driver = { 1558 .flags = CPUFREQ_CONST_LOOPS, 1559 .verify = intel_pstate_verify_policy, 1560 .setpolicy = intel_pstate_set_policy, 1561 .get = intel_pstate_get, 1562 .init = intel_pstate_cpu_init, 1563 .exit = intel_pstate_cpu_exit, 1564 .stop_cpu = intel_pstate_stop_cpu, 1565 .name = "intel_pstate", 1566 }; 1567 1568 static int __initdata no_load; 1569 static int __initdata no_hwp; 1570 static int __initdata hwp_only; 1571 static unsigned int force_load; 1572 1573 static int intel_pstate_msrs_not_valid(void) 1574 { 1575 if (!pstate_funcs.get_max() || 1576 !pstate_funcs.get_min() || 1577 !pstate_funcs.get_turbo()) 1578 return -ENODEV; 1579 1580 return 0; 1581 } 1582 1583 static void copy_pid_params(struct pstate_adjust_policy *policy) 1584 { 1585 pid_params.sample_rate_ms = policy->sample_rate_ms; 1586 pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC; 1587 pid_params.p_gain_pct = policy->p_gain_pct; 1588 pid_params.i_gain_pct = policy->i_gain_pct; 1589 pid_params.d_gain_pct = policy->d_gain_pct; 1590 pid_params.deadband = policy->deadband; 1591 pid_params.setpoint = policy->setpoint; 1592 } 1593 1594 static void copy_cpu_funcs(struct pstate_funcs *funcs) 1595 { 1596 pstate_funcs.get_max = funcs->get_max; 1597 pstate_funcs.get_max_physical = funcs->get_max_physical; 1598 pstate_funcs.get_min = funcs->get_min; 1599 pstate_funcs.get_turbo = funcs->get_turbo; 1600 pstate_funcs.get_scaling = funcs->get_scaling; 1601 pstate_funcs.get_val = funcs->get_val; 1602 pstate_funcs.get_vid = funcs->get_vid; 1603 pstate_funcs.get_target_pstate = funcs->get_target_pstate; 1604 1605 } 1606 1607 #ifdef CONFIG_ACPI 1608 1609 static bool intel_pstate_no_acpi_pss(void) 1610 { 1611 int i; 1612 1613 for_each_possible_cpu(i) { 1614 acpi_status status; 1615 union acpi_object *pss; 1616 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 1617 struct acpi_processor *pr = per_cpu(processors, i); 1618 1619 if (!pr) 1620 continue; 1621 1622 status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); 1623 if (ACPI_FAILURE(status)) 1624 continue; 1625 1626 pss = buffer.pointer; 1627 if (pss && pss->type == ACPI_TYPE_PACKAGE) { 1628 kfree(pss); 1629 return false; 1630 } 1631 1632 kfree(pss); 1633 } 1634 1635 return true; 1636 } 1637 1638 static bool intel_pstate_has_acpi_ppc(void) 1639 { 1640 int i; 1641 1642 for_each_possible_cpu(i) { 1643 struct acpi_processor *pr = per_cpu(processors, i); 1644 1645 if (!pr) 1646 continue; 1647 if (acpi_has_method(pr->handle, "_PPC")) 1648 return true; 1649 } 1650 return false; 1651 } 1652 1653 enum { 1654 PSS, 1655 PPC, 1656 }; 1657 1658 struct hw_vendor_info { 1659 u16 valid; 1660 char oem_id[ACPI_OEM_ID_SIZE]; 1661 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; 1662 int oem_pwr_table; 1663 }; 1664 1665 /* Hardware vendor-specific info that has its own power management modes */ 1666 static struct hw_vendor_info vendor_info[] = { 1667 {1, "HP ", "ProLiant", PSS}, 1668 {1, "ORACLE", "X4-2 ", PPC}, 1669 {1, "ORACLE", "X4-2L ", PPC}, 1670 {1, "ORACLE", "X4-2B ", PPC}, 1671 {1, "ORACLE", "X3-2 ", PPC}, 1672 {1, "ORACLE", "X3-2L ", PPC}, 1673 {1, "ORACLE", "X3-2B ", PPC}, 1674 {1, "ORACLE", "X4470M2 ", PPC}, 1675 {1, "ORACLE", "X4270M3 ", PPC}, 1676 {1, "ORACLE", "X4270M2 ", PPC}, 1677 {1, "ORACLE", "X4170M2 ", PPC}, 1678 {1, "ORACLE", "X4170 M3", PPC}, 1679 {1, "ORACLE", "X4275 M3", PPC}, 1680 {1, "ORACLE", "X6-2 ", PPC}, 1681 {1, "ORACLE", "Sudbury ", PPC}, 1682 {0, "", ""}, 1683 }; 1684 1685 static bool intel_pstate_platform_pwr_mgmt_exists(void) 1686 { 1687 struct acpi_table_header hdr; 1688 struct hw_vendor_info *v_info; 1689 const struct x86_cpu_id *id; 1690 u64 misc_pwr; 1691 1692 id = x86_match_cpu(intel_pstate_cpu_oob_ids); 1693 if (id) { 1694 rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); 1695 if ( misc_pwr & (1 << 8)) 1696 return true; 1697 } 1698 1699 if (acpi_disabled || 1700 ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr))) 1701 return false; 1702 1703 for (v_info = vendor_info; v_info->valid; v_info++) { 1704 if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) && 1705 !strncmp(hdr.oem_table_id, v_info->oem_table_id, 1706 ACPI_OEM_TABLE_ID_SIZE)) 1707 switch (v_info->oem_pwr_table) { 1708 case PSS: 1709 return intel_pstate_no_acpi_pss(); 1710 case PPC: 1711 return intel_pstate_has_acpi_ppc() && 1712 (!force_load); 1713 } 1714 } 1715 1716 return false; 1717 } 1718 #else /* CONFIG_ACPI not enabled */ 1719 static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; } 1720 static inline bool intel_pstate_has_acpi_ppc(void) { return false; } 1721 #endif /* CONFIG_ACPI */ 1722 1723 static const struct x86_cpu_id hwp_support_ids[] __initconst = { 1724 { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP }, 1725 {} 1726 }; 1727 1728 static int __init intel_pstate_init(void) 1729 { 1730 int cpu, rc = 0; 1731 const struct x86_cpu_id *id; 1732 struct cpu_defaults *cpu_def; 1733 1734 if (no_load) 1735 return -ENODEV; 1736 1737 if (x86_match_cpu(hwp_support_ids) && !no_hwp) { 1738 copy_cpu_funcs(&core_params.funcs); 1739 hwp_active++; 1740 goto hwp_cpu_matched; 1741 } 1742 1743 id = x86_match_cpu(intel_pstate_cpu_ids); 1744 if (!id) 1745 return -ENODEV; 1746 1747 cpu_def = (struct cpu_defaults *)id->driver_data; 1748 1749 copy_pid_params(&cpu_def->pid_policy); 1750 copy_cpu_funcs(&cpu_def->funcs); 1751 1752 if (intel_pstate_msrs_not_valid()) 1753 return -ENODEV; 1754 1755 hwp_cpu_matched: 1756 /* 1757 * The Intel pstate driver will be ignored if the platform 1758 * firmware has its own power management modes. 1759 */ 1760 if (intel_pstate_platform_pwr_mgmt_exists()) 1761 return -ENODEV; 1762 1763 pr_info("Intel P-state driver initializing\n"); 1764 1765 all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus()); 1766 if (!all_cpu_data) 1767 return -ENOMEM; 1768 1769 if (!hwp_active && hwp_only) 1770 goto out; 1771 1772 rc = cpufreq_register_driver(&intel_pstate_driver); 1773 if (rc) 1774 goto out; 1775 1776 intel_pstate_debug_expose_params(); 1777 intel_pstate_sysfs_expose_params(); 1778 1779 if (hwp_active) 1780 pr_info("HWP enabled\n"); 1781 1782 return rc; 1783 out: 1784 get_online_cpus(); 1785 for_each_online_cpu(cpu) { 1786 if (all_cpu_data[cpu]) { 1787 intel_pstate_clear_update_util_hook(cpu); 1788 kfree(all_cpu_data[cpu]); 1789 } 1790 } 1791 1792 put_online_cpus(); 1793 vfree(all_cpu_data); 1794 return -ENODEV; 1795 } 1796 device_initcall(intel_pstate_init); 1797 1798 static int __init intel_pstate_setup(char *str) 1799 { 1800 if (!str) 1801 return -EINVAL; 1802 1803 if (!strcmp(str, "disable")) 1804 no_load = 1; 1805 if (!strcmp(str, "no_hwp")) { 1806 pr_info("HWP disabled\n"); 1807 no_hwp = 1; 1808 } 1809 if (!strcmp(str, "force")) 1810 force_load = 1; 1811 if (!strcmp(str, "hwp_only")) 1812 hwp_only = 1; 1813 1814 #ifdef CONFIG_ACPI 1815 if (!strcmp(str, "support_acpi_ppc")) 1816 acpi_ppc = true; 1817 #endif 1818 1819 return 0; 1820 } 1821 early_param("intel_pstate", intel_pstate_setup); 1822 1823 MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>"); 1824 MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors"); 1825 MODULE_LICENSE("GPL"); 1826