/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>

#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#endif

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

static inline u64 mul_ext_fp(u64 x, u64 y)
{
	return (x * y) >> EXT_FRAC_BITS;
}

static inline u64 div_ext_fp(u64 x, u64 y)
{
	return div64_u64(x << EXT_FRAC_BITS, y);
}
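/*
 * Worked example of the fixed-point helpers above (illustrative only,
 * not part of the driver logic): with FRAC_BITS == 8, values carry 8
 * fractional bits, so int_tofp(3) == 3 << 8 == 768 and fp_toint(768)
 * == 3. A product such as mul_fp(int_tofp(3), int_tofp(2)) evaluates
 * to (768 * 512) >> 8 == 1536 == int_tofp(6). The "ext" variants use
 * EXT_FRAC_BITS == 14 fractional bits for the extra precision needed
 * by the APERF/MPERF ratio, trading away integer headroom.
 */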
/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate next
 *			P state. This can be different from core_avg_perf
 *			to account for cpu idle period
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @freq:		Effective frequency calculated from APERF/MPERF
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing next P State.
 */
struct sample {
	int32_t core_avg_perf;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	int freq;
	u64 time;
};

/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical: This is physical Max P state for a processor
 *			This can be higher than the max_pstate which can
 *			be limited by platform thermal design power limits
 * @scaling:		Scaling factor to convert frequency to cpufreq
 *			frequency units
 * @turbo_pstate:	Max Turbo P state possible for this platform
 *
 * Stores the per cpu model P state limits and current P state.
 */
struct pstate_data {
	int current_pstate;
	int min_pstate;
	int max_pstate;
	int max_pstate_physical;
	int scaling;
	int turbo_pstate;
};

/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P State.
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - Min P State)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
 * This data is used on Atom platforms, where in addition to the target P
 * state, the voltage data needs to be specified to select the next P state.
 */
struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

/**
 * struct _pid -	Stores PID data
 * @setpoint:		Target set point for busyness or performance
 * @integral:		Storage for accumulated error values
 * @p_gain:		PID proportional gain
 * @i_gain:		PID integral gain
 * @d_gain:		PID derivative gain
 * @deadband:		PID deadband
 * @last_err:		Last error storage for the derivative part of the PID
 *			calculation
 *
 * Stores PID coefficients and last error for PID controller.
 */
struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};
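/*
 * pid_calc() further below implements a standard discrete PID law over
 * these fields (a sketch): with error e[k] = setpoint - busy[k],
 *
 *	output[k] = p_gain * e[k] + i_gain * sum(e[0..k])
 *		    + d_gain * (e[k] - e[k-1])
 *
 * where the running sum is held in @integral (clamped in pid_calc())
 * and e[k-1] in @last_err; errors within @deadband yield no output.
 */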
/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @pid:		Stores PID parameters for this CPU
 * @last_sample_time:	Last Sample time
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: Cumulative IO wait time as of the last sample,
 *			used to compute the per-sample iowait difference
 * @sample:		Storage for storing last Sample data
 * @acpi_perf_data:	Stores ACPI perf information read from _PSS
 * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
 *
 * This structure stores per CPU instance data for all CPUs.
 */
struct cpudata {
	int cpu;

	struct update_util_data update_util;
	bool update_util_set;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	u64 last_sample_time;
	u64 prev_aperf;
	u64 prev_mperf;
	u64 prev_tsc;
	u64 prev_cummulative_iowait;
	struct sample sample;
#ifdef CONFIG_ACPI
	struct acpi_processor_performance acpi_perf_data;
	bool valid_pss_table;
#endif
};

static struct cpudata **all_cpu_data;

/**
 * struct pstate_adjust_policy - Stores static PID configuration data
 * @sample_rate_ms:	PID calculation sample rate in ms
 * @sample_rate_ns:	Sample rate in ns, derived from @sample_rate_ms
 * @deadband:		PID deadband
 * @setpoint:		PID Setpoint
 * @p_gain_pct:		PID proportional gain
 * @i_gain_pct:		PID integral gain
 * @d_gain_pct:		PID derivative gain
 *
 * Stores per CPU model static PID configuration data.
 */
struct pstate_adjust_policy {
	int sample_rate_ms;
	s64 sample_rate_ns;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};
/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non turbo effective P state
 * @get_max_physical:	Callback to get maximum non turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 * @get_target_pstate:	Callback to calculate the next P state to use
 *
 * Core and Atom CPU models have different ways to get P state limits. This
 * structure is used to store those callbacks.
 */
struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	u64 (*get_val)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
	int32_t (*get_target_pstate)(struct cpudata *);
};

/**
 * struct cpu_defaults - Per CPU model default config data
 * @pid_policy:	PID config data
 * @funcs:	Callback function data
 */
struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;

#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif
/**
 * struct perf_limits - Store user and policy limits
 * @no_turbo:		User requested turbo state from intel_pstate sysfs
 * @turbo_disabled:	Platform turbo status either from msr
 *			MSR_IA32_MISC_ENABLE or when maximum available pstate
 *			matches the maximum turbo pstate
 * @max_perf_pct:	Effective maximum performance limit in percentage, the
 *			minimum of the limit enforced by the cpufreq policy
 *			and the user limit set via the intel_pstate sysfs
 *			interface
 * @min_perf_pct:	Effective minimum performance limit in percentage, the
 *			maximum of the limit enforced by the cpufreq policy
 *			and the user limit set via the intel_pstate sysfs
 *			interface
 * @max_perf:		@max_perf_pct as a scaled fixed-point fraction between
 *			0 and int_tofp(1). This value is used to limit the
 *			max pstate
 * @min_perf:		@min_perf_pct as a scaled fixed-point fraction between
 *			0 and int_tofp(1). This value is used to limit the
 *			min pstate
 * @max_policy_pct:	The maximum performance in percentage enforced by
 *			the cpufreq setpolicy interface
 * @max_sysfs_pct:	The maximum performance in percentage enforced by
 *			the intel pstate sysfs interface
 * @min_policy_pct:	The minimum performance in percentage enforced by
 *			the cpufreq setpolicy interface
 * @min_sysfs_pct:	The minimum performance in percentage enforced by
 *			the intel pstate sysfs interface
 *
 * Storage for user and policy defined limits.
 */
struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};

static struct perf_limits performance_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 100,
	.min_perf = int_tofp(1),
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

static struct perf_limits powersave_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif

#ifdef CONFIG_ACPI

static bool intel_pstate_get_ppc_enable_status(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
	    acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
		return true;

	return acpi_ppc;
}
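/*
 * Note (informative): PM_ENTERPRISE_SERVER and PM_PERFORMANCE_SERVER are
 * values of the FADT Preferred_PM_Profile field defined by the ACPI
 * specification. On such server profiles _PPC limits are honored even
 * without the "intel_pstate=support_acpi_ppc" command-line option that
 * sets acpi_ppc (see intel_pstate_setup() at the end of this file).
 */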
static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int ret;
	int i;

	if (hwp_active)
		return;

	if (!intel_pstate_get_ppc_enable_status())
		return;

	cpu = all_cpu_data[policy->cpu];

	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
						  policy->cpu);
	if (ret)
		return;

	/*
	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
	 * guarantee that the states returned by it map to the states in our
	 * list directly.
	 */
	if (cpu->acpi_perf_data.control_register.space_id !=
						ACPI_ADR_SPACE_FIXED_HARDWARE)
		goto err;

	/*
	 * If there is only one entry in _PSS, simply ignore _PSS and continue
	 * as usual without taking _PSS into account.
	 */
	if (cpu->acpi_perf_data.state_count < 2)
		goto err;

	pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
			 (u32) cpu->acpi_perf_data.states[i].power,
			 (u32) cpu->acpi_perf_data.states[i].control);
	}

	/*
	 * The _PSS table doesn't contain the whole turbo frequency range;
	 * it just lists +1 MHz above the max non turbo frequency, with a
	 * control value corresponding to the max turbo ratio. But when
	 * cpufreq set_policy is later called with that max frequency, it
	 * causes reduced performance, since this driver uses the real max
	 * turbo frequency as the max frequency. So correct this entry in
	 * the _PSS table to the real max turbo frequency based on the
	 * turbo state. The value also has to be converted to MHz, as _PSS
	 * frequencies are in MHz while cpuinfo.max_freq is in kHz.
	 */
	if (!limits->turbo_disabled)
		cpu->acpi_perf_data.states[0].core_frequency =
					policy->cpuinfo.max_freq / 1000;
	cpu->valid_pss_table = true;
	pr_debug("_PPC limits will be enforced\n");

	return;

err:
	cpu->valid_pss_table = false;
	acpi_processor_unregister_performance(policy->cpu);
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[policy->cpu];
	if (!cpu->valid_pss_table)
		return;

	acpi_processor_unregister_performance(policy->cpu);
}

#else
static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}
#endif

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = int_tofp(setpoint);
	pid->deadband = int_tofp(deadband);
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(percent, 100);
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(percent, 100);
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(percent, 100);
}
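/*
 * For example (illustrative): pid_p_gain_set(pid, 20) stores
 * div_fp(20, 100), i.e. 0.2 in fixed point (raw value 51), so the
 * proportional term computed in pid_calc() below contributes roughly
 * one fifth of the current error to the controller output.
 */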
static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = pid->setpoint - busy;

	if (abs(fp_error) <= pid->deadband)
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30.  This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	/* Round to nearest before truncating the fractional bits. */
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}

static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits->turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static void intel_pstate_hwp_set(const struct cpumask *cpumask)
{
	int min, hw_min, max, hw_max, cpu, range, adj_range;
	u64 value, cap;

	rdmsrl(MSR_HWP_CAPABILITIES, cap);
	hw_min = HWP_LOWEST_PERF(cap);
	hw_max = HWP_HIGHEST_PERF(cap);
	range = hw_max - hw_min;

	for_each_cpu(cpu, cpumask) {
		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		adj_range = limits->min_perf_pct * range / 100;
		min = hw_min + adj_range;
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		adj_range = limits->max_perf_pct * range / 100;
		max = hw_min + adj_range;
		if (limits->no_turbo) {
			hw_max = HWP_GUARANTEED_PERF(cap);
			if (hw_max < max)
				max = hw_max;
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}
}

static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
{
	if (hwp_active)
		intel_pstate_hwp_set(policy->cpus);

	return 0;
}

static void intel_pstate_hwp_set_online_cpus(void)
{
	get_online_cpus();
	intel_pstate_hwp_set(cpu_online_mask);
	put_online_cpus();
}
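/*
 * Worked example for intel_pstate_hwp_set() above (illustrative
 * numbers): with HWP_LOWEST_PERF == 8 and HWP_HIGHEST_PERF == 40 the
 * range is 32, so min_perf_pct == 25 yields min = 8 + 32 * 25 / 100
 * == 16, while max_perf_pct == 100 leaves max at the full 40 unless
 * no_turbo caps it at HWP_GUARANTEED_PERF.
 */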
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active)
		return;
	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits->object);		\
	}

static ssize_t show_turbo_pct(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(no_turbo, total);
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}

static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits->turbo_disabled)
		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits->no_turbo);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_turbo_state();
	if (limits->turbo_disabled) {
		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
		return -EPERM;
	}

	limits->no_turbo = clamp_t(int, input, 0, 1);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();

	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf_pct = max(limits->min_perf_pct,
				   limits->max_perf_pct);
	limits->max_perf = div_fp(limits->max_perf_pct, 100);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();
	return count;
}
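/*
 * Usage example (illustrative): writing 80 to the sysfs attribute
 * /sys/devices/system/cpu/intel_pstate/max_perf_pct lands in
 * store_max_perf_pct() above with input == 80; with max_policy_pct at
 * 100 the effective max_perf_pct becomes 80 and max_perf is stored as
 * div_fp(80, 100), i.e. 0.8 in fixed point (raw value 204).
 */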
static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->min_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->min_perf_pct = min(limits->max_perf_pct,
				   limits->min_perf_pct);
	limits->min_perf = div_fp(limits->min_perf_pct, 100);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();
	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	BUG_ON(rc);
}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable HWP notification interrupt as we don't process them */
	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}

static int atom_get_min_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_TURBO_RATIOS, value);
	return value & 0x7F;
}

static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	return val | vid;
}

static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 of the SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	WARN_ON(i > 4);

	return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 of the SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}

static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(ATOM_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			 cpudata->pstate.min_pstate));

	rdmsrl(ATOM_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}
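/*
 * Sketch of the Atom VID interpolation above (illustrative numbers):
 * atom_get_val() linearly interpolates the voltage ID between vid.min
 * and vid.max. With vid.min == 20, vid.max == 52, min_pstate == 5 and
 * max_pstate == 21, vid.ratio is (52 - 20) / (21 - 5) == 2, so a
 * request for pstate 13 programs vid == 20 + (13 - 5) * 2 == 36;
 * turbo P states use the dedicated vid.turbo value instead.
 */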
static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		/* Do some sanity checking for safety */
		if (plat_info & 0x600000000) {
			u64 tdp_ctrl;
			u64 tdp_ratio;
			int tdp_msr;

			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
			if (err)
				goto skip_tar;

			tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
			if (err)
				goto skip_tar;

			/* For level 1 and 2, bits[23:16] contain the ratio */
			if (tdp_ctrl)
				tdp_ratio >>= 16;

			tdp_ratio &= 0xff; /* ratios are only 8 bits long */
			if (tdp_ratio - 1 == tar) {
				max_pstate = tar;
				pr_debug("max_pstate=TAC %x\n", max_pstate);
			} else {
				goto skip_tar;
			}
		}
	}

skip_tar:
	return max_pstate;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = value & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}

static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	return val;
}

static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value >> 8) & 0xFF;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static struct cpu_defaults silvermont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = silvermont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static struct cpu_defaults airmont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = airmont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};
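/*
 * Note (informative): the Atom parameter sets (silvermont_params and
 * airmont_params above) use a lower PID setpoint (60 vs. 97) and a
 * nonzero integral gain, so the cpu_load algorithm begins raising the
 * P state at much lower utilization than the core/KNL performance
 * algorithm does.
 */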
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits->no_turbo || limits->turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
	max_perf_adj = fp_toint(max_perf * limits->max_perf);
	*max = clamp_t(int, max_perf_adj,
		       cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(max_perf * limits->min_perf);
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
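/*
 * Worked example (illustrative): with turbo_pstate == 32 and
 * limits->max_perf == div_fp(80, 100) (raw value 204), max_perf_adj is
 * fp_toint(32 * 204) == fp_toint(6528) == 25, so requested P states
 * are capped at ratio 25 before being written to PERF_CTL.
 */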
static inline void intel_pstate_record_pstate(struct cpudata *cpu, int pstate)
{
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	cpu->pstate.current_pstate = pstate;
}

static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
	int pstate = cpu->pstate.min_pstate;

	intel_pstate_record_pstate(cpu, pstate);
	/*
	 * Generally, there is no guarantee that this code will always run on
	 * the CPU being updated, so force the register update to run on the
	 * right CPU.
	 */
	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
		      pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	intel_pstate_set_min_pstate(cpu);
}

static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;

	sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
}

static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();
	if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = time;
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
	/*
	 * First time this function is invoked in a given cycle, all of the
	 * previous sample data fields are equal to zero or stale and they must
	 * be populated with meaningful numbers for things to work, so assume
	 * that sample.time will always be reset before setting the utilization
	 * update hook and make the caller skip the sample then.
	 */
	return !!cpu->last_sample_time;
}

static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->sample.core_avg_perf,
			  cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
}

static inline int32_t get_avg_pstate(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->pstate.max_pstate_physical,
			  cpu->sample.core_avg_perf);
}
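/*
 * Worked example (illustrative): if APERF advanced by 2,400,000 counts
 * while MPERF advanced by 3,000,000 over a sample, core_avg_perf is
 * stored as div_ext_fp(2400000, 3000000), i.e. 0.8 with 14 fractional
 * bits. With max_pstate_physical == 30 and scaling == 100000 (100 MHz
 * per ratio step), get_avg_frequency() above then reports roughly
 * 2,400,000 kHz, i.e. 2.4 GHz.
 */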
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	u64 cummulative_iowait, delta_iowait_us;
	u64 delta_iowait_mperf;
	u64 mperf, now;
	int32_t cpu_load;

	cummulative_iowait = get_cpu_iowait_time_us(cpu->cpu, &now);

	/*
	 * Convert iowait time into number of IO cycles spent at max_freq.
	 * IO is considered as busy only for the cpu_load algorithm. For
	 * performance this is not needed since we always try to reach the
	 * maximum P-State, so we are already boosting the IOs.
	 */
	delta_iowait_us = cummulative_iowait - cpu->prev_cummulative_iowait;
	delta_iowait_mperf = div64_u64(delta_iowait_us * cpu->pstate.scaling *
		cpu->pstate.max_pstate, MSEC_PER_SEC);

	mperf = cpu->sample.mperf + delta_iowait_mperf;
	cpu->prev_cummulative_iowait = cummulative_iowait;

	/*
	 * The load can be estimated as the ratio of the mperf counter
	 * running at a constant frequency during active periods
	 * (C0) and the time stamp counter running at the same frequency
	 * also during C-states.
	 */
	cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc);
	cpu->sample.busy_scaled = cpu_load;

	return get_avg_pstate(cpu) - pid_calc(&cpu->pid, cpu_load);
}

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
	int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
	u64 duration_ns;

	/*
	 * perf_scaled is the average performance during the last sampling
	 * period scaled by the ratio of the maximum P-state to the P-state
	 * requested last time (in percent). That measures the system's
	 * response to the previous P-state selection.
	 */
	max_pstate = cpu->pstate.max_pstate_physical;
	current_pstate = cpu->pstate.current_pstate;
	perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf,
				 div_fp(100 * max_pstate, current_pstate));

	/*
	 * Since our utilization update callback will not run unless we are
	 * in C0, check if the actual elapsed time is significantly greater (3x)
	 * than our sample interval.  If it is, then we were idle for a long
	 * enough period of time to adjust our performance metric.
	 */
	duration_ns = cpu->sample.time - cpu->last_sample_time;
	if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
		sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
		perf_scaled = mul_fp(perf_scaled, sample_ratio);
	} else {
		sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
		if (sample_ratio < int_tofp(1))
			perf_scaled = 0;
	}

	cpu->sample.busy_scaled = perf_scaled;
	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
}

static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	update_turbo_state();

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
	pstate = clamp_t(int, pstate, min_perf, max_perf);
	if (pstate == cpu->pstate.current_pstate)
		return;

	intel_pstate_record_pstate(cpu, pstate);
	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int from, target_pstate;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	target_pstate = pstate_funcs.get_target_pstate(cpu);

	intel_pstate_update_pstate(cpu, target_pstate);

	sample = &cpu->sample;
	trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
		fp_toint(sample->busy_scaled),
		from,
		cpu->pstate.current_pstate,
		sample->mperf,
		sample->aperf,
		sample->tsc,
		get_avg_frequency(cpu));
}
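/*
 * Worked example of the control loop above (illustrative numbers,
 * using the core defaults of setpoint 97 and p_gain_pct 20): if
 * perf_scaled comes out at 85, the error is 97 - 85 == 12 and the
 * proportional term contributes 0.2 * 12 == 2.4, so pid_calc()
 * returns 2 after rounding and the next requested P state is two
 * ratio steps below the current one.
 */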
static void intel_pstate_update_util(struct update_util_data *data, u64 time,
				     unsigned long util, unsigned long max)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	u64 delta_ns = time - cpu->sample.time;

	if ((s64)delta_ns >= pid_params.sample_rate_ns) {
		bool sample_taken = intel_pstate_sample(cpu, time);

		if (sample_taken) {
			intel_pstate_calc_avg_perf(cpu);
			if (!hwp_active)
				intel_pstate_adjust_busy_pstate(cpu);
		}
	}
}

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, silvermont_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3d, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	ICPU(0x47, core_params),
	ICPU(0x4c, airmont_params),
	ICPU(0x4e, core_params),
	ICPU(0x4f, core_params),
	ICPU(0x5e, core_params),
	ICPU(0x56, core_params),
	ICPU(0x57, knl_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
	ICPU(0x56, core_params),
	{}
};
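/*
 * Note (informative): the hex values above are x86 family 6 model
 * numbers; for instance 0x2a is Sandy Bridge, 0x37 is Silvermont
 * (Bay Trail), 0x3c is Haswell, 0x4c is Airmont (Cherry Trail),
 * 0x4e/0x5e are Skylake and 0x57 is Knights Landing. Model 0x56
 * (Broadwell Xeon D) also appears in the oob table above because its
 * P states may be managed out of band by platform firmware.
 */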
static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	if (!all_cpu_data[cpunum])
		all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
					       GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active) {
		intel_pstate_hwp_enable(cpu);
		pid_params.sample_rate_ms = 50;
		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
	}

	intel_pstate_get_cpu_pstates(cpu);

	intel_pstate_busy_pid_reset(cpu);

	pr_debug("controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	return cpu ? get_avg_frequency(cpu) : 0;
}

static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	if (cpu->update_util_set)
		return;

	/* Prevent intel_pstate_update_util() from using stale data. */
	cpu->sample.time = 0;
	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
				     intel_pstate_update_util);
	cpu->update_util_set = true;
}

static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
	struct cpudata *cpu_data = all_cpu_data[cpu];

	if (!cpu_data->update_util_set)
		return;

	cpufreq_remove_update_util_hook(cpu);
	cpu_data->update_util_set = false;
	synchronize_sched();
}

static void intel_pstate_set_performance_limits(struct perf_limits *limits)
{
	limits->no_turbo = 0;
	limits->turbo_disabled = 0;
	limits->max_perf_pct = 100;
	limits->max_perf = int_tofp(1);
	limits->min_perf_pct = 100;
	limits->min_perf = int_tofp(1);
	limits->max_policy_pct = 100;
	limits->max_sysfs_pct = 100;
	limits->min_policy_pct = 0;
	limits->min_sysfs_pct = 0;
}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
		 policy->cpuinfo.max_freq, policy->max);

	cpu = all_cpu_data[0];
	if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
	    policy->max < policy->cpuinfo.max_freq &&
	    policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
		pr_debug("policy->max > max non turbo frequency\n");
		policy->max = policy->cpuinfo.max_freq;
	}

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
		limits = &performance_limits;
		if (policy->max >= policy->cpuinfo.max_freq) {
			pr_debug("set performance\n");
			intel_pstate_set_performance_limits(limits);
			goto out;
		}
	} else {
		pr_debug("set powersave\n");
		limits = &powersave_limits;
	}

	limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0, 100);
	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
					      policy->cpuinfo.max_freq);
	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

	limits->min_perf = div_fp(limits->min_perf_pct, 100);
	limits->max_perf = div_fp(limits->max_perf_pct, 100);
	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);

out:
	intel_pstate_set_update_util_hook(policy->cpu);

	intel_pstate_hwp_set_policy(policy);

	return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}
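/*
 * Worked example for the policy-percentage math in
 * intel_pstate_set_policy() above (illustrative): with
 * cpuinfo.max_freq == 3200000 kHz, a policy of min 1600000 and max
 * 2400000 gives min_policy_pct == 50 and max_policy_pct ==
 * DIV_ROUND_UP(240000000, 3200000) == 75; the sysfs percentages are
 * then clamped into that [50, 75] window before being converted to
 * the fixed-point min_perf/max_perf fractions.
 */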
static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_debug("CPU %d exiting\n", cpu_num);

	intel_pstate_clear_update_util_hook(cpu_num);

	if (hwp_active)
		return;

	intel_pstate_set_min_pstate(cpu);
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	update_turbo_state();
	policy->cpuinfo.max_freq = limits->turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	policy->cpuinfo.max_freq *= cpu->pstate.scaling;

	intel_pstate_init_acpi_perf_limits(policy);
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	intel_pstate_exit_perf_limits(policy);

	return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.resume		= intel_pstate_hwp_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};

static int __initdata no_load;
static int __initdata no_hwp;
static int __initdata hwp_only;
static unsigned int force_load;

static int intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.get_val = funcs->get_val;
	pstate_funcs.get_vid = funcs->get_vid;
	pstate_funcs.get_target_pstate = funcs->get_target_pstate;
}
#ifdef CONFIG_ACPI

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int  oem_pwr_table;
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};

static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
						ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					!force_load;
			}
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
	{}
};
static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;

	if (no_load)
		return -ENODEV;

	if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
		copy_cpu_funcs(&core_params.funcs);
		hwp_active++;
		goto hwp_cpu_matched;
	}

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

hwp_cpu_matched:
	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	pr_info("Intel P-state driver initializing\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (!hwp_active && hwp_only)
		goto out;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	if (hwp_active)
		pr_info("HWP enabled\n");

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			intel_pstate_clear_update_util_hook(cpu);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	if (!strcmp(str, "no_hwp")) {
		pr_info("HWP disabled\n");
		no_hwp = 1;
	}
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;

#ifdef CONFIG_ACPI
	if (!strcmp(str, "support_acpi_ppc"))
		acpi_ppc = true;
#endif

	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");
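/*
 * Usage example (illustrative): early_param() above registers
 * intel_pstate_setup() for the "intel_pstate=" kernel command-line
 * option, so booting with "intel_pstate=disable" keeps the driver
 * from loading, while "intel_pstate=no_hwp", "intel_pstate=force",
 * "intel_pstate=hwp_only" and "intel_pstate=support_acpi_ppc" set the
 * corresponding flags handled during intel_pstate_init().
 */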