/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>

#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
        return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
        return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
        int mask, ret;

        ret = fp_toint(x);
        mask = (1 << FRAC_BITS) - 1;
        if (x & mask)
                ret += 1;
        return ret;
}
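
/*
 * Illustrative examples of the fixed-point helpers above (not used by the
 * driver itself): with FRAC_BITS = 8, values use a Q24.8 format, i.e. the
 * low 8 bits hold the fraction, so one unit is 1/256.
 *
 *   int_tofp(3)                      == 768            (3 << 8)
 *   fp_toint(768)                    == 3
 *   mul_fp(int_tofp(3), int_tofp(2)) == int_tofp(6)    (768 * 512 >> 8)
 *   div_fp(int_tofp(1), int_tofp(2)) == 128            (0.5 in Q24.8)
 *   ceiling_fp(257)                  == 2              (any nonzero fraction
 *                                                       rounds up)
 */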

/**
 * struct sample -	Store performance sample
 * @core_pct_busy:	Ratio of APERF/MPERF in percent, which is actual
 *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate next
 *			P state. This can be different from core_pct_busy
 *			to account for CPU idle periods
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @freq:		Effective frequency calculated from APERF/MPERF
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing next P State.
 */
struct sample {
        int32_t core_pct_busy;
        int32_t busy_scaled;
        u64 aperf;
        u64 mperf;
        u64 tsc;
        int freq;
        u64 time;
};
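
/*
 * Example (illustrative numbers only): if APERF advanced by 8,000,000
 * counts and MPERF by 10,000,000 counts between two samples, the core ran
 * at 80% of its guaranteed frequency while unhalted, so core_pct_busy is
 * int_tofp(80). The TSC delta additionally captures time spent in idle
 * states, during which APERF and MPERF do not count.
 */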

/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical: This is physical Max P state for a processor
 *			This can be higher than the max_pstate which can
 *			be limited by platform thermal design power limits
 * @scaling:		Scaling factor to convert frequency to cpufreq
 *			frequency units
 * @turbo_pstate:	Max Turbo P state possible for this platform
 *
 * Stores the per cpu model P state limits and current P state.
 */
struct pstate_data {
        int current_pstate;
        int min_pstate;
        int max_pstate;
        int max_pstate_physical;
        int scaling;
        int turbo_pstate;
};

/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P State.
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - Min P State)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
 * This data is used in Atom platforms, where in addition to target P state,
 * the voltage data needs to be specified to select next P State.
 */
struct vid_data {
        int min;
        int max;
        int turbo;
        int32_t ratio;
};

/**
 * struct _pid -	Stores PID data
 * @setpoint:		Target set point for busyness or performance
 * @integral:		Storage for accumulated error values
 * @p_gain:		PID proportional gain
 * @i_gain:		PID integral gain
 * @d_gain:		PID derivative gain
 * @deadband:		PID deadband
 * @last_err:		Last error storage for integral part of PID calculation
 *
 * Stores PID coefficients and last error for PID controller.
 */
struct _pid {
        int setpoint;
        int32_t integral;
        int32_t p_gain;
        int32_t i_gain;
        int32_t d_gain;
        int deadband;
        int32_t last_err;
};
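
/*
 * For reference: pid_calc() below implements the textbook discrete PID
 * update on the (setpoint - busy) error, in Q24.8 fixed point:
 *
 *   output = p_gain * err + i_gain * integral + d_gain * (err - last_err)
 *
 * where integral is the clamped running sum of past errors. A positive
 * output means the CPU is less busy than the setpoint, so the requested
 * P state can be lowered by that amount.
 */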

/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @update_util:	CPUFreq utility callback information
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @pid:		Stores PID parameters for this CPU
 * @last_sample_time:	Last Sample time
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: Cumulative IO wait time at the last sample,
 *			used to compute the IO wait delta for the next sample
 * @sample:		Storage for storing last Sample data
 *
 * This structure stores per CPU instance data for all CPUs.
 */
struct cpudata {
        int cpu;

        struct update_util_data update_util;

        struct pstate_data pstate;
        struct vid_data vid;
        struct _pid pid;

        u64 last_sample_time;
        u64 prev_aperf;
        u64 prev_mperf;
        u64 prev_tsc;
        u64 prev_cummulative_iowait;
        struct sample sample;
};

static struct cpudata **all_cpu_data;

/**
 * struct pstate_adjust_policy - Stores static PID configuration data
 * @sample_rate_ms:	PID calculation sample rate in ms
 * @sample_rate_ns:	Sample rate in ns, derived from @sample_rate_ms
 * @deadband:		PID deadband
 * @setpoint:		PID Setpoint
 * @p_gain_pct:		PID proportional gain
 * @i_gain_pct:		PID integral gain
 * @d_gain_pct:		PID derivative gain
 *
 * Stores per CPU model static PID configuration data.
 */
struct pstate_adjust_policy {
        int sample_rate_ms;
        s64 sample_rate_ns;
        int deadband;
        int setpoint;
        int p_gain_pct;
        int d_gain_pct;
        int i_gain_pct;
};

/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non turbo effective P state
 * @get_max_physical:	Callback to get maximum non turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 * @get_target_pstate:	Callback to a function to calculate next P state to use
 *
 * Core and Atom CPU models have different ways to get P state limits. This
 * structure is used to store those callbacks.
 */
struct pstate_funcs {
        int (*get_max)(void);
        int (*get_max_physical)(void);
        int (*get_min)(void);
        int (*get_turbo)(void);
        int (*get_scaling)(void);
        u64 (*get_val)(struct cpudata*, int pstate);
        void (*get_vid)(struct cpudata *);
        int32_t (*get_target_pstate)(struct cpudata *);
};

/**
 * struct cpu_defaults - Per CPU model default config data
 * @pid_policy:	PID config data
 * @funcs:	Callback function data
 */
struct cpu_defaults {
        struct pstate_adjust_policy pid_policy;
        struct pstate_funcs funcs;
};

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;


/**
 * struct perf_limits - Store user and policy limits
 * @no_turbo:		User requested turbo state from intel_pstate sysfs
 * @turbo_disabled:	Platform turbo status either from msr
 *			MSR_IA32_MISC_ENABLE or when maximum available pstate
 *			matches the maximum turbo pstate
 * @max_perf_pct:	Effective maximum performance limit in percentage, this
 *			is minimum of either limits enforced by cpufreq policy
 *			or limits from user set limits via intel_pstate sysfs
 * @min_perf_pct:	Effective minimum performance limit in percentage, this
 *			is maximum of either limits enforced by cpufreq policy
 *			or limits from user set limits via intel_pstate sysfs
 * @max_perf:		This is a scaled value between 0 to 255 for max_perf_pct
 *			This value is used to limit max pstate
 * @min_perf:		This is a scaled value between 0 to 255 for min_perf_pct
 *			This value is used to limit min pstate
 * @max_policy_pct:	The maximum performance in percentage enforced by
 *			cpufreq setpolicy interface
 * @max_sysfs_pct:	The maximum performance in percentage enforced by
 *			intel pstate sysfs interface
 * @min_policy_pct:	The minimum performance in percentage enforced by
 *			cpufreq setpolicy interface
 * @min_sysfs_pct:	The minimum performance in percentage enforced by
 *			intel pstate sysfs interface
 *
 * Storage for user and policy defined limits.
 */
struct perf_limits {
        int no_turbo;
        int turbo_disabled;
        int max_perf_pct;
        int min_perf_pct;
        int32_t max_perf;
        int32_t min_perf;
        int max_policy_pct;
        int max_sysfs_pct;
        int min_policy_pct;
        int min_sysfs_pct;
};

static struct perf_limits performance_limits = {
        .no_turbo = 0,
        .turbo_disabled = 0,
        .max_perf_pct = 100,
        .max_perf = int_tofp(1),
        .min_perf_pct = 100,
        .min_perf = int_tofp(1),
        .max_policy_pct = 100,
        .max_sysfs_pct = 100,
        .min_policy_pct = 0,
        .min_sysfs_pct = 0,
};

static struct perf_limits powersave_limits = {
        .no_turbo = 0,
        .turbo_disabled = 0,
        .max_perf_pct = 100,
        .max_perf = int_tofp(1),
        .min_perf_pct = 0,
        .min_perf = 0,
        .max_policy_pct = 100,
        .max_sysfs_pct = 100,
        .min_policy_pct = 0,
        .min_sysfs_pct = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif
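
/*
 * Worked example (illustrative numbers): writing 80 to the max_perf_pct
 * sysfs attribute stores max_perf = div_fp(int_tofp(80), int_tofp(100)),
 * i.e. 0.8 in Q24.8 (204/256). With a turbo P state of 35 this limits the
 * highest requested P state to fp_toint(35 * max_perf) = 27; see
 * intel_pstate_get_min_max() below.
 */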

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
                             int deadband, int integral)
{
        pid->setpoint = int_tofp(setpoint);
        pid->deadband = int_tofp(deadband);
        pid->integral = int_tofp(integral);
        pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
        pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
        pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
        pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static signed int pid_calc(struct _pid *pid, int32_t busy)
{
        signed int result;
        int32_t pterm, dterm, fp_error;
        int32_t integral_limit;

        fp_error = pid->setpoint - busy;

        if (abs(fp_error) <= pid->deadband)
                return 0;

        pterm = mul_fp(pid->p_gain, fp_error);

        pid->integral += fp_error;

        /*
         * We limit the integral here so that it will never
         * get higher than 30.  This prevents it from becoming
         * too large an input over long periods of time and allows
         * it to get factored out sooner.
         *
         * The value of 30 was chosen through experimentation.
         */
        integral_limit = int_tofp(30);
        if (pid->integral > integral_limit)
                pid->integral = integral_limit;
        if (pid->integral < -integral_limit)
                pid->integral = -integral_limit;

        dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
        pid->last_err = fp_error;

        result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
        result = result + (1 << (FRAC_BITS-1));
        return (signed int)fp_toint(result);
}

static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
        pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
        pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
        pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

        pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}
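
/*
 * Worked example (illustrative): with the core defaults defined below
 * (setpoint = 97, deadband = 0, p_gain_pct = 20, i_gain_pct = 0,
 * d_gain_pct = 0), a scaled busy value of int_tofp(92) gives
 * fp_error = int_tofp(5) and pterm = 0.2 * 5 ~= 1.0; after the 0.5
 * rounding term is added, pid_calc() returns 1, so the next requested
 * P state is one step below the current one.
 */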

static inline void intel_pstate_reset_all_pid(void)
{
        unsigned int cpu;

        for_each_online_cpu(cpu) {
                if (all_cpu_data[cpu])
                        intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
        }
}

static inline void update_turbo_state(void)
{
        u64 misc_en;
        struct cpudata *cpu;

        cpu = all_cpu_data[0];
        rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
        limits->turbo_disabled =
                (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
                 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static void intel_pstate_hwp_set(const struct cpumask *cpumask)
{
        int min, hw_min, max, hw_max, cpu, range, adj_range;
        u64 value, cap;

        rdmsrl(MSR_HWP_CAPABILITIES, cap);
        hw_min = HWP_LOWEST_PERF(cap);
        hw_max = HWP_HIGHEST_PERF(cap);
        range = hw_max - hw_min;

        for_each_cpu(cpu, cpumask) {
                rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
                adj_range = limits->min_perf_pct * range / 100;
                min = hw_min + adj_range;
                value &= ~HWP_MIN_PERF(~0L);
                value |= HWP_MIN_PERF(min);

                adj_range = limits->max_perf_pct * range / 100;
                max = hw_min + adj_range;
                if (limits->no_turbo) {
                        hw_max = HWP_GUARANTEED_PERF(cap);
                        if (hw_max < max)
                                max = hw_max;
                }

                value &= ~HWP_MAX_PERF(~0L);
                value |= HWP_MAX_PERF(max);
                wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
        }
}

static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
{
        if (hwp_active)
                intel_pstate_hwp_set(policy->cpus);

        return 0;
}

static void intel_pstate_hwp_set_online_cpus(void)
{
        get_online_cpus();
        intel_pstate_hwp_set(cpu_online_mask);
        put_online_cpus();
}
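
/*
 * Example mapping (illustrative numbers): if MSR_HWP_CAPABILITIES reports
 * hw_min = 8 and hw_max = 40, then range = 32. A min_perf_pct of 50 maps
 * to min = 8 + 50 * 32 / 100 = 24, and a max_perf_pct of 100 maps to
 * max = 40, which intel_pstate_hwp_set() programs into the MIN_PERF and
 * MAX_PERF fields of MSR_HWP_REQUEST for each CPU in the mask.
 */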

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
        *(u32 *)data = val;
        intel_pstate_reset_all_pid();
        return 0;
}

static int pid_param_get(void *data, u64 *val)
{
        *val = *(u32 *)data;
        return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
        char *name;
        void *value;
};

static struct pid_param pid_files[] = {
        {"sample_rate_ms", &pid_params.sample_rate_ms},
        {"d_gain_pct", &pid_params.d_gain_pct},
        {"i_gain_pct", &pid_params.i_gain_pct},
        {"deadband", &pid_params.deadband},
        {"setpoint", &pid_params.setpoint},
        {"p_gain_pct", &pid_params.p_gain_pct},
        {NULL, NULL}
};

static void __init intel_pstate_debug_expose_params(void)
{
        struct dentry *debugfs_parent;
        int i = 0;

        if (hwp_active)
                return;
        debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
        if (IS_ERR_OR_NULL(debugfs_parent))
                return;
        while (pid_files[i].name) {
                debugfs_create_file(pid_files[i].name, 0660,
                                    debugfs_parent, pid_files[i].value,
                                    &fops_pid_param);
                i++;
        }
}

/************************** debugfs end ************************/
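
/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug): the PID
 * parameters above appear under /sys/kernel/debug/pstate_snb/, e.g.
 *
 *   # cat /sys/kernel/debug/pstate_snb/setpoint
 *   # echo 15 > /sys/kernel/debug/pstate_snb/p_gain_pct
 *
 * Writes take effect immediately because pid_param_set() calls
 * intel_pstate_reset_all_pid().
 */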

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
        static ssize_t show_##file_name					\
        (struct kobject *kobj, struct attribute *attr, char *buf)	\
        {								\
                return sprintf(buf, "%u\n", limits->object);		\
        }

static ssize_t show_turbo_pct(struct kobject *kobj,
                              struct attribute *attr, char *buf)
{
        struct cpudata *cpu;
        int total, no_turbo, turbo_pct;
        uint32_t turbo_fp;

        cpu = all_cpu_data[0];

        total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
        no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
        turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
        turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
        return sprintf(buf, "%u\n", turbo_pct);
}

static ssize_t show_num_pstates(struct kobject *kobj,
                                struct attribute *attr, char *buf)
{
        struct cpudata *cpu;
        int total;

        cpu = all_cpu_data[0];
        total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
        return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
                             struct attribute *attr, char *buf)
{
        ssize_t ret;

        update_turbo_state();
        if (limits->turbo_disabled)
                ret = sprintf(buf, "%u\n", limits->turbo_disabled);
        else
                ret = sprintf(buf, "%u\n", limits->no_turbo);

        return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
                              const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        update_turbo_state();
        if (limits->turbo_disabled) {
                pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
                return -EPERM;
        }

        limits->no_turbo = clamp_t(int, input, 0, 1);

        if (hwp_active)
                intel_pstate_hwp_set_online_cpus();

        return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
                                  const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        limits->max_sysfs_pct = clamp_t(int, input, 0, 100);
        limits->max_perf_pct = min(limits->max_policy_pct,
                                   limits->max_sysfs_pct);
        limits->max_perf_pct = max(limits->min_policy_pct,
                                   limits->max_perf_pct);
        limits->max_perf_pct = max(limits->min_perf_pct,
                                   limits->max_perf_pct);
        limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
                                  int_tofp(100));

        if (hwp_active)
                intel_pstate_hwp_set_online_cpus();
        return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
                                  const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        limits->min_sysfs_pct = clamp_t(int, input, 0, 100);
        limits->min_perf_pct = max(limits->min_policy_pct,
                                   limits->min_sysfs_pct);
        limits->min_perf_pct = min(limits->max_policy_pct,
                                   limits->min_perf_pct);
        limits->min_perf_pct = min(limits->max_perf_pct,
                                   limits->min_perf_pct);
        limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
                                  int_tofp(100));

        if (hwp_active)
                intel_pstate_hwp_set_online_cpus();
        return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
        &no_turbo.attr,
        &max_perf_pct.attr,
        &min_perf_pct.attr,
        &turbo_pct.attr,
        &num_pstates.attr,
        NULL
};

static struct attribute_group intel_pstate_attr_group = {
        .attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
        struct kobject *intel_pstate_kobject;
        int rc;

        intel_pstate_kobject = kobject_create_and_add("intel_pstate",
                                                &cpu_subsys.dev_root->kobj);
        BUG_ON(!intel_pstate_kobject);
        rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
        BUG_ON(rc);
}
/************************** sysfs end ************************/
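
/*
 * Usage sketch: the attributes above are exposed under
 * /sys/devices/system/cpu/intel_pstate/, e.g.
 *
 *   # cat /sys/devices/system/cpu/intel_pstate/turbo_pct
 *   # echo 70 > /sys/devices/system/cpu/intel_pstate/max_perf_pct
 *
 * max_perf_pct and min_perf_pct are percentages of the available
 * performance range, and writing 1 to no_turbo keeps the driver at or
 * below the maximum non-turbo P state.
 */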

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
        /* First disable HWP notification interrupts, as we don't process them */
        wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

        wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}

static int atom_get_min_pstate(void)
{
        u64 value;

        rdmsrl(ATOM_RATIOS, value);
        return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
        u64 value;

        rdmsrl(ATOM_RATIOS, value);
        return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
        u64 value;

        rdmsrl(ATOM_TURBO_RATIOS, value);
        return value & 0x7F;
}

static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
        u64 val;
        int32_t vid_fp;
        u32 vid;

        val = (u64)pstate << 8;
        if (limits->no_turbo && !limits->turbo_disabled)
                val |= (u64)1 << 32;

        vid_fp = cpudata->vid.min + mul_fp(
                int_tofp(pstate - cpudata->pstate.min_pstate),
                cpudata->vid.ratio);

        vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
        vid = ceiling_fp(vid_fp);

        if (pstate > cpudata->pstate.max_pstate)
                vid = cpudata->vid.turbo;

        return val | vid;
}

static int silvermont_get_scaling(void)
{
        u64 value;
        int i;
        /* Defined in Table 35-6 from SDM (Sept 2015) */
        static int silvermont_freq_table[] = {
                83300, 100000, 133300, 116700, 80000};

        rdmsrl(MSR_FSB_FREQ, value);
        i = value & 0x7;
        WARN_ON(i > 4);

        return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
        u64 value;
        int i;
        /* Defined in Table 35-10 from SDM (Sept 2015) */
        static int airmont_freq_table[] = {
                83300, 100000, 133300, 116700, 80000,
                93300, 90000, 88900, 87500};

        rdmsrl(MSR_FSB_FREQ, value);
        i = value & 0xF;
        WARN_ON(i > 8);

        return airmont_freq_table[i];
}

static void atom_get_vid(struct cpudata *cpudata)
{
        u64 value;

        rdmsrl(ATOM_VIDS, value);
        cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
        cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
        cpudata->vid.ratio = div_fp(
                cpudata->vid.max - cpudata->vid.min,
                int_tofp(cpudata->pstate.max_pstate -
                         cpudata->pstate.min_pstate));

        rdmsrl(ATOM_TURBO_VIDS, value);
        cpudata->vid.turbo = value & 0x7f;
}
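
/*
 * Worked example of the VID interpolation in atom_get_val() (illustrative
 * numbers): with vid.min = int_tofp(30), vid.max = int_tofp(54),
 * min_pstate = 10 and max_pstate = 34, atom_get_vid() computes
 * ratio = (54 - 30) / (34 - 10) = 1.0, so a request for P state 22 yields
 * vid = 30 + (22 - 10) * 1.0 = 42. Requests above max_pstate use the
 * dedicated vid.turbo value instead.
 */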

static int core_get_min_pstate(void)
{
        u64 value;

        rdmsrl(MSR_PLATFORM_INFO, value);
        return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
        u64 value;

        rdmsrl(MSR_PLATFORM_INFO, value);
        return (value >> 8) & 0xFF;
}

static int core_get_max_pstate(void)
{
        u64 tar;
        u64 plat_info;
        int max_pstate;
        int err;

        rdmsrl(MSR_PLATFORM_INFO, plat_info);
        max_pstate = (plat_info >> 8) & 0xFF;

        err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
        if (!err) {
                /* Do some sanity checking for safety */
                if (plat_info & 0x600000000) {
                        u64 tdp_ctrl;
                        u64 tdp_ratio;
                        int tdp_msr;

                        err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
                        if (err)
                                goto skip_tar;

                        tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
                        err = rdmsrl_safe(tdp_msr, &tdp_ratio);
                        if (err)
                                goto skip_tar;

                        /* For level 1 and 2, bits[23:16] contain the ratio */
                        if (tdp_ctrl)
                                tdp_ratio >>= 16;

                        tdp_ratio &= 0xff; /* ratios are only 8 bits long */
                        if (tdp_ratio - 1 == tar) {
                                max_pstate = tar;
                                pr_debug("max_pstate=TAC %x\n", max_pstate);
                        } else {
                                goto skip_tar;
                        }
                }
        }

skip_tar:
        return max_pstate;
}

static int core_get_turbo_pstate(void)
{
        u64 value;
        int nont, ret;

        rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
        nont = core_get_max_pstate();
        ret = value & 255;
        if (ret <= nont)
                ret = nont;
        return ret;
}

static inline int core_get_scaling(void)
{
        return 100000;
}

static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
        u64 val;

        val = (u64)pstate << 8;
        if (limits->no_turbo && !limits->turbo_disabled)
                val |= (u64)1 << 32;

        return val;
}
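
/*
 * For reference: in the value built by core_get_val() the P state ratio
 * occupies bits 15:8 of MSR_IA32_PERF_CTL and bit 32 is the turbo disable
 * engage bit. With core_get_scaling() returning 100000 (kHz per unit of
 * ratio), a ratio of 22 corresponds to 2.2 GHz; e.g. requesting P state 22
 * writes 22 << 8 = 0x1600 into the MSR.
 */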

static int knl_get_turbo_pstate(void)
{
        u64 value;
        int nont, ret;

        rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
        nont = core_get_max_pstate();
        ret = (value >> 8) & 0xFF;
        if (ret <= nont)
                ret = nont;
        return ret;
}

static struct cpu_defaults core_params = {
        .pid_policy = {
                .sample_rate_ms = 10,
                .deadband = 0,
                .setpoint = 97,
                .p_gain_pct = 20,
                .d_gain_pct = 0,
                .i_gain_pct = 0,
        },
        .funcs = {
                .get_max = core_get_max_pstate,
                .get_max_physical = core_get_max_pstate_physical,
                .get_min = core_get_min_pstate,
                .get_turbo = core_get_turbo_pstate,
                .get_scaling = core_get_scaling,
                .get_val = core_get_val,
                .get_target_pstate = get_target_pstate_use_performance,
        },
};

static struct cpu_defaults silvermont_params = {
        .pid_policy = {
                .sample_rate_ms = 10,
                .deadband = 0,
                .setpoint = 60,
                .p_gain_pct = 14,
                .d_gain_pct = 0,
                .i_gain_pct = 4,
        },
        .funcs = {
                .get_max = atom_get_max_pstate,
                .get_max_physical = atom_get_max_pstate,
                .get_min = atom_get_min_pstate,
                .get_turbo = atom_get_turbo_pstate,
                .get_val = atom_get_val,
                .get_scaling = silvermont_get_scaling,
                .get_vid = atom_get_vid,
                .get_target_pstate = get_target_pstate_use_cpu_load,
        },
};

static struct cpu_defaults airmont_params = {
        .pid_policy = {
                .sample_rate_ms = 10,
                .deadband = 0,
                .setpoint = 60,
                .p_gain_pct = 14,
                .d_gain_pct = 0,
                .i_gain_pct = 4,
        },
        .funcs = {
                .get_max = atom_get_max_pstate,
                .get_max_physical = atom_get_max_pstate,
                .get_min = atom_get_min_pstate,
                .get_turbo = atom_get_turbo_pstate,
                .get_val = atom_get_val,
                .get_scaling = airmont_get_scaling,
                .get_vid = atom_get_vid,
                .get_target_pstate = get_target_pstate_use_cpu_load,
        },
};

static struct cpu_defaults knl_params = {
        .pid_policy = {
                .sample_rate_ms = 10,
                .deadband = 0,
                .setpoint = 97,
                .p_gain_pct = 20,
                .d_gain_pct = 0,
                .i_gain_pct = 0,
        },
        .funcs = {
                .get_max = core_get_max_pstate,
                .get_max_physical = core_get_max_pstate_physical,
                .get_min = core_get_min_pstate,
                .get_turbo = knl_get_turbo_pstate,
                .get_scaling = core_get_scaling,
                .get_val = core_get_val,
                .get_target_pstate = get_target_pstate_use_performance,
        },
};

static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
        int max_perf = cpu->pstate.turbo_pstate;
        int max_perf_adj;
        int min_perf;

        if (limits->no_turbo || limits->turbo_disabled)
                max_perf = cpu->pstate.max_pstate;

        /*
         * performance can be limited by user through sysfs, by cpufreq
         * policy, or by cpu specific default values determined through
         * experimentation.
         */
        max_perf_adj = fp_toint(max_perf * limits->max_perf);
        *max = clamp_t(int, max_perf_adj,
                       cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

        min_perf = fp_toint(max_perf * limits->min_perf);
        *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
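
/*
 * Worked example (illustrative): with min_pstate = 10, turbo_pstate = 35,
 * turbo enabled and limits->max_perf = 0.8 in Q24.8 (204/256),
 * intel_pstate_get_min_max() yields max = clamp(fp_toint(35 * 204), 10, 35)
 * = 27. With limits->min_perf = 0.25 (64/256), min = 10 because
 * fp_toint(35 * 64) = 8 falls below min_pstate and gets clamped up.
 */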

static inline void intel_pstate_record_pstate(struct cpudata *cpu, int pstate)
{
        trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
        cpu->pstate.current_pstate = pstate;
}

static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
        int pstate = cpu->pstate.min_pstate;

        intel_pstate_record_pstate(cpu, pstate);
        /*
         * Generally, there is no guarantee that this code will always run on
         * the CPU being updated, so force the register update to run on the
         * right CPU.
         */
        wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
                      pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
        cpu->pstate.min_pstate = pstate_funcs.get_min();
        cpu->pstate.max_pstate = pstate_funcs.get_max();
        cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
        cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
        cpu->pstate.scaling = pstate_funcs.get_scaling();

        if (pstate_funcs.get_vid)
                pstate_funcs.get_vid(cpu);

        intel_pstate_set_min_pstate(cpu);
}

static inline void intel_pstate_calc_busy(struct cpudata *cpu)
{
        struct sample *sample = &cpu->sample;
        int64_t core_pct;

        core_pct = int_tofp(sample->aperf) * int_tofp(100);
        core_pct = div64_u64(core_pct, int_tofp(sample->mperf));

        sample->core_pct_busy = (int32_t)core_pct;
}

static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{
        u64 aperf, mperf;
        unsigned long flags;
        u64 tsc;

        local_irq_save(flags);
        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);
        tsc = rdtsc();
        if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
                local_irq_restore(flags);
                return false;
        }
        local_irq_restore(flags);

        cpu->last_sample_time = cpu->sample.time;
        cpu->sample.time = time;
        cpu->sample.aperf = aperf;
        cpu->sample.mperf = mperf;
        cpu->sample.tsc = tsc;
        cpu->sample.aperf -= cpu->prev_aperf;
        cpu->sample.mperf -= cpu->prev_mperf;
        cpu->sample.tsc -= cpu->prev_tsc;

        cpu->prev_aperf = aperf;
        cpu->prev_mperf = mperf;
        cpu->prev_tsc = tsc;
        /*
         * First time this function is invoked in a given cycle, all of the
         * previous sample data fields are equal to zero or stale and they must
         * be populated with meaningful numbers for things to work, so assume
         * that sample.time will always be reset before setting the utilization
         * update hook and make the caller skip the sample then.
         */
        return !!cpu->last_sample_time;
}

static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
        return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf *
                         cpu->pstate.scaling, cpu->sample.mperf);
}

static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
        struct sample *sample = &cpu->sample;
        u64 cummulative_iowait, delta_iowait_us;
        u64 delta_iowait_mperf;
        u64 mperf, now;
        int32_t cpu_load;

        cummulative_iowait = get_cpu_iowait_time_us(cpu->cpu, &now);

        /*
         * Convert iowait time into number of IO cycles spent at max_freq.
         * IO is considered as busy only for the cpu_load algorithm. For
         * performance this is not needed since we always try to reach the
         * maximum P-State, so we are already boosting the IOs.
         */
        delta_iowait_us = cummulative_iowait - cpu->prev_cummulative_iowait;
        delta_iowait_mperf = div64_u64(delta_iowait_us * cpu->pstate.scaling *
                                       cpu->pstate.max_pstate, MSEC_PER_SEC);

        mperf = cpu->sample.mperf + delta_iowait_mperf;
        cpu->prev_cummulative_iowait = cummulative_iowait;

        /*
         * The load can be estimated as the ratio of the mperf counter,
         * which runs at a constant frequency during active periods (C0),
         * to the time stamp counter, which runs at the same frequency but
         * also counts during C-states.
         */
        cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc);
        cpu->sample.busy_scaled = cpu_load;

        return cpu->pstate.current_pstate - pid_calc(&cpu->pid, cpu_load);
}
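
/*
 * Worked example (illustrative): with scaling = 100000, max_pstate = 24
 * and 2000 us of additional IO wait since the last sample, the IO wait is
 * credited as 2000 * 100000 * 24 / 1000 = 4,800,000 extra mperf cycles.
 * If the mperf delta was 6,000,000 and the tsc delta 20,000,000, then
 * cpu_load = 100 * (6,000,000 + 4,800,000) / 20,000,000 = 54% before being
 * fed to the PID.
 */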

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
        int32_t core_busy, max_pstate, current_pstate, sample_ratio;
        u64 duration_ns;

        intel_pstate_calc_busy(cpu);

        /*
         * core_busy is the ratio of actual performance to max
         * max_pstate is the max non turbo pstate available
         * current_pstate was the pstate that was requested during
         *	the last sample period.
         *
         * We normalize core_busy, which was our actual percent
         * performance, to what we requested during the last sample
         * period. The result will be a percentage of busy at a
         * specified pstate.
         */
        core_busy = cpu->sample.core_pct_busy;
        max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
        current_pstate = int_tofp(cpu->pstate.current_pstate);
        core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

        /*
         * Since our utilization update callback will not run unless we are
         * in C0, check if the actual elapsed time is significantly greater (3x)
         * than our sample interval. If it is, then we were idle for a long
         * enough period of time to adjust our busyness.
         */
        duration_ns = cpu->sample.time - cpu->last_sample_time;
        if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
                sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
                                      int_tofp(duration_ns));
                core_busy = mul_fp(core_busy, sample_ratio);
        } else {
                sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
                if (sample_ratio < int_tofp(1))
                        core_busy = 0;
        }

        cpu->sample.busy_scaled = core_busy;
        return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy);
}
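
/*
 * Worked example (illustrative): if core_pct_busy is 60%,
 * max_pstate_physical = 30 and the previously requested P state was 20,
 * the normalized busyness is 60 * 30 / 20 = 90%, i.e. the core was 90% as
 * busy as what was asked for. If the sample also arrived more than three
 * sample periods late, that value is scaled down further by
 * sample_rate_ns / duration_ns to account for the idle time.
 */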

static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
{
        int max_perf, min_perf;

        update_turbo_state();

        intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
        pstate = clamp_t(int, pstate, min_perf, max_perf);
        if (pstate == cpu->pstate.current_pstate)
                return;

        intel_pstate_record_pstate(cpu, pstate);
        wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
        int from, target_pstate;
        struct sample *sample;

        from = cpu->pstate.current_pstate;

        target_pstate = pstate_funcs.get_target_pstate(cpu);

        intel_pstate_update_pstate(cpu, target_pstate);

        sample = &cpu->sample;
        trace_pstate_sample(fp_toint(sample->core_pct_busy),
                            fp_toint(sample->busy_scaled),
                            from,
                            cpu->pstate.current_pstate,
                            sample->mperf,
                            sample->aperf,
                            sample->tsc,
                            get_avg_frequency(cpu));
}

static void intel_pstate_update_util(struct update_util_data *data, u64 time,
                                     unsigned long util, unsigned long max)
{
        struct cpudata *cpu = container_of(data, struct cpudata, update_util);
        u64 delta_ns = time - cpu->sample.time;

        if ((s64)delta_ns >= pid_params.sample_rate_ns) {
                bool sample_taken = intel_pstate_sample(cpu, time);

                if (sample_taken && !hwp_active)
                        intel_pstate_adjust_busy_pstate(cpu);
        }
}

#define ICPU(model, policy) \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
                        (unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
        ICPU(0x2a, core_params),
        ICPU(0x2d, core_params),
        ICPU(0x37, silvermont_params),
        ICPU(0x3a, core_params),
        ICPU(0x3c, core_params),
        ICPU(0x3d, core_params),
        ICPU(0x3e, core_params),
        ICPU(0x3f, core_params),
        ICPU(0x45, core_params),
        ICPU(0x46, core_params),
        ICPU(0x47, core_params),
        ICPU(0x4c, airmont_params),
        ICPU(0x4e, core_params),
        ICPU(0x4f, core_params),
        ICPU(0x5e, core_params),
        ICPU(0x56, core_params),
        ICPU(0x57, knl_params),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
        ICPU(0x56, core_params),
        {}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
        struct cpudata *cpu;

        if (!all_cpu_data[cpunum])
                all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
                                               GFP_KERNEL);
        if (!all_cpu_data[cpunum])
                return -ENOMEM;

        cpu = all_cpu_data[cpunum];

        cpu->cpu = cpunum;

        if (hwp_active) {
                intel_pstate_hwp_enable(cpu);
                pid_params.sample_rate_ms = 50;
                pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
        }

        intel_pstate_get_cpu_pstates(cpu);

        intel_pstate_busy_pid_reset(cpu);

        cpu->update_util.func = intel_pstate_update_util;

        pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);

        return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
        struct cpudata *cpu;

        cpu = all_cpu_data[cpu_num];
        if (!cpu)
                return 0;

        return get_avg_frequency(cpu);
}

static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
        struct cpudata *cpu = all_cpu_data[cpu_num];

        /* Prevent intel_pstate_update_util() from using stale data. */
        cpu->sample.time = 0;
        cpufreq_set_update_util_data(cpu_num, &cpu->update_util);
}

static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
        cpufreq_set_update_util_data(cpu, NULL);
        synchronize_sched();
}

static void intel_pstate_set_performance_limits(struct perf_limits *limits)
{
        limits->no_turbo = 0;
        limits->turbo_disabled = 0;
        limits->max_perf_pct = 100;
        limits->max_perf = int_tofp(1);
        limits->min_perf_pct = 100;
        limits->min_perf = int_tofp(1);
        limits->max_policy_pct = 100;
        limits->max_sysfs_pct = 100;
        limits->min_policy_pct = 0;
        limits->min_sysfs_pct = 0;
}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
        if (!policy->cpuinfo.max_freq)
                return -ENODEV;

        intel_pstate_clear_update_util_hook(policy->cpu);

        if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
                limits = &performance_limits;
                if (policy->max >= policy->cpuinfo.max_freq) {
                        pr_debug("intel_pstate: set performance\n");
                        intel_pstate_set_performance_limits(limits);
                        goto out;
                }
        } else {
                pr_debug("intel_pstate: set powersave\n");
                limits = &powersave_limits;
        }

        limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
        limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0, 100);
        limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
                                              policy->cpuinfo.max_freq);
        limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);

        /* Normalize user input to [min_policy_pct, max_policy_pct] */
        limits->min_perf_pct = max(limits->min_policy_pct,
                                   limits->min_sysfs_pct);
        limits->min_perf_pct = min(limits->max_policy_pct,
                                   limits->min_perf_pct);
        limits->max_perf_pct = min(limits->max_policy_pct,
                                   limits->max_sysfs_pct);
        limits->max_perf_pct = max(limits->min_policy_pct,
                                   limits->max_perf_pct);

        /* Make sure min_perf_pct <= max_perf_pct */
        limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

        limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
                                  int_tofp(100));
        limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
                                  int_tofp(100));
        limits->max_perf = round_up(limits->max_perf, FRAC_BITS);

out:
        intel_pstate_set_update_util_hook(policy->cpu);

        intel_pstate_hwp_set_policy(policy);

        return 0;
}
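
/*
 * Worked example (illustrative): with cpuinfo.max_freq = 3,500,000 kHz,
 * policy->min = 1,400,000 and policy->max = 2,800,000 give
 * min_policy_pct = 40 and max_policy_pct = 80. If min_sysfs_pct = 50 and
 * max_sysfs_pct = 100, the effective limits become min_perf_pct = 50 and
 * max_perf_pct = 80, which are then converted to Q24.8 fractions in
 * min_perf and max_perf.
 */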

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
        cpufreq_verify_within_cpu_limits(policy);

        if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
            policy->policy != CPUFREQ_POLICY_PERFORMANCE)
                return -EINVAL;

        return 0;
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
        int cpu_num = policy->cpu;
        struct cpudata *cpu = all_cpu_data[cpu_num];

        pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);

        intel_pstate_clear_update_util_hook(cpu_num);

        if (hwp_active)
                return;

        intel_pstate_set_min_pstate(cpu);
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
        struct cpudata *cpu;
        int rc;

        rc = intel_pstate_init_cpu(policy->cpu);
        if (rc)
                return rc;

        cpu = all_cpu_data[policy->cpu];

        if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
                policy->policy = CPUFREQ_POLICY_PERFORMANCE;
        else
                policy->policy = CPUFREQ_POLICY_POWERSAVE;

        policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
        policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
        policy->cpuinfo.max_freq =
                cpu->pstate.turbo_pstate * cpu->pstate.scaling;
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
        cpumask_set_cpu(policy->cpu, policy->cpus);

        return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
        .flags		= CPUFREQ_CONST_LOOPS,
        .verify		= intel_pstate_verify_policy,
        .setpolicy	= intel_pstate_set_policy,
        .resume		= intel_pstate_hwp_set_policy,
        .get		= intel_pstate_get,
        .init		= intel_pstate_cpu_init,
        .stop_cpu	= intel_pstate_stop_cpu,
        .name		= "intel_pstate",
};

static int __initdata no_load;
static int __initdata no_hwp;
static int __initdata hwp_only;
static unsigned int force_load;

static int intel_pstate_msrs_not_valid(void)
{
        if (!pstate_funcs.get_max() ||
            !pstate_funcs.get_min() ||
            !pstate_funcs.get_turbo())
                return -ENODEV;

        return 0;
}

static void copy_pid_params(struct pstate_adjust_policy *policy)
{
        pid_params.sample_rate_ms = policy->sample_rate_ms;
        pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
        pid_params.p_gain_pct = policy->p_gain_pct;
        pid_params.i_gain_pct = policy->i_gain_pct;
        pid_params.d_gain_pct = policy->d_gain_pct;
        pid_params.deadband = policy->deadband;
        pid_params.setpoint = policy->setpoint;
}

static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
        pstate_funcs.get_max = funcs->get_max;
        pstate_funcs.get_max_physical = funcs->get_max_physical;
        pstate_funcs.get_min = funcs->get_min;
        pstate_funcs.get_turbo = funcs->get_turbo;
        pstate_funcs.get_scaling = funcs->get_scaling;
        pstate_funcs.get_val = funcs->get_val;
        pstate_funcs.get_vid = funcs->get_vid;
        pstate_funcs.get_target_pstate = funcs->get_target_pstate;
}

#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
        int i;

        for_each_possible_cpu(i) {
                acpi_status status;
                union acpi_object *pss;
                struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
                struct acpi_processor *pr = per_cpu(processors, i);

                if (!pr)
                        continue;

                status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
                if (ACPI_FAILURE(status))
                        continue;

                pss = buffer.pointer;
                if (pss && pss->type == ACPI_TYPE_PACKAGE) {
                        kfree(pss);
                        return false;
                }

                kfree(pss);
        }

        return true;
}

static bool intel_pstate_has_acpi_ppc(void)
{
        int i;

        for_each_possible_cpu(i) {
                struct acpi_processor *pr = per_cpu(processors, i);

                if (!pr)
                        continue;
                if (acpi_has_method(pr->handle, "_PPC"))
                        return true;
        }
        return false;
}

enum {
        PSS,
        PPC,
};

struct hw_vendor_info {
        u16 valid;
        char oem_id[ACPI_OEM_ID_SIZE];
        char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
        int oem_pwr_table;
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
        {1, "HP    ", "ProLiant", PSS},
        {1, "ORACLE", "X4-2    ", PPC},
        {1, "ORACLE", "X4-2L   ", PPC},
        {1, "ORACLE", "X4-2B   ", PPC},
        {1, "ORACLE", "X3-2    ", PPC},
        {1, "ORACLE", "X3-2L   ", PPC},
        {1, "ORACLE", "X3-2B   ", PPC},
        {1, "ORACLE", "X4470M2 ", PPC},
        {1, "ORACLE", "X4270M3 ", PPC},
        {1, "ORACLE", "X4270M2 ", PPC},
        {1, "ORACLE", "X4170M2 ", PPC},
        {1, "ORACLE", "X4170 M3", PPC},
        {1, "ORACLE", "X4275 M3", PPC},
        {1, "ORACLE", "X6-2    ", PPC},
        {1, "ORACLE", "Sudbury ", PPC},
        {0, "", ""},
};

static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
        struct acpi_table_header hdr;
        struct hw_vendor_info *v_info;
        const struct x86_cpu_id *id;
        u64 misc_pwr;

        id = x86_match_cpu(intel_pstate_cpu_oob_ids);
        if (id) {
                rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
                if (misc_pwr & (1 << 8))
                        return true;
        }

        if (acpi_disabled ||
            ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
                return false;

        for (v_info = vendor_info; v_info->valid; v_info++) {
                if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
                    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
                             ACPI_OEM_TABLE_ID_SIZE))
                        switch (v_info->oem_pwr_table) {
                        case PSS:
                                return intel_pstate_no_acpi_pss();
                        case PPC:
                                return intel_pstate_has_acpi_ppc() &&
                                        (!force_load);
                        }
        }

        return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
        { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
        {}
};

static int __init intel_pstate_init(void)
{
        int cpu, rc = 0;
        const struct x86_cpu_id *id;
        struct cpu_defaults *cpu_def;

        if (no_load)
                return -ENODEV;

        if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
                copy_cpu_funcs(&core_params.funcs);
                hwp_active++;
                goto hwp_cpu_matched;
        }

        id = x86_match_cpu(intel_pstate_cpu_ids);
        if (!id)
                return -ENODEV;

        cpu_def = (struct cpu_defaults *)id->driver_data;

        copy_pid_params(&cpu_def->pid_policy);
        copy_cpu_funcs(&cpu_def->funcs);

        if (intel_pstate_msrs_not_valid())
                return -ENODEV;

hwp_cpu_matched:
        /*
         * The Intel pstate driver will be ignored if the platform
         * firmware has its own power management modes.
         */
        if (intel_pstate_platform_pwr_mgmt_exists())
                return -ENODEV;

        pr_info("Intel P-state driver initializing.\n");

        all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
        if (!all_cpu_data)
                return -ENOMEM;

        if (!hwp_active && hwp_only)
                goto out;

        rc = cpufreq_register_driver(&intel_pstate_driver);
        if (rc)
                goto out;

        intel_pstate_debug_expose_params();
        intel_pstate_sysfs_expose_params();

        if (hwp_active)
                pr_info("intel_pstate: HWP enabled\n");

        return rc;
out:
        get_online_cpus();
        for_each_online_cpu(cpu) {
                if (all_cpu_data[cpu]) {
                        intel_pstate_clear_update_util_hook(cpu);
                        kfree(all_cpu_data[cpu]);
                }
        }

        put_online_cpus();
        vfree(all_cpu_data);
        return -ENODEV;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "disable"))
                no_load = 1;
        if (!strcmp(str, "no_hwp")) {
                pr_info("intel_pstate: HWP disabled\n");
                no_hwp = 1;
        }
        if (!strcmp(str, "force"))
                force_load = 1;
        if (!strcmp(str, "hwp_only"))
                hwp_only = 1;
        return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");
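
/*
 * Command-line usage sketch: intel_pstate_setup() above handles the
 * "intel_pstate=" early parameter, so booting with e.g.
 *
 *   intel_pstate=disable    keeps the driver from loading,
 *   intel_pstate=no_hwp     leaves hardware P states (HWP) disabled,
 *   intel_pstate=force      loads the driver even when the ACPI vendor
 *                           table indicates the platform firmware manages
 *                           P states itself,
 *   intel_pstate=hwp_only   registers the driver only on HWP-capable parts.
 */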