/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>

#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#endif

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

static inline u64 mul_ext_fp(u64 x, u64 y)
{
	return (x * y) >> EXT_FRAC_BITS;
}

static inline u64 div_ext_fp(u64 x, u64 y)
{
	return div64_u64(x << EXT_FRAC_BITS, y);
}

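/*
 * Worked example of the fixed-point helpers above (illustrative only):
 * with FRAC_BITS == 8, values are stored in Q24.8 format, so
 * int_tofp(3) == 768, mul_fp(int_tofp(3), int_tofp(2)) == int_tofp(6),
 * and div_fp(75, 100) == 192, which represents 0.75 (192 / 256). The
 * "ext" variants add EXT_BITS == 6 more fractional bits, so there a
 * ratio of 1.0 is stored as 1 << 14 == 16384.
 */
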
/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate next
 *			P state. This can be different from core_avg_perf
 *			to account for cpu idle period
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing next P State.
 */
struct sample {
	int32_t core_avg_perf;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	u64 time;
};

/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical: This is the physical Max P state for a processor
 *			This can be higher than the max_pstate which can
 *			be limited by platform thermal design power limits
 * @scaling:		Scaling factor to convert frequency to cpufreq
 *			frequency units
 * @turbo_pstate:	Max Turbo P state possible for this platform
 *
 * Stores the per cpu model P state limits and current P state.
 */
struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	max_pstate_physical;
	int	scaling;
	int	turbo_pstate;
};

/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P State.
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - Min P State)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling)
 * This data is used in Atom platforms, where in addition to target P state,
 * the voltage data needs to be specified to select next P State.
 */
struct vid_data {
	int	min;
	int	max;
	int	turbo;
	int32_t	ratio;
};

/**
 * struct _pid -	Stores PID data
 * @setpoint:		Target set point for busyness or performance
 * @integral:		Storage for accumulated error values
 * @p_gain:		PID proportional gain
 * @i_gain:		PID integral gain
 * @d_gain:		PID derivative gain
 * @deadband:		PID deadband
 * @last_err:		Last error storage for integral part of PID calculation
 *
 * Stores PID coefficients and last error for PID controller.
 */
struct _pid {
	int	setpoint;
	int32_t	integral;
	int32_t	p_gain;
	int32_t	i_gain;
	int32_t	d_gain;
	int	deadband;
	int32_t	last_err;
};

/**
 * struct perf_limits - Store user and policy limits
 * @no_turbo:		User requested turbo state from intel_pstate sysfs
 * @turbo_disabled:	Platform turbo status either from msr
 *			MSR_IA32_MISC_ENABLE or when maximum available pstate
 *			matches the maximum turbo pstate
 * @max_perf_pct:	Effective maximum performance limit in percentage, this
 *			is the minimum of the limit enforced by the cpufreq
 *			policy and the user-set limit from intel_pstate sysfs
 * @min_perf_pct:	Effective minimum performance limit in percentage, this
 *			is the maximum of the limit enforced by the cpufreq
 *			policy and the user-set limit from intel_pstate sysfs
 * @max_perf:		This is a scaled value between 0 and 255 for max_perf_pct
 *			This value is used to limit max pstate
 * @min_perf:		This is a scaled value between 0 and 255 for min_perf_pct
 *			This value is used to limit min pstate
 * @max_policy_pct:	The maximum performance in percentage enforced by
 *			cpufreq setpolicy interface
 * @max_sysfs_pct:	The maximum performance in percentage enforced by
 *			intel pstate sysfs interface, unused when per cpu
 *			controls are enforced
 * @min_policy_pct:	The minimum performance in percentage enforced by
 *			cpufreq setpolicy interface
 * @min_sysfs_pct:	The minimum performance in percentage enforced by
 *			intel pstate sysfs interface, unused when per cpu
 *			controls are enforced
 *
 * Storage for user and policy defined limits.
 */
struct perf_limits {
	int	no_turbo;
	int	turbo_disabled;
	int	max_perf_pct;
	int	min_perf_pct;
	int32_t	max_perf;
	int32_t	min_perf;
	int	max_policy_pct;
	int	max_sysfs_pct;
	int	min_policy_pct;
	int	min_sysfs_pct;
};

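/*
 * Illustrative example (not part of the driver logic): with
 * max_perf_pct == 75, max_perf == div_fp(75, 100) == 192, i.e. 0.75 in
 * Q24.8 fixed point. intel_pstate_get_min_max() later multiplies the
 * highest available ratio by this fraction, so a turbo_pstate of 32
 * would be capped at fp_toint(32 * 192) == 24.
 */
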
/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @policy:		CPUFreq policy value
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @iowait_boost:	iowait-related boost fraction
 * @last_update:	Time of the last update.
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @pid:		Stores PID parameters for this CPU
 * @last_sample_time:	Last Sample time
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: IO Wait time difference from last and
 *			current sample
 * @sample:		Storage for storing last Sample data
 * @perf_limits:	Pointer to perf_limits unique to this CPU. Not all
 *			fields in the structure are applicable when per CPU
 *			controls are enforced
 * @acpi_perf_data:	Stores ACPI perf information read from _PSS
 * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
 *
 * This structure stores per CPU instance data for all CPUs.
 */
struct cpudata {
	int cpu;

	unsigned int policy;
	struct update_util_data update_util;
	bool update_util_set;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	u64	last_update;
	u64	last_sample_time;
	u64	prev_aperf;
	u64	prev_mperf;
	u64	prev_tsc;
	u64	prev_cummulative_iowait;
	struct sample sample;
	struct perf_limits *perf_limits;
#ifdef CONFIG_ACPI
	struct acpi_processor_performance acpi_perf_data;
	bool valid_pss_table;
#endif
	unsigned int iowait_boost;
};

static struct cpudata **all_cpu_data;

/**
 * struct pstate_adjust_policy - Stores static PID configuration data
 * @sample_rate_ms:	PID calculation sample rate in ms
 * @sample_rate_ns:	Sample rate calculation in ns
 * @deadband:		PID deadband
 * @setpoint:		PID Setpoint
 * @p_gain_pct:		PID proportional gain
 * @i_gain_pct:		PID integral gain
 * @d_gain_pct:		PID derivative gain
 *
 * Stores per CPU model static PID configuration data.
 */
struct pstate_adjust_policy {
	int sample_rate_ms;
	s64 sample_rate_ns;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non turbo effective P state
 * @get_max_physical:	Callback to get maximum non turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 * @get_target_pstate:	Callback to a function to calculate next P state to use
 *
 * Core and Atom CPU models have different ways to get P state limits. This
 * structure is used to store those callbacks.
 */
struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	u64 (*get_val)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
	int32_t (*get_target_pstate)(struct cpudata *);
};

/**
 * struct cpu_defaults - Per CPU model default config data
 * @pid_policy:	PID config data
 * @funcs:	Callback function data
 */
struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);

static struct pstate_adjust_policy pid_params __read_mostly;
static struct pstate_funcs pstate_funcs __read_mostly;
static int hwp_active __read_mostly;
static bool per_cpu_limits __read_mostly;

#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif

static struct perf_limits performance_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 100,
	.min_perf = int_tofp(1),
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

static struct perf_limits powersave_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif

static DEFINE_MUTEX(intel_pstate_limits_lock);

#ifdef CONFIG_ACPI

static bool intel_pstate_get_ppc_enable_status(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
	    acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
		return true;

	return acpi_ppc;
}

static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int ret;
	int i;

	if (hwp_active)
		return;

	if (!intel_pstate_get_ppc_enable_status())
		return;

	cpu = all_cpu_data[policy->cpu];

	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
						  policy->cpu);
	if (ret)
		return;

	/*
	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
	 * guarantee that the states returned by it map to the states in our
	 * list directly.
	 */
	if (cpu->acpi_perf_data.control_register.space_id !=
						ACPI_ADR_SPACE_FIXED_HARDWARE)
		goto err;

	/*
	 * If there is only one entry in _PSS, simply ignore _PSS and continue
	 * as usual without taking _PSS into account.
	 */
	if (cpu->acpi_perf_data.state_count < 2)
		goto err;

	pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
			 (u32) cpu->acpi_perf_data.states[i].power,
			 (u32) cpu->acpi_perf_data.states[i].control);
	}

	/*
	 * The _PSS table doesn't contain the whole turbo frequency range.
	 * It contains only one entry at +1 MHz above the max non turbo
	 * frequency, with a control value corresponding to the max turbo
	 * ratio. But when cpufreq set_policy is called, it will be called
	 * with this max frequency, which would reduce performance, since
	 * this driver uses the real max turbo frequency as the max
	 * frequency. So correct this frequency in the _PSS table to the
	 * max turbo frequency based on the turbo state (and convert it to
	 * MHz, as _PSS frequencies are in MHz).
	 */
	if (!limits->turbo_disabled)
		cpu->acpi_perf_data.states[0].core_frequency =
					policy->cpuinfo.max_freq / 1000;
	cpu->valid_pss_table = true;
	pr_debug("_PPC limits will be enforced\n");

	return;

err:
	cpu->valid_pss_table = false;
	acpi_processor_unregister_performance(policy->cpu);
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[policy->cpu];
	if (!cpu->valid_pss_table)
		return;

	acpi_processor_unregister_performance(policy->cpu);
}

#else
static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}
#endif

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = int_tofp(setpoint);
	pid->deadband = int_tofp(deadband);
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(percent, 100);
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(percent, 100);
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(percent, 100);
}

static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = pid->setpoint - busy;

	if (abs(fp_error) <= pid->deadband)
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30. This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}

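/*
 * Worked pid_calc() example under the Core defaults (illustrative):
 * setpoint == 97, p_gain_pct == 20 (so p_gain == div_fp(20, 100) == 51),
 * i_gain == d_gain == 0, deadband == 0. For busy == int_tofp(80),
 * fp_error == int_tofp(17) == 4352 and pterm == (51 * 4352) >> 8 == 867,
 * so with the rounding term the result is fp_toint(867 + 128) == 3 and
 * the caller steps the requested P state down by about 3.
 */
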
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits->turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static void intel_pstate_hwp_set(const struct cpumask *cpumask)
{
	int min, hw_min, max, hw_max, cpu, range, adj_range;
	struct perf_limits *perf_limits = limits;
	u64 value, cap;

	for_each_cpu(cpu, cpumask) {
		int max_perf_pct, min_perf_pct;

		if (per_cpu_limits)
			perf_limits = all_cpu_data[cpu]->perf_limits;

		rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
		hw_min = HWP_LOWEST_PERF(cap);
		hw_max = HWP_HIGHEST_PERF(cap);
		range = hw_max - hw_min;

		max_perf_pct = perf_limits->max_perf_pct;
		min_perf_pct = perf_limits->min_perf_pct;

		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		adj_range = min_perf_pct * range / 100;
		min = hw_min + adj_range;
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		adj_range = max_perf_pct * range / 100;
		max = hw_min + adj_range;
		if (limits->no_turbo) {
			hw_max = HWP_GUARANTEED_PERF(cap);
			if (hw_max < max)
				max = hw_max;
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}
}

static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
{
	if (hwp_active)
		intel_pstate_hwp_set(policy->cpus);

	return 0;
}

static void intel_pstate_hwp_set_online_cpus(void)
{
	get_online_cpus();
	intel_pstate_hwp_set(cpu_online_mask);
	put_online_cpus();
}

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

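/*
 * Usage note (assuming debugfs is mounted at /sys/kernel/debug): when
 * the PID-based governor is active, the files created below allow
 * run-time tuning, e.g.:
 *
 *	echo 15 > /sys/kernel/debug/pstate_snb/p_gain_pct
 *
 * Each write goes through pid_param_set(), which also resets the PID
 * state on all online CPUs via intel_pstate_reset_all_pid().
 */
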
static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active ||
	    pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load)
		return;

	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits->object);		\
	}

static ssize_t show_turbo_pct(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(no_turbo, total);
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}

static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits->turbo_disabled)
		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits->no_turbo);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_limits_lock);

	update_turbo_state();
	if (limits->turbo_disabled) {
		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
		mutex_unlock(&intel_pstate_limits_lock);
		return -EPERM;
	}

	limits->no_turbo = clamp_t(int, input, 0, 1);

	mutex_unlock(&intel_pstate_limits_lock);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();

	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_limits_lock);

	limits->max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf_pct = max(limits->min_perf_pct,
				   limits->max_perf_pct);
	limits->max_perf = div_fp(limits->max_perf_pct, 100);

	mutex_unlock(&intel_pstate_limits_lock);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();
	return count;
}

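/*
 * Illustrative sysfs example: writing 50 to
 * /sys/devices/system/cpu/intel_pstate/max_perf_pct (when global limits
 * are in use) sets max_sysfs_pct == 50, so max_perf becomes
 * div_fp(50, 100) == 128 (0.5 in Q24.8) and a turbo_pstate of 32 is
 * subsequently capped at fp_toint(32 * 128) == 16.
 */
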
static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_limits_lock);

	limits->min_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->min_perf_pct = min(limits->max_perf_pct,
				   limits->min_perf_pct);
	limits->min_perf = div_fp(limits->min_perf_pct, 100);

	mutex_unlock(&intel_pstate_limits_lock);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();
	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	if (WARN_ON(!intel_pstate_kobject))
		return;

	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	if (WARN_ON(rc))
		return;

	/*
	 * If per cpu limits are enforced there are no global limits, so
	 * return without creating max/min_perf_pct attributes.
	 */
	if (per_cpu_limits)
		return;

	rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
	WARN_ON(rc);

	rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
	WARN_ON(rc);
}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable HWP notification interrupts, as we don't process them */
	if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}

static int atom_get_min_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_TURBO_RATIOS, value);
	return value & 0x7F;
}

static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	return val | vid;
}

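/*
 * Layout sketch of the value built by atom_get_val() above (and
 * core_get_val() below): bits 15:8 hold the target ratio (pstate << 8)
 * and bit 32 is set when the user requested no_turbo while turbo is
 * otherwise available; on Atom, bits 7:0 additionally carry the VID.
 * For example, a pstate of 0x1C encodes as 0x1C00 before the VID is
 * ORed in.
 */
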
static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 from SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	WARN_ON(i > 4);

	return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 from SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}

static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(ATOM_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			cpudata->pstate.min_pstate));

	rdmsrl(ATOM_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		/* Do some sanity checking for safety */
		if (plat_info & 0x600000000) {
			u64 tdp_ctrl;
			u64 tdp_ratio;
			int tdp_msr;

			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
			if (err)
				goto skip_tar;

			tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
			if (err)
				goto skip_tar;

			/* For level 1 and 2, bits[23:16] contain the ratio */
			if (tdp_ctrl)
				tdp_ratio >>= 16;

			tdp_ratio &= 0xff; /* ratios are only 8 bits long */
			if (tdp_ratio - 1 == tar) {
				max_pstate = tar;
				pr_debug("max_pstate=TAC %x\n", max_pstate);
			} else {
				goto skip_tar;
			}
		}
	}

skip_tar:
	return max_pstate;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value) & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}

static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	return val;
}

static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}

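/*
 * Illustrative conversion: a P state is a ratio, and the frequency in
 * kHz is pstate * scaling. With core_get_scaling() == 100000, ratio 24
 * corresponds to 2,400,000 kHz (2.4 GHz); on Silvermont/Airmont the
 * multiplier instead comes from the MSR_FSB_FREQ tables above, e.g.
 * 83300 kHz per ratio step.
 */
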
static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static const struct cpu_defaults silvermont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = silvermont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static const struct cpu_defaults airmont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = airmont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static const struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static const struct cpu_defaults bxt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;
	struct perf_limits *perf_limits = limits;

	if (limits->no_turbo || limits->turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	if (per_cpu_limits)
		perf_limits = cpu->perf_limits;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
	max_perf_adj = fp_toint(max_perf * perf_limits->max_perf);
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(max_perf * perf_limits->min_perf);
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	cpu->pstate.current_pstate = pstate;
	/*
	 * Generally, there is no guarantee that this code will always run on
	 * the CPU being updated, so force the register update to run on the
	 * right CPU.
	 */
	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
		      pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static void intel_pstate_max_within_limits(struct cpudata *cpu)
{
	int min_pstate, max_pstate;

	update_turbo_state();
	intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
	intel_pstate_set_pstate(cpu, max_pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	intel_pstate_set_min_pstate(cpu);
}

static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;

	sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
}

static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();
	if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = time;
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
	/*
	 * First time this function is invoked in a given cycle, all of the
	 * previous sample data fields are equal to zero or stale and they must
	 * be populated with meaningful numbers for things to work, so assume
	 * that sample.time will always be reset before setting the utilization
	 * update hook and make the caller skip the sample then.
	 */
	return !!cpu->last_sample_time;
}

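/*
 * Illustrative example: intel_pstate_calc_avg_perf() above stores
 * div_ext_fp(aperf, mperf) with 14 fractional bits, so equal APERF and
 * MPERF deltas yield core_avg_perf == 1 << 14 == 16384 (a ratio of
 * 1.0), and get_avg_frequency() below then reports
 * max_pstate_physical * scaling, e.g. 32 * 100000 == 3,200,000 kHz.
 */
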
static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->sample.core_avg_perf,
			  cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
}

static inline int32_t get_avg_pstate(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->pstate.max_pstate_physical,
			  cpu->sample.core_avg_perf);
}

static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int32_t busy_frac, boost;
	int target, avg_pstate;

	busy_frac = div_fp(sample->mperf, sample->tsc);

	boost = cpu->iowait_boost;
	cpu->iowait_boost >>= 1;

	if (busy_frac < boost)
		busy_frac = boost;

	sample->busy_scaled = busy_frac * 100;

	target = limits->no_turbo || limits->turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	target += target >> 2;
	target = mul_fp(target, busy_frac);
	if (target < cpu->pstate.min_pstate)
		target = cpu->pstate.min_pstate;

	/*
	 * If the average P-state during the previous cycle was higher than the
	 * current target, add 50% of the difference to the target to reduce
	 * possible performance oscillations and offset possible performance
	 * loss related to moving the workload from one CPU to another within
	 * a package/module.
	 */
	avg_pstate = get_avg_pstate(cpu);
	if (avg_pstate > target)
		target += (avg_pstate - target) >> 1;

	return target;
}

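/*
 * Worked example for get_target_pstate_use_cpu_load() (illustrative):
 * if MPERF advanced for half of the TSC interval, busy_frac ==
 * div_fp(mperf, tsc) == 128 (0.5 in Q24.8). With a turbo_pstate of 32
 * the base target is 32 + (32 >> 2) == 40, so mul_fp(40, 128) == 20,
 * before the averaging against the previous cycle's average P state.
 */
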
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
	int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
	u64 duration_ns;

	/*
	 * perf_scaled is the ratio of the average P-state during the last
	 * sampling period to the P-state requested last time (in percent).
	 *
	 * That measures the system's response to the previous P-state
	 * selection.
	 */
	max_pstate = cpu->pstate.max_pstate_physical;
	current_pstate = cpu->pstate.current_pstate;
	perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf,
				 div_fp(100 * max_pstate, current_pstate));

	/*
	 * Since our utilization update callback will not run unless we are
	 * in C0, check if the actual elapsed time is significantly greater (3x)
	 * than our sample interval. If it is, then we were idle for a long
	 * enough period of time to adjust our performance metric.
	 */
	duration_ns = cpu->sample.time - cpu->last_sample_time;
	if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
		sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
		perf_scaled = mul_fp(perf_scaled, sample_ratio);
	} else {
		sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
		if (sample_ratio < int_tofp(1))
			perf_scaled = 0;
	}

	cpu->sample.busy_scaled = perf_scaled;
	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
}

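/*
 * Illustrative example of the idle correction above: with the default
 * 10 ms sample interval, a 40 ms gap between samples gives
 * sample_ratio == div_fp(10 * NSEC_PER_MSEC, 40 * NSEC_PER_MSEC) == 64
 * (0.25 in Q24.8), so perf_scaled is quartered before being fed to the
 * PID, reflecting that the CPU was mostly idle.
 */
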
static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	update_turbo_state();

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
	pstate = clamp_t(int, pstate, min_perf, max_perf);
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	if (pstate == cpu->pstate.current_pstate)
		return;

	cpu->pstate.current_pstate = pstate;
	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int from, target_pstate;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
		cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);

	intel_pstate_update_pstate(cpu, target_pstate);

	sample = &cpu->sample;
	trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
		fp_toint(sample->busy_scaled),
		from,
		cpu->pstate.current_pstate,
		sample->mperf,
		sample->aperf,
		sample->tsc,
		get_avg_frequency(cpu),
		fp_toint(cpu->iowait_boost * 100));
}

static void intel_pstate_update_util(struct update_util_data *data, u64 time,
				     unsigned int flags)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	u64 delta_ns;

	if (pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load) {
		if (flags & SCHED_CPUFREQ_IOWAIT) {
			cpu->iowait_boost = int_tofp(1);
		} else if (cpu->iowait_boost) {
			/* Clear iowait_boost if the CPU may have been idle. */
			delta_ns = time - cpu->last_update;
			if (delta_ns > TICK_NSEC)
				cpu->iowait_boost = 0;
		}
		cpu->last_update = time;
	}

	delta_ns = time - cpu->sample.time;
	if ((s64)delta_ns >= pid_params.sample_rate_ns) {
		bool sample_taken = intel_pstate_sample(cpu, time);

		if (sample_taken) {
			intel_pstate_calc_avg_perf(cpu);
			if (!hwp_active)
				intel_pstate_adjust_busy_pstate(cpu);
		}
	}
}

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(INTEL_FAM6_SANDYBRIDGE, 		core_params),
	ICPU(INTEL_FAM6_SANDYBRIDGE_X,		core_params),
	ICPU(INTEL_FAM6_ATOM_SILVERMONT1,	silvermont_params),
	ICPU(INTEL_FAM6_IVYBRIDGE,		core_params),
	ICPU(INTEL_FAM6_HASWELL_CORE,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_CORE,		core_params),
	ICPU(INTEL_FAM6_IVYBRIDGE_X,		core_params),
	ICPU(INTEL_FAM6_HASWELL_X,		core_params),
	ICPU(INTEL_FAM6_HASWELL_ULT,		core_params),
	ICPU(INTEL_FAM6_HASWELL_GT3E,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_GT3E,		core_params),
	ICPU(INTEL_FAM6_ATOM_AIRMONT,		airmont_params),
	ICPU(INTEL_FAM6_SKYLAKE_MOBILE,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_X,		core_params),
	ICPU(INTEL_FAM6_SKYLAKE_DESKTOP,	core_params),
	ICPU(INTEL_FAM6_BROADWELL_XEON_D,	core_params),
	ICPU(INTEL_FAM6_XEON_PHI_KNL,		knl_params),
	ICPU(INTEL_FAM6_ATOM_GOLDMONT,		bxt_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
	ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
	ICPU(INTEL_FAM6_BROADWELL_X, core_params),
	ICPU(INTEL_FAM6_SKYLAKE_X, core_params),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[cpunum];

	if (!cpu) {
		unsigned int size = sizeof(struct cpudata);

		if (per_cpu_limits)
			size += sizeof(struct perf_limits);

		cpu = kzalloc(size, GFP_KERNEL);
		if (!cpu)
			return -ENOMEM;

		all_cpu_data[cpunum] = cpu;
		if (per_cpu_limits)
			cpu->perf_limits = (struct perf_limits *)(cpu + 1);
	}

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active) {
		intel_pstate_hwp_enable(cpu);
		pid_params.sample_rate_ms = 50;
		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
	}

	intel_pstate_get_cpu_pstates(cpu);

	intel_pstate_busy_pid_reset(cpu);

	pr_debug("controlling: cpu %d\n", cpunum);

	return 0;
}

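/*
 * Note on the allocation in intel_pstate_init_cpu() above: when
 * per_cpu_limits is set, a single kzalloc() covers both structures and
 * cpu->perf_limits == (struct perf_limits *)(cpu + 1) points at the
 * per-CPU limits appended immediately after the cpudata instance.
 */
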
static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	return cpu ? get_avg_frequency(cpu) : 0;
}

static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	if (cpu->update_util_set)
		return;

	/* Prevent intel_pstate_update_util() from using stale data. */
	cpu->sample.time = 0;
	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
				     intel_pstate_update_util);
	cpu->update_util_set = true;
}

static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
	struct cpudata *cpu_data = all_cpu_data[cpu];

	if (!cpu_data->update_util_set)
		return;

	cpufreq_remove_update_util_hook(cpu);
	cpu_data->update_util_set = false;
	synchronize_sched();
}

static void intel_pstate_set_performance_limits(struct perf_limits *limits)
{
	mutex_lock(&intel_pstate_limits_lock);
	limits->no_turbo = 0;
	limits->turbo_disabled = 0;
	limits->max_perf_pct = 100;
	limits->max_perf = int_tofp(1);
	limits->min_perf_pct = 100;
	limits->min_perf = int_tofp(1);
	limits->max_policy_pct = 100;
	limits->max_sysfs_pct = 100;
	limits->min_policy_pct = 0;
	limits->min_sysfs_pct = 0;
	mutex_unlock(&intel_pstate_limits_lock);
}

static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
					    struct perf_limits *limits)
{
	mutex_lock(&intel_pstate_limits_lock);

	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
					      policy->cpuinfo.max_freq);
	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
	if (policy->max == policy->min) {
		limits->min_policy_pct = limits->max_policy_pct;
	} else {
		limits->min_policy_pct = (policy->min * 100) /
						policy->cpuinfo.max_freq;
		limits->min_policy_pct = clamp_t(int, limits->min_policy_pct,
						 0, 100);
	}

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

	limits->min_perf = div_fp(limits->min_perf_pct, 100);
	limits->max_perf = div_fp(limits->max_perf_pct, 100);
	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);

	mutex_unlock(&intel_pstate_limits_lock);

	pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
		 limits->max_perf_pct, limits->min_perf_pct);
}

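/*
 * Illustrative example for intel_pstate_update_perf_limits() above:
 * with cpuinfo.max_freq == 3,200,000 kHz, policy->max == 2,400,000 kHz
 * and policy->min == 800,000 kHz map to max_policy_pct ==
 * DIV_ROUND_UP(2400000 * 100, 3200000) == 75 and min_policy_pct == 25,
 * which then clamp the sysfs percentages.
 */
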
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	struct perf_limits *perf_limits = NULL;

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
		 policy->cpuinfo.max_freq, policy->max);

	cpu = all_cpu_data[policy->cpu];
	cpu->policy = policy->policy;

	if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
	    policy->max < policy->cpuinfo.max_freq &&
	    policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
		pr_debug("policy->max > max non turbo frequency\n");
		policy->max = policy->cpuinfo.max_freq;
	}

	if (per_cpu_limits)
		perf_limits = cpu->perf_limits;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
		if (!perf_limits) {
			limits = &performance_limits;
			perf_limits = limits;
		}
		if (policy->max >= policy->cpuinfo.max_freq) {
			pr_debug("set performance\n");
			intel_pstate_set_performance_limits(perf_limits);
			goto out;
		}
	} else {
		pr_debug("set powersave\n");
		if (!perf_limits) {
			limits = &powersave_limits;
			perf_limits = limits;
		}
	}

	intel_pstate_update_perf_limits(policy, perf_limits);
 out:
	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
		/*
		 * NOHZ_FULL CPUs need this as the governor callback may not
		 * be invoked on them.
		 */
		intel_pstate_clear_update_util_hook(policy->cpu);
		intel_pstate_max_within_limits(cpu);
	}

	intel_pstate_set_update_util_hook(policy->cpu);

	intel_pstate_hwp_set_policy(policy);

	return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_debug("CPU %d exiting\n", cpu_num);

	intel_pstate_clear_update_util_hook(cpu_num);

	if (hwp_active)
		return;

	intel_pstate_set_min_pstate(cpu);
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	/*
	 * We need sane values in cpu->perf_limits, so inherit from the global
	 * limits, which are seeded with values based on the
	 * CONFIG_CPU_FREQ_DEFAULT_GOV_* setting during boot-up.
	 */
	if (per_cpu_limits)
		memcpy(cpu->perf_limits, limits, sizeof(struct perf_limits));

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	update_turbo_state();
	policy->cpuinfo.max_freq = limits->turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	policy->cpuinfo.max_freq *= cpu->pstate.scaling;

	intel_pstate_init_acpi_perf_limits(policy);
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	intel_pstate_exit_perf_limits(policy);

	return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.resume		= intel_pstate_hwp_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};

static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
static unsigned int force_load __initdata;

static int __init intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void __init copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

#ifdef CONFIG_ACPI
static void intel_pstate_use_acpi_profile(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_MOBILE)
		pstate_funcs.get_target_pstate =
			get_target_pstate_use_cpu_load;
}
#else
static void intel_pstate_use_acpi_profile(void)
{
}
#endif

static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max   = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min   = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.get_val   = funcs->get_val;
	pstate_funcs.get_vid   = funcs->get_vid;
	pstate_funcs.get_target_pstate = funcs->get_target_pstate;

	intel_pstate_use_acpi_profile();
}

#ifdef CONFIG_ACPI

static bool __init intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool __init intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int  oem_pwr_table;
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] __initdata = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};

static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
						ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					!force_load;
			}
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
	{}
};

static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;

	if (no_load)
		return -ENODEV;

	if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
		copy_cpu_funcs(&core_params.funcs);
		hwp_active++;
		goto hwp_cpu_matched;
	}

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

hwp_cpu_matched:
	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	pr_info("Intel P-state driver initializing\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (!hwp_active && hwp_only)
		goto out;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	if (hwp_active)
		pr_info("HWP enabled\n");

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			intel_pstate_clear_update_util_hook(cpu);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	if (!strcmp(str, "no_hwp")) {
		pr_info("HWP disabled\n");
		no_hwp = 1;
	}
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	if (!strcmp(str, "per_cpu_perf_limits"))
		per_cpu_limits = true;

#ifdef CONFIG_ACPI
	if (!strcmp(str, "support_acpi_ppc"))
		acpi_ppc = true;
#endif

	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");

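/*
 * Boot-time usage (illustrative): the driver is tuned via the
 * "intel_pstate=" early parameter parsed above, e.g.
 *
 *	intel_pstate=disable
 *	intel_pstate=no_hwp
 *	intel_pstate=force
 *	intel_pstate=hwp_only
 *	intel_pstate=per_cpu_perf_limits
 *	intel_pstate=support_acpi_ppc	(CONFIG_ACPI only)
 */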