/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>

#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>
#endif

#define BYT_RATIOS		0x66a
#define BYT_VIDS		0x66b
#define BYT_TURBO_RATIOS	0x66c
#define BYT_TURBO_VIDS		0x66d

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

struct sample {
	int32_t core_pct_busy;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	int freq;
	ktime_t time;
};

struct pstate_data {
	int current_pstate;
	int min_pstate;
	int max_pstate;
	int max_pstate_physical;
	int scaling;
	int turbo_pstate;
};

struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct cpudata {
	int cpu;

	struct timer_list timer;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	ktime_t last_sample_time;
	u64 prev_aperf;
	u64 prev_mperf;
	u64 prev_tsc;
	struct sample sample;
#if IS_ENABLED(CONFIG_ACPI)
	struct acpi_processor_performance acpi_perf_data;
#endif
};

static struct cpudata **all_cpu_data;
struct pstate_adjust_policy {
	int sample_rate_ms;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	void (*set)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
};

struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;
static int no_acpi_perf;
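
/*
 * For illustration: the percentage limits below are mirrored as 24.8
 * fixed-point fractions (FRAC_BITS == 8, so int_tofp(1) == 256).  A
 * max_perf_pct of 75, for example, is stored as
 * div_fp(int_tofp(75), int_tofp(100)) == 192, i.e. 0.75, and
 * mul_fp(int_tofp(20), 192) == int_tofp(15).
 */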

struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
	int max_perf_ctl;
	int min_perf_ctl;
};

static struct perf_limits performance_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 100,
	.min_perf = int_tofp(1),
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

static struct perf_limits powersave_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
	.max_perf_ctl = 0,
	.min_perf_ctl = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif

#if IS_ENABLED(CONFIG_ACPI)
/*
 * The max target pstate ratio is an 8 bit value in both the PLATFORM_INFO
 * MSR and the TURBO_RATIO_LIMIT MSR, which the pstate driver stores in the
 * max_pstate and max_turbo_pstate fields.  The PERF_CTL MSR contains a
 * 16 bit value for the P state ratio, of which only the high 8 bits are
 * used.  For example, 0x1700 sets a target ratio of 0x17.  The _PSS control
 * value is stored in a format that can be written directly to the PERF_CTL
 * MSR, but in the intel_pstate driver this shift happens during the write
 * to PERF_CTL (e.g. core_set_pstate() for core processors).  This function
 * converts the _PSS control value to the intel_pstate driver format for
 * comparison and assignment.
 */
static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
{
	return cpu->acpi_perf_data.states[index].control >> 8;
}
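
/*
 * For illustration: a _PSS control value of 0x1100 maps to the native
 * ratio 0x11 (17), which with the core scaling of 100000 kHz per ratio
 * unit corresponds to 1700000 kHz (1.7 GHz).
 */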

static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int ret;
	bool turbo_absent = false;
	int max_pstate_index;
	int min_pss_ctl, max_pss_ctl, turbo_pss_ctl;
	int i;

	cpu = all_cpu_data[policy->cpu];

	pr_debug("intel_pstate: default limits 0x%x 0x%x 0x%x\n",
		 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
		 cpu->pstate.turbo_pstate);

	if (!cpu->acpi_perf_data.shared_cpu_map &&
	    zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map,
				    GFP_KERNEL, cpu_to_node(policy->cpu))) {
		return -ENOMEM;
	}

	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
						  policy->cpu);
	if (ret)
		return ret;

	/*
	 * Check if the control value in _PSS is for PERF_CTL MSR, which
	 * should guarantee that the states returned by it map to the states
	 * in our list directly.
	 */
	if (cpu->acpi_perf_data.control_register.space_id !=
	    ACPI_ADR_SPACE_FIXED_HARDWARE)
		return -EIO;

	pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu);
	for (i = 0; i < cpu->acpi_perf_data.state_count; i++)
		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
			 (u32) cpu->acpi_perf_data.states[i].power,
			 (u32) cpu->acpi_perf_data.states[i].control);

	/*
	 * If there is only one entry in _PSS, simply ignore _PSS and continue
	 * as usual without taking _PSS into account.
	 */
	if (cpu->acpi_perf_data.state_count < 2)
		return 0;

	turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
	min_pss_ctl = convert_to_native_pstate_format(cpu,
					cpu->acpi_perf_data.state_count - 1);
	/* Check if there is a turbo frequency in _PSS */
	if (turbo_pss_ctl <= cpu->pstate.max_pstate &&
	    turbo_pss_ctl > cpu->pstate.min_pstate) {
		pr_debug("intel_pstate: no turbo range exists in _PSS\n");
		limits->no_turbo = limits->turbo_disabled = 1;
		cpu->pstate.turbo_pstate = cpu->pstate.max_pstate;
		turbo_absent = true;
	}

	/* Check if the max non-turbo P state is below the Intel P state max */
	max_pstate_index = turbo_absent ? 0 : 1;
	max_pss_ctl = convert_to_native_pstate_format(cpu, max_pstate_index);
	if (max_pss_ctl < cpu->pstate.max_pstate &&
	    max_pss_ctl > cpu->pstate.min_pstate)
		cpu->pstate.max_pstate = max_pss_ctl;

	/* Check if the min perf is above the Intel P state min */
	if (min_pss_ctl > cpu->pstate.min_pstate &&
	    min_pss_ctl < cpu->pstate.max_pstate) {
		cpu->pstate.min_pstate = min_pss_ctl;
		policy->cpuinfo.min_freq = min_pss_ctl * cpu->pstate.scaling;
	}

	if (turbo_absent)
		policy->cpuinfo.max_freq = cpu->pstate.max_pstate *
						cpu->pstate.scaling;
	else {
		policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate *
						cpu->pstate.scaling;
		/*
		 * The _PSS table doesn't contain the whole turbo frequency
		 * range.  It only contains +1 MHz above the max non-turbo
		 * frequency, with a control value corresponding to the max
		 * turbo ratio.  But when cpufreq set_policy is called, it is
		 * called with this max frequency, which causes reduced
		 * performance because this driver uses the real max turbo
		 * frequency as the max frequency.  So correct this frequency
		 * in the _PSS table to the max turbo frequency based on the
		 * turbo ratio, and convert it to MHz, since the _PSS
		 * frequencies are in MHz.
		 */
		cpu->acpi_perf_data.states[0].core_frequency =
						turbo_pss_ctl * 100;
	}
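
	/*
	 * For illustration: if the highest _PSS entry reports 2701 MHz
	 * (max non-turbo 2700 MHz + 1 MHz) but its control value encodes
	 * the max turbo ratio 0x24 (36), the entry is rewritten here to
	 * 36 * 100 = 3600 MHz.
	 */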

	pr_debug("intel_pstate: Updated limits using _PSS 0x%x 0x%x 0x%x\n",
		 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
		 cpu->pstate.turbo_pstate);
	pr_debug("intel_pstate: policy max_freq=%d kHz min_freq=%d kHz\n",
		 policy->cpuinfo.max_freq, policy->cpuinfo.min_freq);

	return 0;
}

static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
	if (no_acpi_perf)
		return 0;

	acpi_processor_unregister_performance(policy->cpu);
	return 0;
}

#else
static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
{
	return 0;
}

static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
	return 0;
}
#endif

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = setpoint;
	pid->deadband = deadband;
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = int_tofp(pid->setpoint) - busy;

	if (abs(fp_error) <= int_tofp(pid->deadband))
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30.  This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}
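
/*
 * Worked example (illustrative values, using the default "core" gains set
 * up later in this file: setpoint 97, p_gain_pct 20, i_gain_pct 0,
 * d_gain_pct 0, deadband 0): for busy == int_tofp(92), fp_error is
 * int_tofp(5) == 1280, p_gain is div_fp(int_tofp(20), int_tofp(100)) == 51,
 * pterm is mul_fp(51, 1280) == 255, and result is 255 + 128 == 383, so
 * pid_calc() returns fp_toint(383) == 1 and the requested P state drops
 * by one step.
 */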

static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits->turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static void intel_pstate_hwp_set(void)
{
	int min, hw_min, max, hw_max, cpu, range, adj_range;
	u64 value, cap;

	rdmsrl(MSR_HWP_CAPABILITIES, cap);
	hw_min = HWP_LOWEST_PERF(cap);
	hw_max = HWP_HIGHEST_PERF(cap);
	range = hw_max - hw_min;

	get_online_cpus();

	for_each_online_cpu(cpu) {
		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		adj_range = limits->min_perf_pct * range / 100;
		min = hw_min + adj_range;
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		adj_range = limits->max_perf_pct * range / 100;
		max = hw_min + adj_range;
		if (limits->no_turbo) {
			hw_max = HWP_GUARANTEED_PERF(cap);
			if (hw_max < max)
				max = hw_max;
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}

	put_online_cpus();
}
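
/*
 * For illustration: with HWP_CAPABILITIES reporting lowest_perf 8 and
 * highest_perf 36, range is 28; a min_perf_pct of 25 gives
 * min = 8 + 25 * 28 / 100 = 15, and a max_perf_pct of 100 gives
 * max = 8 + 28 = 36, which is what gets programmed into HWP_REQUEST.
 */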

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active)
		return;
	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits->object);		\
	}

static ssize_t show_turbo_pct(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}

static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits->turbo_disabled)
		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits->no_turbo);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_turbo_state();
	if (limits->turbo_disabled) {
		pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
		return -EPERM;
	}

	limits->no_turbo = clamp_t(int, input, 0, 1);

	if (hwp_active)
		intel_pstate_hwp_set();

	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf_pct = max(limits->min_perf_pct,
				   limits->max_perf_pct);
	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->min_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->min_perf_pct = min(limits->max_perf_pct,
				   limits->min_perf_pct);
	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}
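
/*
 * These attributes are exposed under /sys/devices/system/cpu/intel_pstate/
 * (see intel_pstate_sysfs_expose_params() below).  For example, writing 75
 * to max_perf_pct caps the requested P states at roughly 75% of the
 * available performance range, and no_turbo reads back 1 when turbo is
 * disabled by the BIOS or unavailable.
 */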

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	BUG_ON(rc);
}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}

static int byt_get_min_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int byt_get_max_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int byt_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(BYT_TURBO_RATIOS, value);
	return value & 0x7F;
}

static void byt_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	val |= vid;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}

#define BYT_BCLK_FREQS 5
static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800 };

static int byt_get_scaling(void)
{
	u64 value;
	int i;

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x3;

	BUG_ON(i > BYT_BCLK_FREQS);

	return byt_freq_table[i] * 100;
}

static void byt_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(BYT_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
				cpudata->pstate.min_pstate));

	rdmsrl(BYT_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}
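
/*
 * For reference: in MSR_PLATFORM_INFO, bits 15:8 hold the maximum non-turbo
 * ratio and bits 47:40 the maximum efficiency ratio.  An illustrative value
 * of 0x0000080000002300 therefore yields max_pstate_physical == 0x23 (35,
 * i.e. 3.5 GHz at 100 MHz per ratio unit) and min_pstate == 0x08 (800 MHz).
 */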

static int core_get_max_pstate(void)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		/* Do some sanity checking for safety */
		if (plat_info & 0x600000000) {
			u64 tdp_ctrl;
			u64 tdp_ratio;
			int tdp_msr;

			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
			if (err)
				goto skip_tar;

			tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
			if (err)
				goto skip_tar;

			if (tdp_ratio - 1 == tar) {
				max_pstate = tar;
				pr_debug("max_pstate=TAC %x\n", max_pstate);
			} else {
				goto skip_tar;
			}
		}
	}

skip_tar:
	return max_pstate;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value) & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}

static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}

static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
	},
};

static struct cpu_defaults byt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = byt_get_max_pstate,
		.get_max_physical = byt_get_max_pstate,
		.get_min = byt_get_min_pstate,
		.get_turbo = byt_get_turbo_pstate,
		.set = byt_set_pstate,
		.get_scaling = byt_get_scaling,
		.get_vid = byt_get_vid,
	},
};

static struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
	},
};
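
/*
 * Worked example for the limit scaling below (illustrative values): with
 * turbo_pstate == 32, a max_perf of 0.75 (max_perf_pct == 75) gives
 * fp_toint(mul_fp(int_tofp(32), 192)) == 24, and a min_perf of 0.25 gives
 * fp_toint(mul_fp(int_tofp(32), 64)) == 8, so requested P states are
 * clamped to the range 8..24.
 */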

static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits->no_turbo || limits->turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
	if (limits->max_perf_ctl && limits->max_sysfs_pct >=
						limits->max_policy_pct) {
		*max = limits->max_perf_ctl;
	} else {
		max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf),
					       limits->max_perf));
		*max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate,
			       cpu->pstate.turbo_pstate);
	}

	if (limits->min_perf_ctl) {
		*min = limits->min_perf_ctl;
	} else {
		min_perf = fp_toint(mul_fp(int_tofp(max_perf),
					   limits->min_perf));
		*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
	}
}

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
{
	int max_perf, min_perf;

	if (force) {
		update_turbo_state();

		intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

		pstate = clamp_t(int, pstate, min_perf, max_perf);

		if (pstate == cpu->pstate.current_pstate)
			return;
	}
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}

static inline void intel_pstate_calc_busy(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int64_t core_pct;

	core_pct = int_tofp(sample->aperf) * int_tofp(100);
	core_pct = div64_u64(core_pct, int_tofp(sample->mperf));

	sample->freq = fp_toint(
		mul_fp(int_tofp(
			cpu->pstate.max_pstate_physical *
			cpu->pstate.scaling / 100),
			core_pct));

	sample->core_pct_busy = (int32_t)core_pct;
}

static inline void intel_pstate_sample(struct cpudata *cpu)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	if (cpu->prev_mperf == mperf) {
		local_irq_restore(flags);
		return;
	}

	tsc = rdtsc();
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = ktime_get();
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	intel_pstate_calc_busy(cpu);

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
}

static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(50);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}

static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(pid_params.sample_rate_ms);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}
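
/*
 * Worked example for intel_pstate_get_scaled_busy() below (illustrative
 * values): with core_pct_busy == int_tofp(75), max_pstate_physical == 32
 * and current_pstate == 16, core_busy becomes
 * mul_fp(int_tofp(75), div_fp(int_tofp(32), int_tofp(16))) == int_tofp(150),
 * i.e. the core was 150% as busy as the P state that was requested.
 */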

static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
	s64 duration_us;
	u32 sample_time;

	/*
	 * core_busy is the ratio of actual performance to max
	 * max_pstate is the max non turbo pstate available
	 * current_pstate was the pstate that was requested during
	 *	the last sample period.
	 *
	 * We normalize core_busy, which was our actual percent
	 * performance to what we requested during the last sample
	 * period.  The result will be a percentage of busy at a
	 * specified pstate.
	 */
	core_busy = cpu->sample.core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	/*
	 * Since we have a deferred timer, it will not fire unless
	 * we are in C0.  So, determine if the actual elapsed time
	 * is significantly greater (3x) than our sample interval.  If it
	 * is, then we were idle for a long enough period of time
	 * to adjust our busyness.
	 */
	sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
	duration_us = ktime_us_delta(cpu->sample.time,
				     cpu->last_sample_time);
	if (duration_us > sample_time * 3) {
		sample_ratio = div_fp(int_tofp(sample_time),
				      int_tofp(duration_us));
		core_busy = mul_fp(core_busy, sample_ratio);
	}

	return core_busy;
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int32_t busy_scaled;
	struct _pid *pid;
	signed int ctl;
	int from;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	pid = &cpu->pid;
	busy_scaled = intel_pstate_get_scaled_busy(cpu);

	ctl = pid_calc(pid, busy_scaled);

	/* Negative values of ctl increase the pstate and vice versa */
	intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl, true);

	sample = &cpu->sample;
	trace_pstate_sample(fp_toint(sample->core_pct_busy),
			    fp_toint(busy_scaled),
			    from,
			    cpu->pstate.current_pstate,
			    sample->mperf,
			    sample->aperf,
			    sample->tsc,
			    sample->freq);
}

static void intel_hwp_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;

	intel_pstate_sample(cpu);
	intel_hwp_set_sample_time(cpu);
}

static void intel_pstate_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;

	intel_pstate_sample(cpu);

	intel_pstate_adjust_busy_pstate(cpu);

	intel_pstate_set_sample_time(cpu);
}
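
/*
 * The entries below are raw x86 model numbers (family 6).  For reference,
 * 0x2a is Sandy Bridge, 0x37 is Bay Trail (Silvermont), 0x3c is Haswell,
 * 0x4c is Airmont, 0x4e/0x5e are Skylake and 0x57 is Knights Landing.
 */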

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF, \
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, byt_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3d, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	ICPU(0x47, core_params),
	ICPU(0x4c, byt_params),
	ICPU(0x4e, core_params),
	ICPU(0x4f, core_params),
	ICPU(0x5e, core_params),
	ICPU(0x56, core_params),
	ICPU(0x57, knl_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
	ICPU(0x56, core_params),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	if (!all_cpu_data[cpunum])
		all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
					       GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active)
		intel_pstate_hwp_enable(cpu);

	intel_pstate_get_cpu_pstates(cpu);

	init_timer_deferrable(&cpu->timer);
	cpu->timer.data = (unsigned long)cpu;
	cpu->timer.expires = jiffies + HZ/100;

	if (!hwp_active)
		cpu->timer.function = intel_pstate_timer_func;
	else
		cpu->timer.function = intel_hwp_timer_func;

	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu);

	add_timer_on(&cpu->timer, cpunum);

	pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct sample *sample;
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	sample = &cpu->sample;
	return sample->freq;
}
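
/*
 * For illustration: with cpuinfo.max_freq == 3200000 kHz, a policy of
 * min == 800000 and max == 2400000 maps to min_policy_pct == 25 and
 * max_policy_pct == 75 in intel_pstate_set_policy() below; the sysfs
 * percentages are then folded in on top of those bounds.
 */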

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
#if IS_ENABLED(CONFIG_ACPI)
	struct cpudata *cpu;
	int i;
#endif
	pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__,
		 policy->cpuinfo.max_freq, policy->max);
	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
	    policy->max >= policy->cpuinfo.max_freq) {
		pr_debug("intel_pstate: set performance\n");
		limits = &performance_limits;
		return 0;
	}

	pr_debug("intel_pstate: set powersave\n");
	limits = &powersave_limits;
	limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0, 100);
	limits->max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
				  int_tofp(100));
	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
				  int_tofp(100));

#if IS_ENABLED(CONFIG_ACPI)
	cpu = all_cpu_data[policy->cpu];
	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
		int control;

		control = convert_to_native_pstate_format(cpu, i);
		if (control * cpu->pstate.scaling == policy->max)
			limits->max_perf_ctl = control;
		if (control * cpu->pstate.scaling == policy->min)
			limits->min_perf_ctl = control;
	}

	pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n",
		 policy->cpuinfo.max_freq, policy->max, limits->min_perf_ctl,
		 limits->max_perf_ctl);
#endif

	if (hwp_active)
		intel_pstate_hwp_set();

	return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);

	del_timer_sync(&all_cpu_data[cpu_num]->timer);
	if (hwp_active)
		return;

	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->cpuinfo.max_freq =
			cpu->pstate.turbo_pstate * cpu->pstate.scaling;
	if (!no_acpi_perf)
		intel_pstate_init_perf_limits(policy);
	/*
	 * If there is no ACPI perf data or an error occurred, ignore it and
	 * use the Intel P state calculated limits, so this is not a fatal
	 * error.
	 */
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	return intel_pstate_exit_perf_limits(policy);
}

static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};

static int __initdata no_load;
static int __initdata no_hwp;
static int __initdata hwp_only;
static unsigned int force_load;

static int intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.set = funcs->set;
	pstate_funcs.get_vid = funcs->get_vid;
}

#if IS_ENABLED(CONFIG_ACPI)

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int  oem_pwr_table;
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};

static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
			     ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					(!force_load);
			}
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */

static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;

	if (no_load)
		return -ENODEV;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) {
		pr_info("intel_pstate: HWP enabled\n");
		hwp_active++;
	}

	if (!hwp_active && hwp_only)
		goto out;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			del_timer_sync(&all_cpu_data[cpu]->timer);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	if (!strcmp(str, "no_hwp")) {
		pr_info("intel_pstate: HWP disabled\n");
		no_hwp = 1;
	}
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	if (!strcmp(str, "no_acpi"))
		no_acpi_perf = 1;

	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");