/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>

/* Baytrail-specific MSRs for ratio and voltage ID (VID) information */
#define BYT_RATIOS		0x66a
#define BYT_VIDS		0x66b
#define BYT_TURBO_RATIOS	0x66c
#define BYT_TURBO_VIDS		0x66d

/* 24.8 fixed-point arithmetic helpers */
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}
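/*
 * Worked example of the 24.8 fixed-point helpers above (illustrative
 * values only): with FRAC_BITS == 8, int_tofp(3) == 768 and
 * int_tofp(2) == 512.  mul_fp(768, 512) == (768 * 512) >> 8 == 1536,
 * which is int_tofp(6); div_fp(768, 512) == (768 << 8) / 512 == 384,
 * i.e. 1.5 in fixed point; and ceiling_fp(384) rounds the fractional
 * part up and returns 2.
 */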
struct sample {
	int32_t core_pct_busy;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	int freq;
	ktime_t time;
};

struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	scaling;
	int	turbo_pstate;
};

struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct cpudata {
	int cpu;

	struct timer_list timer;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	ktime_t last_sample_time;
	u64	prev_aperf;
	u64	prev_mperf;
	u64	prev_tsc;
	struct sample sample;
};

static struct cpudata **all_cpu_data;

struct pstate_adjust_policy {
	int sample_rate_ms;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

struct pstate_funcs {
	int (*get_max)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	void (*set)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
};

struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;

struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};

static struct perf_limits limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = setpoint;
	pid->deadband = deadband;
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = int_tofp(pid->setpoint) - busy;

	if (abs(fp_error) <= int_tofp(pid->deadband))
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30.  This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}
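/*
 * Worked example of pid_calc() (illustrative, using the core defaults
 * defined later: setpoint 97, p_gain_pct 20, i_gain_pct 0, d_gain_pct 0,
 * deadband 0): a scaled busy value of int_tofp(100) gives
 * fp_error == int_tofp(97) - int_tofp(100) == -768.  p_gain is 51
 * (~0.199 in fixed point), so pterm == (51 * -768) >> 8 == -153, and the
 * I and D terms are zero.  Adding the rounding bias of 128 gives -25,
 * which fp_toint() shifts to -1.  The caller subtracts the result from
 * the current P state, so busy above the setpoint raises the P state by
 * one step.
 */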
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits.turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

#define PCT_TO_HWP(x) (x * 255 / 100)
static void intel_pstate_hwp_set(void)
{
	int min, max, cpu;
	u64 value, freq;

	get_online_cpus();

	for_each_online_cpu(cpu) {
		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		min = PCT_TO_HWP(limits.min_perf_pct);
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		max = PCT_TO_HWP(limits.max_perf_pct);
		if (limits.no_turbo) {
			rdmsrl(MSR_HWP_CAPABILITIES, freq);
			max = HWP_GUARANTEED_PERF(freq);
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}

	put_online_cpus();
}

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active)
		return;
	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits.object);		\
	}

static ssize_t show_turbo_pct(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}
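/*
 * Example of the turbo_pct computation (hypothetical ratios): with
 * min_pstate 8, max_pstate 20 and turbo_pstate 28 there are 21 P states
 * in total, 13 of them non-turbo.  turbo_fp is then 13/21 (~0.62 in
 * fixed point), so the file reports 100 - 61 == 39, i.e. roughly 39%
 * of the available P states are turbo states.
 */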
static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits.turbo_disabled)
		ret = sprintf(buf, "%u\n", limits.turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits.no_turbo);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_turbo_state();
	if (limits.turbo_disabled) {
		pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
		return -EPERM;
	}

	limits.no_turbo = clamp_t(int, input, 0, 1);

	if (hwp_active)
		intel_pstate_hwp_set();

	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
	limits.max_perf_pct = max(limits.min_perf_pct, limits.max_perf_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.min_sysfs_pct = clamp_t(int, input, 0, 100);
	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
	limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
	limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	BUG_ON(rc);
}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	pr_info("intel_pstate: HWP enabled\n");

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}

static int byt_get_min_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int byt_get_max_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int byt_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(BYT_TURBO_RATIOS, value);
	return value & 0x7F;
}

static void byt_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits.no_turbo && !limits.turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	val |= vid;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}
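/*
 * Sketch of the PERF_CTL value written above (illustrative numbers):
 * the requested ratio sits in bits 15:8, bit 32 disengages turbo when
 * the user asked for no_turbo, and on Baytrail the low byte carries
 * the VID.  The VID is interpolated linearly between vid.min and
 * vid.max across the non-turbo range; e.g. with vid.min == int_tofp(40),
 * vid.max == int_tofp(56) and max_pstate - min_pstate == 16, vid.ratio
 * is int_tofp(1), so each P state step above min_pstate adds one VID
 * unit, while any turbo P state uses the dedicated turbo VID instead.
 */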
#define BYT_BCLK_FREQS 5
static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800 };

static int byt_get_scaling(void)
{
	u64 value;
	int i;

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x3;

	BUG_ON(i >= BYT_BCLK_FREQS);

	return byt_freq_table[i] * 100;
}

static void byt_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(BYT_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			 cpudata->pstate.min_pstate));

	rdmsrl(BYT_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value) & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}

static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits.no_turbo && !limits.turbo_disabled)
		val |= (u64)1 << 32;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}

static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
	},
};

static struct cpu_defaults byt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = byt_get_max_pstate,
		.get_min = byt_get_min_pstate,
		.get_turbo = byt_get_turbo_pstate,
		.set = byt_set_pstate,
		.get_scaling = byt_get_scaling,
		.get_vid = byt_get_vid,
	},
};

static struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
	},
};

static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits.no_turbo || limits.turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
	*max = clamp_t(int, max_perf_adj,
		       cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
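/*
 * Example of the clamping above (hypothetical values): with
 * turbo_pstate 28, min_pstate 8 and max_perf_pct set to 50 via sysfs,
 * limits.max_perf is 0.5 in fixed point and max_perf_adj becomes 14,
 * so the driver never requests a P state above 14 even though turbo is
 * available.  A min_perf_pct of 25 would compute 28 * 0.25 == 7, which
 * the clamp then floors at min_pstate == 8, the hardware minimum.
 */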
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
{
	int max_perf, min_perf;

	if (force) {
		update_turbo_state();

		intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

		pstate = clamp_t(int, pstate, min_perf, max_perf);

		if (pstate == cpu->pstate.current_pstate)
			return;
	}
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}

static inline void intel_pstate_calc_busy(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int64_t core_pct;

	core_pct = int_tofp(sample->aperf) * int_tofp(100);
	core_pct = div64_u64(core_pct, int_tofp(sample->mperf));

	sample->freq = fp_toint(
		mul_fp(int_tofp(
			cpu->pstate.max_pstate * cpu->pstate.scaling / 100),
			core_pct));

	sample->core_pct_busy = (int32_t)core_pct;
}
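/*
 * Worked example of the busy calculation (illustrative counter deltas):
 * if APERF advanced by 18000 and MPERF by 24000 since the last sample,
 * core_pct is 18000 * 100 / 24000 == 75 (percent, in fixed point).
 * With max_pstate 20 and the core scaling of 100000 kHz per ratio unit,
 * sample->freq is 20 * 100000 / 100 * 75 == 1500000 kHz, i.e. the
 * average effective frequency over the sample interval was 1.5 GHz.
 */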
static inline void intel_pstate_sample(struct cpudata *cpu)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = native_read_tsc();
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = ktime_get();
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	intel_pstate_calc_busy(cpu);

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
}

static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(50);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}

static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(pid_params.sample_rate_ms);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}

static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
	s64 duration_us;
	u32 sample_time;

	/*
	 * core_busy is the ratio of actual performance to max
	 * max_pstate is the max non turbo pstate available
	 * current_pstate was the pstate that was requested during
	 * the last sample period.
	 *
	 * We normalize core_busy, which was our actual percent
	 * performance to what we requested during the last sample
	 * period.  The result will be a percentage of busy at a
	 * specified pstate.
	 */
	core_busy = cpu->sample.core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	/*
	 * Since we have a deferred timer, it will not fire unless
	 * we are in C0.  So, determine if the actual elapsed time
	 * is significantly greater (3x) than our sample interval.  If it
	 * is, then we were idle for a long enough period of time
	 * to adjust our busyness.
	 */
	sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
	duration_us = ktime_us_delta(cpu->sample.time,
				     cpu->last_sample_time);
	if (duration_us > sample_time * 3) {
		sample_ratio = div_fp(int_tofp(sample_time),
				      int_tofp(duration_us));
		core_busy = mul_fp(core_busy, sample_ratio);
	}

	return core_busy;
}
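/*
 * Example of the normalization above (illustrative): a core that was
 * 45% busy while running at P state 12, on a part whose max non-turbo
 * P state is 24, scales to 45 * (24/12) == 90, i.e. the same load
 * would have kept the core 90% busy at the requested P state.  If the
 * deferrable timer then fired 40 ms after a 10 ms sample period, the
 * result is further scaled by 10/40 to account for the idle time.
 */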
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int32_t busy_scaled;
	struct _pid *pid;
	signed int ctl;
	int from;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	pid = &cpu->pid;
	busy_scaled = intel_pstate_get_scaled_busy(cpu);

	ctl = pid_calc(pid, busy_scaled);

	/* Negative values of ctl increase the pstate and vice versa */
	intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl, true);

	sample = &cpu->sample;
	trace_pstate_sample(fp_toint(sample->core_pct_busy),
			    fp_toint(busy_scaled),
			    from,
			    cpu->pstate.current_pstate,
			    sample->mperf,
			    sample->aperf,
			    sample->tsc,
			    sample->freq);
}

static void intel_hwp_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;

	intel_pstate_sample(cpu);
	intel_hwp_set_sample_time(cpu);
}

static void intel_pstate_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;

	intel_pstate_sample(cpu);

	intel_pstate_adjust_busy_pstate(cpu);

	intel_pstate_set_sample_time(cpu);
}

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, byt_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3d, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	ICPU(0x47, core_params),
	ICPU(0x4c, byt_params),
	ICPU(0x4e, core_params),
	ICPU(0x4f, core_params),
	ICPU(0x5e, core_params),
	ICPU(0x56, core_params),
	ICPU(0x57, knl_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
	ICPU(0x56, core_params),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	if (!all_cpu_data[cpunum])
		all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
					       GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active)
		intel_pstate_hwp_enable(cpu);

	intel_pstate_get_cpu_pstates(cpu);

	init_timer_deferrable(&cpu->timer);
	cpu->timer.data = (unsigned long)cpu;
	cpu->timer.expires = jiffies + HZ/100;

	if (!hwp_active)
		cpu->timer.function = intel_pstate_timer_func;
	else
		cpu->timer.function = intel_hwp_timer_func;

	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu);

	add_timer_on(&cpu->timer, cpunum);

	pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct sample *sample;
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	sample = &cpu->sample;
	return sample->freq;
}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
	    policy->max >= policy->cpuinfo.max_freq) {
		limits.min_policy_pct = 100;
		limits.min_perf_pct = 100;
		limits.min_perf = int_tofp(1);
		limits.max_policy_pct = 100;
		limits.max_perf_pct = 100;
		limits.max_perf = int_tofp(1);
		limits.no_turbo = 0;
		return 0;
	}

	limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0, 100);
	limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0, 100);

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
	limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);

	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();

	return 0;
}
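/*
 * Example of the policy percentage mapping above (hypothetical
 * frequencies): with cpuinfo.max_freq of 3200000 kHz, a policy range
 * of 800000..2400000 kHz yields min_policy_pct 25 and max_policy_pct 75.
 * The sysfs limits are folded in afterwards, so a previously written
 * max_perf_pct of 90 would still be capped at 75 here.
 */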
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);

	del_timer_sync(&all_cpu_data[cpu_num]->timer);
	if (hwp_active)
		return;

	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->cpuinfo.max_freq =
		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};

static int __initdata no_load;
static int __initdata no_hwp;
static int __initdata hwp_only;
static unsigned int force_load;

static int intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.set = funcs->set;
	pstate_funcs.get_vid = funcs->get_vid;
}

#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int  oem_pwr_table;
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};

static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
			     ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					(!force_load);
			}
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */

static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;

	if (no_load)
		return -ENODEV;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
		hwp_active++;

	if (!hwp_active && hwp_only)
		goto out;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			del_timer_sync(&all_cpu_data[cpu]->timer);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	if (!strcmp(str, "no_hwp"))
		no_hwp = 1;
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	return 0;
}
early_param("intel_pstate", intel_pstate_setup);
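/*
 * Command line usage, per the handlers above: "intel_pstate=disable"
 * keeps the driver from loading, "intel_pstate=no_hwp" skips hardware
 * P states even on HWP-capable parts, "intel_pstate=force" loads the
 * driver despite platform firmware power management, and
 * "intel_pstate=hwp_only" registers the driver only when HWP is in use.
 */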
MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");