/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>

#define BYT_RATIOS		0x66a
#define BYT_VIDS		0x66b
#define BYT_TURBO_RATIOS	0x66c
#define BYT_TURBO_VIDS		0x66d

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)


static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(int32_t x, int32_t y)
{
	return div_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}
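/*
 * Worked example of the fixed-point helpers above (added for
 * illustration; with FRAC_BITS == 8, values are signed 24.8
 * fixed point):
 *
 *	int_tofp(3)                      == 768   (3 << 8)
 *	mul_fp(int_tofp(3), int_tofp(2)) == (768 * 512) >> 8 == 1536
 *	                                 == int_tofp(6)
 *	div_fp(int_tofp(1), int_tofp(4)) == (256 << 8) / 1024 == 64
 *	                                 == 0.25 in fixed point
 *
 * ceiling_fp() rounds any fractional remainder up, so
 * ceiling_fp(64) == 1.
 */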
struct sample {
	int32_t core_pct_busy;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	int freq;
	ktime_t time;
};

struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	scaling;
	int	turbo_pstate;
};

struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct cpudata {
	int cpu;

	struct timer_list timer;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	ktime_t last_sample_time;
	u64	prev_aperf;
	u64	prev_mperf;
	u64	prev_tsc;
	struct sample sample;
};

static struct cpudata **all_cpu_data;
struct pstate_adjust_policy {
	int sample_rate_ms;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

struct pstate_funcs {
	int (*get_max)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	void (*set)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
};

struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;

struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};

static struct perf_limits limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = setpoint;
	pid->deadband = deadband;
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = int_tofp(pid->setpoint) - busy;

	if (abs(fp_error) <= int_tofp(pid->deadband))
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30.  This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}
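/*
 * Illustrative proportional-only step through pid_calc(), using the
 * core defaults defined further down (setpoint = 97, p_gain_pct = 20,
 * i_gain_pct = d_gain_pct = 0) and an assumed scaled busy value of 85%:
 *
 *	p_gain   = div_fp(int_tofp(20), int_tofp(100)) == 51  (~0.199)
 *	fp_error = int_tofp(97) - int_tofp(85)         == 3072
 *	pterm    = mul_fp(51, 3072)                    == 612
 *	result   = 612 + 128 (rounding); fp_toint(740) == 2
 *
 * The caller subtracts this from the current P state, so running
 * below the setpoint lowers the requested P state by two steps.
 */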
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits.turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

#define PCT_TO_HWP(x) ((x) * 255 / 100)
static void intel_pstate_hwp_set(void)
{
	int min, max, cpu;
	u64 value, freq;

	get_online_cpus();

	for_each_online_cpu(cpu) {
		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		min = PCT_TO_HWP(limits.min_perf_pct);
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		max = PCT_TO_HWP(limits.max_perf_pct);
		if (limits.no_turbo) {
			rdmsrl(MSR_HWP_CAPABILITIES, freq);
			max = HWP_GUARANTEED_PERF(freq);
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}

	put_online_cpus();
}
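/*
 * For illustration: PCT_TO_HWP() maps a percent limit onto the 0..255
 * HWP performance scale, e.g. PCT_TO_HWP(75) == 191.  The min and max
 * fields of MSR_HWP_REQUEST are rewritten in place above; when turbo
 * is disabled, the ceiling is pulled down to the guaranteed
 * performance level advertised in MSR_HWP_CAPABILITIES instead.
 */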
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active)
		return;
	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits.object);		\
	}

static ssize_t show_turbo_pct(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}

static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits.turbo_disabled)
		ret = sprintf(buf, "%u\n", limits.turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits.no_turbo);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_turbo_state();
	if (limits.turbo_disabled) {
		pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
		return -EPERM;
	}

	limits.no_turbo = clamp_t(int, input, 0, 1);

	if (hwp_active)
		intel_pstate_hwp_set();

	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.min_sysfs_pct = clamp_t(int, input, 0, 100);
	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	BUG_ON(rc);
}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(void)
{
	hwp_active++;
	pr_info("intel_pstate: HWP enabled\n");

	wrmsrl(MSR_PM_ENABLE, 0x1);
}

static int byt_get_min_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int byt_get_max_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int byt_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(BYT_TURBO_RATIOS, value);
	return value & 0x7F;
}

static void byt_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = pstate << 8;
	if (limits.no_turbo && !limits.turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	val |= vid;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}
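/*
 * Worked example with hypothetical values (not read from real
 * hardware): with vid.min == int_tofp(30), vid.max == int_tofp(70),
 * min_pstate == 4 and max_pstate == 20, byt_get_vid() below computes
 * ratio == div_fp(int_tofp(40), int_tofp(16)) == 2.5 in fixed point.
 * Requesting pstate 12 then gives vid_fp == 30 + 8 * 2.5 == 50, and
 * ceiling_fp() yields a VID of 50.  Turbo pstates bypass the
 * interpolation and use vid.turbo directly.
 */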
#define BYT_BCLK_FREQS 5
static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};

static int byt_get_scaling(void)
{
	u64 value;
	int i;

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x3;

	BUG_ON(i >= BYT_BCLK_FREQS);

	return byt_freq_table[i] * 100;
}

static void byt_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(BYT_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			 cpudata->pstate.min_pstate));

	rdmsrl(BYT_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value) & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}

static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = pstate << 8;
	if (limits.no_turbo && !limits.turbo_disabled)
		val |= (u64)1 << 32;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}

static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
	},
};

static struct cpu_defaults byt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = byt_get_max_pstate,
		.get_min = byt_get_min_pstate,
		.get_turbo = byt_get_turbo_pstate,
		.set = byt_set_pstate,
		.get_scaling = byt_get_scaling,
		.get_vid = byt_get_vid,
	},
};

static struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
	},
};
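/*
 * A sketch of how these defaults are consumed: the ICPU() table
 * further down stores a pointer to one of the cpu_defaults structures
 * in driver_data, and intel_pstate_init() copies the matched entry's
 * pid_policy and funcs into pid_params and pstate_funcs.  For example,
 * model 0x37 (Bay Trail) selects byt_params, so the VID-aware
 * byt_set_pstate() is used instead of core_set_pstate().
 */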
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits.no_turbo || limits.turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
	*max = clamp_t(int, max_perf_adj,
		       cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
{
	int max_perf, min_perf;

	if (force) {
		update_turbo_state();

		intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

		pstate = clamp_t(int, pstate, min_perf, max_perf);

		if (pstate == cpu->pstate.current_pstate)
			return;
	}
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}

static inline void intel_pstate_calc_busy(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int64_t core_pct;

	core_pct = int_tofp(sample->aperf) * int_tofp(100);
	core_pct = div64_u64(core_pct, int_tofp(sample->mperf));

	sample->freq = fp_toint(
		mul_fp(int_tofp(
			cpu->pstate.max_pstate * cpu->pstate.scaling / 100),
			core_pct));

	sample->core_pct_busy = (int32_t)core_pct;
}
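/*
 * Worked example for intel_pstate_calc_busy(), with assumed sample
 * deltas: aperf == 900 and mperf == 1000 give
 * core_pct == int_tofp(900 * 100) / 1000 == int_tofp(90).  With
 * max_pstate == 24 and scaling == 100000, the derived frequency is
 * fp_toint(mul_fp(int_tofp(24000), int_tofp(90))) == 2160000 kHz.
 */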
static inline void intel_pstate_sample(struct cpudata *cpu)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = native_read_tsc();
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = ktime_get();
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	intel_pstate_calc_busy(cpu);

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
}

static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(50);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}

static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(pid_params.sample_rate_ms);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}

static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
	u32 duration_us;
	u32 sample_time;

	/*
	 * core_busy is the ratio of actual performance to max
	 * max_pstate is the max non turbo pstate available
	 * current_pstate was the pstate that was requested during
	 * the last sample period.
	 *
	 * We normalize core_busy, which was our actual percent
	 * performance to what we requested during the last sample
	 * period.  The result will be a percentage of busy at a
	 * specified pstate.
	 */
	core_busy = cpu->sample.core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	/*
	 * Since we have a deferred timer, it will not fire unless
	 * we are in C0.  So, determine if the actual elapsed time
	 * is significantly greater (3x) than our sample interval.  If it
	 * is, then we were idle for a long enough period of time
	 * to adjust our busyness.
	 */
	sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
	duration_us = (u32) ktime_us_delta(cpu->sample.time,
					   cpu->last_sample_time);
	if (duration_us > sample_time * 3) {
		sample_ratio = div_fp(int_tofp(sample_time),
				      int_tofp(duration_us));
		core_busy = mul_fp(core_busy, sample_ratio);
	}

	return core_busy;
}
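/*
 * Worked example: with core_pct_busy == int_tofp(67) measured while
 * current_pstate == 16 and max_pstate == 24, the normalized value is
 * mul_fp(int_tofp(67), div_fp(int_tofp(24), int_tofp(16))) ~= 100,
 * i.e. the CPU was fully busy at the P state it was asked to run at.
 * If the sample additionally took more than three times longer than
 * sample_rate_ms (the CPU idled in between), the sample_ratio scaling
 * above cuts that estimate down proportionally.
 */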
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int32_t busy_scaled;
	struct _pid *pid;
	signed int ctl;
	int from;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	pid = &cpu->pid;
	busy_scaled = intel_pstate_get_scaled_busy(cpu);

	ctl = pid_calc(pid, busy_scaled);

	/* Negative values of ctl increase the pstate and vice versa */
	intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl, true);

	sample = &cpu->sample;
	trace_pstate_sample(fp_toint(sample->core_pct_busy),
			    fp_toint(busy_scaled),
			    from,
			    cpu->pstate.current_pstate,
			    sample->mperf,
			    sample->aperf,
			    sample->tsc,
			    sample->freq);
}

static void intel_hwp_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;

	intel_pstate_sample(cpu);
	intel_hwp_set_sample_time(cpu);
}

static void intel_pstate_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;

	intel_pstate_sample(cpu);

	intel_pstate_adjust_busy_pstate(cpu);

	intel_pstate_set_sample_time(cpu);
}

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, byt_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3d, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	ICPU(0x47, core_params),
	ICPU(0x4c, byt_params),
	ICPU(0x4e, core_params),
	ICPU(0x4f, core_params),
	ICPU(0x56, core_params),
	ICPU(0x57, knl_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
	ICPU(0x56, core_params),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	if (!all_cpu_data[cpunum])
		all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
					       GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;
	intel_pstate_get_cpu_pstates(cpu);

	init_timer_deferrable(&cpu->timer);
	cpu->timer.data = (unsigned long)cpu;
	cpu->timer.expires = jiffies + HZ/100;

	if (!hwp_active)
		cpu->timer.function = intel_pstate_timer_func;
	else
		cpu->timer.function = intel_hwp_timer_func;

	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu);

	add_timer_on(&cpu->timer, cpunum);

	pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct sample *sample;
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	sample = &cpu->sample;
	return sample->freq;
}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
	    policy->max >= policy->cpuinfo.max_freq) {
		limits.min_policy_pct = 100;
		limits.min_perf_pct = 100;
		limits.min_perf = int_tofp(1);
		limits.max_policy_pct = 100;
		limits.max_perf_pct = 100;
		limits.max_perf = int_tofp(1);
		limits.no_turbo = 0;
		return 0;
	}

	limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0, 100);
	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();

	return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);

	del_timer_sync(&all_cpu_data[cpu_num]->timer);
	if (hwp_active)
		return;

	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->cpuinfo.max_freq =
		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};
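/*
 * Illustrative sysfs usage (paths as created by
 * intel_pstate_sysfs_expose_params() above):
 *
 *	# cap all CPUs at 80% of available performance
 *	echo 80 > /sys/devices/system/cpu/intel_pstate/max_perf_pct
 *	# opt out of turbo P states
 *	echo 1 > /sys/devices/system/cpu/intel_pstate/no_turbo
 */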
static int __initdata no_load;
static int __initdata no_hwp;
static int __initdata hwp_only;
static unsigned int force_load;

static int intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.set = funcs->set;
	pstate_funcs.get_vid = funcs->get_vid;
}

#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16 valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int oem_pwr_table;
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{0, "", ""},
};
static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
			     ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					(!force_load);
			}
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */

static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;

	if (no_load)
		return -ENODEV;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
		intel_pstate_hwp_enable();

	if (!hwp_active && hwp_only)
		goto out;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			del_timer_sync(&all_cpu_data[cpu]->timer);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	if (!strcmp(str, "no_hwp"))
		no_hwp = 1;
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");