/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>

#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d

/*
 * Fixed-point arithmetic: values carry FRAC_BITS fractional bits, so
 * int_tofp(1) == 256 and fp_toint() truncates the fraction away.
 */
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

struct sample {
	int32_t core_pct_busy;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	int freq;
	u64 time;
};

struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	max_pstate_physical;
	int	scaling;
	int	turbo_pstate;
};

struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct cpudata {
	int cpu;

	struct update_util_data update_util;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	u64	last_sample_time;
	u64	prev_aperf;
	u64	prev_mperf;
	u64	prev_tsc;
	u64	prev_cumulative_iowait;
	struct sample sample;
};

static struct cpudata **all_cpu_data;

struct pstate_adjust_policy {
	int sample_rate_ms;
	s64 sample_rate_ns;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	u64 (*get_val)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
	int32_t (*get_target_pstate)(struct cpudata *);
};

struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;

struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};

static struct perf_limits performance_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 100,
	.min_perf = int_tofp(1),
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

static struct perf_limits powersave_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = int_tofp(setpoint);
	pid->deadband = int_tofp(deadband);
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = pid->setpoint - busy;

	if (abs(fp_error) <= pid->deadband)
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30.  This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
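	 *
	 * With FRAC_BITS = 8 the clamp below therefore stores at most
	 * int_tofp(30) == 7680 in pid->integral.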
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	/* Round to nearest integer: add 0.5 in fixed point, then truncate. */
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}

static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits->turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static void intel_pstate_hwp_set(const struct cpumask *cpumask)
{
	int min, hw_min, max, hw_max, cpu, range, adj_range;
	u64 value, cap;

	rdmsrl(MSR_HWP_CAPABILITIES, cap);
	hw_min = HWP_LOWEST_PERF(cap);
	hw_max = HWP_HIGHEST_PERF(cap);
	range = hw_max - hw_min;

	for_each_cpu(cpu, cpumask) {
		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		adj_range = limits->min_perf_pct * range / 100;
		min = hw_min + adj_range;
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		adj_range = limits->max_perf_pct * range / 100;
		max = hw_min + adj_range;
		if (limits->no_turbo) {
			hw_max = HWP_GUARANTEED_PERF(cap);
			if (hw_max < max)
				max = hw_max;
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}
}

static void intel_pstate_hwp_set_online_cpus(void)
{
	get_online_cpus();
	intel_pstate_hwp_set(cpu_online_mask);
	put_online_cpus();
}

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active)
		return;
	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits->object);		\
	}

static ssize_t show_turbo_pct(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}

static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits->turbo_disabled)
		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits->no_turbo);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_turbo_state();
	if (limits->turbo_disabled) {
		pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
		return -EPERM;
	}

	limits->no_turbo = clamp_t(int, input, 0, 1);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();

	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf_pct = max(limits->min_perf_pct,
				   limits->max_perf_pct);
	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();
	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->min_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->min_perf_pct = min(limits->max_perf_pct,
				   limits->min_perf_pct);
	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();
	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	BUG_ON(rc);
}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable HWP notification interrupts as we don't process them */
	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}

static int atom_get_min_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_TURBO_RATIOS, value);
	return value & 0x7F;
}

static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	return val | vid;
}

static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 of the SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	WARN_ON(i > 4);

	return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 of the SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}

static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(ATOM_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			 cpudata->pstate.min_pstate));

	rdmsrl(ATOM_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}
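
/*
 * A note on the MSR_PLATFORM_INFO layout the core_get_*_pstate() helpers
 * rely on (per Intel's SDM): bits 15:8 hold the maximum non-turbo ratio
 * and bits 47:40 hold the maximum efficiency (minimum) ratio, which is
 * what the (value >> 8) and (value >> 40) shifts above and below extract.
 */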

static int core_get_max_pstate_physical(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		/* Do some sanity checking for safety */
		if (plat_info & 0x600000000) {
			u64 tdp_ctrl;
			u64 tdp_ratio;
			int tdp_msr;

			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
			if (err)
				goto skip_tar;

			tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
			if (err)
				goto skip_tar;

			if (tdp_ratio - 1 == tar) {
				max_pstate = tar;
				pr_debug("max_pstate=TAC %x\n", max_pstate);
			} else {
				goto skip_tar;
			}
		}
	}

skip_tar:
	return max_pstate;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = value & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}

static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	return val;
}

static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value >> 8) & 0xFF;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static struct cpu_defaults silvermont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = silvermont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static struct cpu_defaults airmont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = airmont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits->no_turbo || limits->turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	/*
	 * Performance can be limited by the user through sysfs, by the
	 * cpufreq policy, or by cpu-specific default values determined
	 * through experimentation.
	 */
	max_perf_adj = fp_toint(max_perf * limits->max_perf);
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(max_perf * limits->min_perf);
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}

static inline void intel_pstate_record_pstate(struct cpudata *cpu, int pstate)
{
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	cpu->pstate.current_pstate = pstate;
}

static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
	int pstate = cpu->pstate.min_pstate;

	intel_pstate_record_pstate(cpu, pstate);
	/*
	 * Generally, there is no guarantee that this code will always run on
	 * the CPU being updated, so force the register update to run on the
	 * right CPU.
	 */
	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
		      pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	intel_pstate_set_min_pstate(cpu);
}

static inline void intel_pstate_calc_busy(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int64_t core_pct;

	core_pct = int_tofp(sample->aperf) * int_tofp(100);
	core_pct = div64_u64(core_pct, int_tofp(sample->mperf));

	sample->core_pct_busy = (int32_t)core_pct;
}

static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();
	if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = time;
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
	return true;
}

static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
	return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf *
			 cpu->pstate.scaling, cpu->sample.mperf);
}

static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	u64 cumulative_iowait, delta_iowait_us;
	u64 delta_iowait_mperf;
	u64 mperf, now;
	int32_t cpu_load;

	cumulative_iowait = get_cpu_iowait_time_us(cpu->cpu, &now);

	/*
	 * Convert iowait time into number of IO cycles spent at max_freq.
	 * IO is considered as busy only for the cpu_load algorithm. For
	 * performance this is not needed since we always try to reach the
	 * maximum P-State, so we are already boosting the IOs.
	 */
	delta_iowait_us = cumulative_iowait - cpu->prev_cumulative_iowait;
	delta_iowait_mperf = div64_u64(delta_iowait_us * cpu->pstate.scaling *
				       cpu->pstate.max_pstate, MSEC_PER_SEC);

	mperf = cpu->sample.mperf + delta_iowait_mperf;
	cpu->prev_cumulative_iowait = cumulative_iowait;

	/*
	 * The load can be estimated as the ratio of the mperf counter,
	 * which runs at a constant frequency only during active (C0)
	 * periods, to the time stamp counter, which runs at the same
	 * frequency but also during C-states.
	 */
	cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc);
	cpu->sample.busy_scaled = cpu_load;

	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, cpu_load);
}

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
	u64 duration_ns;

	intel_pstate_calc_busy(cpu);

	/*
	 * core_busy is the ratio of actual performance to max.
	 * max_pstate is the max non-turbo pstate available.
	 * current_pstate was the pstate that was requested during
	 * the last sample period.
	 *
	 * We normalize core_busy, which was our actual percent
	 * performance, to what we requested during the last sample
	 * period.  The result will be a percentage of busy at a
	 * specified pstate.
	 */
	core_busy = cpu->sample.core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	/*
	 * Since our utilization update callback will not run unless we are
	 * in C0, check if the actual elapsed time is significantly greater (3x)
	 * than our sample interval.  If it is, then we were idle for a long
	 * enough period of time to adjust our busyness.
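	 * For example, at the default 10 ms sample rate, any gap longer
	 * than 30 ms scales core_busy down in proportion to the time spent
	 * idle.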
	 */
	duration_ns = cpu->sample.time - cpu->last_sample_time;
	if ((s64)duration_ns > pid_params.sample_rate_ns * 3
	    && cpu->last_sample_time > 0) {
		sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
				      int_tofp(duration_ns));
		core_busy = mul_fp(core_busy, sample_ratio);
	}

	cpu->sample.busy_scaled = core_busy;
	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy);
}

static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	update_turbo_state();

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
	pstate = clamp_t(int, pstate, min_perf, max_perf);
	if (pstate == cpu->pstate.current_pstate)
		return;

	intel_pstate_record_pstate(cpu, pstate);
	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int from, target_pstate;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	target_pstate = pstate_funcs.get_target_pstate(cpu);

	intel_pstate_update_pstate(cpu, target_pstate);

	sample = &cpu->sample;
	trace_pstate_sample(fp_toint(sample->core_pct_busy),
			    fp_toint(sample->busy_scaled),
			    from,
			    cpu->pstate.current_pstate,
			    sample->mperf,
			    sample->aperf,
			    sample->tsc,
			    get_avg_frequency(cpu));
}

static void intel_pstate_update_util(struct update_util_data *data, u64 time,
				     unsigned long util, unsigned long max)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	u64 delta_ns = time - cpu->sample.time;

	if ((s64)delta_ns >= pid_params.sample_rate_ns) {
		bool sample_taken = intel_pstate_sample(cpu, time);

		if (sample_taken && !hwp_active)
			intel_pstate_adjust_busy_pstate(cpu);
	}
}

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, silvermont_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3d, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	ICPU(0x47, core_params),
	ICPU(0x4c, airmont_params),
	ICPU(0x4e, core_params),
	ICPU(0x4f, core_params),
	ICPU(0x5e, core_params),
	ICPU(0x56, core_params),
	ICPU(0x57, knl_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
	ICPU(0x56, core_params),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	if (!all_cpu_data[cpunum])
		all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
					       GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active) {
		intel_pstate_hwp_enable(cpu);
		pid_params.sample_rate_ms = 50;
		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
	}

	intel_pstate_get_cpu_pstates(cpu);

	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu, 0);

	cpu->update_util.func = intel_pstate_update_util;

	pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	return get_avg_frequency(cpu);
}

static void intel_pstate_set_update_util_hook(unsigned int cpu)
{
	cpufreq_set_update_util_data(cpu, &all_cpu_data[cpu]->update_util);
}

static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
	cpufreq_set_update_util_data(cpu, NULL);
	synchronize_sched();
}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	intel_pstate_clear_update_util_hook(policy->cpu);

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
	    policy->max >= policy->cpuinfo.max_freq) {
		pr_debug("intel_pstate: set performance\n");
		limits = &performance_limits;
		goto out;
	}

	pr_debug("intel_pstate: set powersave\n");
	limits = &powersave_limits;
	limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0, 100);
	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
					      policy->cpuinfo.max_freq);
	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
				  int_tofp(100));
	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
				  int_tofp(100));
	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);

out:
	intel_pstate_set_update_util_hook(policy->cpu);

	if (hwp_active)
		intel_pstate_hwp_set(policy->cpus);

	return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);

	intel_pstate_clear_update_util_hook(cpu_num);

	if (hwp_active)
		return;

	intel_pstate_set_min_pstate(cpu);
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

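	/*
	 * Policy frequencies are in kHz: the pstate ratio multiplied by
	 * "scaling" (kHz per ratio unit, e.g. 100000 for core CPUs).
	 */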
	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->cpuinfo.max_freq =
		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};

static int __initdata no_load;
static int __initdata no_hwp;
static int __initdata hwp_only;
static unsigned int force_load;

static int intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.get_val = funcs->get_val;
	pstate_funcs.get_vid = funcs->get_vid;
	pstate_funcs.get_target_pstate = funcs->get_target_pstate;
}

#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16 valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int oem_pwr_table;
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};

static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
			     ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					!force_load;
			}
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
	{}
};

static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;

	if (no_load)
		return -ENODEV;

	if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
		copy_cpu_funcs(&core_params.funcs);
		hwp_active++;
		goto hwp_cpu_matched;
	}

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

hwp_cpu_matched:
	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
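	 * (For example, an HP ProLiant exposing ACPI _PSS tables, or an
	 * Oracle system exposing _PPC when "intel_pstate=force" is not set.)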
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (!hwp_active && hwp_only)
		goto out;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	if (hwp_active)
		pr_info("intel_pstate: HWP enabled\n");

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			intel_pstate_clear_update_util_hook(cpu);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	if (!strcmp(str, "no_hwp")) {
		pr_info("intel_pstate: HWP disabled\n");
		no_hwp = 1;
	}
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");