/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>

#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64(x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}
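
/*
 * A quick worked example of the fixed-point helpers above (illustrative
 * only): with FRAC_BITS == 8 the driver works in 24.8 fixed point, so
 * int_tofp(1) == 256 and fp_toint(256) == 1.  mul_fp(int_tofp(3),
 * int_tofp(2)) == int_tofp(6), div_fp(int_tofp(3), int_tofp(2)) ==
 * int_tofp(1) + 128 (i.e. 1.5), and ceiling_fp(int_tofp(1) + 1) == 2,
 * because any non-zero fractional bits round the result up.
 */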

struct sample {
	int32_t core_pct_busy;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	int freq;
	u64 time;
};

struct pstate_data {
	int current_pstate;
	int min_pstate;
	int max_pstate;
	int max_pstate_physical;
	int scaling;
	int turbo_pstate;
};

struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct cpudata {
	int cpu;

	struct update_util_data update_util;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	u64 last_sample_time;
	u64 prev_aperf;
	u64 prev_mperf;
	u64 prev_tsc;
	u64 prev_cumulative_iowait;
	struct sample sample;
};

static struct cpudata **all_cpu_data;

struct pstate_adjust_policy {
	int sample_rate_ms;
	s64 sample_rate_ns;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	void (*set)(struct cpudata *, int pstate);
	void (*get_vid)(struct cpudata *);
	int32_t (*get_target_pstate)(struct cpudata *);
};

struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;

struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};

static struct perf_limits performance_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 100,
	.min_perf = int_tofp(1),
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

static struct perf_limits powersave_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = int_tofp(setpoint);
	pid->deadband = int_tofp(deadband);
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = pid->setpoint - busy;

	if (abs(fp_error) <= pid->deadband)
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30.  This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}
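
/*
 * Illustrative numbers for pid_calc() above, using the core tuning set
 * later in this file (setpoint 97, p_gain_pct 20, i/d gains 0): a busy
 * value of int_tofp(100) gives fp_error == int_tofp(-3), so pterm comes
 * out at roughly int_tofp(-0.6); after rounding and truncation pid_calc()
 * returns -1, and the caller (current pstate minus the PID output)
 * requests one pstate more.  This is only a sketch of the control flow,
 * not a tuning guide.
 */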

static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits->turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static void intel_pstate_hwp_set(const struct cpumask *cpumask)
{
	int min, hw_min, max, hw_max, cpu, range, adj_range;
	u64 value, cap;

	rdmsrl(MSR_HWP_CAPABILITIES, cap);
	hw_min = HWP_LOWEST_PERF(cap);
	hw_max = HWP_HIGHEST_PERF(cap);
	range = hw_max - hw_min;

	for_each_cpu(cpu, cpumask) {
		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		adj_range = limits->min_perf_pct * range / 100;
		min = hw_min + adj_range;
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		adj_range = limits->max_perf_pct * range / 100;
		max = hw_min + adj_range;
		if (limits->no_turbo) {
			hw_max = HWP_GUARANTEED_PERF(cap);
			if (hw_max < max)
				max = hw_max;
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}
}

static void intel_pstate_hwp_set_online_cpus(void)
{
	get_online_cpus();
	intel_pstate_hwp_set(cpu_online_mask);
	put_online_cpus();
}

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active)
		return;
	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}
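
/*
 * Example of poking the knobs above (assuming debugfs is mounted at the
 * usual /sys/kernel/debug, and HWP is not active, since the directory is
 * skipped when hwp_active is set):
 *
 *   # echo 95 > /sys/kernel/debug/pstate_snb/setpoint
 *   # cat /sys/kernel/debug/pstate_snb/sample_rate_ms
 *
 * Any write also resets every per-CPU PID via intel_pstate_reset_all_pid().
 */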

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits->object);		\
	}

static ssize_t show_turbo_pct(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}

static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits->turbo_disabled)
		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits->no_turbo);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_turbo_state();
	if (limits->turbo_disabled) {
		pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
		return -EPERM;
	}

	limits->no_turbo = clamp_t(int, input, 0, 1);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();

	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf_pct = max(limits->min_perf_pct,
				   limits->max_perf_pct);
	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();
	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->min_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->min_perf_pct = min(limits->max_perf_pct,
				   limits->min_perf_pct);
	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();
	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	BUG_ON(rc);
}
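
/*
 * The group registered above lands in sysfs as
 * /sys/devices/system/cpu/intel_pstate/.  A typical interaction, for
 * illustration:
 *
 *   # echo 80 > /sys/devices/system/cpu/intel_pstate/max_perf_pct
 *   # cat /sys/devices/system/cpu/intel_pstate/turbo_pct
 *
 * no_turbo, max_perf_pct and min_perf_pct are writable; turbo_pct and
 * num_pstates are read-only.
 */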

/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable HWP notification interrupt as we don't process them */
	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}

static int atom_get_min_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_TURBO_RATIOS, value);
	return value & 0x7F;
}

static void atom_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	val |= vid;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}

static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 from SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	WARN_ON(i > 4);

	return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 from SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}

static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(ATOM_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			 cpudata->pstate.min_pstate));

	rdmsrl(ATOM_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}
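
/*
 * Sketch of the VID interpolation done by atom_get_vid()/atom_set_pstate()
 * above, with made-up numbers: if vid.min == int_tofp(32), vid.max ==
 * int_tofp(64), min_pstate == 10 and max_pstate == 26, then vid.ratio ==
 * int_tofp(2), and pstate 18 maps to vid_fp == int_tofp(32 + 8 * 2) ==
 * int_tofp(48), so a VID of 48 is programmed alongside the ratio.  Turbo
 * pstates bypass the interpolation and use the dedicated turbo VID.
 */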

static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		/* Do some sanity checking for safety */
		if (plat_info & 0x600000000) {
			u64 tdp_ctrl;
			u64 tdp_ratio;
			int tdp_msr;

			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
			if (err)
				goto skip_tar;

			tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
			if (err)
				goto skip_tar;

			if (tdp_ratio - 1 == tar) {
				max_pstate = tar;
				pr_debug("max_pstate=TAC %x\n", max_pstate);
			} else {
				goto skip_tar;
			}
		}
	}

skip_tar:
	return max_pstate;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = value & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}

static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	wrmsrl(MSR_IA32_PERF_CTL, val);
}

static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value >> 8) & 0xFF;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static struct cpu_defaults silvermont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.set = atom_set_pstate,
		.get_scaling = silvermont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static struct cpu_defaults airmont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.set = atom_set_pstate,
		.get_scaling = airmont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};
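
/*
 * Note on the default tables above and below: the Atom-family defaults
 * (silvermont_params, airmont_params) pair a 60% setpoint and a non-zero
 * integral gain with the iowait-aware get_target_pstate_use_cpu_load(),
 * while the Core and KNL defaults use a 97% setpoint, pure proportional
 * control and get_target_pstate_use_performance().
 */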

static struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits->no_turbo || limits->turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf));
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf));
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
{
	int max_perf, min_perf;

	if (force) {
		update_turbo_state();

		intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

		pstate = clamp_t(int, pstate, min_perf, max_perf);

		if (pstate == cpu->pstate.current_pstate)
			return;
	}
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}

static inline void intel_pstate_calc_busy(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int64_t core_pct;

	core_pct = int_tofp(sample->aperf) * int_tofp(100);
	core_pct = div64_u64(core_pct, int_tofp(sample->mperf));

	sample->freq = fp_toint(
		mul_fp(int_tofp(
			cpu->pstate.max_pstate_physical *
			cpu->pstate.scaling / 100),
			core_pct));

	sample->core_pct_busy = (int32_t)core_pct;
}

static inline void intel_pstate_sample(struct cpudata *cpu, u64 time)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();
	if ((cpu->prev_mperf == mperf) || (cpu->prev_tsc == tsc)) {
		local_irq_restore(flags);
		return;
	}
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = time;
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	intel_pstate_calc_busy(cpu);

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
}
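
/*
 * Worked example for intel_pstate_calc_busy() above, with invented
 * deltas: an APERF delta of 8000 against an MPERF delta of 10000 gives
 * core_pct == int_tofp(80).  On a part whose max_pstate_physical is 30,
 * with the core scaling of 100000 kHz per ratio step, sample->freq then
 * comes out as fp_toint(mul_fp(int_tofp(30000), int_tofp(80))) ==
 * 2400000 kHz, i.e. an effective 2.4 GHz.
 */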

static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	u64 cumulative_iowait, delta_iowait_us;
	u64 delta_iowait_mperf;
	u64 mperf, now;
	int32_t cpu_load;

	cumulative_iowait = get_cpu_iowait_time_us(cpu->cpu, &now);

	/*
	 * Convert iowait time into number of IO cycles spent at max_freq.
	 * IO is considered as busy only for the cpu_load algorithm.  For
	 * performance this is not needed since we always try to reach the
	 * maximum P-State, so we are already boosting the IOs.
	 */
	delta_iowait_us = cumulative_iowait - cpu->prev_cumulative_iowait;
	delta_iowait_mperf = div64_u64(delta_iowait_us * cpu->pstate.scaling *
		cpu->pstate.max_pstate, MSEC_PER_SEC);

	mperf = cpu->sample.mperf + delta_iowait_mperf;
	cpu->prev_cumulative_iowait = cumulative_iowait;

	/*
	 * The load can be estimated as the ratio of the mperf counter
	 * running at a constant frequency during active periods
	 * (C0) and the time stamp counter running at the same frequency
	 * also during C-states.
	 */
	cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc);
	cpu->sample.busy_scaled = cpu_load;

	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, cpu_load);
}

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
	u64 duration_ns;

	/*
	 * core_busy is the ratio of actual performance to max
	 * max_pstate is the maximum physical pstate available
	 * current_pstate was the pstate that was requested during
	 *	the last sample period.
	 *
	 * We normalize core_busy, which was our actual percent
	 * performance to what we requested during the last sample
	 * period.  The result will be a percentage of busy at a
	 * specified pstate.
	 */
	core_busy = cpu->sample.core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	/*
	 * Since our utilization update callback will not run unless we are
	 * in C0, check if the actual elapsed time is significantly greater (3x)
	 * than our sample interval.  If it is, then we were idle for a long
	 * enough period of time to adjust our busyness.
	 */
	duration_ns = cpu->sample.time - cpu->last_sample_time;
	if ((s64)duration_ns > pid_params.sample_rate_ns * 3
	    && cpu->last_sample_time > 0) {
		sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
				      int_tofp(duration_ns));
		core_busy = mul_fp(core_busy, sample_ratio);
	}

	cpu->sample.busy_scaled = core_busy;
	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy);
}
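
/*
 * Example of the normalization in get_target_pstate_use_performance(),
 * with hypothetical numbers: a core that measured 75% busy
 * (core_pct_busy == int_tofp(75)) while pstate 20 was requested on a
 * part with max_pstate_physical == 30 is rescaled to
 * mul_fp(int_tofp(75), div_fp(int_tofp(30), int_tofp(20))) ==
 * int_tofp(112.5), i.e. busier than the setpoint, so the PID pushes the
 * next requested pstate upward.
 */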

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int from, target_pstate;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	target_pstate = pstate_funcs.get_target_pstate(cpu);

	intel_pstate_set_pstate(cpu, target_pstate, true);

	sample = &cpu->sample;
	trace_pstate_sample(fp_toint(sample->core_pct_busy),
		fp_toint(sample->busy_scaled),
		from,
		cpu->pstate.current_pstate,
		sample->mperf,
		sample->aperf,
		sample->tsc,
		sample->freq);
}

static void intel_pstate_update_util(struct update_util_data *data, u64 time,
				     unsigned long util, unsigned long max)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	u64 delta_ns = time - cpu->sample.time;

	if ((s64)delta_ns >= pid_params.sample_rate_ns) {
		intel_pstate_sample(cpu, time);
		if (!hwp_active)
			intel_pstate_adjust_busy_pstate(cpu);
	}
}

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, silvermont_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3d, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	ICPU(0x47, core_params),
	ICPU(0x4c, airmont_params),
	ICPU(0x4e, core_params),
	ICPU(0x4f, core_params),
	ICPU(0x5e, core_params),
	ICPU(0x56, core_params),
	ICPU(0x57, knl_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
	ICPU(0x56, core_params),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	if (!all_cpu_data[cpunum])
		all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
					       GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active) {
		intel_pstate_hwp_enable(cpu);
		pid_params.sample_rate_ms = 50;
		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
	}

	intel_pstate_get_cpu_pstates(cpu);

	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu, 0);

	cpu->update_util.func = intel_pstate_update_util;
	cpufreq_set_update_util_data(cpunum, &cpu->update_util);

	pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct sample *sample;
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	sample = &cpu->sample;
	return sample->freq;
}
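
/*
 * The percent mapping done by intel_pstate_set_policy() below, on made-up
 * numbers: with cpuinfo.max_freq == 3200000 kHz, a policy requesting
 * min == 800000 and max == 2400000 yields min_policy_pct == 25 and
 * max_policy_pct == 75, which are then combined with the sysfs limits
 * before being converted to the fixed-point min_perf/max_perf scalers.
 */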

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
	    policy->max >= policy->cpuinfo.max_freq) {
		pr_debug("intel_pstate: set performance\n");
		limits = &performance_limits;
		if (hwp_active)
			intel_pstate_hwp_set(policy->cpus);
		return 0;
	}

	pr_debug("intel_pstate: set powersave\n");
	limits = &powersave_limits;
	limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0, 100);
	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
					      policy->cpuinfo.max_freq);
	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
				  int_tofp(100));
	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
				  int_tofp(100));
	/* Round up after max_perf has been recomputed, not before */
	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);

	if (hwp_active)
		intel_pstate_hwp_set(policy->cpus);

	return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);

	cpufreq_set_update_util_data(cpu_num, NULL);
	synchronize_sched();

	if (hwp_active)
		return;

	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->cpuinfo.max_freq =
		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};
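
/*
 * Boot-time knobs parsed by intel_pstate_setup() at the bottom of this
 * file, summarized here next to the flags they set:
 *
 *   intel_pstate=disable	never load the driver (no_load)
 *   intel_pstate=no_hwp	do not use hardware P states even if present
 *   intel_pstate=force		load even when BIOS _PPC support is detected
 *   intel_pstate=hwp_only	only load on processors with HWP support
 */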
static int __initdata no_load;
static int __initdata no_hwp;
static int __initdata hwp_only;
static unsigned int force_load;

static int intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.set = funcs->set;
	pstate_funcs.get_vid = funcs->get_vid;
	pstate_funcs.get_target_pstate = funcs->get_target_pstate;
}

#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16 valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int oem_pwr_table;
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};
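
/*
 * vendor_info is consumed by intel_pstate_platform_pwr_mgmt_exists()
 * below: on a matching OEM board the driver defers to the firmware's
 * power management, either when no _PSS table is exposed (PSS entries)
 * or when _PPC is present (PPC entries).  Note that the
 * "intel_pstate=force" override applies only to the _PPC case.
 */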

static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
			     ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					(!force_load);
			}
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
	{}
};

static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;

	if (no_load)
		return -ENODEV;

	if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
		copy_cpu_funcs(&core_params.funcs);
		hwp_active++;
		goto hwp_cpu_matched;
	}

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

hwp_cpu_matched:
	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (!hwp_active && hwp_only)
		goto out;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	if (hwp_active)
		pr_info("intel_pstate: HWP enabled\n");

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			cpufreq_set_update_util_data(cpu, NULL);
			synchronize_sched();
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	if (!strcmp(str, "no_hwp")) {
		pr_info("intel_pstate: HWP disabled\n");
		no_hwp = 1;
	}
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");