/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>

#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
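
/*
 * Worked example (illustrative): with FRAC_BITS == 8 these macros
 * implement a signed 24.8 fixed point format, so int_tofp(3) == 768
 * (0x300) and fp_toint(768) == 3.  One fixed point step is therefore
 * 1/256, roughly 0.0039.
 */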

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}
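
/*
 * Worked examples for the helpers above (illustrative values):
 * mul_fp(int_tofp(2), int_tofp(3)) yields int_tofp(6);
 * div_fp(int_tofp(1), int_tofp(4)) yields 64, i.e. 0.25 in fixed
 * point; ceiling_fp(int_tofp(2) + 1) rounds the non-zero fractional
 * part up and returns 3, while ceiling_fp(int_tofp(2)) returns 2.
 */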

struct sample {
	int32_t core_pct_busy;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	int freq;
	u64 time;
};

struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	max_pstate_physical;
	int	scaling;
	int	turbo_pstate;
};

struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct cpudata {
	int cpu;

	struct update_util_data update_util;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	u64	last_sample_time;
	u64	prev_aperf;
	u64	prev_mperf;
	u64	prev_tsc;
	u64	prev_cumulative_iowait;
	struct sample sample;
};

static struct cpudata **all_cpu_data;

struct pstate_adjust_policy {
	int sample_rate_ms;
	s64 sample_rate_ns;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	void (*set)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
	int32_t (*get_target_pstate)(struct cpudata *);
};

struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;

struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};

static struct perf_limits performance_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 100,
	.min_perf = int_tofp(1),
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

static struct perf_limits powersave_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = int_tofp(setpoint);
	pid->deadband  = int_tofp(deadband);
	pid->integral  = int_tofp(integral);
	pid->last_err  = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = pid->setpoint - busy;

	if (abs(fp_error) <= pid->deadband)
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30.  This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}
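
/*
 * Worked example (illustrative, using the "core" defaults declared
 * below: setpoint 97, deadband 0, p_gain_pct 20, i/d gains 0): a busy
 * value of int_tofp(94) gives fp_error == int_tofp(3).  pterm is then
 * roughly 0.6 in fixed point, and after the 0.5 rounding term is added
 * the function returns 1, i.e. "run one P-state lower than requested".
 */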

static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits->turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static void intel_pstate_hwp_set(const struct cpumask *cpumask)
{
	int min, hw_min, max, hw_max, cpu, range, adj_range;
	u64 value, cap;

	rdmsrl(MSR_HWP_CAPABILITIES, cap);
	hw_min = HWP_LOWEST_PERF(cap);
	hw_max = HWP_HIGHEST_PERF(cap);
	range = hw_max - hw_min;

	for_each_cpu(cpu, cpumask) {
		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		adj_range = limits->min_perf_pct * range / 100;
		min = hw_min + adj_range;
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		adj_range = limits->max_perf_pct * range / 100;
		max = hw_min + adj_range;
		if (limits->no_turbo) {
			hw_max = HWP_GUARANTEED_PERF(cap);
			if (hw_max < max)
				max = hw_max;
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}
}
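
/*
 * Example (illustrative values): with hw_min == 8, hw_max == 36
 * (range 28) and limits of min_perf_pct == 50, max_perf_pct == 100,
 * the loop above programs HWP_REQUEST.min to 8 + 28 * 50 / 100 == 22
 * and HWP_REQUEST.max to 36, i.e. the percentage limits are scaled
 * onto the performance range the hardware reports.
 */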

static void intel_pstate_hwp_set_online_cpus(void)
{
	get_online_cpus();
	intel_pstate_hwp_set(cpu_online_mask);
	put_online_cpus();
}

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active)
		return;
	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits->object);		\
	}

static ssize_t show_turbo_pct(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	int32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}

static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits->turbo_disabled)
		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits->no_turbo);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_turbo_state();
	if (limits->turbo_disabled) {
		pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
		return -EPERM;
	}

	limits->no_turbo = clamp_t(int, input, 0, 1);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();

	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf_pct = max(limits->min_perf_pct,
				   limits->max_perf_pct);
	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();
	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->min_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->min_perf_pct = min(limits->max_perf_pct,
				   limits->min_perf_pct);
	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();
	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	BUG_ON(rc);
}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable HWP notification interrupt as we don't process them */
	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}

static int atom_get_min_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_TURBO_RATIOS, value);
	return value & 0x7F;
}
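
/*
 * As decoded by the three helpers above, ATOM_RATIOS carries the
 * minimum ratio in bits 14:8 and the maximum guaranteed ratio in bits
 * 22:16, while the turbo ratio sits in bits 6:0 of ATOM_TURBO_RATIOS.
 * Example (illustrative, not from real silicon): an ATOM_RATIOS value
 * of 0x001f1a00 reads back as min_pstate 26 (0x1a) and max_pstate 31
 * (0x1f).
 */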

static void atom_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	val |= vid;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}
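
/*
 * Example (illustrative): for pstate 20 and a computed vid of 0x30 the
 * value written to MSR_IA32_PERF_CTL above is (20 << 8) | 0x30 ==
 * 0x1430: the ratio goes into bits 15:8, the VID into the low byte,
 * and bit 32 is additionally set when the user has requested turbo to
 * be disengaged.
 */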

static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 from SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	WARN_ON(i > 4);

	return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 from SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}

static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(ATOM_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			cpudata->pstate.min_pstate));

	rdmsrl(ATOM_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}
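
/*
 * Example (illustrative values): with vid.min == int_tofp(0x20),
 * vid.max == int_tofp(0x40) and P-states 10..26, vid.ratio above
 * becomes int_tofp(2), i.e. two VID steps per P-state, which
 * atom_set_pstate() uses to linearly interpolate a voltage for each
 * requested ratio.
 */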

static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}
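
/*
 * MSR_PLATFORM_INFO layout as decoded above: bits 15:8 hold the
 * maximum non-turbo ratio and bits 47:40 the maximum efficiency
 * (minimum) ratio.  Example (illustrative): a value carrying 0x0c in
 * bits 47:40 and 0x22 in bits 15:8 reads back as min_pstate 12 and
 * max_pstate 34.
 */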

static int core_get_max_pstate(void)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		/* Only trust TAR when it matches the configured TDP ratio */
		if (plat_info & 0x600000000) {
			u64 tdp_ctrl;
			u64 tdp_ratio;
			int tdp_msr;

			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
			if (err)
				goto skip_tar;

			tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
			if (err)
				goto skip_tar;

			if (tdp_ratio - 1 == tar) {
				max_pstate = tar;
				pr_debug("max_pstate=TAC %x\n", max_pstate);
			} else {
				goto skip_tar;
			}
		}
	}

skip_tar:
	return max_pstate;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = value & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}
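
/*
 * Core parts step in 100 MHz increments, so a P-state ratio converts
 * to kHz by multiplying by 100000.  Example (illustrative): pstate 22
 * times core_get_scaling() is 2200000 kHz, i.e. 2.2 GHz.
 */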

static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	wrmsrl(MSR_IA32_PERF_CTL, val);
}

static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value >> 8) & 0xFF;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static struct cpu_defaults silvermont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.set = atom_set_pstate,
		.get_scaling = silvermont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static struct cpu_defaults airmont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.set = atom_set_pstate,
		.get_scaling = airmont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits->no_turbo || limits->turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	/*
	 * Performance can be limited by the user through sysfs, by the
	 * cpufreq policy, or by cpu specific default values determined
	 * through experimentation.
	 */
	max_perf_adj = fp_toint(max_perf * limits->max_perf);
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(max_perf * limits->min_perf);
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
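
/*
 * Example (illustrative values): with min_pstate 8, max_pstate 24,
 * turbo_pstate 32, turbo enabled and limits of max_perf == int_tofp(1)
 * (100%) and min_perf == int_tofp(1) / 2 (50%), the code above yields
 * *max == 32 and *min == fp_toint(32 * 0.5) == 16, both clamped to the
 * range the hardware supports.
 */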

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
{
	int max_perf, min_perf;

	if (force) {
		update_turbo_state();

		intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

		pstate = clamp_t(int, pstate, min_perf, max_perf);

		if (pstate == cpu->pstate.current_pstate)
			return;
	}
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}

static inline void intel_pstate_calc_busy(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int64_t core_pct;

	core_pct = int_tofp(sample->aperf) * int_tofp(100);
	core_pct = div64_u64(core_pct, int_tofp(sample->mperf));

	sample->core_pct_busy = (int32_t)core_pct;
}
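
/*
 * Example (illustrative counter values): an APERF delta of 8,000,000
 * against an MPERF delta of 10,000,000 gives core_pct_busy ==
 * int_tofp(80): the core's average frequency while unhalted was 80%
 * of the fixed MPERF reference frequency over the sample interval.
 */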

static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();
	if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = time;
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
	return true;
}

static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
	return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf *
		cpu->pstate.scaling, cpu->sample.mperf);
}
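
/*
 * Example (illustrative): with max_pstate_physical == 30, scaling ==
 * 100000 and an APERF/MPERF delta ratio of 8/10, the average delivered
 * frequency reported above is 30 * 100000 * 8 / 10 == 2400000 kHz.
 */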

static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	u64 cumulative_iowait, delta_iowait_us;
	u64 delta_iowait_mperf;
	u64 mperf, now;
	int32_t cpu_load;

	cumulative_iowait = get_cpu_iowait_time_us(cpu->cpu, &now);

	/*
	 * Convert iowait time into number of IO cycles spent at max_freq.
	 * IO is considered busy only for the cpu_load algorithm. For
	 * performance this is not needed since we always try to reach the
	 * maximum P-State, so we are already boosting the IOs.
	 */
	delta_iowait_us = cumulative_iowait - cpu->prev_cumulative_iowait;
	delta_iowait_mperf = div64_u64(delta_iowait_us * cpu->pstate.scaling *
		cpu->pstate.max_pstate, MSEC_PER_SEC);

	mperf = cpu->sample.mperf + delta_iowait_mperf;
	cpu->prev_cumulative_iowait = cumulative_iowait;

	/*
	 * The load can be estimated as the ratio of the mperf counter
	 * running at a constant frequency during active periods
	 * (C0) and the time stamp counter running at the same frequency
	 * also during C-states.
	 */
	cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc);
	cpu->sample.busy_scaled = cpu_load;

	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, cpu_load);
}
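
/*
 * Example (illustrative): an mperf delta (including the iowait credit
 * computed above) of 2,500,000 against a TSC delta of 10,000,000
 * yields a cpu_load of int_tofp(25), i.e. the CPU was in C0 for
 * roughly a quarter of the sample window.
 */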

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
	u64 duration_ns;

	intel_pstate_calc_busy(cpu);

	/*
	 * core_busy is the ratio of actual performance to max
	 * max_pstate is the max non turbo pstate available
	 * current_pstate was the pstate that was requested during
	 * the last sample period.
	 *
	 * We normalize core_busy, which was our actual percent
	 * performance to what we requested during the last sample
	 * period. The result will be a percentage of busy at a
	 * specified pstate.
	 */
	core_busy = cpu->sample.core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	/*
	 * Since our utilization update callback will not run unless we are
	 * in C0, check if the actual elapsed time is significantly greater (3x)
	 * than our sample interval.  If it is, then we were idle for a long
	 * enough period of time to adjust our busyness.
	 */
	duration_ns = cpu->sample.time - cpu->last_sample_time;
	if ((s64)duration_ns > pid_params.sample_rate_ns * 3
	    && cpu->last_sample_time > 0) {
		sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
				      int_tofp(duration_ns));
		core_busy = mul_fp(core_busy, sample_ratio);
	}

	cpu->sample.busy_scaled = core_busy;
	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy);
}
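
/*
 * Example (illustrative): if the core measured 60% busy
 * (core_pct_busy) while the last requested P-state was half of
 * max_pstate_physical, the normalization above scales core_busy to
 * int_tofp(120): the core was saturating the P-state it was given,
 * so the PID pushes the target upward.
 */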

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int from, target_pstate;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	target_pstate = pstate_funcs.get_target_pstate(cpu);

	intel_pstate_set_pstate(cpu, target_pstate, true);

	sample = &cpu->sample;
	trace_pstate_sample(fp_toint(sample->core_pct_busy),
		fp_toint(sample->busy_scaled),
		from,
		cpu->pstate.current_pstate,
		sample->mperf,
		sample->aperf,
		sample->tsc,
		get_avg_frequency(cpu));
}

static void intel_pstate_update_util(struct update_util_data *data, u64 time,
				     unsigned long util, unsigned long max)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	u64 delta_ns = time - cpu->sample.time;

	if ((s64)delta_ns >= pid_params.sample_rate_ns) {
		bool sample_taken = intel_pstate_sample(cpu, time);

		if (sample_taken && !hwp_active)
			intel_pstate_adjust_busy_pstate(cpu);
	}
}

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, silvermont_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3d, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	ICPU(0x47, core_params),
	ICPU(0x4c, airmont_params),
	ICPU(0x4e, core_params),
	ICPU(0x4f, core_params),
	ICPU(0x5e, core_params),
	ICPU(0x56, core_params),
	ICPU(0x57, knl_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
	ICPU(0x56, core_params),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	if (!all_cpu_data[cpunum])
		all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
					       GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active) {
		intel_pstate_hwp_enable(cpu);
		pid_params.sample_rate_ms = 50;
		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
	}

	intel_pstate_get_cpu_pstates(cpu);

	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu, 0);

	cpu->update_util.func = intel_pstate_update_util;
	cpufreq_set_update_util_data(cpunum, &cpu->update_util);

	pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	if (!cpu)
		return 0;

	return get_avg_frequency(cpu);
}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
	    policy->max >= policy->cpuinfo.max_freq) {
		pr_debug("intel_pstate: set performance\n");
		limits = &performance_limits;
		if (hwp_active)
			intel_pstate_hwp_set(policy->cpus);
		return 0;
	}

	pr_debug("intel_pstate: set powersave\n");
	limits = &powersave_limits;
	limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0, 100);
	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
					      policy->cpuinfo.max_freq);
	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
				  int_tofp(100));
	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
				  int_tofp(100));
	/* Round up so the percentage limit is not undershot. */
	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);

	if (hwp_active)
		intel_pstate_hwp_set(policy->cpus);

	return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);

	cpufreq_set_update_util_data(cpu_num, NULL);
	synchronize_sched();

	if (hwp_active)
		return;

	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->cpuinfo.max_freq =
		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};

static int __initdata no_load;
static int __initdata no_hwp;
static int __initdata hwp_only;
static unsigned int force_load;

static int intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max   = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min   = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.set       = funcs->set;
	pstate_funcs.get_vid   = funcs->get_vid;
	pstate_funcs.get_target_pstate = funcs->get_target_pstate;
}

#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int  oem_pwr_table;
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};

static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
			!strncmp(hdr.oem_table_id, v_info->oem_table_id,
						ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					(!force_load);
			}
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
	{}
};

static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;

	if (no_load)
		return -ENODEV;

	if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
		copy_cpu_funcs(&core_params.funcs);
		hwp_active++;
		goto hwp_cpu_matched;
	}

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

hwp_cpu_matched:
	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (!hwp_active && hwp_only)
		goto out;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	if (hwp_active)
		pr_info("intel_pstate: HWP enabled\n");

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			cpufreq_set_update_util_data(cpu, NULL);
			synchronize_sched();
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	if (!strcmp(str, "no_hwp")) {
		pr_info("intel_pstate: HWP disabled\n");
		no_hwp = 1;
	}
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	return 0;
}
early_param("intel_pstate", intel_pstate_setup);
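
/*
 * Kernel command line usage, matching the options parsed above, e.g.:
 *   intel_pstate=disable    - do not load the driver
 *   intel_pstate=no_hwp     - do not enable hardware P-states (HWP)
 *   intel_pstate=force      - load even when firmware exposes _PPC
 *   intel_pstate=hwp_only   - only load on hardware with HWP support
 */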

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");