/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>

#define INTEL_CPUFREQ_TRANSITION_LATENCY	20000

#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
#endif

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)
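/*
 * A worked example of the fixed-point helpers (illustrative numbers only):
 * with FRAC_BITS == 8, int_tofp(3) == 768 and fp_toint(768) == 3, and
 * mul_fp(int_tofp(3), int_tofp(2)) == int_tofp(6) because the product is
 * shifted back right by FRAC_BITS.  The "ext" variants carry EXT_BITS
 * extra fraction bits, so int_ext_tofp(1) == 16384, the value used below
 * to represent a 100% performance fraction.
 */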

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

static inline u64 mul_ext_fp(u64 x, u64 y)
{
	return (x * y) >> EXT_FRAC_BITS;
}

static inline u64 div_ext_fp(u64 x, u64 y)
{
	return div64_u64(x << EXT_FRAC_BITS, y);
}

/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during the last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate the next
 *			P state. This can be different from core_avg_perf
 *			to account for CPU idle periods
 * @aperf:		Difference in the actual performance frequency clock
 *			count read from the APERF MSR between the last and
 *			current sample
 * @mperf:		Difference in the maximum performance frequency clock
 *			count read from the MPERF MSR between the last and
 *			current sample
 * @tsc:		Difference of the time stamp counter between the last
 *			and current sample
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing the next P state.
 */
struct sample {
	int32_t core_avg_perf;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	u64 time;
};

/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical: This is the physical max P state for a processor.
 *			This can be higher than the max_pstate, which can
 *			be limited by platform thermal design power limits
 * @scaling:		Scaling factor to convert frequency to cpufreq
 *			frequency units
 * @turbo_pstate:	Max Turbo P state possible for this platform
 * @max_freq:		@max_pstate frequency in cpufreq units
 * @turbo_freq:		@turbo_pstate frequency in cpufreq units
 *
 * Stores the per cpu model P state limits and current P state.
 */
struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	max_pstate_physical;
	int	scaling;
	int	turbo_pstate;
	unsigned int max_freq;
	unsigned int turbo_freq;
};

/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P State.
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - min P state)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
 * This data is used on Atom platforms, where in addition to the target
 * P state, the voltage data needs to be specified to select the next P state.
 */
struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

/**
 * struct _pid -	Stores PID data
 * @setpoint:		Target set point for busyness or performance
 * @integral:		Storage for accumulated error values
 * @p_gain:		PID proportional gain
 * @i_gain:		PID integral gain
 * @d_gain:		PID derivative gain
 * @deadband:		PID deadband
 * @last_err:		Last error storage for derivative part of PID calculation
 *
 * Stores PID coefficients and last error for PID controller.
 */
struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

/**
 * struct perf_limits - Store user and policy limits
 * @no_turbo:		User requested turbo state from intel_pstate sysfs
 * @turbo_disabled:	Platform turbo status either from msr
 *			MSR_IA32_MISC_ENABLE or when maximum available pstate
 *			matches the maximum turbo pstate
 * @max_perf_pct:	Effective maximum performance limit in percentage; this
 *			is the minimum of the limit enforced by the cpufreq
 *			policy and the user-set limit via intel_pstate sysfs
 * @min_perf_pct:	Effective minimum performance limit in percentage; this
 *			is the maximum of the limit enforced by the cpufreq
 *			policy and the user-set limit via intel_pstate sysfs
 * @max_perf:		Fixed-point (EXT_FRAC_BITS) fraction corresponding to
 *			max_perf_pct. This value is used to limit the max
 *			P state
 * @min_perf:		Fixed-point (EXT_FRAC_BITS) fraction corresponding to
 *			min_perf_pct. This value is used to limit the min
 *			P state
 * @max_policy_pct:	The maximum performance in percentage enforced by
 *			the cpufreq setpolicy interface
 * @max_sysfs_pct:	The maximum performance in percentage enforced by
 *			the intel_pstate sysfs interface, unused when per-CPU
 *			controls are enforced
 * @min_policy_pct:	The minimum performance in percentage enforced by
 *			the cpufreq setpolicy interface
 * @min_sysfs_pct:	The minimum performance in percentage enforced by
 *			the intel_pstate sysfs interface, unused when per-CPU
 *			controls are enforced
 *
 * Storage for user and policy defined limits.
 */
struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};

/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @policy:		CPUFreq policy value
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @iowait_boost:	iowait-related boost fraction
 * @last_update:	Time of the last update.
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @pid:		Stores PID parameters for this CPU
 * @last_sample_time:	Time of the last sample
 * @prev_aperf:		Last APERF value read from the APERF MSR
 * @prev_mperf:		Last MPERF value read from the MPERF MSR
 * @prev_tsc:		Last time stamp counter (TSC) value
 * @prev_cummulative_iowait: IO wait time difference between the last and
 *			current sample
 * @sample:		Storage for the last sample data
 * @perf_limits:	Pointer to the perf_limits unique to this CPU.
 *			Not all fields in the structure are applicable
 *			when per-CPU controls are enforced
 * @acpi_perf_data:	Stores ACPI perf information read from _PSS
 * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
 * @epp_powersave:	Last saved HWP energy performance preference
 *			(EPP) or energy performance bias (EPB),
 *			when policy switched to performance
 * @epp_policy:		Last saved policy used to set EPP/EPB
 * @epp_default:	Power on default HWP energy performance
 *			preference/bias
 * @epp_saved:		Saved EPP/EPB during system suspend or CPU offline
 *			operation
 *
 * This structure stores per CPU instance data for all CPUs.
 */
struct cpudata {
	int cpu;

	unsigned int policy;
	struct update_util_data update_util;
	bool   update_util_set;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	u64	last_update;
	u64	last_sample_time;
	u64	prev_aperf;
	u64	prev_mperf;
	u64	prev_tsc;
	u64	prev_cummulative_iowait;
	struct sample sample;
	struct perf_limits *perf_limits;
#ifdef CONFIG_ACPI
	struct acpi_processor_performance acpi_perf_data;
	bool valid_pss_table;
#endif
	unsigned int iowait_boost;
	s16 epp_powersave;
	s16 epp_policy;
	s16 epp_default;
	s16 epp_saved;
};

static struct cpudata **all_cpu_data;

/**
 * struct pstate_adjust_policy - Stores static PID configuration data
 * @sample_rate_ms:	PID calculation sample rate in ms
 * @sample_rate_ns:	Sample rate in ns, derived from @sample_rate_ms
 * @deadband:		PID deadband
 * @setpoint:		PID Setpoint
 * @p_gain_pct:		PID proportional gain
 * @i_gain_pct:		PID integral gain
 * @d_gain_pct:		PID derivative gain
 *
 * Stores per CPU model static PID configuration data.
 */
struct pstate_adjust_policy {
	int sample_rate_ms;
	s64 sample_rate_ns;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non-turbo effective P state
 * @get_max_physical:	Callback to get maximum non-turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 * @get_target_pstate:	Callback to a function to calculate next P state to use
 *
 * Core and Atom CPU models have different ways to get P state limits. This
 * structure is used to store those callbacks.
 */
struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	u64 (*get_val)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
	int32_t (*get_target_pstate)(struct cpudata *);
};

/**
 * struct cpu_defaults - Per CPU model default config data
 * @pid_policy:		PID config data
 * @funcs:		Callback function data
 */
struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);

static struct pstate_adjust_policy pid_params __read_mostly;
static struct pstate_funcs pstate_funcs __read_mostly;
static int hwp_active __read_mostly;
static bool per_cpu_limits __read_mostly;

#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif

static struct perf_limits performance_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_ext_tofp(1),
	.min_perf_pct = 100,
	.min_perf = int_ext_tofp(1),
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

static struct perf_limits powersave_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_ext_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif

static DEFINE_MUTEX(intel_pstate_limits_lock);

#ifdef CONFIG_ACPI

static bool intel_pstate_get_ppc_enable_status(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
	    acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
		return true;

	return acpi_ppc;
}

#ifdef CONFIG_ACPI_CPPC_LIB

/* The work item is needed to avoid CPU hotplug locking issues */
static void intel_pstate_sched_itmt_work_fn(struct work_struct *work)
{
	sched_set_itmt_support();
}

static DECLARE_WORK(sched_itmt_work, intel_pstate_sched_itmt_work_fn);

static void intel_pstate_set_itmt_prio(int cpu)
{
	struct cppc_perf_caps cppc_perf;
	static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
	int ret;

	ret = cppc_get_perf_caps(cpu, &cppc_perf);
	if (ret)
		return;

	/*
	 * The priorities can be set regardless of whether or not
	 * sched_set_itmt_support() has been called and it is valid to
	 * update them at any time after it has been called.
	 */
	sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);

	if (max_highest_perf <= min_highest_perf) {
		if (cppc_perf.highest_perf > max_highest_perf)
			max_highest_perf = cppc_perf.highest_perf;

		if (cppc_perf.highest_perf < min_highest_perf)
			min_highest_perf = cppc_perf.highest_perf;

		if (max_highest_perf > min_highest_perf) {
			/*
			 * This code can be run during CPU online under the
			 * CPU hotplug locks, so sched_set_itmt_support()
			 * cannot be called from here.  Queue up a work item
			 * to invoke it.
			 */
			schedule_work(&sched_itmt_work);
		}
	}
}
#else
static void intel_pstate_set_itmt_prio(int cpu)
{
}
#endif

static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int ret;
	int i;

	if (hwp_active) {
		intel_pstate_set_itmt_prio(policy->cpu);
		return;
	}

	if (!intel_pstate_get_ppc_enable_status())
		return;

	cpu = all_cpu_data[policy->cpu];

	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
						  policy->cpu);
	if (ret)
		return;

	/*
	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
	 * guarantee that the states returned by it map to the states in our
	 * list directly.
	 */
	if (cpu->acpi_perf_data.control_register.space_id !=
						ACPI_ADR_SPACE_FIXED_HARDWARE)
		goto err;

	/*
	 * If there is only one entry in _PSS, simply ignore _PSS and continue
	 * as usual without taking it into account
	 */
	if (cpu->acpi_perf_data.state_count < 2)
		goto err;

	pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
			 (u32) cpu->acpi_perf_data.states[i].power,
			 (u32) cpu->acpi_perf_data.states[i].control);
	}

	/*
	 * The _PSS table doesn't contain the whole turbo frequency range.
	 * It just contains +1 MHz above the max non-turbo frequency,
	 * with a control value corresponding to the max turbo ratio. But
	 * when cpufreq set_policy is called with this max frequency,
	 * performance is reduced because this driver uses the real max
	 * turbo frequency as the max frequency. So correct this frequency
	 * in the _PSS table to the max turbo frequency based on the turbo
	 * state. Also convert to MHz, as _PSS frequencies are in MHz.
	 */
	if (!limits->turbo_disabled)
		cpu->acpi_perf_data.states[0].core_frequency =
					policy->cpuinfo.max_freq / 1000;
	cpu->valid_pss_table = true;
	pr_debug("_PPC limits will be enforced\n");

	return;

 err:
	cpu->valid_pss_table = false;
	acpi_processor_unregister_performance(policy->cpu);
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[policy->cpu];
	if (!cpu->valid_pss_table)
		return;

	acpi_processor_unregister_performance(policy->cpu);
}

#else
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}

static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}
#endif

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = int_tofp(setpoint);
	pid->deadband  = int_tofp(deadband);
	pid->integral  = int_tofp(integral);
	pid->last_err  = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(percent, 100);
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(percent, 100);
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(percent, 100);
}

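/*
 * A rough worked example of the controller below (illustrative numbers
 * only): with the core defaults of setpoint 97, p_gain_pct 20 and zero
 * i/d gains, a busy value of int_tofp(92) gives fp_error = int_tofp(5),
 * so pid_calc() returns 1 and the caller lowers the target P state by
 * one step.
 */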
static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = pid->setpoint - busy;

	if (abs(fp_error) <= pid->deadband)
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that its magnitude will never
	 * exceed 30.  This prevents it from becoming too large an input
	 * over long periods of time and allows it to get factored out
	 * sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
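	/* Add 0.5 in fixed point so the truncation below rounds to nearest */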
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}

static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits->turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
{
	u64 epb;
	int ret;

	if (!static_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return (s16)ret;

	return (s16)(epb & 0x0f);
}

static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
	s16 epp;

	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * When hwp_req_data is 0, it means the caller didn't read
		 * MSR_HWP_REQUEST, so read it here to get the EPP.
		 */
		if (!hwp_req_data) {
			epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
					    &hwp_req_data);
			if (epp)
				return epp;
		}
		epp = (hwp_req_data >> 24) & 0xff;
	} else {
		/* When there is no EPP present, HWP uses EPB settings */
		epp = intel_pstate_get_epb(cpu_data);
	}

	return epp;
}

static int intel_pstate_set_epb(int cpu, s16 pref)
{
	u64 epb;
	int ret;

	if (!static_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return ret;

	epb = (epb & ~0x0f) | pref;
	wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);

	return 0;
}

/*
 * EPP/EPB display strings corresponding to EPP index in the
 * energy_perf_strings[]
 *	index		String
 *-------------------------------------
 *	0		default
 *	1		performance
 *	2		balance_performance
 *	3		balance_power
 *	4		power
 */
static const char * const energy_perf_strings[] = {
	"default",
	"performance",
	"balance_performance",
	"balance_power",
	"power",
	NULL
};

static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
{
	s16 epp;
	int index = -EINVAL;

	epp = intel_pstate_get_epp(cpu_data, 0);
	if (epp < 0)
		return epp;

	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * Range:
		 *	0x00-0x3F	:	Performance
		 *	0x40-0x7F	:	Balance performance
		 *	0x80-0xBF	:	Balance power
		 *	0xC0-0xFF	:	Power
		 * The EPP is an 8-bit value, but our ranges restrict the
		 * values which can be set. Effectively only the top two
		 * bits are used here.
		 */
		index = (epp >> 6) + 1;
	} else if (static_cpu_has(X86_FEATURE_EPB)) {
		/*
		 * Range:
		 *	0x00-0x03	:	Performance
		 *	0x04-0x07	:	Balance performance
		 *	0x08-0x0B	:	Balance power
		 *	0x0C-0x0F	:	Power
		 * The EPB is a 4-bit value, but our ranges restrict the
		 * values which can be set. Effectively only the top two
		 * bits are used here.
		 */
		index = (epp >> 2) + 1;
	}

	return index;
}

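/*
 * For example (illustrative only): writing "balance_power" selects
 * pref_index 3, which maps to EPP (3 - 1) << 6 == 0x80 with HWP_EPP,
 * or to EPB (3 - 1) << 2 == 0x08 when only EPB is available.
 */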
static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
					      int pref_index)
{
	int epp = -EINVAL;
	int ret;

	if (!pref_index)
		epp = cpu_data->epp_default;

	mutex_lock(&intel_pstate_limits_lock);

	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		u64 value;

		ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
		if (ret)
			goto return_pref;

		value &= ~GENMASK_ULL(31, 24);

		/*
		 * If epp is not the default, convert the index into
		 * energy_perf_strings to an epp value by shifting it
		 * 6 bits left so that only the top two bits of epp are
		 * used. The resultant epp then needs to be shifted by
		 * 24 bits to the epp position in MSR_HWP_REQUEST.
		 */
		if (epp == -EINVAL)
			epp = (pref_index - 1) << 6;

		value |= (u64)epp << 24;
		ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
	} else {
		if (epp == -EINVAL)
			epp = (pref_index - 1) << 2;
		ret = intel_pstate_set_epb(cpu_data->cpu, epp);
	}
return_pref:
	mutex_unlock(&intel_pstate_limits_lock);

	return ret;
}

static ssize_t show_energy_performance_available_preferences(
				struct cpufreq_policy *policy, char *buf)
{
	int i = 0;
	int ret = 0;

	while (energy_perf_strings[i] != NULL)
		ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);

	ret += sprintf(&buf[ret], "\n");

	return ret;
}

cpufreq_freq_attr_ro(energy_performance_available_preferences);

static ssize_t store_energy_performance_preference(
		struct cpufreq_policy *policy, const char *buf, size_t count)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
	char str_preference[21];
	int ret, i = 0;

	ret = sscanf(buf, "%20s", str_preference);
	if (ret != 1)
		return -EINVAL;

	while (energy_perf_strings[i] != NULL) {
		if (!strcmp(str_preference, energy_perf_strings[i])) {
			intel_pstate_set_energy_pref_index(cpu_data, i);
			return count;
		}
		++i;
	}

	return -EINVAL;
}

static ssize_t show_energy_performance_preference(
				struct cpufreq_policy *policy, char *buf)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
	int preference;

	preference = intel_pstate_get_energy_pref_index(cpu_data);
	if (preference < 0)
		return preference;

	return sprintf(buf, "%s\n", energy_perf_strings[preference]);
}

cpufreq_freq_attr_rw(energy_performance_preference);

static struct freq_attr *hwp_cpufreq_attrs[] = {
	&energy_performance_preference,
	&energy_performance_available_preferences,
	NULL,
};

static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
{
	int min, hw_min, max, hw_max, cpu, range, adj_range;
	struct perf_limits *perf_limits = limits;
	u64 value, cap;

	for_each_cpu(cpu, policy->cpus) {
		int max_perf_pct, min_perf_pct;
		struct cpudata *cpu_data = all_cpu_data[cpu];
		s16 epp;

		if (per_cpu_limits)
			perf_limits = all_cpu_data[cpu]->perf_limits;

		rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
		hw_min = HWP_LOWEST_PERF(cap);
		hw_max = HWP_HIGHEST_PERF(cap);
		range = hw_max - hw_min;

		max_perf_pct = perf_limits->max_perf_pct;
		min_perf_pct = perf_limits->min_perf_pct;

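		/*
		 * The percentage limits are applied across the span between
		 * the lowest and highest HWP performance levels; e.g.
		 * (illustrative numbers) hw_min 8 and hw_max 36 with
		 * min_perf_pct 50 yield a floor of 8 + 14 = 22 below.
		 */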
		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		adj_range = min_perf_pct * range / 100;
		min = hw_min + adj_range;
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		adj_range = max_perf_pct * range / 100;
		max = hw_min + adj_range;
		if (limits->no_turbo) {
			hw_max = HWP_GUARANTEED_PERF(cap);
			if (hw_max < max)
				max = hw_max;
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);

		if (cpu_data->epp_policy == cpu_data->policy)
			goto skip_epp;

		cpu_data->epp_policy = cpu_data->policy;

		if (cpu_data->epp_saved >= 0) {
			epp = cpu_data->epp_saved;
			cpu_data->epp_saved = -EINVAL;
			goto update_epp;
		}

		if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
			epp = intel_pstate_get_epp(cpu_data, value);
			cpu_data->epp_powersave = epp;
			/* If the EPP read failed, don't try to write */
			if (epp < 0)
				goto skip_epp;

			epp = 0;
		} else {
			/* Skip setting EPP when the saved value is invalid */
			if (cpu_data->epp_powersave < 0)
				goto skip_epp;

			/*
			 * No need to restore EPP when it is not zero. This
			 * means:
			 *  - the policy has not changed
			 *  - the user has changed it manually
			 *  - there was an error reading the EPB
			 */
			epp = intel_pstate_get_epp(cpu_data, value);
			if (epp)
				goto skip_epp;

			epp = cpu_data->epp_powersave;
		}
update_epp:
		if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
			value &= ~GENMASK_ULL(31, 24);
			value |= (u64)epp << 24;
		} else {
			intel_pstate_set_epb(cpu, epp);
		}
skip_epp:
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}
}

static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
{
	if (hwp_active)
		intel_pstate_hwp_set(policy);

	return 0;
}

static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];

	if (!hwp_active)
		return 0;

	cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0);

	return 0;
}

static int intel_pstate_resume(struct cpufreq_policy *policy)
{
	int ret;

	if (!hwp_active)
		return 0;

	mutex_lock(&intel_pstate_limits_lock);

	all_cpu_data[policy->cpu]->epp_policy = 0;

	ret = intel_pstate_hwp_set_policy(policy);

	mutex_unlock(&intel_pstate_limits_lock);

	return ret;
}

static void intel_pstate_update_policies(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpufreq_update_policy(cpu);
}

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits->object);		\
	}

static ssize_t show_turbo_pct(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(no_turbo, total);
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}

static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits->turbo_disabled)
		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits->no_turbo);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_limits_lock);

	update_turbo_state();
	if (limits->turbo_disabled) {
		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
		mutex_unlock(&intel_pstate_limits_lock);
		return -EPERM;
	}

	limits->no_turbo = clamp_t(int, input, 0, 1);

	mutex_unlock(&intel_pstate_limits_lock);

	intel_pstate_update_policies();

	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_limits_lock);

	limits->max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf_pct = max(limits->min_perf_pct,
				   limits->max_perf_pct);
	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);

	mutex_unlock(&intel_pstate_limits_lock);

	intel_pstate_update_policies();

	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_limits_lock);

	limits->min_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->min_perf_pct = min(limits->max_perf_pct,
				   limits->min_perf_pct);
	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);

	mutex_unlock(&intel_pstate_limits_lock);

	intel_pstate_update_policies();

	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	if (WARN_ON(!intel_pstate_kobject))
		return;

	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	if (WARN_ON(rc))
		return;

	/*
	 * If per-CPU limits are enforced, there are no global limits, so
	 * return without creating the max/min_perf_pct attributes
	 */
	if (per_cpu_limits)
		return;

	rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
	WARN_ON(rc);

	rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
	WARN_ON(rc);
}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable the HWP notification interrupt, as we don't process it */
	if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
	cpudata->epp_policy = 0;
	if (cpudata->epp_default == -EINVAL)
		cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
}

static int atom_get_min_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_TURBO_RATIOS, value);
	return value & 0x7F;
}

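/*
 * The value written to the PERF_CTL MSR packs the target ratio into bits
 * 15:8 and, on Atom, the VID into the low byte; bit 32 disengages turbo
 * when the user requested no_turbo but the platform still allows turbo.
 */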
static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	return val | vid;
}

static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 from SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	/* Clamp undocumented values so we never read past the table */
	if (WARN_ON(i > 4))
		i = 1;	/* fall back to the 100 MHz bus clock entry */

	return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 from SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	/* Clamp undocumented values so we never read past the table */
	if (WARN_ON(i > 8))
		i = 1;	/* fall back to the 100 MHz bus clock entry */

	return airmont_freq_table[i];
}

static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(ATOM_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			cpudata->pstate.min_pstate));

	rdmsrl(ATOM_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		/* Do some sanity checking for safety */
		if (plat_info & 0x600000000) {
			u64 tdp_ctrl;
			u64 tdp_ratio;
			int tdp_msr;

			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
			if (err)
				goto skip_tar;

			tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
			if (err)
				goto skip_tar;

			/* For level 1 and 2, bits[23:16] contain the ratio */
			if (tdp_ctrl)
				tdp_ratio >>= 16;

			tdp_ratio &= 0xff; /* ratios are only 8 bits long */
			if (tdp_ratio - 1 == tar) {
				max_pstate = tar;
				pr_debug("max_pstate=TAC %x\n", max_pstate);
			} else {
				goto skip_tar;
			}
		}
	}

skip_tar:
	return max_pstate;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = value & 0xFF;
	if (ret <= nont)
		ret = nont;
	return ret;
}

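/*
 * On Core, each P state step corresponds to 100 MHz of bus clock, so the
 * cpufreq frequency in kHz is pstate * 100000; e.g. a ratio of 24 maps to
 * 2.4 GHz.
 */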
static inline int core_get_scaling(void)
{
	return 100000;
}

static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	return val;
}

static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value >> 8) & 0xFF;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static const struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static const struct cpu_defaults silvermont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = silvermont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static const struct cpu_defaults airmont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = airmont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static const struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static const struct cpu_defaults bxt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;
	struct perf_limits *perf_limits = limits;

	if (limits->no_turbo || limits->turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	if (per_cpu_limits)
		perf_limits = cpu->perf_limits;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
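	/*
	 * Example (illustrative numbers): with turbo_pstate 32 and
	 * max_perf_pct 50, max_perf is div_ext_fp(50, 100) and
	 * fp_ext_toint(32 * max_perf) yields 16, which is then clamped
	 * to the supported P state range.
	 */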
	max_perf_adj = fp_ext_toint(max_perf * perf_limits->max_perf);
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_ext_toint(max_perf * perf_limits->min_perf);
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	cpu->pstate.current_pstate = pstate;
	/*
	 * Generally, there is no guarantee that this code will always run on
	 * the CPU being updated, so force the register update to run on the
	 * right CPU.
	 */
	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
		      pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static void intel_pstate_max_within_limits(struct cpudata *cpu)
{
	int min_pstate, max_pstate;

	update_turbo_state();
	intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
	intel_pstate_set_pstate(cpu, max_pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();
	cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	intel_pstate_set_min_pstate(cpu);
}

static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;

	sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
}

static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

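	/*
	 * Read APERF, MPERF and the TSC back to back with interrupts
	 * disabled so that the three deltas computed below remain
	 * consistent with one another.
	 */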
	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();
	if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = time;
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
	/*
	 * First time this function is invoked in a given cycle, all of the
	 * previous sample data fields are equal to zero or stale and they must
	 * be populated with meaningful numbers for things to work, so assume
	 * that sample.time will always be reset before setting the utilization
	 * update hook and make the caller skip the sample then.
	 */
	return !!cpu->last_sample_time;
}

static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->sample.core_avg_perf,
			  cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
}

static inline int32_t get_avg_pstate(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->pstate.max_pstate_physical,
			  cpu->sample.core_avg_perf);
}

static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int32_t busy_frac, boost;
	int target, avg_pstate;

	busy_frac = div_fp(sample->mperf, sample->tsc);

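	/*
	 * Consume the iowait boost: use it as a floor for the busy fraction
	 * and halve it so that it decays over successive samples unless it
	 * is re-armed via SCHED_CPUFREQ_IOWAIT in the update hook.
	 */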
	boost = cpu->iowait_boost;
	cpu->iowait_boost >>= 1;

	if (busy_frac < boost)
		busy_frac = boost;

	sample->busy_scaled = busy_frac * 100;

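	/*
	 * Map the busy fraction to a P state: scale the available range up
	 * by 25% so that a busy fraction of about 80% already selects the
	 * maximum P state, then floor the result at the minimum P state.
	 */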
	target = limits->no_turbo || limits->turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	target += target >> 2;
	target = mul_fp(target, busy_frac);
	if (target < cpu->pstate.min_pstate)
		target = cpu->pstate.min_pstate;

	/*
	 * If the average P-state during the previous cycle was higher than the
	 * current target, add 50% of the difference to the target to reduce
	 * possible performance oscillations and offset possible performance
	 * loss related to moving the workload from one CPU to another within
	 * a package/module.
	 */
	avg_pstate = get_avg_pstate(cpu);
	if (avg_pstate > target)
		target += (avg_pstate - target) >> 1;

	return target;
}

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
	int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
	u64 duration_ns;

	/*
	 * perf_scaled is the ratio of the average P-state during the last
	 * sampling period to the P-state requested last time (in percent).
	 *
	 * That measures the system's response to the previous P-state
	 * selection.
	 */
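	/*
	 * E.g. (illustrative numbers): an average ratio of half of
	 * max_pstate_physical while the requested P state was 80% of it
	 * gives perf_scaled of about int_tofp(62), i.e. the CPU delivered
	 * roughly 62% of the performance that was requested.
	 */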
	max_pstate = cpu->pstate.max_pstate_physical;
	current_pstate = cpu->pstate.current_pstate;
	perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf,
			       div_fp(100 * max_pstate, current_pstate));

	/*
	 * Since our utilization update callback will not run unless we are
	 * in C0, check if the actual elapsed time is significantly greater (3x)
	 * than our sample interval.  If it is, then we were idle for a long
	 * enough period of time to adjust our performance metric.
	 */
	duration_ns = cpu->sample.time - cpu->last_sample_time;
	if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
		sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
		perf_scaled = mul_fp(perf_scaled, sample_ratio);
	} else {
		sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
		if (sample_ratio < int_tofp(1))
			perf_scaled = 0;
	}

	cpu->sample.busy_scaled = perf_scaled;
	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
}

static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
	pstate = clamp_t(int, pstate, min_perf, max_perf);
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	return pstate;
}

static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
{
	pstate = intel_pstate_prepare_request(cpu, pstate);
	if (pstate == cpu->pstate.current_pstate)
		return;

	cpu->pstate.current_pstate = pstate;
	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int from, target_pstate;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
		cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);

	update_turbo_state();

	intel_pstate_update_pstate(cpu, target_pstate);

	sample = &cpu->sample;
	trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
		fp_toint(sample->busy_scaled),
		from,
		cpu->pstate.current_pstate,
		sample->mperf,
		sample->aperf,
		sample->tsc,
		get_avg_frequency(cpu),
		fp_toint(cpu->iowait_boost * 100));
}

static void intel_pstate_update_util(struct update_util_data *data, u64 time,
				     unsigned int flags)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	u64 delta_ns;

	if (pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load) {
		if (flags & SCHED_CPUFREQ_IOWAIT) {
			cpu->iowait_boost = int_tofp(1);
		} else if (cpu->iowait_boost) {
			/* Clear iowait_boost if the CPU may have been idle. */
			delta_ns = time - cpu->last_update;
			if (delta_ns > TICK_NSEC)
				cpu->iowait_boost = 0;
		}
		cpu->last_update = time;
	}

	delta_ns = time - cpu->sample.time;
	if ((s64)delta_ns >= pid_params.sample_rate_ns) {
		bool sample_taken = intel_pstate_sample(cpu, time);

		if (sample_taken) {
			intel_pstate_calc_avg_perf(cpu);
			if (!hwp_active)
				intel_pstate_adjust_busy_pstate(cpu);
		}
	}
}

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(INTEL_FAM6_SANDYBRIDGE,		core_params),
	ICPU(INTEL_FAM6_SANDYBRIDGE_X,		core_params),
	ICPU(INTEL_FAM6_ATOM_SILVERMONT1,	silvermont_params),
	ICPU(INTEL_FAM6_IVYBRIDGE,		core_params),
	ICPU(INTEL_FAM6_HASWELL_CORE,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_CORE,		core_params),
	ICPU(INTEL_FAM6_IVYBRIDGE_X,		core_params),
	ICPU(INTEL_FAM6_HASWELL_X,		core_params),
	ICPU(INTEL_FAM6_HASWELL_ULT,		core_params),
	ICPU(INTEL_FAM6_HASWELL_GT3E,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_GT3E,		core_params),
	ICPU(INTEL_FAM6_ATOM_AIRMONT,		airmont_params),
	ICPU(INTEL_FAM6_SKYLAKE_MOBILE,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_X,		core_params),
	ICPU(INTEL_FAM6_SKYLAKE_DESKTOP,	core_params),
	ICPU(INTEL_FAM6_BROADWELL_XEON_D,	core_params),
	ICPU(INTEL_FAM6_XEON_PHI_KNL,		knl_params),
	ICPU(INTEL_FAM6_XEON_PHI_KNM,		knl_params),
	ICPU(INTEL_FAM6_ATOM_GOLDMONT,		bxt_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
	ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
	ICPU(INTEL_FAM6_BROADWELL_X, core_params),
	ICPU(INTEL_FAM6_SKYLAKE_X, core_params),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[cpunum];

	if (!cpu) {
		unsigned int size = sizeof(struct cpudata);

		if (per_cpu_limits)
			size += sizeof(struct perf_limits);

		cpu = kzalloc(size, GFP_KERNEL);
		if (!cpu)
			return -ENOMEM;

		all_cpu_data[cpunum] = cpu;
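		/*
		 * When per-CPU limits are used, they were allocated together
		 * with the cpudata above and sit right after it in the same
		 * allocation.
		 */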
1865 		if (per_cpu_limits)
1866 			cpu->perf_limits = (struct perf_limits *)(cpu + 1);
1867 
1868 		cpu->epp_default = -EINVAL;
1869 		cpu->epp_powersave = -EINVAL;
1870 		cpu->epp_saved = -EINVAL;
1871 	}
1872 
1873 	cpu = all_cpu_data[cpunum];
1874 
1875 	cpu->cpu = cpunum;
1876 
1877 	if (hwp_active) {
1878 		intel_pstate_hwp_enable(cpu);
1879 		pid_params.sample_rate_ms = 50;
1880 		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
1881 	}
1882 
1883 	intel_pstate_get_cpu_pstates(cpu);
1884 
1885 	intel_pstate_busy_pid_reset(cpu);
1886 
1887 	pr_debug("controlling: cpu %d\n", cpunum);
1888 
1889 	return 0;
1890 }
1891 
static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	return cpu ? get_avg_frequency(cpu) : 0;
}

static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	if (cpu->update_util_set)
		return;

	/* Prevent intel_pstate_update_util() from using stale data. */
	cpu->sample.time = 0;
	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
				     intel_pstate_update_util);
	cpu->update_util_set = true;
}

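/*
 * Unregister the utilization update hook and wait for any callback in
 * flight to finish, so that callers can safely tear down per-CPU state.
 */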
static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
	struct cpudata *cpu_data = all_cpu_data[cpu];

	if (!cpu_data->update_util_set)
		return;

	cpufreq_remove_update_util_hook(cpu);
	cpu_data->update_util_set = false;
	synchronize_sched();
}

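/*
 * Seed @limits with wide-open values: turbo enabled and both the minimum
 * and maximum performance pinned at 100%.
 */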
static void intel_pstate_set_performance_limits(struct perf_limits *limits)
{
	limits->no_turbo = 0;
	limits->turbo_disabled = 0;
	limits->max_perf_pct = 100;
	limits->max_perf = int_ext_tofp(1);
	limits->min_perf_pct = 100;
	limits->min_perf = int_ext_tofp(1);
	limits->max_policy_pct = 100;
	limits->max_sysfs_pct = 100;
	limits->min_policy_pct = 0;
	limits->min_sysfs_pct = 0;
}

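/**
 * intel_pstate_update_perf_limits - Derive perf limits from a policy.
 * @policy:	cpufreq policy supplying the frequency bounds.
 * @limits:	limits structure to update.
 *
 * Convert policy->min and policy->max into percentages of
 * policy->cpuinfo.max_freq, combine them with the sysfs limits so that
 * min_perf_pct <= max_perf_pct always holds, and store the fixed-point
 * equivalents in @limits->min_perf and @limits->max_perf.
 */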
static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
					    struct perf_limits *limits)
{
	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
					      policy->cpuinfo.max_freq);
	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
	if (policy->max == policy->min) {
		limits->min_policy_pct = limits->max_policy_pct;
	} else {
		limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100,
						      policy->cpuinfo.max_freq);
		limits->min_policy_pct = clamp_t(int, limits->min_policy_pct,
						 0, 100);
	}

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
	limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
	limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);

	pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
		 limits->max_perf_pct, limits->min_perf_pct);
}

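/**
 * intel_pstate_set_policy - cpufreq ->setpolicy callback.
 * @policy:	policy to apply.
 *
 * Apply wide-open performance limits when the performance policy covers
 * the full frequency range, or recompute the limits (the CPU's own copy
 * when per_cpu_limits is set) from the policy bounds otherwise.  For the
 * performance policy the CPU is also pushed to its maximum P state right
 * away, since the utilization callback may never run on NOHZ_FULL CPUs.
 */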
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	struct perf_limits *perf_limits = NULL;

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
		 policy->cpuinfo.max_freq, policy->max);

	cpu = all_cpu_data[policy->cpu];
	cpu->policy = policy->policy;

	if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
	    policy->max < policy->cpuinfo.max_freq &&
	    policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
		pr_debug("policy->max > max non turbo frequency\n");
		policy->max = policy->cpuinfo.max_freq;
	}

	if (per_cpu_limits)
		perf_limits = cpu->perf_limits;

	mutex_lock(&intel_pstate_limits_lock);

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
		if (!perf_limits) {
			limits = &performance_limits;
			perf_limits = limits;
		}
		if (policy->max >= policy->cpuinfo.max_freq) {
			pr_debug("set performance\n");
			intel_pstate_set_performance_limits(perf_limits);
			goto out;
		}
	} else {
		pr_debug("set powersave\n");
		if (!perf_limits) {
			limits = &powersave_limits;
			perf_limits = limits;
		}
	}

	intel_pstate_update_perf_limits(policy, perf_limits);
 out:
	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
		/*
		 * NOHZ_FULL CPUs need this as the governor callback may not
		 * be invoked on them.
		 */
		intel_pstate_clear_update_util_hook(policy->cpu);
		intel_pstate_max_within_limits(cpu);
	}

	intel_pstate_set_update_util_hook(policy->cpu);

	intel_pstate_hwp_set_policy(policy);

	mutex_unlock(&intel_pstate_limits_lock);

	return 0;
}

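/*
 * Clamp the policy to the CPU's limits and reject governors other than
 * powersave and performance.
 */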
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}

static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
	intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	pr_debug("CPU %d exiting\n", policy->cpu);

	intel_pstate_clear_update_util_hook(policy->cpu);
	if (hwp_active)
		intel_pstate_hwp_save_state(policy);
	else
		intel_cpufreq_stop_cpu(policy);
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	intel_pstate_exit_perf_limits(policy);

	policy->fast_switch_possible = false;

	return 0;
}

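/*
 * Initialization common to the active and passive drivers: set up the
 * per-CPU data, seed the per-CPU limits from the global ones and derive
 * the policy bounds and cpuinfo from the CPU's P states.
 */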
static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	/*
	 * We need sane values in cpu->perf_limits, so inherit from the
	 * global limits, which are seeded during boot with values based
	 * on CONFIG_CPU_FREQ_DEFAULT_GOV_*.
	 */
	if (per_cpu_limits)
		memcpy(cpu->perf_limits, limits, sizeof(struct perf_limits));

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	update_turbo_state();
	policy->cpuinfo.max_freq = limits->turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	policy->cpuinfo.max_freq *= cpu->pstate.scaling;

	intel_pstate_init_acpi_perf_limits(policy);
	cpumask_set_cpu(policy->cpu, policy->cpus);

	policy->fast_switch_possible = true;

	return 0;
}

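/*
 * Active mode init: P state transitions are driven internally, so an
 * "eternal" transition latency is reported, and the performance policy
 * is chosen only when the limits are wide open.
 */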
static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	return 0;
}

static struct cpufreq_driver intel_pstate = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.suspend	= intel_pstate_hwp_save_state,
	.resume		= intel_pstate_resume,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};

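/*
 * ->verify callback of the passive driver: refresh the turbo-dependent
 * maximum frequency, clamp the policy to the CPU limits and recompute
 * the perf limits from the result.
 */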
static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	struct perf_limits *perf_limits = limits;

	update_turbo_state();
	policy->cpuinfo.max_freq = limits->turbo_disabled ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;

	cpufreq_verify_within_cpu_limits(policy);

	if (per_cpu_limits)
		perf_limits = cpu->perf_limits;

	mutex_lock(&intel_pstate_limits_lock);

	intel_pstate_update_perf_limits(policy, perf_limits);

	mutex_unlock(&intel_pstate_limits_lock);

	return 0;
}

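/*
 * Re-evaluate the turbo state and cap both the policy maximum and
 * @target_freq at the highest currently available frequency: the turbo
 * frequency when turbo is usable, the maximum non-turbo frequency
 * otherwise.  Returns the capped target frequency.
 */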
static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu,
					       struct cpufreq_policy *policy,
					       unsigned int target_freq)
{
	unsigned int max_freq;

	update_turbo_state();

	max_freq = limits->no_turbo || limits->turbo_disabled ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
	policy->cpuinfo.max_freq = max_freq;
	if (policy->max > max_freq)
		policy->max = max_freq;

	if (target_freq > max_freq)
		target_freq = max_freq;

	return target_freq;
}

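/*
 * ->target callback of the passive driver: convert @target_freq into a
 * P state (rounding up for CPUFREQ_RELATION_L, down for
 * CPUFREQ_RELATION_H and to the nearest P state otherwise) and write it
 * to MSR_IA32_PERF_CTL within a cpufreq frequency transition.
 */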
static int intel_cpufreq_target(struct cpufreq_policy *policy,
				unsigned int target_freq,
				unsigned int relation)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	struct cpufreq_freqs freqs;
	int target_pstate;

	freqs.old = policy->cur;
	freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq);

	cpufreq_freq_transition_begin(policy, &freqs);
	switch (relation) {
	case CPUFREQ_RELATION_L:
		target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
		break;
	case CPUFREQ_RELATION_H:
		target_pstate = freqs.new / cpu->pstate.scaling;
		break;
	default:
		target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
		break;
	}
	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	if (target_pstate != cpu->pstate.current_pstate) {
		cpu->pstate.current_pstate = target_pstate;
		wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
			      pstate_funcs.get_val(cpu, target_pstate));
	}
	cpufreq_freq_transition_end(policy, &freqs, false);

	return 0;
}

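/*
 * Fast switching variant of the above: no transition notifiers, just
 * pick the lowest P state covering @target_freq and program it.
 */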
static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int target_pstate;

	target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
	intel_pstate_update_pstate(cpu, target_pstate);
	return target_freq;
}

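/*
 * Passive mode init: unlike the active driver, report the real
 * transition latency so that governors can pace their updates.
 */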
static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
	/* This reflects the intel_pstate_get_cpu_pstates() setting. */
	policy->cur = policy->cpuinfo.min_freq;

	return 0;
}

static struct cpufreq_driver intel_cpufreq = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_cpufreq_verify_policy,
	.target		= intel_cpufreq_target,
	.fast_switch	= intel_cpufreq_fast_switch,
	.init		= intel_cpufreq_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.stop_cpu	= intel_cpufreq_stop_cpu,
	.name		= "intel_cpufreq",
};

static struct cpufreq_driver *intel_pstate_driver = &intel_pstate;

static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
static unsigned int force_load __initdata;

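/*
 * Sanity check: the minimum, maximum and turbo P state reads must all
 * return non-zero values for the MSR interface to be usable.
 */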
static int __init intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void __init copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

#ifdef CONFIG_ACPI
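/*
 * When the ACPI FADT advertises a mobile preferred profile, switch to
 * the CPU load based target P state algorithm, which also enables the
 * I/O wait boosting in intel_pstate_update_util().
 */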
static void intel_pstate_use_acpi_profile(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_MOBILE)
		pstate_funcs.get_target_pstate =
				get_target_pstate_use_cpu_load;
}
#else
static void intel_pstate_use_acpi_profile(void)
{
}
#endif

static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max   = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min   = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.get_val   = funcs->get_val;
	pstate_funcs.get_vid   = funcs->get_vid;
	pstate_funcs.get_target_pstate = funcs->get_target_pstate;

	intel_pstate_use_acpi_profile();
}

#ifdef CONFIG_ACPI

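/*
 * Return true if none of the possible CPUs exposes a valid ACPI _PSS
 * package, i.e. the firmware does not publish P state tables at all.
 */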
static bool __init intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

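/* Return true if any CPU's ACPI namespace provides a _PPC method. */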
static bool __init intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

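/**
 * struct hw_vendor_info - ACPI FADT OEM match entry
 * @valid:		Non-zero for real entries; zero terminates the table.
 * @oem_id:		FADT OEM ID to match.
 * @oem_table_id:	FADT OEM table ID to match.
 * @oem_pwr_table:	Which check to run on a match (PSS or PPC).
 */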
struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int  oem_pwr_table;
};

/* Platforms from these vendors provide their own power management modes */
static struct hw_vendor_info vendor_info[] __initdata = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};

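/*
 * Return true if the platform firmware manages P states out of band:
 * either bit 8 (OOB) of MSR_MISC_PWR_MGMT is set on a CPU from the OOB
 * table above, or the FADT OEM IDs match a vendor_info entry and the
 * corresponding PSS/PPC check succeeds (the PPC check can be overridden
 * with intel_pstate=force).
 */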
static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
			!strncmp(hdr.oem_table_id, v_info->oem_table_id,
						ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					(!force_load);
			}
	}

	return false;
}

static void intel_pstate_request_control_from_smm(void)
{
	/*
	 * It may be unsafe to request P-states control from SMM if _PPC support
	 * has not been enabled.
	 */
	if (acpi_ppc)
		acpi_processor_pstate_control();
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
	{}
};

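/*
 * Driver entry point.  Prefer HWP when the CPU supports it and it has
 * not been disabled on the command line; otherwise match against the
 * model table, copy the model's PID parameters and callbacks and
 * validate the P state MSRs.  Bail out if the platform firmware has its
 * own power management, then register the selected cpufreq driver
 * (intel_pstate by default, intel_cpufreq in passive mode).
 */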
static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;

	if (no_load)
		return -ENODEV;

	if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
		copy_cpu_funcs(&core_params.funcs);
		hwp_active++;
		intel_pstate.attr = hwp_cpufreq_attrs;
		goto hwp_cpu_matched;
	}

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

hwp_cpu_matched:
	/*
	 * Do not register the driver if the platform firmware implements
	 * its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	pr_info("Intel P-state driver initializing\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (!hwp_active && hwp_only)
		goto out;

	intel_pstate_request_control_from_smm();

	rc = cpufreq_register_driver(intel_pstate_driver);
	if (rc)
		goto out;

	if (intel_pstate_driver == &intel_pstate && !hwp_active &&
	    pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
		intel_pstate_debug_expose_params();

	intel_pstate_sysfs_expose_params();

	if (hwp_active)
		pr_info("HWP enabled\n");

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			if (intel_pstate_driver == &intel_pstate)
				intel_pstate_clear_update_util_hook(cpu);

			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);

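/*
 * Parse the "intel_pstate=" early command line options, e.g.
 * "intel_pstate=disable" to keep the driver from loading or
 * "intel_pstate=passive" to register the intel_cpufreq driver instead.
 * Each option is passed as a separate "intel_pstate=" parameter.
 */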
static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable")) {
		no_load = 1;
	} else if (!strcmp(str, "passive")) {
		pr_info("Passive mode enabled\n");
		intel_pstate_driver = &intel_cpufreq;
		no_hwp = 1;
	}
	if (!strcmp(str, "no_hwp")) {
		pr_info("HWP disabled\n");
		no_hwp = 1;
	}
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	if (!strcmp(str, "per_cpu_perf_limits"))
		per_cpu_limits = true;

#ifdef CONFIG_ACPI
	if (!strcmp(str, "support_acpi_ppc"))
		acpi_ppc = true;
#endif

	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");