1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * intel_pstate.c: Native P state management for Intel processors
4  *
5  * (C) Copyright 2012 Intel Corporation
6  * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
7  */
8 
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 
11 #include <linux/kernel.h>
12 #include <linux/kernel_stat.h>
13 #include <linux/module.h>
14 #include <linux/ktime.h>
15 #include <linux/hrtimer.h>
16 #include <linux/tick.h>
17 #include <linux/slab.h>
18 #include <linux/sched/cpufreq.h>
19 #include <linux/list.h>
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/sysfs.h>
23 #include <linux/types.h>
24 #include <linux/fs.h>
25 #include <linux/acpi.h>
26 #include <linux/vmalloc.h>
27 #include <linux/pm_qos.h>
28 #include <trace/events/power.h>
29 
30 #include <asm/div64.h>
31 #include <asm/msr.h>
32 #include <asm/cpu_device_id.h>
33 #include <asm/cpufeature.h>
34 #include <asm/intel-family.h>
35 
36 #define INTEL_PSTATE_SAMPLING_INTERVAL	(10 * NSEC_PER_MSEC)
37 
38 #define INTEL_CPUFREQ_TRANSITION_LATENCY	20000
39 #define INTEL_CPUFREQ_TRANSITION_DELAY		500
40 
41 #ifdef CONFIG_ACPI
42 #include <acpi/processor.h>
43 #include <acpi/cppc_acpi.h>
44 #endif
45 
46 #define FRAC_BITS 8
47 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
48 #define fp_toint(X) ((X) >> FRAC_BITS)
49 
50 #define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))
51 
52 #define EXT_BITS 6
53 #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
54 #define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
55 #define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)
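
/*
 * For illustration only: with FRAC_BITS = 8, the value 1.0 is represented
 * as int_tofp(1) == 0x100, and fp_toint(0x180) == 1 (0x180 stands for 1.5).
 * The extended format adds EXT_BITS more fraction bits, so EXT_FRAC_BITS
 * is 14 and int_ext_tofp(1) == 0x4000.
 */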
56 
57 static inline int32_t mul_fp(int32_t x, int32_t y)
58 {
59 	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
60 }
61 
62 static inline int32_t div_fp(s64 x, s64 y)
63 {
64 	return div64_s64((int64_t)x << FRAC_BITS, y);
65 }
66 
67 static inline int ceiling_fp(int32_t x)
68 {
69 	int mask, ret;
70 
71 	ret = fp_toint(x);
72 	mask = (1 << FRAC_BITS) - 1;
73 	if (x & mask)
74 		ret += 1;
75 	return ret;
76 }
77 
78 static inline int32_t percent_fp(int percent)
79 {
80 	return div_fp(percent, 100);
81 }
82 
83 static inline u64 mul_ext_fp(u64 x, u64 y)
84 {
85 	return (x * y) >> EXT_FRAC_BITS;
86 }
87 
88 static inline u64 div_ext_fp(u64 x, u64 y)
89 {
90 	return div64_u64(x << EXT_FRAC_BITS, y);
91 }
92 
93 static inline int32_t percent_ext_fp(int percent)
94 {
95 	return div_ext_fp(percent, 100);
96 }
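
/*
 * Example use of the helpers above (illustrative values only):
 * percent_fp(75) yields 0xC0, i.e. 0.75 in the 8-bit fraction format, and
 * mul_fp(int_tofp(2), percent_fp(75)) yields 0x180, i.e. 1.5.
 * percent_ext_fp() does the same in the extended 14-bit fraction format.
 */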
97 
98 /**
99  * struct sample -	Store performance sample
100  * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
101  *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate the next
 *			P state.  This can be different from @core_avg_perf
 *			to account for CPU idle periods.
105  * @aperf:		Difference of actual performance frequency clock count
106  *			read from APERF MSR between last and current sample
107  * @mperf:		Difference of maximum performance frequency clock count
108  *			read from MPERF MSR between last and current sample
109  * @tsc:		Difference of time stamp counter between last and
110  *			current sample
111  * @time:		Current time from scheduler
112  *
113  * This structure is used in the cpudata structure to store performance sample
114  * data for choosing next P State.
115  */
116 struct sample {
117 	int32_t core_avg_perf;
118 	int32_t busy_scaled;
119 	u64 aperf;
120 	u64 mperf;
121 	u64 tsc;
122 	u64 time;
123 };
124 
125 /**
126  * struct pstate_data - Store P state data
127  * @current_pstate:	Current requested P state
128  * @min_pstate:		Min P state possible for this platform
129  * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical: Maximum physical P state for the processor.
 *			This can be higher than @max_pstate, which may be
 *			limited by the platform's thermal design power.
 * @scaling:		Scaling factor to convert frequency to cpufreq
 *			frequency units
135  * @turbo_pstate:	Max Turbo P state possible for this platform
136  * @max_freq:		@max_pstate frequency in cpufreq units
137  * @turbo_freq:		@turbo_pstate frequency in cpufreq units
138  *
139  * Stores the per cpu model P state limits and current P state.
140  */
141 struct pstate_data {
142 	int	current_pstate;
143 	int	min_pstate;
144 	int	max_pstate;
145 	int	max_pstate_physical;
146 	int	scaling;
147 	int	turbo_pstate;
148 	unsigned int max_freq;
149 	unsigned int turbo_freq;
150 };
151 
152 /**
153  * struct vid_data -	Stores voltage information data
154  * @min:		VID data for this platform corresponding to
155  *			the lowest P state
156  * @max:		VID data corresponding to the highest P State.
157  * @turbo:		VID data for turbo P state
158  * @ratio:		Ratio of (vid max - vid min) /
159  *			(max P state - Min P State)
160  *
161  * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling)
162  * This data is used in Atom platforms, where in addition to target P state,
163  * the voltage data needs to be specified to select next P State.
164  */
165 struct vid_data {
166 	int min;
167 	int max;
168 	int turbo;
169 	int32_t ratio;
170 };
171 
172 /**
173  * struct global_params - Global parameters, mostly tunable via sysfs.
174  * @no_turbo:		Whether or not to use turbo P-states.
 * @turbo_disabled:	Whether or not turbo P-states are available at all,
176  *			based on the MSR_IA32_MISC_ENABLE value and whether or
177  *			not the maximum reported turbo P-state is different from
178  *			the maximum reported non-turbo one.
179  * @turbo_disabled_mf:	The @turbo_disabled value reflected by cpuinfo.max_freq.
180  * @min_perf_pct:	Minimum capacity limit in percent of the maximum turbo
181  *			P-state capacity.
182  * @max_perf_pct:	Maximum capacity limit in percent of the maximum turbo
183  *			P-state capacity.
184  */
185 struct global_params {
186 	bool no_turbo;
187 	bool turbo_disabled;
188 	bool turbo_disabled_mf;
189 	int max_perf_pct;
190 	int min_perf_pct;
191 };
192 
193 /**
194  * struct cpudata -	Per CPU instance data storage
195  * @cpu:		CPU number for this instance data
196  * @policy:		CPUFreq policy value
197  * @update_util:	CPUFreq utility callback information
198  * @update_util_set:	CPUFreq utility callback is set
199  * @iowait_boost:	iowait-related boost fraction
200  * @last_update:	Time of the last update.
201  * @pstate:		Stores P state limits for this CPU
202  * @vid:		Stores VID limits for this CPU
203  * @last_sample_time:	Last Sample time
 * @aperf_mperf_shift:	APERF vs MPERF counting frequency difference,
 *			expressed as a shift applied to the MPERF delta
 *			when calculating CPU busy.
207  * @prev_aperf:		Last APERF value read from APERF MSR
208  * @prev_mperf:		Last MPERF value read from MPERF MSR
209  * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: IO wait time difference between the last and
 *			current sample
 * @sample:		Storage for the last sample data
213  * @min_perf_ratio:	Minimum capacity in terms of PERF or HWP ratios
214  * @max_perf_ratio:	Maximum capacity in terms of PERF or HWP ratios
215  * @acpi_perf_data:	Stores ACPI perf information read from _PSS
216  * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
217  * @epp_powersave:	Last saved HWP energy performance preference
218  *			(EPP) or energy performance bias (EPB),
219  *			when policy switched to performance
220  * @epp_policy:		Last saved policy used to set EPP/EPB
221  * @epp_default:	Power on default HWP energy performance
222  *			preference/bias
223  * @epp_saved:		Saved EPP/EPB during system suspend or CPU offline
224  *			operation
225  * @hwp_req_cached:	Cached value of the last HWP Request MSR
226  * @hwp_cap_cached:	Cached value of the last HWP Capabilities MSR
227  * @last_io_update:	Last time when IO wake flag was set
228  * @sched_flags:	Store scheduler flags for possible cross CPU update
229  * @hwp_boost_min:	Last HWP boosted min performance
230  *
231  * This structure stores per CPU instance data for all CPUs.
232  */
233 struct cpudata {
234 	int cpu;
235 
236 	unsigned int policy;
237 	struct update_util_data update_util;
238 	bool   update_util_set;
239 
240 	struct pstate_data pstate;
241 	struct vid_data vid;
242 
243 	u64	last_update;
244 	u64	last_sample_time;
245 	u64	aperf_mperf_shift;
246 	u64	prev_aperf;
247 	u64	prev_mperf;
248 	u64	prev_tsc;
249 	u64	prev_cummulative_iowait;
250 	struct sample sample;
251 	int32_t	min_perf_ratio;
252 	int32_t	max_perf_ratio;
253 #ifdef CONFIG_ACPI
254 	struct acpi_processor_performance acpi_perf_data;
255 	bool valid_pss_table;
256 #endif
257 	unsigned int iowait_boost;
258 	s16 epp_powersave;
259 	s16 epp_policy;
260 	s16 epp_default;
261 	s16 epp_saved;
262 	u64 hwp_req_cached;
263 	u64 hwp_cap_cached;
264 	u64 last_io_update;
265 	unsigned int sched_flags;
266 	u32 hwp_boost_min;
267 };
268 
269 static struct cpudata **all_cpu_data;
270 
271 /**
272  * struct pstate_funcs - Per CPU model specific callbacks
273  * @get_max:		Callback to get maximum non turbo effective P state
274  * @get_max_physical:	Callback to get maximum non turbo physical P state
275  * @get_min:		Callback to get minimum P state
276  * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF counting
 *			frequency difference, expressed as a shift
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 *
 * Core and Atom CPU models have different ways to get P State limits. This
 * structure is used to store those callbacks.
283  */
284 struct pstate_funcs {
285 	int (*get_max)(void);
286 	int (*get_max_physical)(void);
287 	int (*get_min)(void);
288 	int (*get_turbo)(void);
289 	int (*get_scaling)(void);
290 	int (*get_aperf_mperf_shift)(void);
291 	u64 (*get_val)(struct cpudata*, int pstate);
292 	void (*get_vid)(struct cpudata *);
293 };
294 
295 static struct pstate_funcs pstate_funcs __read_mostly;
296 
297 static int hwp_active __read_mostly;
298 static int hwp_mode_bdw __read_mostly;
299 static bool per_cpu_limits __read_mostly;
300 static bool hwp_boost __read_mostly;
301 
302 static struct cpufreq_driver *intel_pstate_driver __read_mostly;
303 
304 #ifdef CONFIG_ACPI
305 static bool acpi_ppc;
306 #endif
307 
308 static struct global_params global;
309 
310 static DEFINE_MUTEX(intel_pstate_driver_lock);
311 static DEFINE_MUTEX(intel_pstate_limits_lock);
312 
313 #ifdef CONFIG_ACPI
314 
315 static bool intel_pstate_acpi_pm_profile_server(void)
316 {
317 	if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
318 	    acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
319 		return true;
320 
321 	return false;
322 }
323 
324 static bool intel_pstate_get_ppc_enable_status(void)
325 {
326 	if (intel_pstate_acpi_pm_profile_server())
327 		return true;
328 
329 	return acpi_ppc;
330 }
331 
332 #ifdef CONFIG_ACPI_CPPC_LIB
333 
334 /* The work item is needed to avoid CPU hotplug locking issues */
static void intel_pstate_sched_itmt_work_fn(struct work_struct *work)
336 {
337 	sched_set_itmt_support();
338 }
339 
static DECLARE_WORK(sched_itmt_work, intel_pstate_sched_itmt_work_fn);
341 
342 static void intel_pstate_set_itmt_prio(int cpu)
343 {
344 	struct cppc_perf_caps cppc_perf;
345 	static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
346 	int ret;
347 
348 	ret = cppc_get_perf_caps(cpu, &cppc_perf);
349 	if (ret)
350 		return;
351 
352 	/*
353 	 * The priorities can be set regardless of whether or not
354 	 * sched_set_itmt_support(true) has been called and it is valid to
355 	 * update them at any time after it has been called.
356 	 */
357 	sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);
358 
359 	if (max_highest_perf <= min_highest_perf) {
360 		if (cppc_perf.highest_perf > max_highest_perf)
361 			max_highest_perf = cppc_perf.highest_perf;
362 
363 		if (cppc_perf.highest_perf < min_highest_perf)
364 			min_highest_perf = cppc_perf.highest_perf;
365 
366 		if (max_highest_perf > min_highest_perf) {
367 			/*
368 			 * This code can be run during CPU online under the
369 			 * CPU hotplug locks, so sched_set_itmt_support()
370 			 * cannot be called from here.  Queue up a work item
371 			 * to invoke it.
372 			 */
373 			schedule_work(&sched_itmt_work);
374 		}
375 	}
376 }
377 
static int intel_pstate_get_cppc_guaranteed(int cpu)
379 {
380 	struct cppc_perf_caps cppc_perf;
381 	int ret;
382 
383 	ret = cppc_get_perf_caps(cpu, &cppc_perf);
384 	if (ret)
385 		return ret;
386 
387 	if (cppc_perf.guaranteed_perf)
388 		return cppc_perf.guaranteed_perf;
389 
390 	return cppc_perf.nominal_perf;
391 }
392 
393 #else /* CONFIG_ACPI_CPPC_LIB */
394 static void intel_pstate_set_itmt_prio(int cpu)
395 {
396 }
397 #endif /* CONFIG_ACPI_CPPC_LIB */
398 
399 static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
400 {
401 	struct cpudata *cpu;
402 	int ret;
403 	int i;
404 
405 	if (hwp_active) {
406 		intel_pstate_set_itmt_prio(policy->cpu);
407 		return;
408 	}
409 
410 	if (!intel_pstate_get_ppc_enable_status())
411 		return;
412 
413 	cpu = all_cpu_data[policy->cpu];
414 
415 	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
416 						  policy->cpu);
417 	if (ret)
418 		return;
419 
420 	/*
421 	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
422 	 * guarantee that the states returned by it map to the states in our
423 	 * list directly.
424 	 */
425 	if (cpu->acpi_perf_data.control_register.space_id !=
426 						ACPI_ADR_SPACE_FIXED_HARDWARE)
427 		goto err;
428 
	/*
	 * If there is only one entry in _PSS, simply ignore it and continue
	 * as usual without taking it into account.
	 */
433 	if (cpu->acpi_perf_data.state_count < 2)
434 		goto err;
435 
436 	pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
437 	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
438 		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
439 			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
440 			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
441 			 (u32) cpu->acpi_perf_data.states[i].power,
442 			 (u32) cpu->acpi_perf_data.states[i].control);
443 	}
444 
	/*
	 * The _PSS table doesn't cover the whole turbo frequency range.
	 * It only contains one entry at +1 MHz above the maximum non-turbo
	 * frequency, with a control value corresponding to the maximum turbo
	 * ratio.  However, when cpufreq's set_policy is called with that
	 * frequency as the maximum, performance is reduced, because this
	 * driver uses the real maximum turbo frequency as the maximum.  So
	 * fix up that entry in the _PSS table to hold the real maximum turbo
	 * frequency based on the turbo state, converted to MHz since _PSS
	 * frequencies are in MHz.
	 */
456 	if (!global.turbo_disabled)
457 		cpu->acpi_perf_data.states[0].core_frequency =
458 					policy->cpuinfo.max_freq / 1000;
459 	cpu->valid_pss_table = true;
460 	pr_debug("_PPC limits will be enforced\n");
461 
462 	return;
463 
464  err:
465 	cpu->valid_pss_table = false;
466 	acpi_processor_unregister_performance(policy->cpu);
467 }
468 
469 static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
470 {
471 	struct cpudata *cpu;
472 
473 	cpu = all_cpu_data[policy->cpu];
474 	if (!cpu->valid_pss_table)
475 		return;
476 
477 	acpi_processor_unregister_performance(policy->cpu);
478 }
479 #else /* CONFIG_ACPI */
480 static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
481 {
482 }
483 
484 static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
485 {
486 }
487 
488 static inline bool intel_pstate_acpi_pm_profile_server(void)
489 {
490 	return false;
491 }
492 #endif /* CONFIG_ACPI */
493 
494 #ifndef CONFIG_ACPI_CPPC_LIB
static int intel_pstate_get_cppc_guaranteed(int cpu)
496 {
497 	return -ENOTSUPP;
498 }
499 #endif /* CONFIG_ACPI_CPPC_LIB */
500 
501 static inline void update_turbo_state(void)
502 {
503 	u64 misc_en;
504 	struct cpudata *cpu;
505 
506 	cpu = all_cpu_data[0];
507 	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
508 	global.turbo_disabled =
509 		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
510 		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
511 }
512 
513 static int min_perf_pct_min(void)
514 {
515 	struct cpudata *cpu = all_cpu_data[0];
516 	int turbo_pstate = cpu->pstate.turbo_pstate;
517 
518 	return turbo_pstate ?
519 		(cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
520 }
521 
522 static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
523 {
524 	u64 epb;
525 	int ret;
526 
527 	if (!boot_cpu_has(X86_FEATURE_EPB))
528 		return -ENXIO;
529 
530 	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
531 	if (ret)
532 		return (s16)ret;
533 
534 	return (s16)(epb & 0x0f);
535 }
536 
537 static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
538 {
539 	s16 epp;
540 
541 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * If hwp_req_data is 0, the caller didn't read
		 * MSR_HWP_REQUEST, so read it here to get the EPP.
		 */
546 		if (!hwp_req_data) {
547 			epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
548 					    &hwp_req_data);
549 			if (epp)
550 				return epp;
551 		}
552 		epp = (hwp_req_data >> 24) & 0xff;
553 	} else {
554 		/* When there is no EPP present, HWP uses EPB settings */
555 		epp = intel_pstate_get_epb(cpu_data);
556 	}
557 
558 	return epp;
559 }
560 
561 static int intel_pstate_set_epb(int cpu, s16 pref)
562 {
563 	u64 epb;
564 	int ret;
565 
566 	if (!boot_cpu_has(X86_FEATURE_EPB))
567 		return -ENXIO;
568 
569 	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
570 	if (ret)
571 		return ret;
572 
573 	epb = (epb & ~0x0f) | pref;
574 	wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
575 
576 	return 0;
577 }
578 
579 /*
580  * EPP/EPB display strings corresponding to EPP index in the
581  * energy_perf_strings[]
582  *	index		String
583  *-------------------------------------
584  *	0		default
585  *	1		performance
586  *	2		balance_performance
587  *	3		balance_power
588  *	4		power
589  */
590 static const char * const energy_perf_strings[] = {
591 	"default",
592 	"performance",
593 	"balance_performance",
594 	"balance_power",
595 	"power",
596 	NULL
597 };
598 static const unsigned int epp_values[] = {
599 	HWP_EPP_PERFORMANCE,
600 	HWP_EPP_BALANCE_PERFORMANCE,
601 	HWP_EPP_BALANCE_POWERSAVE,
602 	HWP_EPP_POWERSAVE
603 };
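
/*
 * Note for illustration: epp_values[] is indexed by pref_index - 1, so it
 * maps to energy_perf_strings[] shifted by one ("default" has no fixed EPP
 * value).  For example, writing "balance_power" (index 3) to the
 * energy_performance_preference sysfs attribute results in
 * epp_values[2] == HWP_EPP_BALANCE_POWERSAVE being programmed.
 */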
604 
605 static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
606 {
607 	s16 epp;
608 	int index = -EINVAL;
609 
610 	epp = intel_pstate_get_epp(cpu_data, 0);
611 	if (epp < 0)
612 		return epp;
613 
614 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
615 		if (epp == HWP_EPP_PERFORMANCE)
616 			return 1;
617 		if (epp <= HWP_EPP_BALANCE_PERFORMANCE)
618 			return 2;
619 		if (epp <= HWP_EPP_BALANCE_POWERSAVE)
620 			return 3;
621 		else
622 			return 4;
623 	} else if (boot_cpu_has(X86_FEATURE_EPB)) {
624 		/*
625 		 * Range:
626 		 *	0x00-0x03	:	Performance
627 		 *	0x04-0x07	:	Balance performance
628 		 *	0x08-0x0B	:	Balance power
629 		 *	0x0C-0x0F	:	Power
		 * The EPB is a 4-bit value, but our ranges restrict the
		 * values that can be set.  Effectively only the top two
		 * bits are used here.
633 		 */
634 		index = (epp >> 2) + 1;
635 	}
636 
637 	return index;
638 }
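
/*
 * Worked example (EPB path): with an EPB value of 0x06 the function above
 * returns (0x06 >> 2) + 1 == 2, i.e. "balance_performance" in
 * energy_perf_strings[].  An EPB of 0x0D maps to (0x0D >> 2) + 1 == 4,
 * i.e. "power".
 */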
639 
640 static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
641 					      int pref_index)
642 {
643 	int epp = -EINVAL;
644 	int ret;
645 
646 	if (!pref_index)
647 		epp = cpu_data->epp_default;
648 
649 	mutex_lock(&intel_pstate_limits_lock);
650 
651 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
652 		u64 value;
653 
654 		ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
655 		if (ret)
656 			goto return_pref;
657 
658 		value &= ~GENMASK_ULL(31, 24);
659 
660 		if (epp == -EINVAL)
661 			epp = epp_values[pref_index - 1];
662 
663 		value |= (u64)epp << 24;
664 		ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
665 	} else {
666 		if (epp == -EINVAL)
667 			epp = (pref_index - 1) << 2;
668 		ret = intel_pstate_set_epb(cpu_data->cpu, epp);
669 	}
670 return_pref:
671 	mutex_unlock(&intel_pstate_limits_lock);
672 
673 	return ret;
674 }
675 
676 static ssize_t show_energy_performance_available_preferences(
677 				struct cpufreq_policy *policy, char *buf)
678 {
679 	int i = 0;
680 	int ret = 0;
681 
682 	while (energy_perf_strings[i] != NULL)
683 		ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);
684 
685 	ret += sprintf(&buf[ret], "\n");
686 
687 	return ret;
688 }
689 
690 cpufreq_freq_attr_ro(energy_performance_available_preferences);
691 
692 static ssize_t store_energy_performance_preference(
693 		struct cpufreq_policy *policy, const char *buf, size_t count)
694 {
695 	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
696 	char str_preference[21];
697 	int ret;
698 
699 	ret = sscanf(buf, "%20s", str_preference);
700 	if (ret != 1)
701 		return -EINVAL;
702 
703 	ret = match_string(energy_perf_strings, -1, str_preference);
704 	if (ret < 0)
705 		return ret;
706 
707 	intel_pstate_set_energy_pref_index(cpu_data, ret);
708 	return count;
709 }
710 
711 static ssize_t show_energy_performance_preference(
712 				struct cpufreq_policy *policy, char *buf)
713 {
714 	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
715 	int preference;
716 
717 	preference = intel_pstate_get_energy_pref_index(cpu_data);
718 	if (preference < 0)
719 		return preference;
720 
721 	return  sprintf(buf, "%s\n", energy_perf_strings[preference]);
722 }
723 
724 cpufreq_freq_attr_rw(energy_performance_preference);
725 
726 static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
727 {
728 	struct cpudata *cpu;
729 	u64 cap;
730 	int ratio;
731 
	ratio = intel_pstate_get_cppc_guaranteed(policy->cpu);
733 	if (ratio <= 0) {
734 		rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
735 		ratio = HWP_GUARANTEED_PERF(cap);
736 	}
737 
738 	cpu = all_cpu_data[policy->cpu];
739 
740 	return sprintf(buf, "%d\n", ratio * cpu->pstate.scaling);
741 }
742 
743 cpufreq_freq_attr_ro(base_frequency);
744 
745 static struct freq_attr *hwp_cpufreq_attrs[] = {
746 	&energy_performance_preference,
747 	&energy_performance_available_preferences,
748 	&base_frequency,
749 	NULL,
750 };
751 
752 static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
753 				     int *current_max)
754 {
755 	u64 cap;
756 
757 	rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
758 	WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap);
759 	if (global.no_turbo)
760 		*current_max = HWP_GUARANTEED_PERF(cap);
761 	else
762 		*current_max = HWP_HIGHEST_PERF(cap);
763 
764 	*phy_max = HWP_HIGHEST_PERF(cap);
765 }
766 
767 static void intel_pstate_hwp_set(unsigned int cpu)
768 {
769 	struct cpudata *cpu_data = all_cpu_data[cpu];
770 	int max, min;
771 	u64 value;
772 	s16 epp;
773 
774 	max = cpu_data->max_perf_ratio;
775 	min = cpu_data->min_perf_ratio;
776 
777 	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
778 		min = max;
779 
780 	rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
781 
782 	value &= ~HWP_MIN_PERF(~0L);
783 	value |= HWP_MIN_PERF(min);
784 
785 	value &= ~HWP_MAX_PERF(~0L);
786 	value |= HWP_MAX_PERF(max);
787 
788 	if (cpu_data->epp_policy == cpu_data->policy)
789 		goto skip_epp;
790 
791 	cpu_data->epp_policy = cpu_data->policy;
792 
793 	if (cpu_data->epp_saved >= 0) {
794 		epp = cpu_data->epp_saved;
795 		cpu_data->epp_saved = -EINVAL;
796 		goto update_epp;
797 	}
798 
799 	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
800 		epp = intel_pstate_get_epp(cpu_data, value);
801 		cpu_data->epp_powersave = epp;
		/* If the EPP read failed, don't try to write it. */
803 		if (epp < 0)
804 			goto skip_epp;
805 
806 		epp = 0;
807 	} else {
		/* Skip setting EPP when the saved value is invalid. */
		if (cpu_data->epp_powersave < 0)
			goto skip_epp;

		/*
		 * No need to restore EPP when it is not zero.  This means:
		 *  - the policy has not changed,
		 *  - the user has changed it manually, or
		 *  - there was an error reading the EPB.
		 */
819 		epp = intel_pstate_get_epp(cpu_data, value);
820 		if (epp)
821 			goto skip_epp;
822 
823 		epp = cpu_data->epp_powersave;
824 	}
825 update_epp:
826 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
827 		value &= ~GENMASK_ULL(31, 24);
828 		value |= (u64)epp << 24;
829 	} else {
830 		intel_pstate_set_epb(cpu, epp);
831 	}
832 skip_epp:
833 	WRITE_ONCE(cpu_data->hwp_req_cached, value);
834 	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
835 }
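
/*
 * Layout of MSR_HWP_REQUEST as used above: bits 7:0 hold the minimum
 * performance ratio, bits 15:8 the maximum, and bits 31:24 the energy
 * performance preference (EPP) when X86_FEATURE_HWP_EPP is present.
 */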
836 
837 static void intel_pstate_hwp_force_min_perf(int cpu)
838 {
839 	u64 value;
840 	int min_perf;
841 
842 	value = all_cpu_data[cpu]->hwp_req_cached;
843 	value &= ~GENMASK_ULL(31, 0);
844 	min_perf = HWP_LOWEST_PERF(all_cpu_data[cpu]->hwp_cap_cached);
845 
846 	/* Set hwp_max = hwp_min */
847 	value |= HWP_MAX_PERF(min_perf);
848 	value |= HWP_MIN_PERF(min_perf);
849 
850 	/* Set EPP/EPB to min */
851 	if (boot_cpu_has(X86_FEATURE_HWP_EPP))
852 		value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
853 	else
854 		intel_pstate_set_epb(cpu, HWP_EPP_BALANCE_POWERSAVE);
855 
856 	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
857 }
858 
859 static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
860 {
861 	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
862 
863 	if (!hwp_active)
864 		return 0;
865 
866 	cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0);
867 
868 	return 0;
869 }
870 
871 static void intel_pstate_hwp_enable(struct cpudata *cpudata);
872 
873 static int intel_pstate_resume(struct cpufreq_policy *policy)
874 {
875 	if (!hwp_active)
876 		return 0;
877 
878 	mutex_lock(&intel_pstate_limits_lock);
879 
880 	if (policy->cpu == 0)
881 		intel_pstate_hwp_enable(all_cpu_data[policy->cpu]);
882 
883 	all_cpu_data[policy->cpu]->epp_policy = 0;
884 	intel_pstate_hwp_set(policy->cpu);
885 
886 	mutex_unlock(&intel_pstate_limits_lock);
887 
888 	return 0;
889 }
890 
891 static void intel_pstate_update_policies(void)
892 {
893 	int cpu;
894 
895 	for_each_possible_cpu(cpu)
896 		cpufreq_update_policy(cpu);
897 }
898 
899 static void intel_pstate_update_max_freq(unsigned int cpu)
900 {
901 	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
902 	struct cpudata *cpudata;
903 
904 	if (!policy)
905 		return;
906 
907 	cpudata = all_cpu_data[cpu];
908 	policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
909 			cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
910 
911 	refresh_frequency_limits(policy);
912 
913 	cpufreq_cpu_release(policy);
914 }
915 
916 static void intel_pstate_update_limits(unsigned int cpu)
917 {
918 	mutex_lock(&intel_pstate_driver_lock);
919 
920 	update_turbo_state();
921 	/*
922 	 * If turbo has been turned on or off globally, policy limits for
923 	 * all CPUs need to be updated to reflect that.
924 	 */
925 	if (global.turbo_disabled_mf != global.turbo_disabled) {
926 		global.turbo_disabled_mf = global.turbo_disabled;
927 		for_each_possible_cpu(cpu)
928 			intel_pstate_update_max_freq(cpu);
929 	} else {
930 		cpufreq_update_policy(cpu);
931 	}
932 
933 	mutex_unlock(&intel_pstate_driver_lock);
934 }
935 
936 /************************** sysfs begin ************************/
937 #define show_one(file_name, object)					\
938 	static ssize_t show_##file_name					\
939 	(struct kobject *kobj, struct kobj_attribute *attr, char *buf)	\
940 	{								\
941 		return sprintf(buf, "%u\n", global.object);		\
942 	}
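
/*
 * For reference, show_one(max_perf_pct, max_perf_pct) below expands to
 * roughly the following (illustration only):
 *
 *	static ssize_t show_max_perf_pct(struct kobject *kobj,
 *					 struct kobj_attribute *attr, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", global.max_perf_pct);
 *	}
 */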
943 
944 static ssize_t intel_pstate_show_status(char *buf);
945 static int intel_pstate_update_status(const char *buf, size_t size);
946 
947 static ssize_t show_status(struct kobject *kobj,
948 			   struct kobj_attribute *attr, char *buf)
949 {
950 	ssize_t ret;
951 
952 	mutex_lock(&intel_pstate_driver_lock);
953 	ret = intel_pstate_show_status(buf);
954 	mutex_unlock(&intel_pstate_driver_lock);
955 
956 	return ret;
957 }
958 
959 static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
960 			    const char *buf, size_t count)
961 {
962 	char *p = memchr(buf, '\n', count);
963 	int ret;
964 
965 	mutex_lock(&intel_pstate_driver_lock);
966 	ret = intel_pstate_update_status(buf, p ? p - buf : count);
967 	mutex_unlock(&intel_pstate_driver_lock);
968 
969 	return ret < 0 ? ret : count;
970 }
971 
972 static ssize_t show_turbo_pct(struct kobject *kobj,
973 				struct kobj_attribute *attr, char *buf)
974 {
975 	struct cpudata *cpu;
976 	int total, no_turbo, turbo_pct;
977 	uint32_t turbo_fp;
978 
979 	mutex_lock(&intel_pstate_driver_lock);
980 
981 	if (!intel_pstate_driver) {
982 		mutex_unlock(&intel_pstate_driver_lock);
983 		return -EAGAIN;
984 	}
985 
986 	cpu = all_cpu_data[0];
987 
988 	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
989 	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
990 	turbo_fp = div_fp(no_turbo, total);
991 	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
992 
993 	mutex_unlock(&intel_pstate_driver_lock);
994 
995 	return sprintf(buf, "%u\n", turbo_pct);
996 }
997 
998 static ssize_t show_num_pstates(struct kobject *kobj,
999 				struct kobj_attribute *attr, char *buf)
1000 {
1001 	struct cpudata *cpu;
1002 	int total;
1003 
1004 	mutex_lock(&intel_pstate_driver_lock);
1005 
1006 	if (!intel_pstate_driver) {
1007 		mutex_unlock(&intel_pstate_driver_lock);
1008 		return -EAGAIN;
1009 	}
1010 
1011 	cpu = all_cpu_data[0];
1012 	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
1013 
1014 	mutex_unlock(&intel_pstate_driver_lock);
1015 
1016 	return sprintf(buf, "%u\n", total);
1017 }
1018 
1019 static ssize_t show_no_turbo(struct kobject *kobj,
1020 			     struct kobj_attribute *attr, char *buf)
1021 {
1022 	ssize_t ret;
1023 
1024 	mutex_lock(&intel_pstate_driver_lock);
1025 
1026 	if (!intel_pstate_driver) {
1027 		mutex_unlock(&intel_pstate_driver_lock);
1028 		return -EAGAIN;
1029 	}
1030 
1031 	update_turbo_state();
1032 	if (global.turbo_disabled)
1033 		ret = sprintf(buf, "%u\n", global.turbo_disabled);
1034 	else
1035 		ret = sprintf(buf, "%u\n", global.no_turbo);
1036 
1037 	mutex_unlock(&intel_pstate_driver_lock);
1038 
1039 	return ret;
1040 }
1041 
1042 static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
1043 			      const char *buf, size_t count)
1044 {
1045 	unsigned int input;
1046 	int ret;
1047 
1048 	ret = sscanf(buf, "%u", &input);
1049 	if (ret != 1)
1050 		return -EINVAL;
1051 
1052 	mutex_lock(&intel_pstate_driver_lock);
1053 
1054 	if (!intel_pstate_driver) {
1055 		mutex_unlock(&intel_pstate_driver_lock);
1056 		return -EAGAIN;
1057 	}
1058 
1059 	mutex_lock(&intel_pstate_limits_lock);
1060 
1061 	update_turbo_state();
1062 	if (global.turbo_disabled) {
1063 		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
1064 		mutex_unlock(&intel_pstate_limits_lock);
1065 		mutex_unlock(&intel_pstate_driver_lock);
1066 		return -EPERM;
1067 	}
1068 
1069 	global.no_turbo = clamp_t(int, input, 0, 1);
1070 
1071 	if (global.no_turbo) {
1072 		struct cpudata *cpu = all_cpu_data[0];
1073 		int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
1074 
1075 		/* Squash the global minimum into the permitted range. */
1076 		if (global.min_perf_pct > pct)
1077 			global.min_perf_pct = pct;
1078 	}
1079 
1080 	mutex_unlock(&intel_pstate_limits_lock);
1081 
1082 	intel_pstate_update_policies();
1083 
1084 	mutex_unlock(&intel_pstate_driver_lock);
1085 
1086 	return count;
1087 }
1088 
1089 static struct cpufreq_driver intel_pstate;
1090 
1091 static void update_qos_request(enum dev_pm_qos_req_type type)
1092 {
1093 	int max_state, turbo_max, freq, i, perf_pct;
1094 	struct dev_pm_qos_request *req;
1095 	struct cpufreq_policy *policy;
1096 
1097 	for_each_possible_cpu(i) {
1098 		struct cpudata *cpu = all_cpu_data[i];
1099 
1100 		policy = cpufreq_cpu_get(i);
1101 		if (!policy)
1102 			continue;
1103 
1104 		req = policy->driver_data;
1105 		cpufreq_cpu_put(policy);
1106 
1107 		if (!req)
1108 			continue;
1109 
1110 		if (hwp_active)
1111 			intel_pstate_get_hwp_max(i, &turbo_max, &max_state);
1112 		else
1113 			turbo_max = cpu->pstate.turbo_pstate;
1114 
1115 		if (type == DEV_PM_QOS_MIN_FREQUENCY) {
1116 			perf_pct = global.min_perf_pct;
1117 		} else {
1118 			req++;
1119 			perf_pct = global.max_perf_pct;
1120 		}
1121 
1122 		freq = DIV_ROUND_UP(turbo_max * perf_pct, 100);
1123 		freq *= cpu->pstate.scaling;
1124 
1125 		if (dev_pm_qos_update_request(req, freq) < 0)
1126 			pr_warn("Failed to update freq constraint: CPU%d\n", i);
1127 	}
1128 }
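
/*
 * Illustrative numbers for the computation above: with turbo_max == 36,
 * global.min_perf_pct == 25 and a scaling factor of 100000, the minimum
 * frequency constraint becomes DIV_ROUND_UP(36 * 25, 100) * 100000 ==
 * 9 * 100000 == 900000 kHz.
 */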
1129 
1130 static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
1131 				  const char *buf, size_t count)
1132 {
1133 	unsigned int input;
1134 	int ret;
1135 
1136 	ret = sscanf(buf, "%u", &input);
1137 	if (ret != 1)
1138 		return -EINVAL;
1139 
1140 	mutex_lock(&intel_pstate_driver_lock);
1141 
1142 	if (!intel_pstate_driver) {
1143 		mutex_unlock(&intel_pstate_driver_lock);
1144 		return -EAGAIN;
1145 	}
1146 
1147 	mutex_lock(&intel_pstate_limits_lock);
1148 
1149 	global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100);
1150 
1151 	mutex_unlock(&intel_pstate_limits_lock);
1152 
1153 	if (intel_pstate_driver == &intel_pstate)
1154 		intel_pstate_update_policies();
1155 	else
1156 		update_qos_request(DEV_PM_QOS_MAX_FREQUENCY);
1157 
1158 	mutex_unlock(&intel_pstate_driver_lock);
1159 
1160 	return count;
1161 }
1162 
1163 static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
1164 				  const char *buf, size_t count)
1165 {
1166 	unsigned int input;
1167 	int ret;
1168 
1169 	ret = sscanf(buf, "%u", &input);
1170 	if (ret != 1)
1171 		return -EINVAL;
1172 
1173 	mutex_lock(&intel_pstate_driver_lock);
1174 
1175 	if (!intel_pstate_driver) {
1176 		mutex_unlock(&intel_pstate_driver_lock);
1177 		return -EAGAIN;
1178 	}
1179 
1180 	mutex_lock(&intel_pstate_limits_lock);
1181 
1182 	global.min_perf_pct = clamp_t(int, input,
1183 				      min_perf_pct_min(), global.max_perf_pct);
1184 
1185 	mutex_unlock(&intel_pstate_limits_lock);
1186 
1187 	if (intel_pstate_driver == &intel_pstate)
1188 		intel_pstate_update_policies();
1189 	else
1190 		update_qos_request(DEV_PM_QOS_MIN_FREQUENCY);
1191 
1192 	mutex_unlock(&intel_pstate_driver_lock);
1193 
1194 	return count;
1195 }
1196 
1197 static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
1198 				struct kobj_attribute *attr, char *buf)
1199 {
1200 	return sprintf(buf, "%u\n", hwp_boost);
1201 }
1202 
1203 static ssize_t store_hwp_dynamic_boost(struct kobject *a,
1204 				       struct kobj_attribute *b,
1205 				       const char *buf, size_t count)
1206 {
1207 	unsigned int input;
1208 	int ret;
1209 
1210 	ret = kstrtouint(buf, 10, &input);
1211 	if (ret)
1212 		return ret;
1213 
1214 	mutex_lock(&intel_pstate_driver_lock);
1215 	hwp_boost = !!input;
1216 	intel_pstate_update_policies();
1217 	mutex_unlock(&intel_pstate_driver_lock);
1218 
1219 	return count;
1220 }
1221 
1222 show_one(max_perf_pct, max_perf_pct);
1223 show_one(min_perf_pct, min_perf_pct);
1224 
1225 define_one_global_rw(status);
1226 define_one_global_rw(no_turbo);
1227 define_one_global_rw(max_perf_pct);
1228 define_one_global_rw(min_perf_pct);
1229 define_one_global_ro(turbo_pct);
1230 define_one_global_ro(num_pstates);
1231 define_one_global_rw(hwp_dynamic_boost);
1232 
1233 static struct attribute *intel_pstate_attributes[] = {
1234 	&status.attr,
1235 	&no_turbo.attr,
1236 	&turbo_pct.attr,
1237 	&num_pstates.attr,
1238 	NULL
1239 };
1240 
1241 static const struct attribute_group intel_pstate_attr_group = {
1242 	.attrs = intel_pstate_attributes,
1243 };
1244 
1245 static void __init intel_pstate_sysfs_expose_params(void)
1246 {
1247 	struct kobject *intel_pstate_kobject;
1248 	int rc;
1249 
1250 	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
1251 						&cpu_subsys.dev_root->kobj);
1252 	if (WARN_ON(!intel_pstate_kobject))
1253 		return;
1254 
1255 	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
1256 	if (WARN_ON(rc))
1257 		return;
1258 
	/*
	 * If per-CPU limits are enforced, there are no global limits, so
	 * return without creating the max/min_perf_pct attributes.
	 */
1263 	if (per_cpu_limits)
1264 		return;
1265 
1266 	rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
1267 	WARN_ON(rc);
1268 
1269 	rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
1270 	WARN_ON(rc);
1271 
1272 	if (hwp_active) {
1273 		rc = sysfs_create_file(intel_pstate_kobject,
1274 				       &hwp_dynamic_boost.attr);
1275 		WARN_ON(rc);
1276 	}
1277 }
1278 /************************** sysfs end ************************/
1279 
1280 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
1281 {
	/* First disable HWP notification interrupts, as we don't process them. */
1283 	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
1284 		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
1285 
1286 	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
1287 	cpudata->epp_policy = 0;
1288 	if (cpudata->epp_default == -EINVAL)
1289 		cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
1290 }
1291 
1292 #define MSR_IA32_POWER_CTL_BIT_EE	19
1293 
1294 /* Disable energy efficiency optimization */
1295 static void intel_pstate_disable_ee(int cpu)
1296 {
1297 	u64 power_ctl;
1298 	int ret;
1299 
1300 	ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl);
1301 	if (ret)
1302 		return;
1303 
1304 	if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) {
1305 		pr_info("Disabling energy efficiency optimization\n");
1306 		power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
1307 		wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
1308 	}
1309 }
1310 
1311 static int atom_get_min_pstate(void)
1312 {
1313 	u64 value;
1314 
1315 	rdmsrl(MSR_ATOM_CORE_RATIOS, value);
1316 	return (value >> 8) & 0x7F;
1317 }
1318 
1319 static int atom_get_max_pstate(void)
1320 {
1321 	u64 value;
1322 
1323 	rdmsrl(MSR_ATOM_CORE_RATIOS, value);
1324 	return (value >> 16) & 0x7F;
1325 }
1326 
1327 static int atom_get_turbo_pstate(void)
1328 {
1329 	u64 value;
1330 
1331 	rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
1332 	return value & 0x7F;
1333 }
1334 
1335 static u64 atom_get_val(struct cpudata *cpudata, int pstate)
1336 {
1337 	u64 val;
1338 	int32_t vid_fp;
1339 	u32 vid;
1340 
1341 	val = (u64)pstate << 8;
1342 	if (global.no_turbo && !global.turbo_disabled)
1343 		val |= (u64)1 << 32;
1344 
1345 	vid_fp = cpudata->vid.min + mul_fp(
1346 		int_tofp(pstate - cpudata->pstate.min_pstate),
1347 		cpudata->vid.ratio);
1348 
1349 	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
1350 	vid = ceiling_fp(vid_fp);
1351 
1352 	if (pstate > cpudata->pstate.max_pstate)
1353 		vid = cpudata->vid.turbo;
1354 
1355 	return val | vid;
1356 }
1357 
1358 static int silvermont_get_scaling(void)
1359 {
1360 	u64 value;
1361 	int i;
1362 	/* Defined in Table 35-6 from SDM (Sept 2015) */
1363 	static int silvermont_freq_table[] = {
1364 		83300, 100000, 133300, 116700, 80000};
1365 
1366 	rdmsrl(MSR_FSB_FREQ, value);
1367 	i = value & 0x7;
1368 	WARN_ON(i > 4);
1369 
1370 	return silvermont_freq_table[i];
1371 }
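
/*
 * Example: if MSR_FSB_FREQ reports an encoding of 1, the bus clock is
 * 100000 kHz, so a P-state ratio of 20 corresponds to 20 * 100000 kHz,
 * i.e. 2 GHz.  The same convention is used by airmont_get_scaling() below.
 */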
1372 
1373 static int airmont_get_scaling(void)
1374 {
1375 	u64 value;
1376 	int i;
1377 	/* Defined in Table 35-10 from SDM (Sept 2015) */
1378 	static int airmont_freq_table[] = {
1379 		83300, 100000, 133300, 116700, 80000,
1380 		93300, 90000, 88900, 87500};
1381 
1382 	rdmsrl(MSR_FSB_FREQ, value);
1383 	i = value & 0xF;
1384 	WARN_ON(i > 8);
1385 
1386 	return airmont_freq_table[i];
1387 }
1388 
1389 static void atom_get_vid(struct cpudata *cpudata)
1390 {
1391 	u64 value;
1392 
1393 	rdmsrl(MSR_ATOM_CORE_VIDS, value);
1394 	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
1395 	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
1396 	cpudata->vid.ratio = div_fp(
1397 		cpudata->vid.max - cpudata->vid.min,
1398 		int_tofp(cpudata->pstate.max_pstate -
1399 			cpudata->pstate.min_pstate));
1400 
1401 	rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
1402 	cpudata->vid.turbo = value & 0x7f;
1403 }
1404 
1405 static int core_get_min_pstate(void)
1406 {
1407 	u64 value;
1408 
1409 	rdmsrl(MSR_PLATFORM_INFO, value);
1410 	return (value >> 40) & 0xFF;
1411 }
1412 
1413 static int core_get_max_pstate_physical(void)
1414 {
1415 	u64 value;
1416 
1417 	rdmsrl(MSR_PLATFORM_INFO, value);
1418 	return (value >> 8) & 0xFF;
1419 }
1420 
1421 static int core_get_tdp_ratio(u64 plat_info)
1422 {
	/* Check how many TDP levels are present. */
1424 	if (plat_info & 0x600000000) {
1425 		u64 tdp_ctrl;
1426 		u64 tdp_ratio;
1427 		int tdp_msr;
1428 		int err;
1429 
1430 		/* Get the TDP level (0, 1, 2) to get ratios */
1431 		err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
1432 		if (err)
1433 			return err;
1434 
		/* The TDP MSRs are contiguous, starting at 0x648. */
1436 		tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
1437 		err = rdmsrl_safe(tdp_msr, &tdp_ratio);
1438 		if (err)
1439 			return err;
1440 
1441 		/* For level 1 and 2, bits[23:16] contain the ratio */
1442 		if (tdp_ctrl & 0x03)
1443 			tdp_ratio >>= 16;
1444 
1445 		tdp_ratio &= 0xff; /* ratios are only 8 bits long */
1446 		pr_debug("tdp_ratio %x\n", (int)tdp_ratio);
1447 
1448 		return (int)tdp_ratio;
1449 	}
1450 
1451 	return -ENXIO;
1452 }
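
/*
 * Example (hypothetical values): with one of the TDP-level bits set in
 * plat_info and MSR_CONFIG_TDP_CONTROL reporting level 1, the ratio is
 * read from MSR_CONFIG_TDP_NOMINAL + 1 and taken from bits 23:16 of that
 * MSR, then masked down to 8 bits.
 */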
1453 
1454 static int core_get_max_pstate(void)
1455 {
1456 	u64 tar;
1457 	u64 plat_info;
1458 	int max_pstate;
1459 	int tdp_ratio;
1460 	int err;
1461 
1462 	rdmsrl(MSR_PLATFORM_INFO, plat_info);
1463 	max_pstate = (plat_info >> 8) & 0xFF;
1464 
1465 	tdp_ratio = core_get_tdp_ratio(plat_info);
1466 	if (tdp_ratio <= 0)
1467 		return max_pstate;
1468 
1469 	if (hwp_active) {
1470 		/* Turbo activation ratio is not used on HWP platforms */
1471 		return tdp_ratio;
1472 	}
1473 
1474 	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
1475 	if (!err) {
1476 		int tar_levels;
1477 
1478 		/* Do some sanity checking for safety */
1479 		tar_levels = tar & 0xff;
1480 		if (tdp_ratio - 1 == tar_levels) {
1481 			max_pstate = tar_levels;
1482 			pr_debug("max_pstate=TAC %x\n", max_pstate);
1483 		}
1484 	}
1485 
1486 	return max_pstate;
1487 }
1488 
1489 static int core_get_turbo_pstate(void)
1490 {
1491 	u64 value;
1492 	int nont, ret;
1493 
1494 	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
1495 	nont = core_get_max_pstate();
1496 	ret = (value) & 255;
1497 	if (ret <= nont)
1498 		ret = nont;
1499 	return ret;
1500 }
1501 
1502 static inline int core_get_scaling(void)
1503 {
1504 	return 100000;
1505 }
1506 
1507 static u64 core_get_val(struct cpudata *cpudata, int pstate)
1508 {
1509 	u64 val;
1510 
1511 	val = (u64)pstate << 8;
1512 	if (global.no_turbo && !global.turbo_disabled)
1513 		val |= (u64)1 << 32;
1514 
1515 	return val;
1516 }
1517 
1518 static int knl_get_aperf_mperf_shift(void)
1519 {
1520 	return 10;
1521 }
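
/*
 * The shift of 10 above reflects that on Knights Landing MPERF is assumed
 * to advance once per 1024 TSC cycles, so the MPERF delta is scaled by
 * 2^10 before being compared with the TSC delta when computing CPU busy
 * (see get_target_pstate()).
 */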
1522 
1523 static int knl_get_turbo_pstate(void)
1524 {
1525 	u64 value;
1526 	int nont, ret;
1527 
1528 	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
1529 	nont = core_get_max_pstate();
1530 	ret = (((value) >> 8) & 0xFF);
1531 	if (ret <= nont)
1532 		ret = nont;
1533 	return ret;
1534 }
1535 
1536 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
1537 {
1538 	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
1539 	cpu->pstate.current_pstate = pstate;
1540 	/*
1541 	 * Generally, there is no guarantee that this code will always run on
1542 	 * the CPU being updated, so force the register update to run on the
1543 	 * right CPU.
1544 	 */
1545 	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
1546 		      pstate_funcs.get_val(cpu, pstate));
1547 }
1548 
1549 static void intel_pstate_set_min_pstate(struct cpudata *cpu)
1550 {
1551 	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
1552 }
1553 
1554 static void intel_pstate_max_within_limits(struct cpudata *cpu)
1555 {
1556 	int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
1557 
1558 	update_turbo_state();
1559 	intel_pstate_set_pstate(cpu, pstate);
1560 }
1561 
1562 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
1563 {
1564 	cpu->pstate.min_pstate = pstate_funcs.get_min();
1565 	cpu->pstate.max_pstate = pstate_funcs.get_max();
1566 	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
1567 	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
1568 	cpu->pstate.scaling = pstate_funcs.get_scaling();
1569 	cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
1570 
1571 	if (hwp_active && !hwp_mode_bdw) {
1572 		unsigned int phy_max, current_max;
1573 
1574 		intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
1575 		cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
1576 	} else {
1577 		cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
1578 	}
1579 
1580 	if (pstate_funcs.get_aperf_mperf_shift)
1581 		cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
1582 
1583 	if (pstate_funcs.get_vid)
1584 		pstate_funcs.get_vid(cpu);
1585 
1586 	intel_pstate_set_min_pstate(cpu);
1587 }
1588 
/*
 * A long hold time keeps the high performance limits in place for a long
 * time, which negatively impacts performance/watt for some workloads, like
 * SPECpower.  3ms is based on experiments with some workloads.
 */
1595 static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;
1596 
1597 static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
1598 {
1599 	u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
1600 	u32 max_limit = (hwp_req & 0xff00) >> 8;
1601 	u32 min_limit = (hwp_req & 0xff);
1602 	u32 boost_level1;
1603 
1604 	/*
1605 	 * Cases to consider (User changes via sysfs or boot time):
1606 	 * If, P0 (Turbo max) = P1 (Guaranteed max) = min:
1607 	 *	No boost, return.
1608 	 * If, P0 (Turbo max) > P1 (Guaranteed max) = min:
1609 	 *     Should result in one level boost only for P0.
1610 	 * If, P0 (Turbo max) = P1 (Guaranteed max) > min:
1611 	 *     Should result in two level boost:
1612 	 *         (min + p1)/2 and P1.
1613 	 * If, P0 (Turbo max) > P1 (Guaranteed max) > min:
1614 	 *     Should result in three level boost:
1615 	 *        (min + p1)/2, P1 and P0.
1616 	 */
1617 
1618 	/* If max and min are equal or already at max, nothing to boost */
1619 	if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit)
1620 		return;
1621 
1622 	if (!cpu->hwp_boost_min)
1623 		cpu->hwp_boost_min = min_limit;
1624 
	/* Boost level 1 is halfway between the min and the guaranteed ratio. */
1626 	boost_level1 = (HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) + min_limit) >> 1;
1627 
1628 	if (cpu->hwp_boost_min < boost_level1)
1629 		cpu->hwp_boost_min = boost_level1;
1630 	else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
1631 		cpu->hwp_boost_min = HWP_GUARANTEED_PERF(cpu->hwp_cap_cached);
1632 	else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) &&
1633 		 max_limit != HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
1634 		cpu->hwp_boost_min = max_limit;
1635 	else
1636 		return;
1637 
1638 	hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
1639 	wrmsrl(MSR_HWP_REQUEST, hwp_req);
1640 	cpu->last_update = cpu->sample.time;
1641 }
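
/*
 * Worked example of the ladder above (hypothetical limits): with min == 10,
 * guaranteed (P1) == 20 and max (P0) == 25, successive boosts move
 * hwp_boost_min from 10 to (10 + 20) / 2 == 15, then to 20, then to 25.
 */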
1642 
1643 static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
1644 {
1645 	if (cpu->hwp_boost_min) {
1646 		bool expired;
1647 
1648 		/* Check if we are idle for hold time to boost down */
1649 		expired = time_after64(cpu->sample.time, cpu->last_update +
1650 				       hwp_boost_hold_time_ns);
1651 		if (expired) {
1652 			wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
1653 			cpu->hwp_boost_min = 0;
1654 		}
1655 	}
1656 	cpu->last_update = cpu->sample.time;
1657 }
1658 
1659 static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu,
1660 						      u64 time)
1661 {
1662 	cpu->sample.time = time;
1663 
1664 	if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) {
1665 		bool do_io = false;
1666 
1667 		cpu->sched_flags = 0;
		/*
		 * Set the iowait_boost flag and update the time.  Since the
		 * IOWAIT flag is set all the time, we cannot conclude from a
		 * single occurrence that IO-bound activity is scheduled on
		 * this CPU.  Only if at least two of them are received within
		 * two consecutive ticks do we treat it as a boost candidate.
		 */
1675 		if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC))
1676 			do_io = true;
1677 
1678 		cpu->last_io_update = time;
1679 
1680 		if (do_io)
1681 			intel_pstate_hwp_boost_up(cpu);
1682 
1683 	} else {
1684 		intel_pstate_hwp_boost_down(cpu);
1685 	}
1686 }
1687 
1688 static inline void intel_pstate_update_util_hwp(struct update_util_data *data,
1689 						u64 time, unsigned int flags)
1690 {
1691 	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
1692 
1693 	cpu->sched_flags |= flags;
1694 
1695 	if (smp_processor_id() == cpu->cpu)
1696 		intel_pstate_update_util_hwp_local(cpu, time);
1697 }
1698 
1699 static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
1700 {
1701 	struct sample *sample = &cpu->sample;
1702 
1703 	sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
1704 }
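
/*
 * Illustrative numbers: if the APERF delta is 1600000 and the MPERF delta
 * is 2000000 over the sample period, core_avg_perf becomes 0.8 in the
 * extended fixed-point format, i.e. the CPU ran at 80% of its maximum
 * non-turbo frequency on average while it was not idle.
 */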
1705 
1706 static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
1707 {
1708 	u64 aperf, mperf;
1709 	unsigned long flags;
1710 	u64 tsc;
1711 
1712 	local_irq_save(flags);
1713 	rdmsrl(MSR_IA32_APERF, aperf);
1714 	rdmsrl(MSR_IA32_MPERF, mperf);
1715 	tsc = rdtsc();
1716 	if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
1717 		local_irq_restore(flags);
1718 		return false;
1719 	}
1720 	local_irq_restore(flags);
1721 
1722 	cpu->last_sample_time = cpu->sample.time;
1723 	cpu->sample.time = time;
1724 	cpu->sample.aperf = aperf;
1725 	cpu->sample.mperf = mperf;
1726 	cpu->sample.tsc =  tsc;
1727 	cpu->sample.aperf -= cpu->prev_aperf;
1728 	cpu->sample.mperf -= cpu->prev_mperf;
1729 	cpu->sample.tsc -= cpu->prev_tsc;
1730 
1731 	cpu->prev_aperf = aperf;
1732 	cpu->prev_mperf = mperf;
1733 	cpu->prev_tsc = tsc;
1734 	/*
1735 	 * First time this function is invoked in a given cycle, all of the
1736 	 * previous sample data fields are equal to zero or stale and they must
1737 	 * be populated with meaningful numbers for things to work, so assume
1738 	 * that sample.time will always be reset before setting the utilization
1739 	 * update hook and make the caller skip the sample then.
1740 	 */
1741 	if (cpu->last_sample_time) {
1742 		intel_pstate_calc_avg_perf(cpu);
1743 		return true;
1744 	}
1745 	return false;
1746 }
1747 
1748 static inline int32_t get_avg_frequency(struct cpudata *cpu)
1749 {
1750 	return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
1751 }
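
/*
 * Continuing the example above: with core_avg_perf == 0.8 and
 * cpu_khz == 3000000 (a 3 GHz base clock), get_avg_frequency() reports
 * roughly 2400000 kHz, i.e. 2.4 GHz.
 */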
1752 
1753 static inline int32_t get_avg_pstate(struct cpudata *cpu)
1754 {
1755 	return mul_ext_fp(cpu->pstate.max_pstate_physical,
1756 			  cpu->sample.core_avg_perf);
1757 }
1758 
1759 static inline int32_t get_target_pstate(struct cpudata *cpu)
1760 {
1761 	struct sample *sample = &cpu->sample;
1762 	int32_t busy_frac;
1763 	int target, avg_pstate;
1764 
1765 	busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
1766 			   sample->tsc);
1767 
1768 	if (busy_frac < cpu->iowait_boost)
1769 		busy_frac = cpu->iowait_boost;
1770 
1771 	sample->busy_scaled = busy_frac * 100;
1772 
1773 	target = global.no_turbo || global.turbo_disabled ?
1774 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
1775 	target += target >> 2;
1776 	target = mul_fp(target, busy_frac);
1777 	if (target < cpu->pstate.min_pstate)
1778 		target = cpu->pstate.min_pstate;
1779 
1780 	/*
1781 	 * If the average P-state during the previous cycle was higher than the
1782 	 * current target, add 50% of the difference to the target to reduce
1783 	 * possible performance oscillations and offset possible performance
1784 	 * loss related to moving the workload from one CPU to another within
1785 	 * a package/module.
1786 	 */
1787 	avg_pstate = get_avg_pstate(cpu);
1788 	if (avg_pstate > target)
1789 		target += (avg_pstate - target) >> 1;
1790 
1791 	return target;
1792 }
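
/*
 * Worked example for get_target_pstate() (illustrative values): with
 * turbo enabled, a turbo P-state of 40 and busy_frac == 0.5, the code
 * above computes mul_fp(40 + 40/4, 0.5) == 25; the result is then
 * clamped by intel_pstate_prepare_request().
 */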
1793 
1794 static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
1795 {
1796 	int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
1797 	int max_pstate = max(min_pstate, cpu->max_perf_ratio);
1798 
1799 	return clamp_t(int, pstate, min_pstate, max_pstate);
1800 }
1801 
1802 static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
1803 {
1804 	if (pstate == cpu->pstate.current_pstate)
1805 		return;
1806 
1807 	cpu->pstate.current_pstate = pstate;
1808 	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
1809 }
1810 
1811 static void intel_pstate_adjust_pstate(struct cpudata *cpu)
1812 {
1813 	int from = cpu->pstate.current_pstate;
1814 	struct sample *sample;
1815 	int target_pstate;
1816 
1817 	update_turbo_state();
1818 
1819 	target_pstate = get_target_pstate(cpu);
1820 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
1821 	trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
1822 	intel_pstate_update_pstate(cpu, target_pstate);
1823 
1824 	sample = &cpu->sample;
1825 	trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
1826 		fp_toint(sample->busy_scaled),
1827 		from,
1828 		cpu->pstate.current_pstate,
1829 		sample->mperf,
1830 		sample->aperf,
1831 		sample->tsc,
1832 		get_avg_frequency(cpu),
1833 		fp_toint(cpu->iowait_boost * 100));
1834 }
1835 
1836 static void intel_pstate_update_util(struct update_util_data *data, u64 time,
1837 				     unsigned int flags)
1838 {
1839 	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
1840 	u64 delta_ns;
1841 
1842 	/* Don't allow remote callbacks */
1843 	if (smp_processor_id() != cpu->cpu)
1844 		return;
1845 
1846 	delta_ns = time - cpu->last_update;
1847 	if (flags & SCHED_CPUFREQ_IOWAIT) {
1848 		/* Start over if the CPU may have been idle. */
1849 		if (delta_ns > TICK_NSEC) {
1850 			cpu->iowait_boost = ONE_EIGHTH_FP;
1851 		} else if (cpu->iowait_boost >= ONE_EIGHTH_FP) {
1852 			cpu->iowait_boost <<= 1;
1853 			if (cpu->iowait_boost > int_tofp(1))
1854 				cpu->iowait_boost = int_tofp(1);
1855 		} else {
1856 			cpu->iowait_boost = ONE_EIGHTH_FP;
1857 		}
1858 	} else if (cpu->iowait_boost) {
1859 		/* Clear iowait_boost if the CPU may have been idle. */
1860 		if (delta_ns > TICK_NSEC)
1861 			cpu->iowait_boost = 0;
1862 		else
1863 			cpu->iowait_boost >>= 1;
1864 	}
1865 	cpu->last_update = time;
1866 	delta_ns = time - cpu->sample.time;
1867 	if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
1868 		return;
1869 
1870 	if (intel_pstate_sample(cpu, time))
1871 		intel_pstate_adjust_pstate(cpu);
1872 }
1873 
1874 static struct pstate_funcs core_funcs = {
1875 	.get_max = core_get_max_pstate,
1876 	.get_max_physical = core_get_max_pstate_physical,
1877 	.get_min = core_get_min_pstate,
1878 	.get_turbo = core_get_turbo_pstate,
1879 	.get_scaling = core_get_scaling,
1880 	.get_val = core_get_val,
1881 };
1882 
1883 static const struct pstate_funcs silvermont_funcs = {
1884 	.get_max = atom_get_max_pstate,
1885 	.get_max_physical = atom_get_max_pstate,
1886 	.get_min = atom_get_min_pstate,
1887 	.get_turbo = atom_get_turbo_pstate,
1888 	.get_val = atom_get_val,
1889 	.get_scaling = silvermont_get_scaling,
1890 	.get_vid = atom_get_vid,
1891 };
1892 
1893 static const struct pstate_funcs airmont_funcs = {
1894 	.get_max = atom_get_max_pstate,
1895 	.get_max_physical = atom_get_max_pstate,
1896 	.get_min = atom_get_min_pstate,
1897 	.get_turbo = atom_get_turbo_pstate,
1898 	.get_val = atom_get_val,
1899 	.get_scaling = airmont_get_scaling,
1900 	.get_vid = atom_get_vid,
1901 };
1902 
1903 static const struct pstate_funcs knl_funcs = {
1904 	.get_max = core_get_max_pstate,
1905 	.get_max_physical = core_get_max_pstate_physical,
1906 	.get_min = core_get_min_pstate,
1907 	.get_turbo = knl_get_turbo_pstate,
1908 	.get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
1909 	.get_scaling = core_get_scaling,
1910 	.get_val = core_get_val,
1911 };
1912 
1913 #define ICPU(model, policy) \
1914 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
1915 			(unsigned long)&policy }
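
/*
 * For reference, ICPU(INTEL_FAM6_SKYLAKE, core_funcs) below expands to an
 * x86_cpu_id entry matching family 6 / model INTEL_FAM6_SKYLAKE CPUs that
 * have the APERFMPERF feature, with driver_data pointing at core_funcs.
 */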
1916 
1917 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
1918 	ICPU(INTEL_FAM6_SANDYBRIDGE,		core_funcs),
1919 	ICPU(INTEL_FAM6_SANDYBRIDGE_X,		core_funcs),
1920 	ICPU(INTEL_FAM6_ATOM_SILVERMONT,	silvermont_funcs),
1921 	ICPU(INTEL_FAM6_IVYBRIDGE,		core_funcs),
1922 	ICPU(INTEL_FAM6_HASWELL,		core_funcs),
1923 	ICPU(INTEL_FAM6_BROADWELL,		core_funcs),
1924 	ICPU(INTEL_FAM6_IVYBRIDGE_X,		core_funcs),
1925 	ICPU(INTEL_FAM6_HASWELL_X,		core_funcs),
1926 	ICPU(INTEL_FAM6_HASWELL_L,		core_funcs),
1927 	ICPU(INTEL_FAM6_HASWELL_G,		core_funcs),
1928 	ICPU(INTEL_FAM6_BROADWELL_G,		core_funcs),
1929 	ICPU(INTEL_FAM6_ATOM_AIRMONT,		airmont_funcs),
1930 	ICPU(INTEL_FAM6_SKYLAKE_L,		core_funcs),
1931 	ICPU(INTEL_FAM6_BROADWELL_X,		core_funcs),
1932 	ICPU(INTEL_FAM6_SKYLAKE,		core_funcs),
1933 	ICPU(INTEL_FAM6_BROADWELL_D,		core_funcs),
1934 	ICPU(INTEL_FAM6_XEON_PHI_KNL,		knl_funcs),
1935 	ICPU(INTEL_FAM6_XEON_PHI_KNM,		knl_funcs),
1936 	ICPU(INTEL_FAM6_ATOM_GOLDMONT,		core_funcs),
1937 	ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS,     core_funcs),
1938 	ICPU(INTEL_FAM6_SKYLAKE_X,		core_funcs),
1939 	{}
1940 };
1941 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
1942 
1943 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
1944 	ICPU(INTEL_FAM6_BROADWELL_D, core_funcs),
1945 	ICPU(INTEL_FAM6_BROADWELL_X, core_funcs),
1946 	ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
1947 	{}
1948 };
1949 
1950 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
1951 	ICPU(INTEL_FAM6_KABYLAKE, core_funcs),
1952 	{}
1953 };
1954 
1955 static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = {
1956 	ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
1957 	ICPU(INTEL_FAM6_SKYLAKE, core_funcs),
1958 	{}
1959 };
1960 
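/*
 * Allocate (on first use) and set up the per-CPU data; the cpudata structure
 * is kept across CPU offline/online cycles.  With HWP active this also
 * disables the energy-efficiency optimization for CPUs matched by
 * intel_pstate_cpu_ee_disable_ids, enables HWP itself and, for CPUs matched
 * by intel_pstate_hwp_boost_ids on server PM profiles, turns on hwp_boost.
 */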
1961 static int intel_pstate_init_cpu(unsigned int cpunum)
1962 {
1963 	struct cpudata *cpu;
1964 
1965 	cpu = all_cpu_data[cpunum];
1966 
1967 	if (!cpu) {
1968 		cpu = kzalloc(sizeof(*cpu), GFP_KERNEL);
1969 		if (!cpu)
1970 			return -ENOMEM;
1971 
1972 		all_cpu_data[cpunum] = cpu;
1973 
1974 		cpu->epp_default = -EINVAL;
1975 		cpu->epp_powersave = -EINVAL;
1976 		cpu->epp_saved = -EINVAL;
1977 	}
1978 
1979 	cpu = all_cpu_data[cpunum];
1980 
1981 	cpu->cpu = cpunum;
1982 
1983 	if (hwp_active) {
1984 		const struct x86_cpu_id *id;
1985 
1986 		id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
1987 		if (id)
1988 			intel_pstate_disable_ee(cpunum);
1989 
1990 		intel_pstate_hwp_enable(cpu);
1991 
1992 		id = x86_match_cpu(intel_pstate_hwp_boost_ids);
1993 		if (id && intel_pstate_acpi_pm_profile_server())
1994 			hwp_boost = true;
1995 	}
1996 
1997 	intel_pstate_get_cpu_pstates(cpu);
1998 
1999 	pr_debug("controlling: cpu %d\n", cpunum);
2000 
2001 	return 0;
2002 }
2003 
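/*
 * Install the scheduler update hook for @cpu_num unless it is already in
 * place.  With HWP active the hook is only needed for dynamic boost, in
 * which case the HWP-specific callback is registered instead of the
 * P-state selection one.
 */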
2004 static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
2005 {
2006 	struct cpudata *cpu = all_cpu_data[cpu_num];
2007 
2008 	if (hwp_active && !hwp_boost)
2009 		return;
2010 
2011 	if (cpu->update_util_set)
2012 		return;
2013 
2014 	/* Prevent intel_pstate_update_util() from using stale data. */
2015 	cpu->sample.time = 0;
2016 	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
2017 				     (hwp_active ?
2018 				      intel_pstate_update_util_hwp :
2019 				      intel_pstate_update_util));
2020 	cpu->update_util_set = true;
2021 }
2022 
2023 static void intel_pstate_clear_update_util_hook(unsigned int cpu)
2024 {
2025 	struct cpudata *cpu_data = all_cpu_data[cpu];
2026 
2027 	if (!cpu_data->update_util_set)
2028 		return;
2029 
2030 	cpufreq_remove_update_util_hook(cpu);
2031 	cpu_data->update_util_set = false;
2032 	synchronize_rcu();
2033 }
2034 
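/* Highest frequency currently available: turbo if enabled, max otherwise. */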
2035 static int intel_pstate_get_max_freq(struct cpudata *cpu)
2036 {
2037 	return global.turbo_disabled || global.no_turbo ?
2038 			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
2039 }
2040 
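/*
 * Translate the policy min/max frequencies and the global percent limits
 * into the min_perf_ratio/max_perf_ratio bounds used by the driver.  With
 * per_cpu_limits only the policy limits apply; otherwise they are combined
 * with the global limits and clamped so that min_perf_ratio never exceeds
 * max_perf_ratio.
 */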
2041 static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
2042 					    struct cpudata *cpu)
2043 {
2044 	int max_freq = intel_pstate_get_max_freq(cpu);
2045 	int32_t max_policy_perf, min_policy_perf;
2046 	int max_state, turbo_max;
2047 
2048 	/*
2049 	 * HWP needs some special consideration, because on BDX the
2050 	 * HWP_REQUEST uses abstract values to represent performance
2051 	 * rather than pure ratios.
2052 	 */
2053 	if (hwp_active) {
2054 		intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
2055 	} else {
2056 		max_state = global.no_turbo || global.turbo_disabled ?
2057 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
2058 		turbo_max = cpu->pstate.turbo_pstate;
2059 	}
2060 
2061 	max_policy_perf = max_state * policy->max / max_freq;
2062 	if (policy->max == policy->min) {
2063 		min_policy_perf = max_policy_perf;
2064 	} else {
2065 		min_policy_perf = max_state * policy->min / max_freq;
2066 		min_policy_perf = clamp_t(int32_t, min_policy_perf,
2067 					  0, max_policy_perf);
2068 	}
2069 
2070 	pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n",
2071 		 policy->cpu, max_state,
2072 		 min_policy_perf, max_policy_perf);
2073 
2074 	/* Normalize user input to [min_perf, max_perf] */
2075 	if (per_cpu_limits) {
2076 		cpu->min_perf_ratio = min_policy_perf;
2077 		cpu->max_perf_ratio = max_policy_perf;
2078 	} else {
2079 		int32_t global_min, global_max;
2080 
2081 		/* Global limits are in percent of the maximum turbo P-state. */
2082 		global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
2083 		global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
2084 		global_min = clamp_t(int32_t, global_min, 0, global_max);
2085 
2086 		pr_debug("cpu:%d global_min:%d global_max:%d\n", policy->cpu,
2087 			 global_min, global_max);
2088 
2089 		cpu->min_perf_ratio = max(min_policy_perf, global_min);
2090 		cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
2091 		cpu->max_perf_ratio = min(max_policy_perf, global_max);
2092 		cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);
2093 
2094 		/* Make sure min_perf <= max_perf */
2095 		cpu->min_perf_ratio = min(cpu->min_perf_ratio,
2096 					  cpu->max_perf_ratio);
2098 	}
2099 	pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", policy->cpu,
2100 		 cpu->max_perf_ratio,
2101 		 cpu->min_perf_ratio);
2102 }
2103 
2104 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
2105 {
2106 	struct cpudata *cpu;
2107 
2108 	if (!policy->cpuinfo.max_freq)
2109 		return -ENODEV;
2110 
2111 	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
2112 		 policy->cpuinfo.max_freq, policy->max);
2113 
2114 	cpu = all_cpu_data[policy->cpu];
2115 	cpu->policy = policy->policy;
2116 
2117 	mutex_lock(&intel_pstate_limits_lock);
2118 
2119 	intel_pstate_update_perf_limits(policy, cpu);
2120 
2121 	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
2122 		/*
2123 		 * NOHZ_FULL CPUs need this as the governor callback may not
2124 		 * be invoked on them.
2125 		 */
2126 		intel_pstate_clear_update_util_hook(policy->cpu);
2127 		intel_pstate_max_within_limits(cpu);
2128 	} else {
2129 		intel_pstate_set_update_util_hook(policy->cpu);
2130 	}
2131 
2132 	if (hwp_active) {
2133 		/*
2134 		 * If hwp_boost was enabled earlier and has since been
2135 		 * turned off dynamically, the update util hook needs to
2136 		 * be cleared here.
2137 		 */
2138 		if (!hwp_boost)
2139 			intel_pstate_clear_update_util_hook(policy->cpu);
2140 		intel_pstate_hwp_set(policy->cpu);
2141 	}
2142 
2143 	mutex_unlock(&intel_pstate_limits_lock);
2144 
2145 	return 0;
2146 }
2147 
2148 static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy,
2149 					 struct cpudata *cpu)
2150 {
2151 	if (!hwp_active &&
2152 	    cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
2153 	    policy->max < policy->cpuinfo.max_freq &&
2154 	    policy->max > cpu->pstate.max_freq) {
2155 		pr_debug("policy->max > max non turbo frequency\n");
2156 		policy->max = policy->cpuinfo.max_freq;
2157 	}
2158 }
2159 
2160 static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
2161 {
2162 	struct cpudata *cpu = all_cpu_data[policy->cpu];
2163 
2164 	update_turbo_state();
2165 	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
2166 				     intel_pstate_get_max_freq(cpu));
2167 
2168 	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
2169 	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
2170 		return -EINVAL;
2171 
2172 	intel_pstate_adjust_policy_max(policy, cpu);
2173 
2174 	return 0;
2175 }
2176 
2177 static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
2178 {
2179 	intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
2180 }
2181 
2182 static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
2183 {
2184 	pr_debug("CPU %d exiting\n", policy->cpu);
2185 
2186 	intel_pstate_clear_update_util_hook(policy->cpu);
2187 	if (hwp_active) {
2188 		intel_pstate_hwp_save_state(policy);
2189 		intel_pstate_hwp_force_min_perf(policy->cpu);
2190 	} else {
2191 		intel_cpufreq_stop_cpu(policy);
2192 	}
2193 }
2194 
2195 static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
2196 {
2197 	intel_pstate_exit_perf_limits(policy);
2198 
2199 	policy->fast_switch_possible = false;
2200 
2201 	return 0;
2202 }
2203 
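/*
 * Init common to the active and passive mode drivers: set up the per-CPU
 * data, derive the policy and cpuinfo frequency limits from the P-state
 * table (P-state times scaling factor, honouring turbo_disabled) and pull
 * in any ACPI performance limits.
 */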
2204 static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
2205 {
2206 	struct cpudata *cpu;
2207 	int rc;
2208 
2209 	rc = intel_pstate_init_cpu(policy->cpu);
2210 	if (rc)
2211 		return rc;
2212 
2213 	cpu = all_cpu_data[policy->cpu];
2214 
2215 	cpu->max_perf_ratio = 0xFF;
2216 	cpu->min_perf_ratio = 0;
2217 
2218 	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
2219 	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
2220 
2221 	/* cpuinfo and default policy values */
2222 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
2223 	update_turbo_state();
2224 	global.turbo_disabled_mf = global.turbo_disabled;
2225 	policy->cpuinfo.max_freq = global.turbo_disabled ?
2226 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
2227 	policy->cpuinfo.max_freq *= cpu->pstate.scaling;
2228 
2229 	if (hwp_active) {
2230 		unsigned int max_freq;
2231 
2232 		max_freq = global.turbo_disabled ?
2233 			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
2234 		if (max_freq < policy->cpuinfo.max_freq)
2235 			policy->cpuinfo.max_freq = max_freq;
2236 	}
2237 
2238 	intel_pstate_init_acpi_perf_limits(policy);
2239 
2240 	policy->fast_switch_possible = true;
2241 
2242 	return 0;
2243 }
2244 
2245 static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
2246 {
2247 	int ret = __intel_pstate_cpu_init(policy);
2248 
2249 	if (ret)
2250 		return ret;
2251 
2252 	if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE))
2253 		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
2254 	else
2255 		policy->policy = CPUFREQ_POLICY_POWERSAVE;
2256 
2257 	return 0;
2258 }
2259 
2260 static struct cpufreq_driver intel_pstate = {
2261 	.flags		= CPUFREQ_CONST_LOOPS,
2262 	.verify		= intel_pstate_verify_policy,
2263 	.setpolicy	= intel_pstate_set_policy,
2264 	.suspend	= intel_pstate_hwp_save_state,
2265 	.resume		= intel_pstate_resume,
2266 	.init		= intel_pstate_cpu_init,
2267 	.exit		= intel_pstate_cpu_exit,
2268 	.stop_cpu	= intel_pstate_stop_cpu,
2269 	.update_limits	= intel_pstate_update_limits,
2270 	.name		= "intel_pstate",
2271 };
2272 
2273 static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
2274 {
2275 	struct cpudata *cpu = all_cpu_data[policy->cpu];
2276 
2277 	update_turbo_state();
2278 	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
2279 				     intel_pstate_get_max_freq(cpu));
2280 
2281 	intel_pstate_adjust_policy_max(policy, cpu);
2282 
2283 	intel_pstate_update_perf_limits(policy, cpu);
2284 
2285 	return 0;
2286 }
2287 
2288 /* Use of trace in passive mode:
2289  *
2290  * In passive mode the trace core_busy field (also known as the
2291  * performance field, and labelled as such on the graphs; also known as
2292  * core_avg_perf) is not needed and so is re-assigned to indicate whether
2293  * the driver call was via the normal or fast switch path. Various graphs
2294  * output from the intel_pstate_tracer.py utility that include core_busy
2295  * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%,
2296  * so we use 10 to indicate the normal path through the driver, and
2297  * 90 to indicate the fast switch path through the driver.
2298  * The scaled_busy field is not used, and is set to 0.
2299  */
2300 
2301 #define	INTEL_PSTATE_TRACE_TARGET 10
2302 #define	INTEL_PSTATE_TRACE_FAST_SWITCH 90
2303 
2304 static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate)
2305 {
2306 	struct sample *sample;
2307 
2308 	if (!trace_pstate_sample_enabled())
2309 		return;
2310 
2311 	if (!intel_pstate_sample(cpu, ktime_get()))
2312 		return;
2313 
2314 	sample = &cpu->sample;
2315 	trace_pstate_sample(trace_type,
2316 		0,
2317 		old_pstate,
2318 		cpu->pstate.current_pstate,
2319 		sample->mperf,
2320 		sample->aperf,
2321 		sample->tsc,
2322 		get_avg_frequency(cpu),
2323 		fp_toint(cpu->iowait_boost * 100));
2324 }
2325 
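/*
 * Passive mode ->target() callback.  The requested frequency is converted
 * into a P-state according to the relation; for example, with
 * cpu->pstate.scaling == 100000 (kHz per P-state step) a 2450000 kHz
 * request maps to P-state 25 for CPUFREQ_RELATION_L (round up), 24 for
 * CPUFREQ_RELATION_H (round down) and 25 for the default closest rounding.
 * The result is clamped by intel_pstate_prepare_request() and written to
 * MSR_IA32_PERF_CTL directly.
 */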
2326 static int intel_cpufreq_target(struct cpufreq_policy *policy,
2327 				unsigned int target_freq,
2328 				unsigned int relation)
2329 {
2330 	struct cpudata *cpu = all_cpu_data[policy->cpu];
2331 	struct cpufreq_freqs freqs;
2332 	int target_pstate, old_pstate;
2333 
2334 	update_turbo_state();
2335 
2336 	freqs.old = policy->cur;
2337 	freqs.new = target_freq;
2338 
2339 	cpufreq_freq_transition_begin(policy, &freqs);
2340 	switch (relation) {
2341 	case CPUFREQ_RELATION_L:
2342 		target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
2343 		break;
2344 	case CPUFREQ_RELATION_H:
2345 		target_pstate = freqs.new / cpu->pstate.scaling;
2346 		break;
2347 	default:
2348 		target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
2349 		break;
2350 	}
2351 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
2352 	old_pstate = cpu->pstate.current_pstate;
2353 	if (target_pstate != cpu->pstate.current_pstate) {
2354 		cpu->pstate.current_pstate = target_pstate;
2355 		wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
2356 			      pstate_funcs.get_val(cpu, target_pstate));
2357 	}
2358 	freqs.new = target_pstate * cpu->pstate.scaling;
2359 	intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_TARGET, old_pstate);
2360 	cpufreq_freq_transition_end(policy, &freqs, false);
2361 
2362 	return 0;
2363 }
2364 
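/*
 * Passive mode fast switch path: no transition notifiers are invoked, the
 * target P-state is rounded up from the requested frequency, clamped and
 * programmed via intel_pstate_update_pstate().  The return value is the
 * frequency corresponding to the P-state actually requested.
 */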
2365 static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
2366 					      unsigned int target_freq)
2367 {
2368 	struct cpudata *cpu = all_cpu_data[policy->cpu];
2369 	int target_pstate, old_pstate;
2370 
2371 	update_turbo_state();
2372 
2373 	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
2374 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
2375 	old_pstate = cpu->pstate.current_pstate;
2376 	intel_pstate_update_pstate(cpu, target_pstate);
2377 	intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
2378 	return target_pstate * cpu->pstate.scaling;
2379 }
2380 
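/*
 * Passive mode policy init.  On top of the common init this registers a
 * pair of device PM QoS frequency requests (min and max) derived from the
 * global percent limits; they are stored in policy->driver_data and removed
 * again in intel_cpufreq_cpu_exit().
 */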
2381 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
2382 {
2383 	int max_state, turbo_max, min_freq, max_freq, ret;
2384 	struct dev_pm_qos_request *req;
2385 	struct cpudata *cpu;
2386 	struct device *dev;
2387 
2388 	dev = get_cpu_device(policy->cpu);
2389 	if (!dev)
2390 		return -ENODEV;
2391 
2392 	ret = __intel_pstate_cpu_init(policy);
2393 	if (ret)
2394 		return ret;
2395 
2396 	policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
2397 	policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
2398 	/* This reflects the intel_pstate_get_cpu_pstates() setting. */
2399 	policy->cur = policy->cpuinfo.min_freq;
2400 
2401 	req = kcalloc(2, sizeof(*req), GFP_KERNEL);
2402 	if (!req) {
2403 		ret = -ENOMEM;
2404 		goto pstate_exit;
2405 	}
2406 
2407 	cpu = all_cpu_data[policy->cpu];
2408 
2409 	if (hwp_active)
2410 		intel_pstate_get_hwp_max(policy->cpu, &turbo_max, &max_state);
2411 	else
2412 		turbo_max = cpu->pstate.turbo_pstate;
2413 
2414 	min_freq = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
2415 	min_freq *= cpu->pstate.scaling;
2416 	max_freq = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
2417 	max_freq *= cpu->pstate.scaling;
2418 
2419 	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_MIN_FREQUENCY,
2420 				     min_freq);
2421 	if (ret < 0) {
2422 		dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
2423 		goto free_req;
2424 	}
2425 
2426 	ret = dev_pm_qos_add_request(dev, req + 1, DEV_PM_QOS_MAX_FREQUENCY,
2427 				     max_freq);
2428 	if (ret < 0) {
2429 		dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
2430 		goto remove_min_req;
2431 	}
2432 
2433 	policy->driver_data = req;
2434 
2435 	return 0;
2436 
2437 remove_min_req:
2438 	dev_pm_qos_remove_request(req);
2439 free_req:
2440 	kfree(req);
2441 pstate_exit:
2442 	intel_pstate_exit_perf_limits(policy);
2443 
2444 	return ret;
2445 }
2446 
2447 static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
2448 {
2449 	struct dev_pm_qos_request *req;
2450 
2451 	req = policy->driver_data;
2452 
2453 	dev_pm_qos_remove_request(req + 1);
2454 	dev_pm_qos_remove_request(req);
2455 	kfree(req);
2456 
2457 	return intel_pstate_cpu_exit(policy);
2458 }
2459 
2460 static struct cpufreq_driver intel_cpufreq = {
2461 	.flags		= CPUFREQ_CONST_LOOPS,
2462 	.verify		= intel_cpufreq_verify_policy,
2463 	.target		= intel_cpufreq_target,
2464 	.fast_switch	= intel_cpufreq_fast_switch,
2465 	.init		= intel_cpufreq_cpu_init,
2466 	.exit		= intel_cpufreq_cpu_exit,
2467 	.stop_cpu	= intel_cpufreq_stop_cpu,
2468 	.update_limits	= intel_pstate_update_limits,
2469 	.name		= "intel_cpufreq",
2470 };
2471 
2472 static struct cpufreq_driver *default_driver = &intel_pstate;
2473 
2474 static void intel_pstate_driver_cleanup(void)
2475 {
2476 	unsigned int cpu;
2477 
2478 	get_online_cpus();
2479 	for_each_online_cpu(cpu) {
2480 		if (all_cpu_data[cpu]) {
2481 			if (intel_pstate_driver == &intel_pstate)
2482 				intel_pstate_clear_update_util_hook(cpu);
2483 
2484 			kfree(all_cpu_data[cpu]);
2485 			all_cpu_data[cpu] = NULL;
2486 		}
2487 	}
2488 	put_online_cpus();
2489 	intel_pstate_driver = NULL;
2490 }
2491 
2492 static int intel_pstate_register_driver(struct cpufreq_driver *driver)
2493 {
2494 	int ret;
2495 
2496 	memset(&global, 0, sizeof(global));
2497 	global.max_perf_pct = 100;
2498 
2499 	intel_pstate_driver = driver;
2500 	ret = cpufreq_register_driver(intel_pstate_driver);
2501 	if (ret) {
2502 		intel_pstate_driver_cleanup();
2503 		return ret;
2504 	}
2505 
2506 	global.min_perf_pct = min_perf_pct_min();
2507 
2508 	return 0;
2509 }
2510 
2511 static int intel_pstate_unregister_driver(void)
2512 {
2513 	if (hwp_active)
2514 		return -EBUSY;
2515 
2516 	cpufreq_unregister_driver(intel_pstate_driver);
2517 	intel_pstate_driver_cleanup();
2518 
2519 	return 0;
2520 }
2521 
2522 static ssize_t intel_pstate_show_status(char *buf)
2523 {
2524 	if (!intel_pstate_driver)
2525 		return sprintf(buf, "off\n");
2526 
2527 	return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
2528 					"active" : "passive");
2529 }
2530 
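/*
 * Handle writes to the sysfs "status" attribute.  Accepted values are
 * "off", "active" and "passive"; switching drivers unregisters the current
 * one first, which fails with -EBUSY while HWP is active.  For example:
 *
 *	# echo passive > /sys/devices/system/cpu/intel_pstate/status
 */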
2531 static int intel_pstate_update_status(const char *buf, size_t size)
2532 {
2533 	int ret;
2534 
2535 	if (size == 3 && !strncmp(buf, "off", size))
2536 		return intel_pstate_driver ?
2537 			intel_pstate_unregister_driver() : -EINVAL;
2538 
2539 	if (size == 6 && !strncmp(buf, "active", size)) {
2540 		if (intel_pstate_driver) {
2541 			if (intel_pstate_driver == &intel_pstate)
2542 				return 0;
2543 
2544 			ret = intel_pstate_unregister_driver();
2545 			if (ret)
2546 				return ret;
2547 		}
2548 
2549 		return intel_pstate_register_driver(&intel_pstate);
2550 	}
2551 
2552 	if (size == 7 && !strncmp(buf, "passive", size)) {
2553 		if (intel_pstate_driver) {
2554 			if (intel_pstate_driver == &intel_cpufreq)
2555 				return 0;
2556 
2557 			ret = intel_pstate_unregister_driver();
2558 			if (ret)
2559 				return ret;
2560 		}
2561 
2562 		return intel_pstate_register_driver(&intel_cpufreq);
2563 	}
2564 
2565 	return -EINVAL;
2566 }
2567 
2568 static int no_load __initdata;
2569 static int no_hwp __initdata;
2570 static int hwp_only __initdata;
2571 static unsigned int force_load __initdata;
2572 
2573 static int __init intel_pstate_msrs_not_valid(void)
2574 {
2575 	if (!pstate_funcs.get_max() ||
2576 	    !pstate_funcs.get_min() ||
2577 	    !pstate_funcs.get_turbo())
2578 		return -ENODEV;
2579 
2580 	return 0;
2581 }
2582 
2583 static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
2584 {
2585 	pstate_funcs.get_max   = funcs->get_max;
2586 	pstate_funcs.get_max_physical = funcs->get_max_physical;
2587 	pstate_funcs.get_min   = funcs->get_min;
2588 	pstate_funcs.get_turbo = funcs->get_turbo;
2589 	pstate_funcs.get_scaling = funcs->get_scaling;
2590 	pstate_funcs.get_val   = funcs->get_val;
2591 	pstate_funcs.get_vid   = funcs->get_vid;
2592 	pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
2593 }
2594 
2595 #ifdef CONFIG_ACPI
2596 
2597 static bool __init intel_pstate_no_acpi_pss(void)
2598 {
2599 	int i;
2600 
2601 	for_each_possible_cpu(i) {
2602 		acpi_status status;
2603 		union acpi_object *pss;
2604 		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
2605 		struct acpi_processor *pr = per_cpu(processors, i);
2606 
2607 		if (!pr)
2608 			continue;
2609 
2610 		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
2611 		if (ACPI_FAILURE(status))
2612 			continue;
2613 
2614 		pss = buffer.pointer;
2615 		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
2616 			kfree(pss);
2617 			return false;
2618 		}
2619 
2620 		kfree(pss);
2621 	}
2622 
2623 	pr_debug("ACPI _PSS not found\n");
2624 	return true;
2625 }
2626 
2627 static bool __init intel_pstate_no_acpi_pcch(void)
2628 {
2629 	acpi_status status;
2630 	acpi_handle handle;
2631 
2632 	status = acpi_get_handle(NULL, "\\_SB", &handle);
2633 	if (ACPI_FAILURE(status))
2634 		goto not_found;
2635 
2636 	if (acpi_has_method(handle, "PCCH"))
2637 		return false;
2638 
2639 not_found:
2640 	pr_debug("ACPI PCCH not found\n");
2641 	return true;
2642 }
2643 
2644 static bool __init intel_pstate_has_acpi_ppc(void)
2645 {
2646 	int i;
2647 
2648 	for_each_possible_cpu(i) {
2649 		struct acpi_processor *pr = per_cpu(processors, i);
2650 
2651 		if (!pr)
2652 			continue;
2653 		if (acpi_has_method(pr->handle, "_PPC"))
2654 			return true;
2655 	}
2656 	pr_debug("ACPI _PPC not found\n");
2657 	return false;
2658 }
2659 
2660 enum {
2661 	PSS,
2662 	PPC,
2663 };
2664 
2665 /* Vendor-specific platforms that have their own power management modes */
2666 static struct acpi_platform_list plat_info[] __initdata = {
2667 	{"HP    ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, 0, PSS},
2668 	{"ORACLE", "X4-2    ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2669 	{"ORACLE", "X4-2L   ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2670 	{"ORACLE", "X4-2B   ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2671 	{"ORACLE", "X3-2    ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2672 	{"ORACLE", "X3-2L   ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2673 	{"ORACLE", "X3-2B   ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2674 	{"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2675 	{"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2676 	{"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2677 	{"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2678 	{"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2679 	{"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2680 	{"ORACLE", "X6-2    ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2681 	{"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2682 	{ } /* End */
2683 };
2684 
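/*
 * Return true when P-state control should be left to the platform firmware:
 * either the CPU is in the out-of-band list and bit 8 of MSR_MISC_PWR_MGMT
 * is set, or the platform matches an entry in plat_info above and the ACPI
 * checks for that entry type (PSS or PPC) do not clear it for native use.
 */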
2685 static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
2686 {
2687 	const struct x86_cpu_id *id;
2688 	u64 misc_pwr;
2689 	int idx;
2690 
2691 	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
2692 	if (id) {
2693 		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
2694 		if (misc_pwr & (1 << 8)) {
2695 			pr_debug("Bit 8 in the MISC_PWR_MGMT MSR set\n");
2696 			return true;
2697 		}
2698 	}
2699 
2700 	idx = acpi_match_platform_list(plat_info);
2701 	if (idx < 0)
2702 		return false;
2703 
2704 	switch (plat_info[idx].data) {
2705 	case PSS:
2706 		if (!intel_pstate_no_acpi_pss())
2707 			return false;
2708 
2709 		return intel_pstate_no_acpi_pcch();
2710 	case PPC:
2711 		return intel_pstate_has_acpi_ppc() && !force_load;
2712 	}
2713 
2714 	return false;
2715 }
2716 
2717 static void intel_pstate_request_control_from_smm(void)
2718 {
2719 	/*
2720 	 * It may be unsafe to request P-states control from SMM if _PPC support
2721 	 * has not been enabled.
2722 	 */
2723 	if (acpi_ppc)
2724 		acpi_processor_pstate_control();
2725 }
2726 #else /* CONFIG_ACPI not enabled */
2727 static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
2728 static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
2729 static inline void intel_pstate_request_control_from_smm(void) {}
2730 #endif /* CONFIG_ACPI */
2731 
2732 #define INTEL_PSTATE_HWP_BROADWELL	0x01
2733 
2734 #define ICPU_HWP(model, hwp_mode) \
2735 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode }
2736 
2737 static const struct x86_cpu_id hwp_support_ids[] __initconst = {
2738 	ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
2739 	ICPU_HWP(INTEL_FAM6_BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL),
2740 	ICPU_HWP(X86_MODEL_ANY, 0),
2741 	{}
2742 };
2743 
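/*
 * Driver entry point.  Prefer HWP when the CPU advertises it (unless
 * disabled on the command line), otherwise fall back to the model table;
 * bail out if the MSRs look invalid or the platform firmware owns P-state
 * control, then allocate all_cpu_data and register the default driver.
 */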
2744 static int __init intel_pstate_init(void)
2745 {
2746 	const struct x86_cpu_id *id;
2747 	int rc;
2748 
2749 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
2750 		return -ENODEV;
2751 
2752 	if (no_load)
2753 		return -ENODEV;
2754 
2755 	id = x86_match_cpu(hwp_support_ids);
2756 	if (id) {
2757 		copy_cpu_funcs(&core_funcs);
2758 		if (!no_hwp) {
2759 			hwp_active++;
2760 			hwp_mode_bdw = id->driver_data;
2761 			intel_pstate.attr = hwp_cpufreq_attrs;
2762 			goto hwp_cpu_matched;
2763 		}
2764 	} else {
2765 		id = x86_match_cpu(intel_pstate_cpu_ids);
2766 		if (!id) {
2767 			pr_info("CPU model not supported\n");
2768 			return -ENODEV;
2769 		}
2770 
2771 		copy_cpu_funcs((struct pstate_funcs *)id->driver_data);
2772 	}
2773 
2774 	if (intel_pstate_msrs_not_valid()) {
2775 		pr_info("Invalid MSRs\n");
2776 		return -ENODEV;
2777 	}
2778 
2779 hwp_cpu_matched:
2780 	/*
2781 	 * The Intel pstate driver will be ignored if the platform
2782 	 * firmware has its own power management modes.
2783 	 */
2784 	if (intel_pstate_platform_pwr_mgmt_exists()) {
2785 		pr_info("P-states controlled by the platform\n");
2786 		return -ENODEV;
2787 	}
2788 
2789 	if (!hwp_active && hwp_only)
2790 		return -ENOTSUPP;
2791 
2792 	pr_info("Intel P-state driver initializing\n");
2793 
2794 	all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus()));
2795 	if (!all_cpu_data)
2796 		return -ENOMEM;
2797 
2798 	intel_pstate_request_control_from_smm();
2799 
2800 	intel_pstate_sysfs_expose_params();
2801 
2802 	mutex_lock(&intel_pstate_driver_lock);
2803 	rc = intel_pstate_register_driver(default_driver);
2804 	mutex_unlock(&intel_pstate_driver_lock);
2805 	if (rc)
2806 		return rc;
2807 
2808 	if (hwp_active)
2809 		pr_info("HWP enabled\n");
2810 
2811 	return 0;
2812 }
2813 device_initcall(intel_pstate_init);
2814 
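/*
 * Early "intel_pstate=" command line handling.  The options recognized
 * below are "disable", "passive", "no_hwp", "force", "hwp_only",
 * "per_cpu_perf_limits" and (with ACPI support) "support_acpi_ppc".
 */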
2815 static int __init intel_pstate_setup(char *str)
2816 {
2817 	if (!str)
2818 		return -EINVAL;
2819 
2820 	if (!strcmp(str, "disable")) {
2821 		no_load = 1;
2822 	} else if (!strcmp(str, "passive")) {
2823 		pr_info("Passive mode enabled\n");
2824 		default_driver = &intel_cpufreq;
2825 		no_hwp = 1;
2826 	}
2827 	if (!strcmp(str, "no_hwp")) {
2828 		pr_info("HWP disabled\n");
2829 		no_hwp = 1;
2830 	}
2831 	if (!strcmp(str, "force"))
2832 		force_load = 1;
2833 	if (!strcmp(str, "hwp_only"))
2834 		hwp_only = 1;
2835 	if (!strcmp(str, "per_cpu_perf_limits"))
2836 		per_cpu_limits = true;
2837 
2838 #ifdef CONFIG_ACPI
2839 	if (!strcmp(str, "support_acpi_ppc"))
2840 		acpi_ppc = true;
2841 #endif
2842 
2843 	return 0;
2844 }
2845 early_param("intel_pstate", intel_pstate_setup);
2846 
2847 MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
2848 MODULE_DESCRIPTION("'intel_pstate' - P-state driver for Intel Core processors");
2849 MODULE_LICENSE("GPL");
2850