1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * intel_pstate.c: Native P state management for Intel processors
4  *
5  * (C) Copyright 2012 Intel Corporation
6  * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
7  */
8 
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 
11 #include <linux/kernel.h>
12 #include <linux/kernel_stat.h>
13 #include <linux/module.h>
14 #include <linux/ktime.h>
15 #include <linux/hrtimer.h>
16 #include <linux/tick.h>
17 #include <linux/slab.h>
18 #include <linux/sched/cpufreq.h>
19 #include <linux/list.h>
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/sysfs.h>
23 #include <linux/types.h>
24 #include <linux/fs.h>
25 #include <linux/acpi.h>
26 #include <linux/vmalloc.h>
27 #include <linux/pm_qos.h>
28 #include <trace/events/power.h>
29 
30 #include <asm/cpu.h>
31 #include <asm/div64.h>
32 #include <asm/msr.h>
33 #include <asm/cpu_device_id.h>
34 #include <asm/cpufeature.h>
35 #include <asm/intel-family.h>
36 #include "../drivers/thermal/intel/thermal_interrupt.h"
37 
38 #define INTEL_PSTATE_SAMPLING_INTERVAL	(10 * NSEC_PER_MSEC)
39 
40 #define INTEL_CPUFREQ_TRANSITION_LATENCY	20000
41 #define INTEL_CPUFREQ_TRANSITION_DELAY_HWP	5000
42 #define INTEL_CPUFREQ_TRANSITION_DELAY		500
43 
44 #ifdef CONFIG_ACPI
45 #include <acpi/processor.h>
46 #include <acpi/cppc_acpi.h>
47 #endif
48 
49 #define FRAC_BITS 8
50 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
51 #define fp_toint(X) ((X) >> FRAC_BITS)
52 
53 #define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))
54 
55 #define EXT_BITS 6
56 #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
57 #define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
58 #define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)
59 
60 static inline int32_t mul_fp(int32_t x, int32_t y)
61 {
62 	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
63 }
64 
65 static inline int32_t div_fp(s64 x, s64 y)
66 {
67 	return div64_s64((int64_t)x << FRAC_BITS, y);
68 }
69 
70 static inline int ceiling_fp(int32_t x)
71 {
72 	int mask, ret;
73 
74 	ret = fp_toint(x);
75 	mask = (1 << FRAC_BITS) - 1;
76 	if (x & mask)
77 		ret += 1;
78 	return ret;
79 }
80 
81 static inline u64 mul_ext_fp(u64 x, u64 y)
82 {
83 	return (x * y) >> EXT_FRAC_BITS;
84 }
85 
86 static inline u64 div_ext_fp(u64 x, u64 y)
87 {
88 	return div64_u64(x << EXT_FRAC_BITS, y);
89 }
90 
91 /**
92  * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during the last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate the next
 *			P-state. This can be different from core_avg_perf
 *			to account for CPU idle periods.
98  * @aperf:		Difference of actual performance frequency clock count
99  *			read from APERF MSR between last and current sample
100  * @mperf:		Difference of maximum performance frequency clock count
101  *			read from MPERF MSR between last and current sample
102  * @tsc:		Difference of time stamp counter between last and
103  *			current sample
104  * @time:		Current time from scheduler
105  *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing the next P-state.
108  */
109 struct sample {
110 	int32_t core_avg_perf;
111 	int32_t busy_scaled;
112 	u64 aperf;
113 	u64 mperf;
114 	u64 tsc;
115 	u64 time;
116 };
117 
118 /**
119  * struct pstate_data - Store P state data
120  * @current_pstate:	Current requested P state
121  * @min_pstate:		Min P state possible for this platform
122  * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical: Physical maximum P-state for the processor.
 *			This can be higher than max_pstate, which may be
 *			limited by the platform's thermal design power limits.
126  * @perf_ctl_scaling:	PERF_CTL P-state to frequency scaling factor
127  * @scaling:		Scaling factor between performance and frequency
128  * @turbo_pstate:	Max Turbo P state possible for this platform
129  * @min_freq:		@min_pstate frequency in cpufreq units
130  * @max_freq:		@max_pstate frequency in cpufreq units
131  * @turbo_freq:		@turbo_pstate frequency in cpufreq units
132  *
 * Stores the per-CPU-model P-state limits and the current P-state.
134  */
135 struct pstate_data {
136 	int	current_pstate;
137 	int	min_pstate;
138 	int	max_pstate;
139 	int	max_pstate_physical;
140 	int	perf_ctl_scaling;
141 	int	scaling;
142 	int	turbo_pstate;
143 	unsigned int min_freq;
144 	unsigned int max_freq;
145 	unsigned int turbo_freq;
146 };
147 
148 /**
149  * struct vid_data -	Stores voltage information data
150  * @min:		VID data for this platform corresponding to
151  *			the lowest P state
152  * @max:		VID data corresponding to the highest P State.
153  * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P-state - min P-state)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
 * This data is used on Atom platforms, where in addition to the target
 * P-state, voltage data needs to be specified to select the next P-state.
160  */
161 struct vid_data {
162 	int min;
163 	int max;
164 	int turbo;
165 	int32_t ratio;
166 };
167 
168 /**
169  * struct global_params - Global parameters, mostly tunable via sysfs.
170  * @no_turbo:		Whether or not to use turbo P-states.
171  * @turbo_disabled:	Whether or not turbo P-states are available at all,
172  *			based on the MSR_IA32_MISC_ENABLE value and whether or
173  *			not the maximum reported turbo P-state is different from
174  *			the maximum reported non-turbo one.
175  * @turbo_disabled_mf:	The @turbo_disabled value reflected by cpuinfo.max_freq.
176  * @min_perf_pct:	Minimum capacity limit in percent of the maximum turbo
177  *			P-state capacity.
178  * @max_perf_pct:	Maximum capacity limit in percent of the maximum turbo
179  *			P-state capacity.
180  */
181 struct global_params {
182 	bool no_turbo;
183 	bool turbo_disabled;
184 	bool turbo_disabled_mf;
185 	int max_perf_pct;
186 	int min_perf_pct;
187 };
188 
189 /**
190  * struct cpudata -	Per CPU instance data storage
191  * @cpu:		CPU number for this instance data
192  * @policy:		CPUFreq policy value
193  * @update_util:	CPUFreq utility callback information
194  * @update_util_set:	CPUFreq utility callback is set
195  * @iowait_boost:	iowait-related boost fraction
196  * @last_update:	Time of the last update.
197  * @pstate:		Stores P state limits for this CPU
198  * @vid:		Stores VID limits for this CPU
199  * @last_sample_time:	Last Sample time
200  * @aperf_mperf_shift:	APERF vs MPERF counting frequency difference
201  * @prev_aperf:		Last APERF value read from APERF MSR
202  * @prev_mperf:		Last MPERF value read from MPERF MSR
203  * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: Cumulative IO wait time difference between the
 *			last and current sample
 * @sample:		Storage for the last sample data
207  * @min_perf_ratio:	Minimum capacity in terms of PERF or HWP ratios
208  * @max_perf_ratio:	Maximum capacity in terms of PERF or HWP ratios
209  * @acpi_perf_data:	Stores ACPI perf information read from _PSS
210  * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
211  * @epp_powersave:	Last saved HWP energy performance preference
212  *			(EPP) or energy performance bias (EPB),
213  *			when policy switched to performance
214  * @epp_policy:		Last saved policy used to set EPP/EPB
215  * @epp_default:	Power on default HWP energy performance
216  *			preference/bias
 * @epp_cached:		Cached HWP energy-performance preference value
218  * @hwp_req_cached:	Cached value of the last HWP Request MSR
219  * @hwp_cap_cached:	Cached value of the last HWP Capabilities MSR
220  * @last_io_update:	Last time when IO wake flag was set
221  * @sched_flags:	Store scheduler flags for possible cross CPU update
222  * @hwp_boost_min:	Last HWP boosted min performance
223  * @suspended:		Whether or not the driver has been suspended.
224  * @hwp_notify_work:	workqueue for HWP notifications.
225  *
226  * This structure stores per CPU instance data for all CPUs.
227  */
228 struct cpudata {
229 	int cpu;
230 
231 	unsigned int policy;
232 	struct update_util_data update_util;
233 	bool   update_util_set;
234 
235 	struct pstate_data pstate;
236 	struct vid_data vid;
237 
238 	u64	last_update;
239 	u64	last_sample_time;
240 	u64	aperf_mperf_shift;
241 	u64	prev_aperf;
242 	u64	prev_mperf;
243 	u64	prev_tsc;
244 	u64	prev_cummulative_iowait;
245 	struct sample sample;
246 	int32_t	min_perf_ratio;
247 	int32_t	max_perf_ratio;
248 #ifdef CONFIG_ACPI
249 	struct acpi_processor_performance acpi_perf_data;
250 	bool valid_pss_table;
251 #endif
252 	unsigned int iowait_boost;
253 	s16 epp_powersave;
254 	s16 epp_policy;
255 	s16 epp_default;
256 	s16 epp_cached;
257 	u64 hwp_req_cached;
258 	u64 hwp_cap_cached;
259 	u64 last_io_update;
260 	unsigned int sched_flags;
261 	u32 hwp_boost_min;
262 	bool suspended;
263 	struct delayed_work hwp_notify_work;
264 };
265 
266 static struct cpudata **all_cpu_data;
267 
268 /**
269  * struct pstate_funcs - Per CPU model specific callbacks
270  * @get_max:		Callback to get maximum non turbo effective P state
271  * @get_max_physical:	Callback to get maximum non turbo physical P state
272  * @get_min:		Callback to get minimum P state
273  * @get_turbo:		Callback to get turbo P state
274  * @get_scaling:	Callback to get frequency scaling factor
275  * @get_cpu_scaling:	Get frequency scaling factor for a given cpu
276  * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference
277  * @get_val:		Callback to convert P state to actual MSR write value
278  * @get_vid:		Callback to get VID data for Atom platforms
279  *
 * Core and Atom CPU models have different ways to get P-state limits. This
 * structure is used to store those callbacks.
282  */
283 struct pstate_funcs {
284 	int (*get_max)(int cpu);
285 	int (*get_max_physical)(int cpu);
286 	int (*get_min)(int cpu);
287 	int (*get_turbo)(int cpu);
288 	int (*get_scaling)(void);
289 	int (*get_cpu_scaling)(int cpu);
290 	int (*get_aperf_mperf_shift)(void);
291 	u64 (*get_val)(struct cpudata*, int pstate);
292 	void (*get_vid)(struct cpudata *);
293 };
294 
295 static struct pstate_funcs pstate_funcs __read_mostly;
296 
297 static int hwp_active __read_mostly;
298 static int hwp_mode_bdw __read_mostly;
299 static bool per_cpu_limits __read_mostly;
300 static bool hwp_boost __read_mostly;
301 static bool hwp_forced __read_mostly;
302 
303 static struct cpufreq_driver *intel_pstate_driver __read_mostly;
304 
305 #ifdef CONFIG_ACPI
306 static bool acpi_ppc;
307 #endif
308 
309 static struct global_params global;
310 
311 static DEFINE_MUTEX(intel_pstate_driver_lock);
312 static DEFINE_MUTEX(intel_pstate_limits_lock);
313 
314 #ifdef CONFIG_ACPI
315 
316 static bool intel_pstate_acpi_pm_profile_server(void)
317 {
318 	if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
319 	    acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
320 		return true;
321 
322 	return false;
323 }
324 
325 static bool intel_pstate_get_ppc_enable_status(void)
326 {
327 	if (intel_pstate_acpi_pm_profile_server())
328 		return true;
329 
330 	return acpi_ppc;
331 }
332 
333 #ifdef CONFIG_ACPI_CPPC_LIB
334 
335 /* The work item is needed to avoid CPU hotplug locking issues */
static void intel_pstate_sched_itmt_work_fn(struct work_struct *work)
337 {
338 	sched_set_itmt_support();
339 }
340 
static DECLARE_WORK(sched_itmt_work, intel_pstate_sched_itmt_work_fn);
342 
343 #define CPPC_MAX_PERF	U8_MAX
344 
345 static void intel_pstate_set_itmt_prio(int cpu)
346 {
347 	struct cppc_perf_caps cppc_perf;
348 	static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
349 	int ret;
350 
351 	ret = cppc_get_perf_caps(cpu, &cppc_perf);
352 	if (ret)
353 		return;
354 
355 	/*
356 	 * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff.
357 	 * In this case we can't use CPPC.highest_perf to enable ITMT.
358 	 * In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide.
359 	 */
360 	if (cppc_perf.highest_perf == CPPC_MAX_PERF)
361 		cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
362 
363 	/*
364 	 * The priorities can be set regardless of whether or not
365 	 * sched_set_itmt_support(true) has been called and it is valid to
366 	 * update them at any time after it has been called.
367 	 */
368 	sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);
369 
370 	if (max_highest_perf <= min_highest_perf) {
371 		if (cppc_perf.highest_perf > max_highest_perf)
372 			max_highest_perf = cppc_perf.highest_perf;
373 
374 		if (cppc_perf.highest_perf < min_highest_perf)
375 			min_highest_perf = cppc_perf.highest_perf;
376 
377 		if (max_highest_perf > min_highest_perf) {
378 			/*
379 			 * This code can be run during CPU online under the
380 			 * CPU hotplug locks, so sched_set_itmt_support()
381 			 * cannot be called from here.  Queue up a work item
382 			 * to invoke it.
383 			 */
384 			schedule_work(&sched_itmt_work);
385 		}
386 	}
387 }
388 
389 static int intel_pstate_get_cppc_guaranteed(int cpu)
390 {
391 	struct cppc_perf_caps cppc_perf;
392 	int ret;
393 
394 	ret = cppc_get_perf_caps(cpu, &cppc_perf);
395 	if (ret)
396 		return ret;
397 
398 	if (cppc_perf.guaranteed_perf)
399 		return cppc_perf.guaranteed_perf;
400 
401 	return cppc_perf.nominal_perf;
402 }
403 #else /* CONFIG_ACPI_CPPC_LIB */
404 static inline void intel_pstate_set_itmt_prio(int cpu)
405 {
406 }
407 #endif /* CONFIG_ACPI_CPPC_LIB */
408 
409 static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
410 {
411 	struct cpudata *cpu;
412 	int ret;
413 	int i;
414 
415 	if (hwp_active) {
416 		intel_pstate_set_itmt_prio(policy->cpu);
417 		return;
418 	}
419 
420 	if (!intel_pstate_get_ppc_enable_status())
421 		return;
422 
423 	cpu = all_cpu_data[policy->cpu];
424 
425 	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
426 						  policy->cpu);
427 	if (ret)
428 		return;
429 
430 	/*
431 	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
432 	 * guarantee that the states returned by it map to the states in our
433 	 * list directly.
434 	 */
435 	if (cpu->acpi_perf_data.control_register.space_id !=
436 						ACPI_ADR_SPACE_FIXED_HARDWARE)
437 		goto err;
438 
439 	/*
440 	 * If there is only one entry _PSS, simply ignore _PSS and continue as
441 	 * usual without taking _PSS into account
442 	 */
443 	if (cpu->acpi_perf_data.state_count < 2)
444 		goto err;
445 
446 	pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
447 	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
448 		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
449 			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
450 			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
451 			 (u32) cpu->acpi_perf_data.states[i].power,
452 			 (u32) cpu->acpi_perf_data.states[i].control);
453 	}
454 
455 	cpu->valid_pss_table = true;
456 	pr_debug("_PPC limits will be enforced\n");
457 
458 	return;
459 
460  err:
461 	cpu->valid_pss_table = false;
462 	acpi_processor_unregister_performance(policy->cpu);
463 }
464 
465 static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
466 {
467 	struct cpudata *cpu;
468 
469 	cpu = all_cpu_data[policy->cpu];
470 	if (!cpu->valid_pss_table)
471 		return;
472 
473 	acpi_processor_unregister_performance(policy->cpu);
474 }
475 #else /* CONFIG_ACPI */
476 static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
477 {
478 }
479 
480 static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
481 {
482 }
483 
484 static inline bool intel_pstate_acpi_pm_profile_server(void)
485 {
486 	return false;
487 }
488 #endif /* CONFIG_ACPI */
489 
490 #ifndef CONFIG_ACPI_CPPC_LIB
491 static inline int intel_pstate_get_cppc_guaranteed(int cpu)
492 {
493 	return -ENOTSUPP;
494 }
495 #endif /* CONFIG_ACPI_CPPC_LIB */
496 
497 /**
498  * intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels.
499  * @cpu: Target CPU.
500  *
501  * On hybrid processors, HWP may expose more performance levels than there are
502  * P-states accessible through the PERF_CTL interface.  If that happens, the
503  * scaling factor between HWP performance levels and CPU frequency will be less
504  * than the scaling factor between P-state values and CPU frequency.
505  *
506  * In that case, adjust the CPU parameters used in computations accordingly.
507  */
508 static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
509 {
510 	int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
511 	int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
512 	int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu);
513 	int scaling = cpu->pstate.scaling;
514 
515 	pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
516 	pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
517 	pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
518 	pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
519 	pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
520 	pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
521 
522 	cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_pstate * scaling,
523 					   perf_ctl_scaling);
524 	cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
525 					 perf_ctl_scaling);
526 
527 	cpu->pstate.max_pstate_physical =
528 			DIV_ROUND_UP(perf_ctl_max_phys * perf_ctl_scaling,
529 				     scaling);
530 
531 	cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
532 	/*
533 	 * Cast the min P-state value retrieved via pstate_funcs.get_min() to
534 	 * the effective range of HWP performance levels.
535 	 */
536 	cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling);
537 }
538 
539 static inline void update_turbo_state(void)
540 {
541 	u64 misc_en;
542 	struct cpudata *cpu;
543 
544 	cpu = all_cpu_data[0];
545 	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
546 	global.turbo_disabled =
547 		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
548 		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
549 }
550 
551 static int min_perf_pct_min(void)
552 {
553 	struct cpudata *cpu = all_cpu_data[0];
554 	int turbo_pstate = cpu->pstate.turbo_pstate;
555 
556 	return turbo_pstate ?
557 		(cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
558 }
559 
560 static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
561 {
562 	u64 epb;
563 	int ret;
564 
565 	if (!boot_cpu_has(X86_FEATURE_EPB))
566 		return -ENXIO;
567 
568 	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
569 	if (ret)
570 		return (s16)ret;
571 
572 	return (s16)(epb & 0x0f);
573 }
574 
575 static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
576 {
577 	s16 epp;
578 
579 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
580 		/*
581 		 * When hwp_req_data is 0, means that caller didn't read
582 		 * MSR_HWP_REQUEST, so need to read and get EPP.
583 		 */
584 		if (!hwp_req_data) {
585 			epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
586 					    &hwp_req_data);
587 			if (epp)
588 				return epp;
589 		}
590 		epp = (hwp_req_data >> 24) & 0xff;
591 	} else {
592 		/* When there is no EPP present, HWP uses EPB settings */
593 		epp = intel_pstate_get_epb(cpu_data);
594 	}
595 
596 	return epp;
597 }
598 
599 static int intel_pstate_set_epb(int cpu, s16 pref)
600 {
601 	u64 epb;
602 	int ret;
603 
604 	if (!boot_cpu_has(X86_FEATURE_EPB))
605 		return -ENXIO;
606 
607 	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
608 	if (ret)
609 		return ret;
610 
611 	epb = (epb & ~0x0f) | pref;
612 	wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
613 
614 	return 0;
615 }
616 
617 /*
618  * EPP/EPB display strings corresponding to EPP index in the
619  * energy_perf_strings[]
620  *	index		String
621  *-------------------------------------
622  *	0		default
623  *	1		performance
624  *	2		balance_performance
625  *	3		balance_power
626  *	4		power
627  */
628 
629 enum energy_perf_value_index {
630 	EPP_INDEX_DEFAULT = 0,
631 	EPP_INDEX_PERFORMANCE,
632 	EPP_INDEX_BALANCE_PERFORMANCE,
633 	EPP_INDEX_BALANCE_POWERSAVE,
634 	EPP_INDEX_POWERSAVE,
635 };
636 
637 static const char * const energy_perf_strings[] = {
638 	[EPP_INDEX_DEFAULT] = "default",
639 	[EPP_INDEX_PERFORMANCE] = "performance",
640 	[EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
641 	[EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
642 	[EPP_INDEX_POWERSAVE] = "power",
643 	NULL
644 };
645 static unsigned int epp_values[] = {
646 	[EPP_INDEX_DEFAULT] = 0, /* Unused index */
647 	[EPP_INDEX_PERFORMANCE] = HWP_EPP_PERFORMANCE,
648 	[EPP_INDEX_BALANCE_PERFORMANCE] = HWP_EPP_BALANCE_PERFORMANCE,
649 	[EPP_INDEX_BALANCE_POWERSAVE] = HWP_EPP_BALANCE_POWERSAVE,
650 	[EPP_INDEX_POWERSAVE] = HWP_EPP_POWERSAVE,
651 };
652 
653 static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp)
654 {
655 	s16 epp;
656 	int index = -EINVAL;
657 
658 	*raw_epp = 0;
659 	epp = intel_pstate_get_epp(cpu_data, 0);
660 	if (epp < 0)
661 		return epp;
662 
663 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
664 		if (epp == epp_values[EPP_INDEX_PERFORMANCE])
665 			return EPP_INDEX_PERFORMANCE;
666 		if (epp == epp_values[EPP_INDEX_BALANCE_PERFORMANCE])
667 			return EPP_INDEX_BALANCE_PERFORMANCE;
668 		if (epp == epp_values[EPP_INDEX_BALANCE_POWERSAVE])
669 			return EPP_INDEX_BALANCE_POWERSAVE;
670 		if (epp == epp_values[EPP_INDEX_POWERSAVE])
671 			return EPP_INDEX_POWERSAVE;
672 		*raw_epp = epp;
673 		return 0;
674 	} else if (boot_cpu_has(X86_FEATURE_EPB)) {
675 		/*
676 		 * Range:
677 		 *	0x00-0x03	:	Performance
678 		 *	0x04-0x07	:	Balance performance
679 		 *	0x08-0x0B	:	Balance power
680 		 *	0x0C-0x0F	:	Power
		 * The EPB is a 4-bit value, but our ranges restrict the
		 * values that can be set, so effectively only the top two
		 * bits are used here.
684 		 */
685 		index = (epp >> 2) + 1;
686 	}
687 
688 	return index;
689 }
690 
691 static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
692 {
693 	int ret;
694 
695 	/*
696 	 * Use the cached HWP Request MSR value, because in the active mode the
697 	 * register itself may be updated by intel_pstate_hwp_boost_up() or
698 	 * intel_pstate_hwp_boost_down() at any time.
699 	 */
700 	u64 value = READ_ONCE(cpu->hwp_req_cached);
701 
702 	value &= ~GENMASK_ULL(31, 24);
703 	value |= (u64)epp << 24;
704 	/*
705 	 * The only other updater of hwp_req_cached in the active mode,
706 	 * intel_pstate_hwp_set(), is called under the same lock as this
707 	 * function, so it cannot run in parallel with the update below.
708 	 */
709 	WRITE_ONCE(cpu->hwp_req_cached, value);
710 	ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
711 	if (!ret)
712 		cpu->epp_cached = epp;
713 
714 	return ret;
715 }
716 
717 static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
718 					      int pref_index, bool use_raw,
719 					      u32 raw_epp)
720 {
721 	int epp = -EINVAL;
722 	int ret;
723 
724 	if (!pref_index)
725 		epp = cpu_data->epp_default;
726 
727 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
728 		if (use_raw)
729 			epp = raw_epp;
730 		else if (epp == -EINVAL)
731 			epp = epp_values[pref_index];
732 
733 		/*
734 		 * To avoid confusion, refuse to set EPP to any values different
735 		 * from 0 (performance) if the current policy is "performance",
736 		 * because those values would be overridden.
737 		 */
738 		if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
739 			return -EBUSY;
740 
741 		ret = intel_pstate_set_epp(cpu_data, epp);
742 	} else {
743 		if (epp == -EINVAL)
744 			epp = (pref_index - 1) << 2;
745 		ret = intel_pstate_set_epb(cpu_data->cpu, epp);
746 	}
747 
748 	return ret;
749 }
750 
751 static ssize_t show_energy_performance_available_preferences(
752 				struct cpufreq_policy *policy, char *buf)
753 {
754 	int i = 0;
755 	int ret = 0;
756 
757 	while (energy_perf_strings[i] != NULL)
758 		ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);
759 
760 	ret += sprintf(&buf[ret], "\n");
761 
762 	return ret;
763 }
764 
765 cpufreq_freq_attr_ro(energy_performance_available_preferences);
766 
767 static struct cpufreq_driver intel_pstate;
768 
769 static ssize_t store_energy_performance_preference(
770 		struct cpufreq_policy *policy, const char *buf, size_t count)
771 {
772 	struct cpudata *cpu = all_cpu_data[policy->cpu];
773 	char str_preference[21];
774 	bool raw = false;
775 	ssize_t ret;
776 	u32 epp = 0;
777 
778 	ret = sscanf(buf, "%20s", str_preference);
779 	if (ret != 1)
780 		return -EINVAL;
781 
782 	ret = match_string(energy_perf_strings, -1, str_preference);
783 	if (ret < 0) {
784 		if (!boot_cpu_has(X86_FEATURE_HWP_EPP))
785 			return ret;
786 
787 		ret = kstrtouint(buf, 10, &epp);
788 		if (ret)
789 			return ret;
790 
791 		if (epp > 255)
792 			return -EINVAL;
793 
794 		raw = true;
795 	}
796 
797 	/*
798 	 * This function runs with the policy R/W semaphore held, which
799 	 * guarantees that the driver pointer will not change while it is
800 	 * running.
801 	 */
802 	if (!intel_pstate_driver)
803 		return -EAGAIN;
804 
805 	mutex_lock(&intel_pstate_limits_lock);
806 
807 	if (intel_pstate_driver == &intel_pstate) {
808 		ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp);
809 	} else {
810 		/*
811 		 * In the passive mode the governor needs to be stopped on the
812 		 * target CPU before the EPP update and restarted after it,
813 		 * which is super-heavy-weight, so make sure it is worth doing
814 		 * upfront.
815 		 */
816 		if (!raw)
817 			epp = ret ? epp_values[ret] : cpu->epp_default;
818 
819 		if (cpu->epp_cached != epp) {
820 			int err;
821 
822 			cpufreq_stop_governor(policy);
823 			ret = intel_pstate_set_epp(cpu, epp);
824 			err = cpufreq_start_governor(policy);
825 			if (!ret)
826 				ret = err;
827 		}
828 	}
829 
830 	mutex_unlock(&intel_pstate_limits_lock);
831 
832 	return ret ?: count;
833 }
834 
835 static ssize_t show_energy_performance_preference(
836 				struct cpufreq_policy *policy, char *buf)
837 {
838 	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
839 	int preference, raw_epp;
840 
841 	preference = intel_pstate_get_energy_pref_index(cpu_data, &raw_epp);
842 	if (preference < 0)
843 		return preference;
844 
845 	if (raw_epp)
846 		return  sprintf(buf, "%d\n", raw_epp);
847 	else
848 		return  sprintf(buf, "%s\n", energy_perf_strings[preference]);
849 }
850 
851 cpufreq_freq_attr_rw(energy_performance_preference);
852 
853 static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
854 {
855 	struct cpudata *cpu = all_cpu_data[policy->cpu];
856 	int ratio, freq;
857 
858 	ratio = intel_pstate_get_cppc_guaranteed(policy->cpu);
859 	if (ratio <= 0) {
860 		u64 cap;
861 
862 		rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
863 		ratio = HWP_GUARANTEED_PERF(cap);
864 	}
865 
866 	freq = ratio * cpu->pstate.scaling;
867 	if (cpu->pstate.scaling != cpu->pstate.perf_ctl_scaling)
868 		freq = rounddown(freq, cpu->pstate.perf_ctl_scaling);
869 
870 	return sprintf(buf, "%d\n", freq);
871 }
872 
873 cpufreq_freq_attr_ro(base_frequency);
874 
875 static struct freq_attr *hwp_cpufreq_attrs[] = {
876 	&energy_performance_preference,
877 	&energy_performance_available_preferences,
878 	&base_frequency,
879 	NULL,
880 };
881 
882 static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
883 {
884 	u64 cap;
885 
886 	rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
887 	WRITE_ONCE(cpu->hwp_cap_cached, cap);
888 	cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap);
889 	cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap);
890 }
891 
892 static void intel_pstate_get_hwp_cap(struct cpudata *cpu)
893 {
894 	int scaling = cpu->pstate.scaling;
895 
896 	__intel_pstate_get_hwp_cap(cpu);
897 
898 	cpu->pstate.max_freq = cpu->pstate.max_pstate * scaling;
899 	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
900 	if (scaling != cpu->pstate.perf_ctl_scaling) {
901 		int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
902 
903 		cpu->pstate.max_freq = rounddown(cpu->pstate.max_freq,
904 						 perf_ctl_scaling);
905 		cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_freq,
906 						   perf_ctl_scaling);
907 	}
908 }
909 
910 static void intel_pstate_hwp_set(unsigned int cpu)
911 {
912 	struct cpudata *cpu_data = all_cpu_data[cpu];
913 	int max, min;
914 	u64 value;
915 	s16 epp;
916 
917 	max = cpu_data->max_perf_ratio;
918 	min = cpu_data->min_perf_ratio;
919 
920 	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
921 		min = max;
922 
923 	rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
924 
925 	value &= ~HWP_MIN_PERF(~0L);
926 	value |= HWP_MIN_PERF(min);
927 
928 	value &= ~HWP_MAX_PERF(~0L);
929 	value |= HWP_MAX_PERF(max);
930 
931 	if (cpu_data->epp_policy == cpu_data->policy)
932 		goto skip_epp;
933 
934 	cpu_data->epp_policy = cpu_data->policy;
935 
936 	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
937 		epp = intel_pstate_get_epp(cpu_data, value);
938 		cpu_data->epp_powersave = epp;
		/* If the EPP read failed, don't try to write it */
940 		if (epp < 0)
941 			goto skip_epp;
942 
943 		epp = 0;
944 	} else {
		/* Skip setting EPP when the saved value is invalid */
946 		if (cpu_data->epp_powersave < 0)
947 			goto skip_epp;
948 
949 		/*
950 		 * No need to restore EPP when it is not zero. This
951 		 * means:
952 		 *  - Policy is not changed
953 		 *  - user has manually changed
954 		 *  - Error reading EPB
955 		 */
956 		epp = intel_pstate_get_epp(cpu_data, value);
957 		if (epp)
958 			goto skip_epp;
959 
960 		epp = cpu_data->epp_powersave;
961 	}
962 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
963 		value &= ~GENMASK_ULL(31, 24);
964 		value |= (u64)epp << 24;
965 	} else {
966 		intel_pstate_set_epb(cpu, epp);
967 	}
968 skip_epp:
969 	WRITE_ONCE(cpu_data->hwp_req_cached, value);
970 	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
971 }
972 
973 static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata);
974 
975 static void intel_pstate_hwp_offline(struct cpudata *cpu)
976 {
977 	u64 value = READ_ONCE(cpu->hwp_req_cached);
978 	int min_perf;
979 
980 	intel_pstate_disable_hwp_interrupt(cpu);
981 
982 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
983 		/*
984 		 * In case the EPP has been set to "performance" by the
985 		 * active mode "performance" scaling algorithm, replace that
986 		 * temporary value with the cached EPP one.
987 		 */
988 		value &= ~GENMASK_ULL(31, 24);
989 		value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
990 		/*
991 		 * However, make sure that EPP will be set to "performance" when
992 		 * the CPU is brought back online again and the "performance"
993 		 * scaling algorithm is still in effect.
994 		 */
995 		cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
996 	}
997 
998 	/*
999 	 * Clear the desired perf field in the cached HWP request value to
1000 	 * prevent nonzero desired values from being leaked into the active
1001 	 * mode.
1002 	 */
1003 	value &= ~HWP_DESIRED_PERF(~0L);
1004 	WRITE_ONCE(cpu->hwp_req_cached, value);
1005 
1006 	value &= ~GENMASK_ULL(31, 0);
1007 	min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached));
1008 
1009 	/* Set hwp_max = hwp_min */
1010 	value |= HWP_MAX_PERF(min_perf);
1011 	value |= HWP_MIN_PERF(min_perf);
1012 
1013 	/* Set EPP to min */
1014 	if (boot_cpu_has(X86_FEATURE_HWP_EPP))
1015 		value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
1016 
1017 	wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
1018 }
1019 
1020 #define POWER_CTL_EE_ENABLE	1
1021 #define POWER_CTL_EE_DISABLE	2
1022 
1023 static int power_ctl_ee_state;
1024 
1025 static void set_power_ctl_ee_state(bool input)
1026 {
1027 	u64 power_ctl;
1028 
1029 	mutex_lock(&intel_pstate_driver_lock);
1030 	rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
1031 	if (input) {
1032 		power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
1033 		power_ctl_ee_state = POWER_CTL_EE_ENABLE;
1034 	} else {
1035 		power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
1036 		power_ctl_ee_state = POWER_CTL_EE_DISABLE;
1037 	}
1038 	wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
1039 	mutex_unlock(&intel_pstate_driver_lock);
1040 }
1041 
1042 static void intel_pstate_hwp_enable(struct cpudata *cpudata);
1043 
1044 static void intel_pstate_hwp_reenable(struct cpudata *cpu)
1045 {
1046 	intel_pstate_hwp_enable(cpu);
1047 	wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
1048 }
1049 
1050 static int intel_pstate_suspend(struct cpufreq_policy *policy)
1051 {
1052 	struct cpudata *cpu = all_cpu_data[policy->cpu];
1053 
1054 	pr_debug("CPU %d suspending\n", cpu->cpu);
1055 
1056 	cpu->suspended = true;
1057 
1058 	/* disable HWP interrupt and cancel any pending work */
1059 	intel_pstate_disable_hwp_interrupt(cpu);
1060 
1061 	return 0;
1062 }
1063 
1064 static int intel_pstate_resume(struct cpufreq_policy *policy)
1065 {
1066 	struct cpudata *cpu = all_cpu_data[policy->cpu];
1067 
1068 	pr_debug("CPU %d resuming\n", cpu->cpu);
1069 
1070 	/* Only restore if the system default is changed */
1071 	if (power_ctl_ee_state == POWER_CTL_EE_ENABLE)
1072 		set_power_ctl_ee_state(true);
1073 	else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE)
1074 		set_power_ctl_ee_state(false);
1075 
1076 	if (cpu->suspended && hwp_active) {
1077 		mutex_lock(&intel_pstate_limits_lock);
1078 
1079 		/* Re-enable HWP, because "online" has not done that. */
1080 		intel_pstate_hwp_reenable(cpu);
1081 
1082 		mutex_unlock(&intel_pstate_limits_lock);
1083 	}
1084 
1085 	cpu->suspended = false;
1086 
1087 	return 0;
1088 }
1089 
1090 static void intel_pstate_update_policies(void)
1091 {
1092 	int cpu;
1093 
1094 	for_each_possible_cpu(cpu)
1095 		cpufreq_update_policy(cpu);
1096 }
1097 
1098 static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
1099 					   struct cpufreq_policy *policy)
1100 {
1101 	policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
1102 			cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
1103 	refresh_frequency_limits(policy);
1104 }
1105 
1106 static void intel_pstate_update_max_freq(unsigned int cpu)
1107 {
1108 	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
1109 
1110 	if (!policy)
1111 		return;
1112 
1113 	__intel_pstate_update_max_freq(all_cpu_data[cpu], policy);
1114 
1115 	cpufreq_cpu_release(policy);
1116 }
1117 
1118 static void intel_pstate_update_limits(unsigned int cpu)
1119 {
1120 	mutex_lock(&intel_pstate_driver_lock);
1121 
1122 	update_turbo_state();
1123 	/*
1124 	 * If turbo has been turned on or off globally, policy limits for
1125 	 * all CPUs need to be updated to reflect that.
1126 	 */
1127 	if (global.turbo_disabled_mf != global.turbo_disabled) {
1128 		global.turbo_disabled_mf = global.turbo_disabled;
1129 		arch_set_max_freq_ratio(global.turbo_disabled);
1130 		for_each_possible_cpu(cpu)
1131 			intel_pstate_update_max_freq(cpu);
1132 	} else {
1133 		cpufreq_update_policy(cpu);
1134 	}
1135 
1136 	mutex_unlock(&intel_pstate_driver_lock);
1137 }
1138 
1139 /************************** sysfs begin ************************/
1140 #define show_one(file_name, object)					\
1141 	static ssize_t show_##file_name					\
1142 	(struct kobject *kobj, struct kobj_attribute *attr, char *buf)	\
1143 	{								\
1144 		return sprintf(buf, "%u\n", global.object);		\
1145 	}
1146 
1147 static ssize_t intel_pstate_show_status(char *buf);
1148 static int intel_pstate_update_status(const char *buf, size_t size);
1149 
1150 static ssize_t show_status(struct kobject *kobj,
1151 			   struct kobj_attribute *attr, char *buf)
1152 {
1153 	ssize_t ret;
1154 
1155 	mutex_lock(&intel_pstate_driver_lock);
1156 	ret = intel_pstate_show_status(buf);
1157 	mutex_unlock(&intel_pstate_driver_lock);
1158 
1159 	return ret;
1160 }
1161 
1162 static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
1163 			    const char *buf, size_t count)
1164 {
1165 	char *p = memchr(buf, '\n', count);
1166 	int ret;
1167 
1168 	mutex_lock(&intel_pstate_driver_lock);
1169 	ret = intel_pstate_update_status(buf, p ? p - buf : count);
1170 	mutex_unlock(&intel_pstate_driver_lock);
1171 
1172 	return ret < 0 ? ret : count;
1173 }
1174 
1175 static ssize_t show_turbo_pct(struct kobject *kobj,
1176 				struct kobj_attribute *attr, char *buf)
1177 {
1178 	struct cpudata *cpu;
1179 	int total, no_turbo, turbo_pct;
1180 	uint32_t turbo_fp;
1181 
1182 	mutex_lock(&intel_pstate_driver_lock);
1183 
1184 	if (!intel_pstate_driver) {
1185 		mutex_unlock(&intel_pstate_driver_lock);
1186 		return -EAGAIN;
1187 	}
1188 
1189 	cpu = all_cpu_data[0];
1190 
1191 	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
1192 	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
1193 	turbo_fp = div_fp(no_turbo, total);
1194 	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
1195 
1196 	mutex_unlock(&intel_pstate_driver_lock);
1197 
1198 	return sprintf(buf, "%u\n", turbo_pct);
1199 }
1200 
1201 static ssize_t show_num_pstates(struct kobject *kobj,
1202 				struct kobj_attribute *attr, char *buf)
1203 {
1204 	struct cpudata *cpu;
1205 	int total;
1206 
1207 	mutex_lock(&intel_pstate_driver_lock);
1208 
1209 	if (!intel_pstate_driver) {
1210 		mutex_unlock(&intel_pstate_driver_lock);
1211 		return -EAGAIN;
1212 	}
1213 
1214 	cpu = all_cpu_data[0];
1215 	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
1216 
1217 	mutex_unlock(&intel_pstate_driver_lock);
1218 
1219 	return sprintf(buf, "%u\n", total);
1220 }
1221 
1222 static ssize_t show_no_turbo(struct kobject *kobj,
1223 			     struct kobj_attribute *attr, char *buf)
1224 {
1225 	ssize_t ret;
1226 
1227 	mutex_lock(&intel_pstate_driver_lock);
1228 
1229 	if (!intel_pstate_driver) {
1230 		mutex_unlock(&intel_pstate_driver_lock);
1231 		return -EAGAIN;
1232 	}
1233 
1234 	update_turbo_state();
1235 	if (global.turbo_disabled)
1236 		ret = sprintf(buf, "%u\n", global.turbo_disabled);
1237 	else
1238 		ret = sprintf(buf, "%u\n", global.no_turbo);
1239 
1240 	mutex_unlock(&intel_pstate_driver_lock);
1241 
1242 	return ret;
1243 }
1244 
1245 static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
1246 			      const char *buf, size_t count)
1247 {
1248 	unsigned int input;
1249 	int ret;
1250 
1251 	ret = sscanf(buf, "%u", &input);
1252 	if (ret != 1)
1253 		return -EINVAL;
1254 
1255 	mutex_lock(&intel_pstate_driver_lock);
1256 
1257 	if (!intel_pstate_driver) {
1258 		mutex_unlock(&intel_pstate_driver_lock);
1259 		return -EAGAIN;
1260 	}
1261 
1262 	mutex_lock(&intel_pstate_limits_lock);
1263 
1264 	update_turbo_state();
1265 	if (global.turbo_disabled) {
1266 		pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
1267 		mutex_unlock(&intel_pstate_limits_lock);
1268 		mutex_unlock(&intel_pstate_driver_lock);
1269 		return -EPERM;
1270 	}
1271 
1272 	global.no_turbo = clamp_t(int, input, 0, 1);
1273 
1274 	if (global.no_turbo) {
1275 		struct cpudata *cpu = all_cpu_data[0];
1276 		int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
1277 
1278 		/* Squash the global minimum into the permitted range. */
1279 		if (global.min_perf_pct > pct)
1280 			global.min_perf_pct = pct;
1281 	}
1282 
1283 	mutex_unlock(&intel_pstate_limits_lock);
1284 
1285 	intel_pstate_update_policies();
1286 	arch_set_max_freq_ratio(global.no_turbo);
1287 
1288 	mutex_unlock(&intel_pstate_driver_lock);
1289 
1290 	return count;
1291 }
1292 
1293 static void update_qos_request(enum freq_qos_req_type type)
1294 {
1295 	struct freq_qos_request *req;
1296 	struct cpufreq_policy *policy;
1297 	int i;
1298 
1299 	for_each_possible_cpu(i) {
1300 		struct cpudata *cpu = all_cpu_data[i];
1301 		unsigned int freq, perf_pct;
1302 
1303 		policy = cpufreq_cpu_get(i);
1304 		if (!policy)
1305 			continue;
1306 
1307 		req = policy->driver_data;
1308 		cpufreq_cpu_put(policy);
1309 
1310 		if (!req)
1311 			continue;
1312 
1313 		if (hwp_active)
1314 			intel_pstate_get_hwp_cap(cpu);
1315 
1316 		if (type == FREQ_QOS_MIN) {
1317 			perf_pct = global.min_perf_pct;
1318 		} else {
1319 			req++;
1320 			perf_pct = global.max_perf_pct;
1321 		}
1322 
1323 		freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100);
1324 
1325 		if (freq_qos_update_request(req, freq) < 0)
1326 			pr_warn("Failed to update freq constraint: CPU%d\n", i);
1327 	}
1328 }
1329 
1330 static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
1331 				  const char *buf, size_t count)
1332 {
1333 	unsigned int input;
1334 	int ret;
1335 
1336 	ret = sscanf(buf, "%u", &input);
1337 	if (ret != 1)
1338 		return -EINVAL;
1339 
1340 	mutex_lock(&intel_pstate_driver_lock);
1341 
1342 	if (!intel_pstate_driver) {
1343 		mutex_unlock(&intel_pstate_driver_lock);
1344 		return -EAGAIN;
1345 	}
1346 
1347 	mutex_lock(&intel_pstate_limits_lock);
1348 
1349 	global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100);
1350 
1351 	mutex_unlock(&intel_pstate_limits_lock);
1352 
1353 	if (intel_pstate_driver == &intel_pstate)
1354 		intel_pstate_update_policies();
1355 	else
1356 		update_qos_request(FREQ_QOS_MAX);
1357 
1358 	mutex_unlock(&intel_pstate_driver_lock);
1359 
1360 	return count;
1361 }
1362 
1363 static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
1364 				  const char *buf, size_t count)
1365 {
1366 	unsigned int input;
1367 	int ret;
1368 
1369 	ret = sscanf(buf, "%u", &input);
1370 	if (ret != 1)
1371 		return -EINVAL;
1372 
1373 	mutex_lock(&intel_pstate_driver_lock);
1374 
1375 	if (!intel_pstate_driver) {
1376 		mutex_unlock(&intel_pstate_driver_lock);
1377 		return -EAGAIN;
1378 	}
1379 
1380 	mutex_lock(&intel_pstate_limits_lock);
1381 
1382 	global.min_perf_pct = clamp_t(int, input,
1383 				      min_perf_pct_min(), global.max_perf_pct);
1384 
1385 	mutex_unlock(&intel_pstate_limits_lock);
1386 
1387 	if (intel_pstate_driver == &intel_pstate)
1388 		intel_pstate_update_policies();
1389 	else
1390 		update_qos_request(FREQ_QOS_MIN);
1391 
1392 	mutex_unlock(&intel_pstate_driver_lock);
1393 
1394 	return count;
1395 }
1396 
1397 static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
1398 				struct kobj_attribute *attr, char *buf)
1399 {
1400 	return sprintf(buf, "%u\n", hwp_boost);
1401 }
1402 
1403 static ssize_t store_hwp_dynamic_boost(struct kobject *a,
1404 				       struct kobj_attribute *b,
1405 				       const char *buf, size_t count)
1406 {
1407 	unsigned int input;
1408 	int ret;
1409 
1410 	ret = kstrtouint(buf, 10, &input);
1411 	if (ret)
1412 		return ret;
1413 
1414 	mutex_lock(&intel_pstate_driver_lock);
1415 	hwp_boost = !!input;
1416 	intel_pstate_update_policies();
1417 	mutex_unlock(&intel_pstate_driver_lock);
1418 
1419 	return count;
1420 }
1421 
1422 static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr,
1423 				      char *buf)
1424 {
1425 	u64 power_ctl;
1426 	int enable;
1427 
1428 	rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
1429 	enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE));
1430 	return sprintf(buf, "%d\n", !enable);
1431 }
1432 
1433 static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b,
1434 				       const char *buf, size_t count)
1435 {
1436 	bool input;
1437 	int ret;
1438 
1439 	ret = kstrtobool(buf, &input);
1440 	if (ret)
1441 		return ret;
1442 
1443 	set_power_ctl_ee_state(input);
1444 
1445 	return count;
1446 }
1447 
1448 show_one(max_perf_pct, max_perf_pct);
1449 show_one(min_perf_pct, min_perf_pct);
1450 
1451 define_one_global_rw(status);
1452 define_one_global_rw(no_turbo);
1453 define_one_global_rw(max_perf_pct);
1454 define_one_global_rw(min_perf_pct);
1455 define_one_global_ro(turbo_pct);
1456 define_one_global_ro(num_pstates);
1457 define_one_global_rw(hwp_dynamic_boost);
1458 define_one_global_rw(energy_efficiency);
1459 
1460 static struct attribute *intel_pstate_attributes[] = {
1461 	&status.attr,
1462 	&no_turbo.attr,
1463 	NULL
1464 };
1465 
1466 static const struct attribute_group intel_pstate_attr_group = {
1467 	.attrs = intel_pstate_attributes,
1468 };
1469 
1470 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[];
1471 
1472 static struct kobject *intel_pstate_kobject;
1473 
1474 static void __init intel_pstate_sysfs_expose_params(void)
1475 {
1476 	int rc;
1477 
1478 	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
1479 						&cpu_subsys.dev_root->kobj);
1480 	if (WARN_ON(!intel_pstate_kobject))
1481 		return;
1482 
1483 	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
1484 	if (WARN_ON(rc))
1485 		return;
1486 
1487 	if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
1488 		rc = sysfs_create_file(intel_pstate_kobject, &turbo_pct.attr);
1489 		WARN_ON(rc);
1490 
1491 		rc = sysfs_create_file(intel_pstate_kobject, &num_pstates.attr);
1492 		WARN_ON(rc);
1493 	}
1494 
1495 	/*
1496 	 * If per cpu limits are enforced there are no global limits, so
1497 	 * return without creating max/min_perf_pct attributes
1498 	 */
1499 	if (per_cpu_limits)
1500 		return;
1501 
1502 	rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
1503 	WARN_ON(rc);
1504 
1505 	rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
1506 	WARN_ON(rc);
1507 
1508 	if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) {
1509 		rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr);
1510 		WARN_ON(rc);
1511 	}
1512 }
1513 
1514 static void __init intel_pstate_sysfs_remove(void)
1515 {
1516 	if (!intel_pstate_kobject)
1517 		return;
1518 
1519 	sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group);
1520 
1521 	if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
1522 		sysfs_remove_file(intel_pstate_kobject, &num_pstates.attr);
1523 		sysfs_remove_file(intel_pstate_kobject, &turbo_pct.attr);
1524 	}
1525 
1526 	if (!per_cpu_limits) {
1527 		sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr);
1528 		sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr);
1529 
1530 		if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids))
1531 			sysfs_remove_file(intel_pstate_kobject, &energy_efficiency.attr);
1532 	}
1533 
1534 	kobject_put(intel_pstate_kobject);
1535 }
1536 
1537 static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void)
1538 {
1539 	int rc;
1540 
1541 	if (!hwp_active)
1542 		return;
1543 
1544 	rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
1545 	WARN_ON_ONCE(rc);
1546 }
1547 
1548 static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
1549 {
1550 	if (!hwp_active)
1551 		return;
1552 
1553 	sysfs_remove_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
1554 }
1555 
1556 /************************** sysfs end ************************/
1557 
1558 static void intel_pstate_notify_work(struct work_struct *work)
1559 {
1560 	struct cpudata *cpudata =
1561 		container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
1562 	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
1563 
1564 	if (policy) {
1565 		intel_pstate_get_hwp_cap(cpudata);
1566 		__intel_pstate_update_max_freq(cpudata, policy);
1567 
1568 		cpufreq_cpu_release(policy);
1569 	}
1570 
1571 	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
1572 }
1573 
1574 static DEFINE_SPINLOCK(hwp_notify_lock);
1575 static cpumask_t hwp_intr_enable_mask;
1576 
1577 void notify_hwp_interrupt(void)
1578 {
1579 	unsigned int this_cpu = smp_processor_id();
1580 	struct cpudata *cpudata;
1581 	unsigned long flags;
1582 	u64 value;
1583 
1584 	if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
1585 		return;
1586 
1587 	rdmsrl_safe(MSR_HWP_STATUS, &value);
1588 	if (!(value & 0x01))
1589 		return;
1590 
1591 	spin_lock_irqsave(&hwp_notify_lock, flags);
1592 
1593 	if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
1594 		goto ack_intr;
1595 
1596 	/*
1597 	 * Currently we never free all_cpu_data. And we can't reach here
1598 	 * without this allocated. But for safety for future changes, added
1599 	 * check.
1600 	 */
1601 	if (unlikely(!READ_ONCE(all_cpu_data)))
1602 		goto ack_intr;
1603 
1604 	/*
1605 	 * The free is done during cleanup, when cpufreq registry is failed.
1606 	 * We wouldn't be here if it fails on init or switch status. But for
1607 	 * future changes, added check.
1608 	 */
1609 	cpudata = READ_ONCE(all_cpu_data[this_cpu]);
1610 	if (unlikely(!cpudata))
1611 		goto ack_intr;
1612 
1613 	schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10));
1614 
1615 	spin_unlock_irqrestore(&hwp_notify_lock, flags);
1616 
1617 	return;
1618 
1619 ack_intr:
1620 	wrmsrl_safe(MSR_HWP_STATUS, 0);
1621 	spin_unlock_irqrestore(&hwp_notify_lock, flags);
1622 }
1623 
1624 static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
1625 {
1626 	unsigned long flags;
1627 
1628 	if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
1629 		return;
1630 
	/* wrmsrl_on_cpu() has to be outside the spinlock as it can result in an IPI */
1632 	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
1633 
1634 	spin_lock_irqsave(&hwp_notify_lock, flags);
1635 	if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask))
1636 		cancel_delayed_work(&cpudata->hwp_notify_work);
1637 	spin_unlock_irqrestore(&hwp_notify_lock, flags);
1638 }
1639 
1640 static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
1641 {
1642 	/* Enable HWP notification interrupt for guaranteed performance change */
1643 	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
1644 		unsigned long flags;
1645 
1646 		spin_lock_irqsave(&hwp_notify_lock, flags);
1647 		INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
1648 		cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
1649 		spin_unlock_irqrestore(&hwp_notify_lock, flags);
1650 
		/* wrmsrl_on_cpu() has to be outside the spinlock as it can result in an IPI */
1652 		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
1653 		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
1654 	}
1655 }
1656 
1657 static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
1658 {
1659 	cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
1660 
1661 	/*
1662 	 * If this CPU gen doesn't call for change in balance_perf
1663 	 * EPP return.
1664 	 */
1665 	if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
1666 		return;
1667 
1668 	/*
1669 	 * If the EPP is set by firmware, which means that firmware enabled HWP
1670 	 * - Is equal or less than 0x80 (default balance_perf EPP)
1671 	 * - But less performance oriented than performance EPP
1672 	 *   then use this as new balance_perf EPP.
1673 	 */
1674 	if (hwp_forced && cpudata->epp_default <= HWP_EPP_BALANCE_PERFORMANCE &&
1675 	    cpudata->epp_default > HWP_EPP_PERFORMANCE) {
1676 		epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = cpudata->epp_default;
1677 		return;
1678 	}
1679 
1680 	/*
1681 	 * Use hard coded value per gen to update the balance_perf
1682 	 * and default EPP.
1683 	 */
1684 	cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE];
1685 	intel_pstate_set_epp(cpudata, cpudata->epp_default);
1686 }
1687 
1688 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
1689 {
	/* First disable the HWP notification interrupt until it is activated again */
1691 	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
1692 		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
1693 
1694 	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
1695 
1696 	intel_pstate_enable_hwp_interrupt(cpudata);
1697 
1698 	if (cpudata->epp_default >= 0)
1699 		return;
1700 
1701 	intel_pstate_update_epp_defaults(cpudata);
1702 }
1703 
1704 static int atom_get_min_pstate(int not_used)
1705 {
1706 	u64 value;
1707 
1708 	rdmsrl(MSR_ATOM_CORE_RATIOS, value);
1709 	return (value >> 8) & 0x7F;
1710 }
1711 
1712 static int atom_get_max_pstate(int not_used)
1713 {
1714 	u64 value;
1715 
1716 	rdmsrl(MSR_ATOM_CORE_RATIOS, value);
1717 	return (value >> 16) & 0x7F;
1718 }
1719 
1720 static int atom_get_turbo_pstate(int not_used)
1721 {
1722 	u64 value;
1723 
1724 	rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
1725 	return value & 0x7F;
1726 }
1727 
1728 static u64 atom_get_val(struct cpudata *cpudata, int pstate)
1729 {
1730 	u64 val;
1731 	int32_t vid_fp;
1732 	u32 vid;
1733 
1734 	val = (u64)pstate << 8;
1735 	if (global.no_turbo && !global.turbo_disabled)
1736 		val |= (u64)1 << 32;
1737 
1738 	vid_fp = cpudata->vid.min + mul_fp(
1739 		int_tofp(pstate - cpudata->pstate.min_pstate),
1740 		cpudata->vid.ratio);
1741 
1742 	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
1743 	vid = ceiling_fp(vid_fp);
1744 
1745 	if (pstate > cpudata->pstate.max_pstate)
1746 		vid = cpudata->vid.turbo;
1747 
1748 	return val | vid;
1749 }
1750 
1751 static int silvermont_get_scaling(void)
1752 {
1753 	u64 value;
1754 	int i;
1755 	/* Defined in Table 35-6 from SDM (Sept 2015) */
1756 	static int silvermont_freq_table[] = {
1757 		83300, 100000, 133300, 116700, 80000};
1758 
1759 	rdmsrl(MSR_FSB_FREQ, value);
1760 	i = value & 0x7;
1761 	WARN_ON(i > 4);
1762 
1763 	return silvermont_freq_table[i];
1764 }
1765 
1766 static int airmont_get_scaling(void)
1767 {
1768 	u64 value;
1769 	int i;
1770 	/* Defined in Table 35-10 from SDM (Sept 2015) */
1771 	static int airmont_freq_table[] = {
1772 		83300, 100000, 133300, 116700, 80000,
1773 		93300, 90000, 88900, 87500};
1774 
1775 	rdmsrl(MSR_FSB_FREQ, value);
1776 	i = value & 0xF;
1777 	WARN_ON(i > 8);
1778 
1779 	return airmont_freq_table[i];
1780 }
1781 
1782 static void atom_get_vid(struct cpudata *cpudata)
1783 {
1784 	u64 value;
1785 
1786 	rdmsrl(MSR_ATOM_CORE_VIDS, value);
1787 	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
1788 	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
1789 	cpudata->vid.ratio = div_fp(
1790 		cpudata->vid.max - cpudata->vid.min,
1791 		int_tofp(cpudata->pstate.max_pstate -
1792 			cpudata->pstate.min_pstate));
1793 
1794 	rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
1795 	cpudata->vid.turbo = value & 0x7f;
1796 }
1797 
1798 static int core_get_min_pstate(int cpu)
1799 {
1800 	u64 value;
1801 
1802 	rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
1803 	return (value >> 40) & 0xFF;
1804 }
1805 
1806 static int core_get_max_pstate_physical(int cpu)
1807 {
1808 	u64 value;
1809 
1810 	rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
1811 	return (value >> 8) & 0xFF;
1812 }
1813 
1814 static int core_get_tdp_ratio(int cpu, u64 plat_info)
1815 {
	/* Check how many TDP levels are present */
1817 	if (plat_info & 0x600000000) {
1818 		u64 tdp_ctrl;
1819 		u64 tdp_ratio;
1820 		int tdp_msr;
1821 		int err;
1822 
1823 		/* Get the TDP level (0, 1, 2) to get ratios */
1824 		err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
1825 		if (err)
1826 			return err;
1827 
		/* TDP MSRs are contiguous, starting at 0x648 */
1829 		tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
1830 		err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
1831 		if (err)
1832 			return err;
1833 
1834 		/* For level 1 and 2, bits[23:16] contain the ratio */
1835 		if (tdp_ctrl & 0x03)
1836 			tdp_ratio >>= 16;
1837 
1838 		tdp_ratio &= 0xff; /* ratios are only 8 bits long */
1839 		pr_debug("tdp_ratio %x\n", (int)tdp_ratio);
1840 
1841 		return (int)tdp_ratio;
1842 	}
1843 
1844 	return -ENXIO;
1845 }
1846 
1847 static int core_get_max_pstate(int cpu)
1848 {
1849 	u64 tar;
1850 	u64 plat_info;
1851 	int max_pstate;
1852 	int tdp_ratio;
1853 	int err;
1854 
1855 	rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
1856 	max_pstate = (plat_info >> 8) & 0xFF;
1857 
1858 	tdp_ratio = core_get_tdp_ratio(cpu, plat_info);
1859 	if (tdp_ratio <= 0)
1860 		return max_pstate;
1861 
1862 	if (hwp_active) {
1863 		/* Turbo activation ratio is not used on HWP platforms */
1864 		return tdp_ratio;
1865 	}
1866 
1867 	err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
1868 	if (!err) {
1869 		int tar_levels;
1870 
1871 		/* Do some sanity checking for safety */
1872 		tar_levels = tar & 0xff;
1873 		if (tdp_ratio - 1 == tar_levels) {
1874 			max_pstate = tar_levels;
1875 			pr_debug("max_pstate=TAC %x\n", max_pstate);
1876 		}
1877 	}
1878 
1879 	return max_pstate;
1880 }
1881 
1882 static int core_get_turbo_pstate(int cpu)
1883 {
1884 	u64 value;
1885 	int nont, ret;
1886 
1887 	rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
1888 	nont = core_get_max_pstate(cpu);
1889 	ret = (value) & 255;
1890 	if (ret <= nont)
1891 		ret = nont;
1892 	return ret;
1893 }
1894 
1895 static inline int core_get_scaling(void)
1896 {
1897 	return 100000;
1898 }
1899 
1900 static u64 core_get_val(struct cpudata *cpudata, int pstate)
1901 {
1902 	u64 val;
1903 
1904 	val = (u64)pstate << 8;
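	/*
	 * Bit 32 of IA32_PERF_CTL is the turbo (IDA) disengage bit; set it if
	 * the user has requested no_turbo while turbo is actually available.
	 */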
1905 	if (global.no_turbo && !global.turbo_disabled)
1906 		val |= (u64)1 << 32;
1907 
1908 	return val;
1909 }
1910 
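/*
 * On Knights Landing the APERF/MPERF feedback counters advance much more
 * slowly than the TSC, so their deltas are shifted left by this amount
 * before being compared with the TSC delta in get_target_pstate().
 */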
1911 static int knl_get_aperf_mperf_shift(void)
1912 {
1913 	return 10;
1914 }
1915 
1916 static int knl_get_turbo_pstate(int cpu)
1917 {
1918 	u64 value;
1919 	int nont, ret;
1920 
1921 	rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
1922 	nont = core_get_max_pstate(cpu);
1923 	ret = (((value) >> 8) & 0xFF);
1924 	if (ret <= nont)
1925 		ret = nont;
1926 	return ret;
1927 }
1928 
1929 static void hybrid_get_type(void *data)
1930 {
1931 	u8 *cpu_type = data;
1932 
1933 	*cpu_type = get_this_hybrid_cpu_type();
1934 }
1935 
1936 static int hybrid_get_cpu_scaling(int cpu)
1937 {
1938 	u8 cpu_type = 0;
1939 
1940 	smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
	/* P-cores have a smaller perf level-to-frequency scaling factor. */
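	/*
	 * For example, with a scaling factor of 78741 a HWP performance level
	 * of 40 corresponds to 40 * 78741 = 3149640 kHz (about 3.15 GHz),
	 * whereas the PERF_CTL scale is 100000 kHz per ratio unit.
	 */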
1942 	if (cpu_type == 0x40)
1943 		return 78741;
1944 
1945 	return core_get_scaling();
1946 }
1947 
1948 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
1949 {
1950 	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
1951 	cpu->pstate.current_pstate = pstate;
1952 	/*
1953 	 * Generally, there is no guarantee that this code will always run on
1954 	 * the CPU being updated, so force the register update to run on the
1955 	 * right CPU.
1956 	 */
1957 	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
1958 		      pstate_funcs.get_val(cpu, pstate));
1959 }
1960 
1961 static void intel_pstate_set_min_pstate(struct cpudata *cpu)
1962 {
1963 	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
1964 }
1965 
1966 static void intel_pstate_max_within_limits(struct cpudata *cpu)
1967 {
1968 	int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
1969 
1970 	update_turbo_state();
1971 	intel_pstate_set_pstate(cpu, pstate);
1972 }
1973 
1974 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
1975 {
1976 	int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
1977 	int perf_ctl_scaling = pstate_funcs.get_scaling();
1978 
1979 	cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu);
1980 	cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
1981 	cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;
1982 
1983 	if (hwp_active && !hwp_mode_bdw) {
1984 		__intel_pstate_get_hwp_cap(cpu);
1985 
1986 		if (pstate_funcs.get_cpu_scaling) {
1987 			cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu);
1988 			if (cpu->pstate.scaling != perf_ctl_scaling)
1989 				intel_pstate_hybrid_hwp_adjust(cpu);
1990 		} else {
1991 			cpu->pstate.scaling = perf_ctl_scaling;
1992 		}
1993 	} else {
1994 		cpu->pstate.scaling = perf_ctl_scaling;
1995 		cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu);
1996 		cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(cpu->cpu);
1997 	}
1998 
1999 	if (cpu->pstate.scaling == perf_ctl_scaling) {
2000 		cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
2001 		cpu->pstate.max_freq = cpu->pstate.max_pstate * perf_ctl_scaling;
2002 		cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * perf_ctl_scaling;
2003 	}
2004 
2005 	if (pstate_funcs.get_aperf_mperf_shift)
2006 		cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
2007 
2008 	if (pstate_funcs.get_vid)
2009 		pstate_funcs.get_vid(cpu);
2010 
2011 	intel_pstate_set_min_pstate(cpu);
2012 }
2013 
2014 /*
 * A long hold time will keep the high perf limits in place for a long
 * time, which negatively impacts perf/watt for some workloads, like
 * SPECpower. 3 ms is based on experiments on some workloads.
2019  */
2020 static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;
2021 
2022 static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
2023 {
2024 	u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
2025 	u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
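	/*
	 * In the HWP_REQUEST layout, bits 7:0 hold the minimum performance
	 * limit and bits 15:8 the maximum performance limit.
	 */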
2026 	u32 max_limit = (hwp_req & 0xff00) >> 8;
2027 	u32 min_limit = (hwp_req & 0xff);
2028 	u32 boost_level1;
2029 
2030 	/*
2031 	 * Cases to consider (User changes via sysfs or boot time):
2032 	 * If, P0 (Turbo max) = P1 (Guaranteed max) = min:
2033 	 *	No boost, return.
2034 	 * If, P0 (Turbo max) > P1 (Guaranteed max) = min:
2035 	 *     Should result in one level boost only for P0.
2036 	 * If, P0 (Turbo max) = P1 (Guaranteed max) > min:
2037 	 *     Should result in two level boost:
2038 	 *         (min + p1)/2 and P1.
2039 	 * If, P0 (Turbo max) > P1 (Guaranteed max) > min:
2040 	 *     Should result in three level boost:
2041 	 *        (min + p1)/2, P1 and P0.
2042 	 */
2043 
2044 	/* If max and min are equal or already at max, nothing to boost */
2045 	if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit)
2046 		return;
2047 
2048 	if (!cpu->hwp_boost_min)
2049 		cpu->hwp_boost_min = min_limit;
2050 
	/* Boost level 1 is at the halfway mark between min and guaranteed. */
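	/* E.g. min_limit = 10 and guaranteed = 35 give boost_level1 = 22. */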
2052 	boost_level1 = (HWP_GUARANTEED_PERF(hwp_cap) + min_limit) >> 1;
2053 
2054 	if (cpu->hwp_boost_min < boost_level1)
2055 		cpu->hwp_boost_min = boost_level1;
2056 	else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(hwp_cap))
2057 		cpu->hwp_boost_min = HWP_GUARANTEED_PERF(hwp_cap);
2058 	else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(hwp_cap) &&
2059 		 max_limit != HWP_GUARANTEED_PERF(hwp_cap))
2060 		cpu->hwp_boost_min = max_limit;
2061 	else
2062 		return;
2063 
2064 	hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
2065 	wrmsrl(MSR_HWP_REQUEST, hwp_req);
2066 	cpu->last_update = cpu->sample.time;
2067 }
2068 
2069 static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
2070 {
2071 	if (cpu->hwp_boost_min) {
2072 		bool expired;
2073 
		/* Check whether we have been idle for the hold time; if so, boost down. */
2075 		expired = time_after64(cpu->sample.time, cpu->last_update +
2076 				       hwp_boost_hold_time_ns);
2077 		if (expired) {
2078 			wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
2079 			cpu->hwp_boost_min = 0;
2080 		}
2081 	}
2082 	cpu->last_update = cpu->sample.time;
2083 }
2084 
2085 static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu,
2086 						      u64 time)
2087 {
2088 	cpu->sample.time = time;
2089 
2090 	if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) {
2091 		bool do_io = false;
2092 
2093 		cpu->sched_flags = 0;
2094 		/*
		 * Set the iowait_boost flag and update the time. Since the IO
		 * WAIT flag is set all the time, we can't conclude from just
		 * one occurrence that some IO-bound activity is scheduled on
		 * this CPU. Only if we receive at least two of them in two
		 * consecutive ticks do we treat the CPU as a boost candidate.
2100 		 */
2101 		if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC))
2102 			do_io = true;
2103 
2104 		cpu->last_io_update = time;
2105 
2106 		if (do_io)
2107 			intel_pstate_hwp_boost_up(cpu);
2108 
2109 	} else {
2110 		intel_pstate_hwp_boost_down(cpu);
2111 	}
2112 }
2113 
2114 static inline void intel_pstate_update_util_hwp(struct update_util_data *data,
2115 						u64 time, unsigned int flags)
2116 {
2117 	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
2118 
2119 	cpu->sched_flags |= flags;
2120 
2121 	if (smp_processor_id() == cpu->cpu)
2122 		intel_pstate_update_util_hwp_local(cpu, time);
2123 }
2124 
2125 static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
2126 {
2127 	struct sample *sample = &cpu->sample;
2128 
2129 	sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
2130 }
2131 
2132 static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
2133 {
2134 	u64 aperf, mperf;
2135 	unsigned long flags;
2136 	u64 tsc;
2137 
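	/*
	 * Read APERF, MPERF and the TSC back to back with interrupts off so
	 * that the three values form a consistent snapshot, and bail out if
	 * the counters have not advanced since the previous sample.
	 */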
2138 	local_irq_save(flags);
2139 	rdmsrl(MSR_IA32_APERF, aperf);
2140 	rdmsrl(MSR_IA32_MPERF, mperf);
2141 	tsc = rdtsc();
2142 	if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
2143 		local_irq_restore(flags);
2144 		return false;
2145 	}
2146 	local_irq_restore(flags);
2147 
2148 	cpu->last_sample_time = cpu->sample.time;
2149 	cpu->sample.time = time;
2150 	cpu->sample.aperf = aperf;
2151 	cpu->sample.mperf = mperf;
2152 	cpu->sample.tsc =  tsc;
2153 	cpu->sample.aperf -= cpu->prev_aperf;
2154 	cpu->sample.mperf -= cpu->prev_mperf;
2155 	cpu->sample.tsc -= cpu->prev_tsc;
2156 
2157 	cpu->prev_aperf = aperf;
2158 	cpu->prev_mperf = mperf;
2159 	cpu->prev_tsc = tsc;
2160 	/*
	 * The first time this function is invoked in a given cycle, all of the
	 * previous sample data fields are either zero or stale and they must
	 * be populated with meaningful numbers for things to work. Hence,
	 * assume that sample.time will always be reset before setting the
	 * utilization update hook and make the caller skip this sample.
2166 	 */
2167 	if (cpu->last_sample_time) {
2168 		intel_pstate_calc_avg_perf(cpu);
2169 		return true;
2170 	}
2171 	return false;
2172 }
2173 
2174 static inline int32_t get_avg_frequency(struct cpudata *cpu)
2175 {
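	/*
	 * core_avg_perf is the APERF/MPERF ratio in extended fixed point, so
	 * multiplying it by cpu_khz yields the average effective frequency
	 * in kHz.
	 */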
2176 	return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
2177 }
2178 
2179 static inline int32_t get_avg_pstate(struct cpudata *cpu)
2180 {
2181 	return mul_ext_fp(cpu->pstate.max_pstate_physical,
2182 			  cpu->sample.core_avg_perf);
2183 }
2184 
2185 static inline int32_t get_target_pstate(struct cpudata *cpu)
2186 {
2187 	struct sample *sample = &cpu->sample;
2188 	int32_t busy_frac;
2189 	int target, avg_pstate;
2190 
2191 	busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
2192 			   sample->tsc);
2193 
2194 	if (busy_frac < cpu->iowait_boost)
2195 		busy_frac = cpu->iowait_boost;
2196 
2197 	sample->busy_scaled = busy_frac * 100;
2198 
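	/*
	 * Target roughly 1.25 times the maximum available P-state scaled by
	 * the busy fraction, so the maximum P-state is reached at about 80%
	 * busy (1 / 1.25), and never go below the minimum P-state.
	 */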
2199 	target = global.no_turbo || global.turbo_disabled ?
2200 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
2201 	target += target >> 2;
2202 	target = mul_fp(target, busy_frac);
2203 	if (target < cpu->pstate.min_pstate)
2204 		target = cpu->pstate.min_pstate;
2205 
2206 	/*
2207 	 * If the average P-state during the previous cycle was higher than the
2208 	 * current target, add 50% of the difference to the target to reduce
2209 	 * possible performance oscillations and offset possible performance
2210 	 * loss related to moving the workload from one CPU to another within
2211 	 * a package/module.
2212 	 */
2213 	avg_pstate = get_avg_pstate(cpu);
2214 	if (avg_pstate > target)
2215 		target += (avg_pstate - target) >> 1;
2216 
2217 	return target;
2218 }
2219 
2220 static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
2221 {
2222 	int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
2223 	int max_pstate = max(min_pstate, cpu->max_perf_ratio);
2224 
2225 	return clamp_t(int, pstate, min_pstate, max_pstate);
2226 }
2227 
2228 static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
2229 {
2230 	if (pstate == cpu->pstate.current_pstate)
2231 		return;
2232 
2233 	cpu->pstate.current_pstate = pstate;
2234 	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
2235 }
2236 
2237 static void intel_pstate_adjust_pstate(struct cpudata *cpu)
2238 {
2239 	int from = cpu->pstate.current_pstate;
2240 	struct sample *sample;
2241 	int target_pstate;
2242 
2243 	update_turbo_state();
2244 
2245 	target_pstate = get_target_pstate(cpu);
2246 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
2247 	trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
2248 	intel_pstate_update_pstate(cpu, target_pstate);
2249 
2250 	sample = &cpu->sample;
2251 	trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
2252 		fp_toint(sample->busy_scaled),
2253 		from,
2254 		cpu->pstate.current_pstate,
2255 		sample->mperf,
2256 		sample->aperf,
2257 		sample->tsc,
2258 		get_avg_frequency(cpu),
2259 		fp_toint(cpu->iowait_boost * 100));
2260 }
2261 
2262 static void intel_pstate_update_util(struct update_util_data *data, u64 time,
2263 				     unsigned int flags)
2264 {
2265 	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
2266 	u64 delta_ns;
2267 
2268 	/* Don't allow remote callbacks */
2269 	if (smp_processor_id() != cpu->cpu)
2270 		return;
2271 
2272 	delta_ns = time - cpu->last_update;
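	/*
	 * The iowait boost is a fixed-point fraction of the maximum P-state:
	 * it starts at 1/8, doubles on each iowait wakeup, saturates at 1,
	 * and otherwise decays by half (or is cleared after an idle tick).
	 */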
2273 	if (flags & SCHED_CPUFREQ_IOWAIT) {
2274 		/* Start over if the CPU may have been idle. */
2275 		if (delta_ns > TICK_NSEC) {
2276 			cpu->iowait_boost = ONE_EIGHTH_FP;
2277 		} else if (cpu->iowait_boost >= ONE_EIGHTH_FP) {
2278 			cpu->iowait_boost <<= 1;
2279 			if (cpu->iowait_boost > int_tofp(1))
2280 				cpu->iowait_boost = int_tofp(1);
2281 		} else {
2282 			cpu->iowait_boost = ONE_EIGHTH_FP;
2283 		}
2284 	} else if (cpu->iowait_boost) {
2285 		/* Clear iowait_boost if the CPU may have been idle. */
2286 		if (delta_ns > TICK_NSEC)
2287 			cpu->iowait_boost = 0;
2288 		else
2289 			cpu->iowait_boost >>= 1;
2290 	}
2291 	cpu->last_update = time;
2292 	delta_ns = time - cpu->sample.time;
2293 	if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
2294 		return;
2295 
2296 	if (intel_pstate_sample(cpu, time))
2297 		intel_pstate_adjust_pstate(cpu);
2298 }
2299 
2300 static struct pstate_funcs core_funcs = {
2301 	.get_max = core_get_max_pstate,
2302 	.get_max_physical = core_get_max_pstate_physical,
2303 	.get_min = core_get_min_pstate,
2304 	.get_turbo = core_get_turbo_pstate,
2305 	.get_scaling = core_get_scaling,
2306 	.get_val = core_get_val,
2307 };
2308 
2309 static const struct pstate_funcs silvermont_funcs = {
2310 	.get_max = atom_get_max_pstate,
2311 	.get_max_physical = atom_get_max_pstate,
2312 	.get_min = atom_get_min_pstate,
2313 	.get_turbo = atom_get_turbo_pstate,
2314 	.get_val = atom_get_val,
2315 	.get_scaling = silvermont_get_scaling,
2316 	.get_vid = atom_get_vid,
2317 };
2318 
2319 static const struct pstate_funcs airmont_funcs = {
2320 	.get_max = atom_get_max_pstate,
2321 	.get_max_physical = atom_get_max_pstate,
2322 	.get_min = atom_get_min_pstate,
2323 	.get_turbo = atom_get_turbo_pstate,
2324 	.get_val = atom_get_val,
2325 	.get_scaling = airmont_get_scaling,
2326 	.get_vid = atom_get_vid,
2327 };
2328 
2329 static const struct pstate_funcs knl_funcs = {
2330 	.get_max = core_get_max_pstate,
2331 	.get_max_physical = core_get_max_pstate_physical,
2332 	.get_min = core_get_min_pstate,
2333 	.get_turbo = knl_get_turbo_pstate,
2334 	.get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
2335 	.get_scaling = core_get_scaling,
2336 	.get_val = core_get_val,
2337 };
2338 
2339 #define X86_MATCH(model, policy)					 \
2340 	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
2341 					   X86_FEATURE_APERFMPERF, &policy)
2342 
2343 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
2344 	X86_MATCH(SANDYBRIDGE,		core_funcs),
2345 	X86_MATCH(SANDYBRIDGE_X,	core_funcs),
2346 	X86_MATCH(ATOM_SILVERMONT,	silvermont_funcs),
2347 	X86_MATCH(IVYBRIDGE,		core_funcs),
2348 	X86_MATCH(HASWELL,		core_funcs),
2349 	X86_MATCH(BROADWELL,		core_funcs),
2350 	X86_MATCH(IVYBRIDGE_X,		core_funcs),
2351 	X86_MATCH(HASWELL_X,		core_funcs),
2352 	X86_MATCH(HASWELL_L,		core_funcs),
2353 	X86_MATCH(HASWELL_G,		core_funcs),
2354 	X86_MATCH(BROADWELL_G,		core_funcs),
2355 	X86_MATCH(ATOM_AIRMONT,		airmont_funcs),
2356 	X86_MATCH(SKYLAKE_L,		core_funcs),
2357 	X86_MATCH(BROADWELL_X,		core_funcs),
2358 	X86_MATCH(SKYLAKE,		core_funcs),
2359 	X86_MATCH(BROADWELL_D,		core_funcs),
2360 	X86_MATCH(XEON_PHI_KNL,		knl_funcs),
2361 	X86_MATCH(XEON_PHI_KNM,		knl_funcs),
2362 	X86_MATCH(ATOM_GOLDMONT,	core_funcs),
2363 	X86_MATCH(ATOM_GOLDMONT_PLUS,	core_funcs),
2364 	X86_MATCH(SKYLAKE_X,		core_funcs),
2365 	X86_MATCH(COMETLAKE,		core_funcs),
2366 	X86_MATCH(ICELAKE_X,		core_funcs),
2367 	X86_MATCH(TIGERLAKE,		core_funcs),
2368 	X86_MATCH(SAPPHIRERAPIDS_X,	core_funcs),
2369 	{}
2370 };
2371 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
2372 
2373 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
2374 	X86_MATCH(BROADWELL_D,		core_funcs),
2375 	X86_MATCH(BROADWELL_X,		core_funcs),
2376 	X86_MATCH(SKYLAKE_X,		core_funcs),
2377 	X86_MATCH(ICELAKE_X,		core_funcs),
2378 	X86_MATCH(SAPPHIRERAPIDS_X,	core_funcs),
2379 	{}
2380 };
2381 
2382 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
2383 	X86_MATCH(KABYLAKE,		core_funcs),
2384 	{}
2385 };
2386 
2387 static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = {
2388 	X86_MATCH(SKYLAKE_X,		core_funcs),
2389 	X86_MATCH(SKYLAKE,		core_funcs),
2390 	{}
2391 };
2392 
2393 static int intel_pstate_init_cpu(unsigned int cpunum)
2394 {
2395 	struct cpudata *cpu;
2396 
2397 	cpu = all_cpu_data[cpunum];
2398 
2399 	if (!cpu) {
2400 		cpu = kzalloc(sizeof(*cpu), GFP_KERNEL);
2401 		if (!cpu)
2402 			return -ENOMEM;
2403 
2404 		WRITE_ONCE(all_cpu_data[cpunum], cpu);
2405 
2406 		cpu->cpu = cpunum;
2407 
2408 		cpu->epp_default = -EINVAL;
2409 
2410 		if (hwp_active) {
2411 			const struct x86_cpu_id *id;
2412 
2413 			intel_pstate_hwp_enable(cpu);
2414 
2415 			id = x86_match_cpu(intel_pstate_hwp_boost_ids);
2416 			if (id && intel_pstate_acpi_pm_profile_server())
2417 				hwp_boost = true;
2418 		}
2419 	} else if (hwp_active) {
2420 		/*
2421 		 * Re-enable HWP in case this happens after a resume from ACPI
		 * S3 if the CPU was offline during the whole system
		 * suspend/resume cycle.
2424 		 */
2425 		intel_pstate_hwp_reenable(cpu);
2426 	}
2427 
2428 	cpu->epp_powersave = -EINVAL;
2429 	cpu->epp_policy = 0;
2430 
2431 	intel_pstate_get_cpu_pstates(cpu);
2432 
2433 	pr_debug("controlling: cpu %d\n", cpunum);
2434 
2435 	return 0;
2436 }
2437 
2438 static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
2439 {
2440 	struct cpudata *cpu = all_cpu_data[cpu_num];
2441 
2442 	if (hwp_active && !hwp_boost)
2443 		return;
2444 
2445 	if (cpu->update_util_set)
2446 		return;
2447 
2448 	/* Prevent intel_pstate_update_util() from using stale data. */
2449 	cpu->sample.time = 0;
2450 	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
2451 				     (hwp_active ?
2452 				      intel_pstate_update_util_hwp :
2453 				      intel_pstate_update_util));
2454 	cpu->update_util_set = true;
2455 }
2456 
2457 static void intel_pstate_clear_update_util_hook(unsigned int cpu)
2458 {
2459 	struct cpudata *cpu_data = all_cpu_data[cpu];
2460 
2461 	if (!cpu_data->update_util_set)
2462 		return;
2463 
2464 	cpufreq_remove_update_util_hook(cpu);
2465 	cpu_data->update_util_set = false;
2466 	synchronize_rcu();
2467 }
2468 
2469 static int intel_pstate_get_max_freq(struct cpudata *cpu)
2470 {
2471 	return global.turbo_disabled || global.no_turbo ?
2472 			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
2473 }
2474 
2475 static void intel_pstate_update_perf_limits(struct cpudata *cpu,
2476 					    unsigned int policy_min,
2477 					    unsigned int policy_max)
2478 {
2479 	int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
2480 	int32_t max_policy_perf, min_policy_perf;
2481 
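	/*
	 * Translate the policy limits from kHz into P-state ratio units. For
	 * example, with a perf_ctl_scaling of 100000 a 3600000 kHz limit maps
	 * to a ratio of 36.
	 */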
2482 	max_policy_perf = policy_max / perf_ctl_scaling;
2483 	if (policy_max == policy_min) {
2484 		min_policy_perf = max_policy_perf;
2485 	} else {
2486 		min_policy_perf = policy_min / perf_ctl_scaling;
2487 		min_policy_perf = clamp_t(int32_t, min_policy_perf,
2488 					  0, max_policy_perf);
2489 	}
2490 
2491 	/*
2492 	 * HWP needs some special consideration, because HWP_REQUEST uses
2493 	 * abstract values to represent performance rather than pure ratios.
2494 	 */
2495 	if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) {
2496 		int scaling = cpu->pstate.scaling;
2497 		int freq;
2498 
2499 		freq = max_policy_perf * perf_ctl_scaling;
2500 		max_policy_perf = DIV_ROUND_UP(freq, scaling);
2501 		freq = min_policy_perf * perf_ctl_scaling;
2502 		min_policy_perf = DIV_ROUND_UP(freq, scaling);
2503 	}
2504 
2505 	pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
2506 		 cpu->cpu, min_policy_perf, max_policy_perf);
2507 
2508 	/* Normalize user input to [min_perf, max_perf] */
2509 	if (per_cpu_limits) {
2510 		cpu->min_perf_ratio = min_policy_perf;
2511 		cpu->max_perf_ratio = max_policy_perf;
2512 	} else {
2513 		int turbo_max = cpu->pstate.turbo_pstate;
2514 		int32_t global_min, global_max;
2515 
2516 		/* Global limits are in percent of the maximum turbo P-state. */
2517 		global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
2518 		global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
2519 		global_min = clamp_t(int32_t, global_min, 0, global_max);
2520 
2521 		pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu,
2522 			 global_min, global_max);
2523 
2524 		cpu->min_perf_ratio = max(min_policy_perf, global_min);
2525 		cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
2526 		cpu->max_perf_ratio = min(max_policy_perf, global_max);
2527 		cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);
2528 
2529 		/* Make sure min_perf <= max_perf */
2530 		cpu->min_perf_ratio = min(cpu->min_perf_ratio,
2531 					  cpu->max_perf_ratio);
2532 
2533 	}
2534 	pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu,
2535 		 cpu->max_perf_ratio,
2536 		 cpu->min_perf_ratio);
2537 }
2538 
2539 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
2540 {
2541 	struct cpudata *cpu;
2542 
2543 	if (!policy->cpuinfo.max_freq)
2544 		return -ENODEV;
2545 
2546 	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
2547 		 policy->cpuinfo.max_freq, policy->max);
2548 
2549 	cpu = all_cpu_data[policy->cpu];
2550 	cpu->policy = policy->policy;
2551 
2552 	mutex_lock(&intel_pstate_limits_lock);
2553 
2554 	intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
2555 
2556 	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
2557 		/*
2558 		 * NOHZ_FULL CPUs need this as the governor callback may not
2559 		 * be invoked on them.
2560 		 */
2561 		intel_pstate_clear_update_util_hook(policy->cpu);
2562 		intel_pstate_max_within_limits(cpu);
2563 	} else {
2564 		intel_pstate_set_update_util_hook(policy->cpu);
2565 	}
2566 
2567 	if (hwp_active) {
2568 		/*
		 * If hwp_boost was active before and has been turned off
		 * dynamically, the update util hook needs to be cleared.
2572 		 */
2573 		if (!hwp_boost)
2574 			intel_pstate_clear_update_util_hook(policy->cpu);
2575 		intel_pstate_hwp_set(policy->cpu);
2576 	}
2577 
2578 	mutex_unlock(&intel_pstate_limits_lock);
2579 
2580 	return 0;
2581 }
2582 
2583 static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
2584 					   struct cpufreq_policy_data *policy)
2585 {
2586 	if (!hwp_active &&
2587 	    cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
2588 	    policy->max < policy->cpuinfo.max_freq &&
2589 	    policy->max > cpu->pstate.max_freq) {
2590 		pr_debug("policy->max > max non turbo frequency\n");
2591 		policy->max = policy->cpuinfo.max_freq;
2592 	}
2593 }
2594 
2595 static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
2596 					   struct cpufreq_policy_data *policy)
2597 {
2598 	int max_freq;
2599 
2600 	update_turbo_state();
2601 	if (hwp_active) {
2602 		intel_pstate_get_hwp_cap(cpu);
2603 		max_freq = global.no_turbo || global.turbo_disabled ?
2604 				cpu->pstate.max_freq : cpu->pstate.turbo_freq;
2605 	} else {
2606 		max_freq = intel_pstate_get_max_freq(cpu);
2607 	}
2608 	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq);
2609 
2610 	intel_pstate_adjust_policy_max(cpu, policy);
2611 }
2612 
2613 static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
2614 {
2615 	intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy);
2616 
2617 	return 0;
2618 }
2619 
2620 static int intel_cpufreq_cpu_offline(struct cpufreq_policy *policy)
2621 {
2622 	struct cpudata *cpu = all_cpu_data[policy->cpu];
2623 
2624 	pr_debug("CPU %d going offline\n", cpu->cpu);
2625 
2626 	if (cpu->suspended)
2627 		return 0;
2628 
2629 	/*
2630 	 * If the CPU is an SMT thread and it goes offline with the performance
2631 	 * settings different from the minimum, it will prevent its sibling
2632 	 * from getting to lower performance levels, so force the minimum
2633 	 * performance on CPU offline to prevent that from happening.
2634 	 */
2635 	if (hwp_active)
2636 		intel_pstate_hwp_offline(cpu);
2637 	else
2638 		intel_pstate_set_min_pstate(cpu);
2639 
2640 	intel_pstate_exit_perf_limits(policy);
2641 
2642 	return 0;
2643 }
2644 
2645 static int intel_pstate_cpu_online(struct cpufreq_policy *policy)
2646 {
2647 	struct cpudata *cpu = all_cpu_data[policy->cpu];
2648 
2649 	pr_debug("CPU %d going online\n", cpu->cpu);
2650 
2651 	intel_pstate_init_acpi_perf_limits(policy);
2652 
2653 	if (hwp_active) {
2654 		/*
2655 		 * Re-enable HWP and clear the "suspended" flag to let "resume"
2656 		 * know that it need not do that.
2657 		 */
2658 		intel_pstate_hwp_reenable(cpu);
2659 		cpu->suspended = false;
2660 	}
2661 
2662 	return 0;
2663 }
2664 
2665 static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
2666 {
2667 	intel_pstate_clear_update_util_hook(policy->cpu);
2668 
2669 	return intel_cpufreq_cpu_offline(policy);
2670 }
2671 
2672 static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
2673 {
2674 	pr_debug("CPU %d exiting\n", policy->cpu);
2675 
2676 	policy->fast_switch_possible = false;
2677 
2678 	return 0;
2679 }
2680 
2681 static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
2682 {
2683 	struct cpudata *cpu;
2684 	int rc;
2685 
2686 	rc = intel_pstate_init_cpu(policy->cpu);
2687 	if (rc)
2688 		return rc;
2689 
2690 	cpu = all_cpu_data[policy->cpu];
2691 
2692 	cpu->max_perf_ratio = 0xFF;
2693 	cpu->min_perf_ratio = 0;
2694 
2695 	/* cpuinfo and default policy values */
2696 	policy->cpuinfo.min_freq = cpu->pstate.min_freq;
2697 	update_turbo_state();
2698 	global.turbo_disabled_mf = global.turbo_disabled;
2699 	policy->cpuinfo.max_freq = global.turbo_disabled ?
2700 			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
2701 
2702 	policy->min = policy->cpuinfo.min_freq;
2703 	policy->max = policy->cpuinfo.max_freq;
2704 
2705 	intel_pstate_init_acpi_perf_limits(policy);
2706 
2707 	policy->fast_switch_possible = true;
2708 
2709 	return 0;
2710 }
2711 
2712 static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
2713 {
2714 	int ret = __intel_pstate_cpu_init(policy);
2715 
2716 	if (ret)
2717 		return ret;
2718 
2719 	/*
2720 	 * Set the policy to powersave to provide a valid fallback value in case
2721 	 * the default cpufreq governor is neither powersave nor performance.
2722 	 */
2723 	policy->policy = CPUFREQ_POLICY_POWERSAVE;
2724 
2725 	if (hwp_active) {
2726 		struct cpudata *cpu = all_cpu_data[policy->cpu];
2727 
2728 		cpu->epp_cached = intel_pstate_get_epp(cpu, 0);
2729 	}
2730 
2731 	return 0;
2732 }
2733 
2734 static struct cpufreq_driver intel_pstate = {
2735 	.flags		= CPUFREQ_CONST_LOOPS,
2736 	.verify		= intel_pstate_verify_policy,
2737 	.setpolicy	= intel_pstate_set_policy,
2738 	.suspend	= intel_pstate_suspend,
2739 	.resume		= intel_pstate_resume,
2740 	.init		= intel_pstate_cpu_init,
2741 	.exit		= intel_pstate_cpu_exit,
2742 	.offline	= intel_pstate_cpu_offline,
2743 	.online		= intel_pstate_cpu_online,
2744 	.update_limits	= intel_pstate_update_limits,
2745 	.name		= "intel_pstate",
2746 };
2747 
2748 static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
2749 {
2750 	struct cpudata *cpu = all_cpu_data[policy->cpu];
2751 
2752 	intel_pstate_verify_cpu_policy(cpu, policy);
2753 	intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
2754 
2755 	return 0;
2756 }
2757 
2758 /* Use of trace in passive mode:
2759  *
2760  * In passive mode the trace core_busy field (also known as the
 * performance field, and labelled as such on the graphs; also known as
2762  * core_avg_perf) is not needed and so is re-assigned to indicate if the
2763  * driver call was via the normal or fast switch path. Various graphs
2764  * output from the intel_pstate_tracer.py utility that include core_busy
2765  * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%,
2766  * so we use 10 to indicate the normal path through the driver, and
2767  * 90 to indicate the fast switch path through the driver.
2768  * The scaled_busy field is not used, and is set to 0.
2769  */
2770 
2771 #define	INTEL_PSTATE_TRACE_TARGET 10
2772 #define	INTEL_PSTATE_TRACE_FAST_SWITCH 90
2773 
2774 static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate)
2775 {
2776 	struct sample *sample;
2777 
2778 	if (!trace_pstate_sample_enabled())
2779 		return;
2780 
2781 	if (!intel_pstate_sample(cpu, ktime_get()))
2782 		return;
2783 
2784 	sample = &cpu->sample;
2785 	trace_pstate_sample(trace_type,
2786 		0,
2787 		old_pstate,
2788 		cpu->pstate.current_pstate,
2789 		sample->mperf,
2790 		sample->aperf,
2791 		sample->tsc,
2792 		get_avg_frequency(cpu),
2793 		fp_toint(cpu->iowait_boost * 100));
2794 }
2795 
2796 static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max,
2797 				     u32 desired, bool fast_switch)
2798 {
2799 	u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
2800 
2801 	value &= ~HWP_MIN_PERF(~0L);
2802 	value |= HWP_MIN_PERF(min);
2803 
2804 	value &= ~HWP_MAX_PERF(~0L);
2805 	value |= HWP_MAX_PERF(max);
2806 
2807 	value &= ~HWP_DESIRED_PERF(~0L);
2808 	value |= HWP_DESIRED_PERF(desired);
2809 
2810 	if (value == prev)
2811 		return;
2812 
2813 	WRITE_ONCE(cpu->hwp_req_cached, value);
2814 	if (fast_switch)
2815 		wrmsrl(MSR_HWP_REQUEST, value);
2816 	else
2817 		wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
2818 }
2819 
2820 static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu,
2821 					  u32 target_pstate, bool fast_switch)
2822 {
2823 	if (fast_switch)
2824 		wrmsrl(MSR_IA32_PERF_CTL,
2825 		       pstate_funcs.get_val(cpu, target_pstate));
2826 	else
2827 		wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
2828 			      pstate_funcs.get_val(cpu, target_pstate));
2829 }
2830 
2831 static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
2832 				       int target_pstate, bool fast_switch)
2833 {
2834 	struct cpudata *cpu = all_cpu_data[policy->cpu];
2835 	int old_pstate = cpu->pstate.current_pstate;
2836 
2837 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
2838 	if (hwp_active) {
2839 		int max_pstate = policy->strict_target ?
2840 					target_pstate : cpu->max_perf_ratio;
2841 
2842 		intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0,
2843 					 fast_switch);
2844 	} else if (target_pstate != old_pstate) {
2845 		intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch);
2846 	}
2847 
2848 	cpu->pstate.current_pstate = target_pstate;
2849 
2850 	intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH :
2851 			    INTEL_PSTATE_TRACE_TARGET, old_pstate);
2852 
2853 	return target_pstate;
2854 }
2855 
2856 static int intel_cpufreq_target(struct cpufreq_policy *policy,
2857 				unsigned int target_freq,
2858 				unsigned int relation)
2859 {
2860 	struct cpudata *cpu = all_cpu_data[policy->cpu];
2861 	struct cpufreq_freqs freqs;
2862 	int target_pstate;
2863 
2864 	update_turbo_state();
2865 
2866 	freqs.old = policy->cur;
2867 	freqs.new = target_freq;
2868 
2869 	cpufreq_freq_transition_begin(policy, &freqs);
2870 
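	/*
	 * Map the requested frequency to a P-state according to the cpufreq
	 * relation: round up for CPUFREQ_RELATION_L, round down for
	 * CPUFREQ_RELATION_H, and round to the closest P-state otherwise.
	 */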
2871 	switch (relation) {
2872 	case CPUFREQ_RELATION_L:
2873 		target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
2874 		break;
2875 	case CPUFREQ_RELATION_H:
2876 		target_pstate = freqs.new / cpu->pstate.scaling;
2877 		break;
2878 	default:
2879 		target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
2880 		break;
2881 	}
2882 
2883 	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
2884 
2885 	freqs.new = target_pstate * cpu->pstate.scaling;
2886 
2887 	cpufreq_freq_transition_end(policy, &freqs, false);
2888 
2889 	return 0;
2890 }
2891 
2892 static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
2893 					      unsigned int target_freq)
2894 {
2895 	struct cpudata *cpu = all_cpu_data[policy->cpu];
2896 	int target_pstate;
2897 
2898 	update_turbo_state();
2899 
2900 	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
2901 
2902 	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
2903 
2904 	return target_pstate * cpu->pstate.scaling;
2905 }
2906 
2907 static void intel_cpufreq_adjust_perf(unsigned int cpunum,
2908 				      unsigned long min_perf,
2909 				      unsigned long target_perf,
2910 				      unsigned long capacity)
2911 {
2912 	struct cpudata *cpu = all_cpu_data[cpunum];
2913 	u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
2914 	int old_pstate = cpu->pstate.current_pstate;
2915 	int cap_pstate, min_pstate, max_pstate, target_pstate;
2916 
2917 	update_turbo_state();
2918 	cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
2919 					     HWP_HIGHEST_PERF(hwp_cap);
2920 
2921 	/* Optimization: Avoid unnecessary divisions. */
2922 
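	/*
	 * For example, with cap_pstate = 40, capacity = 1024 and target_perf
	 * = 512, the target P-state becomes DIV_ROUND_UP(40 * 512, 1024) = 20.
	 */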
2923 	target_pstate = cap_pstate;
2924 	if (target_perf < capacity)
2925 		target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity);
2926 
2927 	min_pstate = cap_pstate;
2928 	if (min_perf < capacity)
2929 		min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity);
2930 
2931 	if (min_pstate < cpu->pstate.min_pstate)
2932 		min_pstate = cpu->pstate.min_pstate;
2933 
2934 	if (min_pstate < cpu->min_perf_ratio)
2935 		min_pstate = cpu->min_perf_ratio;
2936 
2937 	max_pstate = min(cap_pstate, cpu->max_perf_ratio);
2938 	if (max_pstate < min_pstate)
2939 		max_pstate = min_pstate;
2940 
2941 	target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate);
2942 
2943 	intel_cpufreq_hwp_update(cpu, min_pstate, max_pstate, target_pstate, true);
2944 
2945 	cpu->pstate.current_pstate = target_pstate;
2946 	intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
2947 }
2948 
2949 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
2950 {
2951 	struct freq_qos_request *req;
2952 	struct cpudata *cpu;
2953 	struct device *dev;
2954 	int ret, freq;
2955 
2956 	dev = get_cpu_device(policy->cpu);
2957 	if (!dev)
2958 		return -ENODEV;
2959 
2960 	ret = __intel_pstate_cpu_init(policy);
2961 	if (ret)
2962 		return ret;
2963 
2964 	policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
2965 	/* This reflects the intel_pstate_get_cpu_pstates() setting. */
2966 	policy->cur = policy->cpuinfo.min_freq;
2967 
2968 	req = kcalloc(2, sizeof(*req), GFP_KERNEL);
2969 	if (!req) {
2970 		ret = -ENOMEM;
2971 		goto pstate_exit;
2972 	}
2973 
2974 	cpu = all_cpu_data[policy->cpu];
2975 
2976 	if (hwp_active) {
2977 		u64 value;
2978 
2979 		policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP;
2980 
2981 		intel_pstate_get_hwp_cap(cpu);
2982 
2983 		rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
2984 		WRITE_ONCE(cpu->hwp_req_cached, value);
2985 
2986 		cpu->epp_cached = intel_pstate_get_epp(cpu, value);
2987 	} else {
2988 		policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
2989 	}
2990 
2991 	freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.min_perf_pct, 100);
2992 
2993 	ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN,
2994 				   freq);
2995 	if (ret < 0) {
2996 		dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
2997 		goto free_req;
2998 	}
2999 
3000 	freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.max_perf_pct, 100);
3001 
3002 	ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX,
3003 				   freq);
3004 	if (ret < 0) {
3005 		dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
3006 		goto remove_min_req;
3007 	}
3008 
3009 	policy->driver_data = req;
3010 
3011 	return 0;
3012 
3013 remove_min_req:
3014 	freq_qos_remove_request(req);
3015 free_req:
3016 	kfree(req);
3017 pstate_exit:
3018 	intel_pstate_exit_perf_limits(policy);
3019 
3020 	return ret;
3021 }
3022 
3023 static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
3024 {
3025 	struct freq_qos_request *req;
3026 
3027 	req = policy->driver_data;
3028 
3029 	freq_qos_remove_request(req + 1);
3030 	freq_qos_remove_request(req);
3031 	kfree(req);
3032 
3033 	return intel_pstate_cpu_exit(policy);
3034 }
3035 
3036 static int intel_cpufreq_suspend(struct cpufreq_policy *policy)
3037 {
3038 	intel_pstate_suspend(policy);
3039 
3040 	if (hwp_active) {
3041 		struct cpudata *cpu = all_cpu_data[policy->cpu];
3042 		u64 value = READ_ONCE(cpu->hwp_req_cached);
3043 
3044 		/*
3045 		 * Clear the desired perf field in MSR_HWP_REQUEST in case
3046 		 * intel_cpufreq_adjust_perf() is in use and the last value
3047 		 * written by it may not be suitable.
3048 		 */
3049 		value &= ~HWP_DESIRED_PERF(~0L);
3050 		wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
3051 		WRITE_ONCE(cpu->hwp_req_cached, value);
3052 	}
3053 
3054 	return 0;
3055 }
3056 
3057 static struct cpufreq_driver intel_cpufreq = {
3058 	.flags		= CPUFREQ_CONST_LOOPS,
3059 	.verify		= intel_cpufreq_verify_policy,
3060 	.target		= intel_cpufreq_target,
3061 	.fast_switch	= intel_cpufreq_fast_switch,
3062 	.init		= intel_cpufreq_cpu_init,
3063 	.exit		= intel_cpufreq_cpu_exit,
3064 	.offline	= intel_cpufreq_cpu_offline,
3065 	.online		= intel_pstate_cpu_online,
3066 	.suspend	= intel_cpufreq_suspend,
3067 	.resume		= intel_pstate_resume,
3068 	.update_limits	= intel_pstate_update_limits,
3069 	.name		= "intel_cpufreq",
3070 };
3071 
3072 static struct cpufreq_driver *default_driver;
3073 
3074 static void intel_pstate_driver_cleanup(void)
3075 {
3076 	unsigned int cpu;
3077 
3078 	cpus_read_lock();
3079 	for_each_online_cpu(cpu) {
3080 		if (all_cpu_data[cpu]) {
3081 			if (intel_pstate_driver == &intel_pstate)
3082 				intel_pstate_clear_update_util_hook(cpu);
3083 
3084 			spin_lock(&hwp_notify_lock);
3085 			kfree(all_cpu_data[cpu]);
3086 			WRITE_ONCE(all_cpu_data[cpu], NULL);
3087 			spin_unlock(&hwp_notify_lock);
3088 		}
3089 	}
3090 	cpus_read_unlock();
3091 
3092 	intel_pstate_driver = NULL;
3093 }
3094 
3095 static int intel_pstate_register_driver(struct cpufreq_driver *driver)
3096 {
3097 	int ret;
3098 
3099 	if (driver == &intel_pstate)
3100 		intel_pstate_sysfs_expose_hwp_dynamic_boost();
3101 
3102 	memset(&global, 0, sizeof(global));
3103 	global.max_perf_pct = 100;
3104 
3105 	intel_pstate_driver = driver;
3106 	ret = cpufreq_register_driver(intel_pstate_driver);
3107 	if (ret) {
3108 		intel_pstate_driver_cleanup();
3109 		return ret;
3110 	}
3111 
3112 	global.min_perf_pct = min_perf_pct_min();
3113 
3114 	return 0;
3115 }
3116 
3117 static ssize_t intel_pstate_show_status(char *buf)
3118 {
3119 	if (!intel_pstate_driver)
3120 		return sprintf(buf, "off\n");
3121 
3122 	return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
3123 					"active" : "passive");
3124 }
3125 
3126 static int intel_pstate_update_status(const char *buf, size_t size)
3127 {
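	/*
	 * Valid values are "off", "active" and "passive"; switching to "off"
	 * is rejected while HWP is enabled.
	 */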
3128 	if (size == 3 && !strncmp(buf, "off", size)) {
3129 		if (!intel_pstate_driver)
3130 			return -EINVAL;
3131 
3132 		if (hwp_active)
3133 			return -EBUSY;
3134 
3135 		cpufreq_unregister_driver(intel_pstate_driver);
3136 		intel_pstate_driver_cleanup();
3137 		return 0;
3138 	}
3139 
3140 	if (size == 6 && !strncmp(buf, "active", size)) {
3141 		if (intel_pstate_driver) {
3142 			if (intel_pstate_driver == &intel_pstate)
3143 				return 0;
3144 
3145 			cpufreq_unregister_driver(intel_pstate_driver);
3146 		}
3147 
3148 		return intel_pstate_register_driver(&intel_pstate);
3149 	}
3150 
3151 	if (size == 7 && !strncmp(buf, "passive", size)) {
3152 		if (intel_pstate_driver) {
3153 			if (intel_pstate_driver == &intel_cpufreq)
3154 				return 0;
3155 
3156 			cpufreq_unregister_driver(intel_pstate_driver);
3157 			intel_pstate_sysfs_hide_hwp_dynamic_boost();
3158 		}
3159 
3160 		return intel_pstate_register_driver(&intel_cpufreq);
3161 	}
3162 
3163 	return -EINVAL;
3164 }
3165 
3166 static int no_load __initdata;
3167 static int no_hwp __initdata;
3168 static int hwp_only __initdata;
3169 static unsigned int force_load __initdata;
3170 
3171 static int __init intel_pstate_msrs_not_valid(void)
3172 {
3173 	if (!pstate_funcs.get_max(0) ||
3174 	    !pstate_funcs.get_min(0) ||
3175 	    !pstate_funcs.get_turbo(0))
3176 		return -ENODEV;
3177 
3178 	return 0;
3179 }
3180 
3181 static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
3182 {
3183 	pstate_funcs.get_max   = funcs->get_max;
3184 	pstate_funcs.get_max_physical = funcs->get_max_physical;
3185 	pstate_funcs.get_min   = funcs->get_min;
3186 	pstate_funcs.get_turbo = funcs->get_turbo;
3187 	pstate_funcs.get_scaling = funcs->get_scaling;
3188 	pstate_funcs.get_val   = funcs->get_val;
3189 	pstate_funcs.get_vid   = funcs->get_vid;
3190 	pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
3191 }
3192 
3193 #ifdef CONFIG_ACPI
3194 
3195 static bool __init intel_pstate_no_acpi_pss(void)
3196 {
3197 	int i;
3198 
3199 	for_each_possible_cpu(i) {
3200 		acpi_status status;
3201 		union acpi_object *pss;
3202 		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
3203 		struct acpi_processor *pr = per_cpu(processors, i);
3204 
3205 		if (!pr)
3206 			continue;
3207 
3208 		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
3209 		if (ACPI_FAILURE(status))
3210 			continue;
3211 
3212 		pss = buffer.pointer;
3213 		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
3214 			kfree(pss);
3215 			return false;
3216 		}
3217 
3218 		kfree(pss);
3219 	}
3220 
3221 	pr_debug("ACPI _PSS not found\n");
3222 	return true;
3223 }
3224 
3225 static bool __init intel_pstate_no_acpi_pcch(void)
3226 {
3227 	acpi_status status;
3228 	acpi_handle handle;
3229 
3230 	status = acpi_get_handle(NULL, "\\_SB", &handle);
3231 	if (ACPI_FAILURE(status))
3232 		goto not_found;
3233 
3234 	if (acpi_has_method(handle, "PCCH"))
3235 		return false;
3236 
3237 not_found:
3238 	pr_debug("ACPI PCCH not found\n");
3239 	return true;
3240 }
3241 
3242 static bool __init intel_pstate_has_acpi_ppc(void)
3243 {
3244 	int i;
3245 
3246 	for_each_possible_cpu(i) {
3247 		struct acpi_processor *pr = per_cpu(processors, i);
3248 
3249 		if (!pr)
3250 			continue;
3251 		if (acpi_has_method(pr->handle, "_PPC"))
3252 			return true;
3253 	}
3254 	pr_debug("ACPI _PPC not found\n");
3255 	return false;
3256 }
3257 
3258 enum {
3259 	PSS,
3260 	PPC,
3261 };
3262 
3263 /* Hardware vendor-specific info that has its own power management modes */
3264 static struct acpi_platform_list plat_info[] __initdata = {
3265 	{"HP    ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS},
3266 	{"ORACLE", "X4-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
3267 	{"ORACLE", "X4-2L   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
3268 	{"ORACLE", "X4-2B   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
3269 	{"ORACLE", "X3-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
3270 	{"ORACLE", "X3-2L   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
3271 	{"ORACLE", "X3-2B   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
3272 	{"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
3273 	{"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
3274 	{"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
3275 	{"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
3276 	{"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
3277 	{"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
3278 	{"ORACLE", "X6-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
3279 	{"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
3280 	{ } /* End */
3281 };
3282 
3283 #define BITMASK_OOB	(BIT(8) | BIT(18))
3284 
3285 static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
3286 {
3287 	const struct x86_cpu_id *id;
3288 	u64 misc_pwr;
3289 	int idx;
3290 
3291 	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
3292 	if (id) {
3293 		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
3294 		if (misc_pwr & BITMASK_OOB) {
3295 			pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n");
3296 			pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n");
3297 			return true;
3298 		}
3299 	}
3300 
3301 	idx = acpi_match_platform_list(plat_info);
3302 	if (idx < 0)
3303 		return false;
3304 
3305 	switch (plat_info[idx].data) {
3306 	case PSS:
3307 		if (!intel_pstate_no_acpi_pss())
3308 			return false;
3309 
3310 		return intel_pstate_no_acpi_pcch();
3311 	case PPC:
3312 		return intel_pstate_has_acpi_ppc() && !force_load;
3313 	}
3314 
3315 	return false;
3316 }
3317 
3318 static void intel_pstate_request_control_from_smm(void)
3319 {
3320 	/*
3321 	 * It may be unsafe to request P-states control from SMM if _PPC support
3322 	 * has not been enabled.
3323 	 */
3324 	if (acpi_ppc)
3325 		acpi_processor_pstate_control();
3326 }
3327 #else /* CONFIG_ACPI not enabled */
3328 static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
3329 static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
3330 static inline void intel_pstate_request_control_from_smm(void) {}
3331 #endif /* CONFIG_ACPI */
3332 
3333 #define INTEL_PSTATE_HWP_BROADWELL	0x01
3334 
3335 #define X86_MATCH_HWP(model, hwp_mode)					\
3336 	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
3337 					   X86_FEATURE_HWP, hwp_mode)
3338 
3339 static const struct x86_cpu_id hwp_support_ids[] __initconst = {
3340 	X86_MATCH_HWP(BROADWELL_X,	INTEL_PSTATE_HWP_BROADWELL),
3341 	X86_MATCH_HWP(BROADWELL_D,	INTEL_PSTATE_HWP_BROADWELL),
3342 	X86_MATCH_HWP(ANY,		0),
3343 	{}
3344 };
3345 
3346 static bool intel_pstate_hwp_is_enabled(void)
3347 {
3348 	u64 value;
3349 
3350 	rdmsrl(MSR_PM_ENABLE, value);
3351 	return !!(value & 0x1);
3352 }
3353 
3354 static const struct x86_cpu_id intel_epp_balance_perf[] = {
3355 	/*
	 * Set the EPP value to 102, which is the maximum suggested EPP that
	 * can still result in one-core turbo frequency for Alder Lake
	 * mobile CPUs.
3359 	 */
3360 	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 102),
3361 	{}
3362 };
3363 
3364 static int __init intel_pstate_init(void)
3365 {
3366 	static struct cpudata **_all_cpu_data;
3367 	const struct x86_cpu_id *id;
3368 	int rc;
3369 
3370 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
3371 		return -ENODEV;
3372 
3373 	id = x86_match_cpu(hwp_support_ids);
3374 	if (id) {
3375 		hwp_forced = intel_pstate_hwp_is_enabled();
3376 
3377 		if (hwp_forced)
3378 			pr_info("HWP enabled by BIOS\n");
3379 		else if (no_load)
3380 			return -ENODEV;
3381 
3382 		copy_cpu_funcs(&core_funcs);
3383 		/*
3384 		 * Avoid enabling HWP for processors without EPP support,
3385 		 * because that means incomplete HWP implementation which is a
3386 		 * corner case and supporting it is generally problematic.
3387 		 *
3388 		 * If HWP is enabled already, though, there is no choice but to
3389 		 * deal with it.
3390 		 */
3391 		if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
3392 			WRITE_ONCE(hwp_active, 1);
3393 			hwp_mode_bdw = id->driver_data;
3394 			intel_pstate.attr = hwp_cpufreq_attrs;
3395 			intel_cpufreq.attr = hwp_cpufreq_attrs;
3396 			intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
3397 			intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf;
3398 			if (!default_driver)
3399 				default_driver = &intel_pstate;
3400 
3401 			if (boot_cpu_has(X86_FEATURE_HYBRID_CPU))
3402 				pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling;
3403 
3404 			goto hwp_cpu_matched;
3405 		}
3406 		pr_info("HWP not enabled\n");
3407 	} else {
3408 		if (no_load)
3409 			return -ENODEV;
3410 
3411 		id = x86_match_cpu(intel_pstate_cpu_ids);
3412 		if (!id) {
3413 			pr_info("CPU model not supported\n");
3414 			return -ENODEV;
3415 		}
3416 
3417 		copy_cpu_funcs((struct pstate_funcs *)id->driver_data);
3418 	}
3419 
3420 	if (intel_pstate_msrs_not_valid()) {
3421 		pr_info("Invalid MSRs\n");
3422 		return -ENODEV;
3423 	}
	/* Without HWP, start in the passive mode. */
3425 	if (!default_driver)
3426 		default_driver = &intel_cpufreq;
3427 
3428 hwp_cpu_matched:
3429 	/*
3430 	 * The Intel pstate driver will be ignored if the platform
3431 	 * firmware has its own power management modes.
3432 	 */
3433 	if (intel_pstate_platform_pwr_mgmt_exists()) {
3434 		pr_info("P-states controlled by the platform\n");
3435 		return -ENODEV;
3436 	}
3437 
3438 	if (!hwp_active && hwp_only)
3439 		return -ENOTSUPP;
3440 
3441 	pr_info("Intel P-state driver initializing\n");
3442 
3443 	_all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus()));
3444 	if (!_all_cpu_data)
3445 		return -ENOMEM;
3446 
3447 	WRITE_ONCE(all_cpu_data, _all_cpu_data);
3448 
3449 	intel_pstate_request_control_from_smm();
3450 
3451 	intel_pstate_sysfs_expose_params();
3452 
3453 	if (hwp_active) {
3454 		const struct x86_cpu_id *id = x86_match_cpu(intel_epp_balance_perf);
3455 
3456 		if (id)
3457 			epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = id->driver_data;
3458 	}
3459 
3460 	mutex_lock(&intel_pstate_driver_lock);
3461 	rc = intel_pstate_register_driver(default_driver);
3462 	mutex_unlock(&intel_pstate_driver_lock);
3463 	if (rc) {
3464 		intel_pstate_sysfs_remove();
3465 		return rc;
3466 	}
3467 
3468 	if (hwp_active) {
3469 		const struct x86_cpu_id *id;
3470 
3471 		id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
3472 		if (id) {
3473 			set_power_ctl_ee_state(false);
3474 			pr_info("Disabling energy efficiency optimization\n");
3475 		}
3476 
3477 		pr_info("HWP enabled\n");
3478 	} else if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
3479 		pr_warn("Problematic setup: Hybrid processor with disabled HWP\n");
3480 	}
3481 
3482 	return 0;
3483 }
3484 device_initcall(intel_pstate_init);
3485 
3486 static int __init intel_pstate_setup(char *str)
3487 {
3488 	if (!str)
3489 		return -EINVAL;
3490 
3491 	if (!strcmp(str, "disable"))
3492 		no_load = 1;
3493 	else if (!strcmp(str, "active"))
3494 		default_driver = &intel_pstate;
3495 	else if (!strcmp(str, "passive"))
3496 		default_driver = &intel_cpufreq;
3497 
3498 	if (!strcmp(str, "no_hwp"))
3499 		no_hwp = 1;
3500 
3501 	if (!strcmp(str, "force"))
3502 		force_load = 1;
3503 	if (!strcmp(str, "hwp_only"))
3504 		hwp_only = 1;
3505 	if (!strcmp(str, "per_cpu_perf_limits"))
3506 		per_cpu_limits = true;
3507 
3508 #ifdef CONFIG_ACPI
3509 	if (!strcmp(str, "support_acpi_ppc"))
3510 		acpi_ppc = true;
3511 #endif
3512 
3513 	return 0;
3514 }
3515 early_param("intel_pstate", intel_pstate_setup);
3516 
3517 MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
3519 MODULE_LICENSE("GPL");
3520