// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * amd-pstate.c - AMD Processor P-state Frequency Driver
 *
 * Copyright (C) 2021 Advanced Micro Devices, Inc. All Rights Reserved.
 *
 * Author: Huang Rui <ray.huang@amd.com>
 *
 * AMD P-State introduces a new CPU performance scaling design for AMD
 * processors using the ACPI Collaborative Performance and Power Control (CPPC)
 * feature, which works with the AMD SMU firmware to provide a finer grained
 * frequency control range. It replaces the legacy ACPI P-States control and
 * provides a flexible, low-latency interface for the Linux kernel to directly
 * communicate performance hints to the hardware.
 *
 * AMD P-State is supported on recent AMD Zen based CPU series, including some
 * Zen2 and Zen3 processors. _CPC must be present in the ACPI tables of an AMD
 * P-State supported system. There are two types of hardware implementations
 * for AMD P-State: 1) Full MSR Solution and 2) Shared Memory Solution.
 * The X86_FEATURE_CPPC CPU feature flag is used to distinguish the two.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/static_call.h>
#include <linux/amd-pstate.h>

#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
#include "amd-pstate-trace.h"

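/*
 * Per the cpufreq core's conventions, transition_latency is reported in
 * nanoseconds and transition_delay_us in microseconds, so the values below
 * advertise a 20 us worst-case transition latency and ask the governor to
 * wait at least 1 ms between frequency updates.
 */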
#define AMD_PSTATE_TRANSITION_LATENCY	20000
#define AMD_PSTATE_TRANSITION_DELAY	1000

/*
 * TODO: We need more time to fine tune processors with the shared memory
 * solution together with the community.
 *
 * There are some performance drops on CPU benchmarks reported by SUSE. We are
 * working with them to fine tune the shared memory solution, so it is disabled
 * by default; those processors fall back to acpi-cpufreq, and a module
 * parameter is provided to enable it manually for debugging.
 */
static struct cpufreq_driver *current_pstate_driver;
static struct cpufreq_driver amd_pstate_driver;
static struct cpufreq_driver amd_pstate_epp_driver;
static int cppc_state = AMD_PSTATE_UNDEFINED;
static bool cppc_enabled;

/*
 * AMD Energy Preference Performance (EPP)
 * The EPP is used in the CCLK DPM controller to drive
 * the frequency that a core is going to operate at during
 * short periods of activity. EPP values will be utilized for
 * different OS profiles (balanced, performance, power savings).
 * The display strings corresponding to each EPP index are held
 * in energy_perf_strings[]:
 *	index		String
 *-------------------------------------
 *	0		default
 *	1		performance
 *	2		balance_performance
 *	3		balance_power
 *	4		power
 */
enum energy_perf_value_index {
	EPP_INDEX_DEFAULT = 0,
	EPP_INDEX_PERFORMANCE,
	EPP_INDEX_BALANCE_PERFORMANCE,
	EPP_INDEX_BALANCE_POWERSAVE,
	EPP_INDEX_POWERSAVE,
};

static const char * const energy_perf_strings[] = {
	[EPP_INDEX_DEFAULT] = "default",
	[EPP_INDEX_PERFORMANCE] = "performance",
	[EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
	[EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
	[EPP_INDEX_POWERSAVE] = "power",
	NULL
};

static unsigned int epp_values[] = {
	[EPP_INDEX_DEFAULT] = 0,
	[EPP_INDEX_PERFORMANCE] = AMD_CPPC_EPP_PERFORMANCE,
	[EPP_INDEX_BALANCE_PERFORMANCE] = AMD_CPPC_EPP_BALANCE_PERFORMANCE,
	[EPP_INDEX_BALANCE_POWERSAVE] = AMD_CPPC_EPP_BALANCE_POWERSAVE,
	[EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE,
};

typedef int (*cppc_mode_transition_fn)(int);

static inline int get_mode_idx_from_str(const char *str, size_t size)
{
	int i;

	for (i = 0; i < AMD_PSTATE_MAX; i++) {
		if (!strncmp(str, amd_pstate_mode_string[i], size))
			return i;
	}
	return -EINVAL;
}

static DEFINE_MUTEX(amd_pstate_limits_lock);
static DEFINE_MUTEX(amd_pstate_driver_lock);
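
/*
 * On the full MSR solution, MSR_AMD_CPPC_REQ packs the CPPC controls into a
 * single 64-bit value; per the AMD_CPPC_* accessors used below, max_perf
 * lives in bits [7:0], min_perf in [15:8], desired_perf in [23:16] and the
 * energy performance preference in [31:24], which is why the EPP helpers
 * shift by 24 and mask with GENMASK_ULL(31, 24).
 */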
static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
{
	u64 epp;
	int ret;

	if (boot_cpu_has(X86_FEATURE_CPPC)) {
		if (!cppc_req_cached) {
			epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
					    &cppc_req_cached);
			if (epp)
				return epp;
		}
		epp = (cppc_req_cached >> 24) & 0xFF;
	} else {
		ret = cppc_get_epp_perf(cpudata->cpu, &epp);
		if (ret < 0) {
			pr_debug("Could not retrieve energy perf value (%d)\n", ret);
			return -EIO;
		}
	}

	return (s16)(epp & 0xff);
}

static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
{
	s16 epp;
	int index = -EINVAL;

	epp = amd_pstate_get_epp(cpudata, 0);
	if (epp < 0)
		return epp;

	switch (epp) {
	case AMD_CPPC_EPP_PERFORMANCE:
		index = EPP_INDEX_PERFORMANCE;
		break;
	case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
		index = EPP_INDEX_BALANCE_PERFORMANCE;
		break;
	case AMD_CPPC_EPP_BALANCE_POWERSAVE:
		index = EPP_INDEX_BALANCE_POWERSAVE;
		break;
	case AMD_CPPC_EPP_POWERSAVE:
		index = EPP_INDEX_POWERSAVE;
		break;
	default:
		break;
	}

	return index;
}

static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
{
	int ret;
	struct cppc_perf_ctrls perf_ctrls;

	if (boot_cpu_has(X86_FEATURE_CPPC)) {
		u64 value = READ_ONCE(cpudata->cppc_req_cached);

		value &= ~GENMASK_ULL(31, 24);
		value |= (u64)epp << 24;
		WRITE_ONCE(cpudata->cppc_req_cached, value);

		ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
		if (!ret)
			cpudata->epp_cached = epp;
	} else {
		perf_ctrls.energy_perf = epp;
		ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
		if (ret) {
			pr_debug("failed to set energy perf value (%d)\n", ret);
			return ret;
		}
		cpudata->epp_cached = epp;
	}

	return ret;
}

static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
					    int pref_index)
{
	int epp = -EINVAL;
	int ret;

	if (!pref_index) {
		pr_debug("EPP pref_index is invalid\n");
		return -EINVAL;
	}

	if (epp == -EINVAL)
		epp = epp_values[pref_index];

	if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
		pr_debug("EPP cannot be set under performance policy\n");
		return -EBUSY;
	}

	ret = amd_pstate_set_epp(cpudata, epp);

	return ret;
}

static inline int pstate_enable(bool enable)
{
	int ret, cpu;
	unsigned long logical_proc_id_mask = 0;

	if (enable == cppc_enabled)
		return 0;

	for_each_present_cpu(cpu) {
		unsigned long logical_id = topology_logical_die_id(cpu);

		if (test_bit(logical_id, &logical_proc_id_mask))
			continue;

		set_bit(logical_id, &logical_proc_id_mask);

		ret = wrmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_ENABLE,
					 enable);
		if (ret)
			return ret;
	}

	cppc_enabled = enable;
	return 0;
}

static int cppc_enable(bool enable)
{
	int cpu, ret = 0;
	struct cppc_perf_ctrls perf_ctrls;

	if (enable == cppc_enabled)
		return 0;

	for_each_present_cpu(cpu) {
		ret = cppc_set_enable(cpu, enable);
		if (ret)
			return ret;

		/* Enable autonomous mode for EPP */
		if (cppc_state == AMD_PSTATE_ACTIVE) {
			/* Set desired perf as zero to allow EPP firmware control */
			perf_ctrls.desired_perf = 0;
			ret = cppc_set_perf(cpu, &perf_ctrls);
			if (ret)
				return ret;
		}
	}

	cppc_enabled = enable;
	return ret;
}
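
/*
 * The pstate_*() helpers implement the full MSR solution and the cppc_*()
 * helpers the shared memory (ACPI CPPC mailbox) solution. Each pair is
 * dispatched through a static call that defaults to the MSR variant; on
 * processors without X86_FEATURE_CPPC, amd_pstate_init() retargets the
 * static calls to the cppc_*() variants, so the hot paths avoid an
 * indirect-branch penalty either way.
 */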
DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);

static inline int amd_pstate_enable(bool enable)
{
	return static_call(amd_pstate_enable)(enable);
}
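
/*
 * CPPC expresses performance on an abstract, dimensionless scale (one byte
 * per field on the MSR solution). The capability values cached below are
 * ordered lowest_perf <= lowest_nonlinear_perf <= nominal_perf <=
 * highest_perf; lowest_nonlinear_perf marks the level below which further
 * frequency reductions stop yielding roughly proportional power savings.
 */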
static int pstate_init_perf(struct amd_cpudata *cpudata)
{
	u64 cap1;
	u32 highest_perf;

	int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
				     &cap1);
	if (ret)
		return ret;

	/*
	 * TODO: Introduce AMD specific power feature.
	 *
	 * CPPC entry doesn't indicate the highest performance in some ASICs.
	 */
	highest_perf = amd_get_highest_perf();
	if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
		highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);

	WRITE_ONCE(cpudata->highest_perf, highest_perf);
	WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
	WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
	WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
	WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
	WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
	return 0;
}

static int cppc_init_perf(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;
	u32 highest_perf;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	highest_perf = amd_get_highest_perf();
	if (highest_perf > cppc_perf.highest_perf)
		highest_perf = cppc_perf.highest_perf;

	WRITE_ONCE(cpudata->highest_perf, highest_perf);
	WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
	WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
	WRITE_ONCE(cpudata->lowest_nonlinear_perf,
		   cppc_perf.lowest_nonlinear_perf);
	WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
	WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);

	if (cppc_state == AMD_PSTATE_ACTIVE)
		return 0;

	ret = cppc_get_auto_sel_caps(cpudata->cpu, &cppc_perf);
	if (ret) {
		pr_warn("failed to get auto_sel, ret: %d\n", ret);
		return 0;
	}

	ret = cppc_set_auto_sel(cpudata->cpu,
				(cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);

	if (ret)
		pr_warn("failed to set auto_sel, ret: %d\n", ret);

	return ret;
}

DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);

static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
{
	return static_call(amd_pstate_init_perf)(cpudata);
}

static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
			       u32 des_perf, u32 max_perf, bool fast_switch)
{
	if (fast_switch)
		wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
	else
		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
			      READ_ONCE(cpudata->cppc_req_cached));
}

static void cppc_update_perf(struct amd_cpudata *cpudata,
			     u32 min_perf, u32 des_perf,
			     u32 max_perf, bool fast_switch)
{
	struct cppc_perf_ctrls perf_ctrls;

	perf_ctrls.max_perf = max_perf;
	perf_ctrls.min_perf = min_perf;
	perf_ctrls.desired_perf = des_perf;

	cppc_set_perf(cpudata->cpu, &perf_ctrls);
}

DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);

static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
					  u32 min_perf, u32 des_perf,
					  u32 max_perf, bool fast_switch)
{
	static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
					    max_perf, fast_switch);
}
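
/*
 * Effective-frequency sampling: APERF counts at the actually delivered clock
 * and MPERF at a constant reference clock, so over any interval
 * freq ~= (delta APERF / delta MPERF) * cpu_khz. The helper below snapshots
 * both counters (plus the TSC) with interrupts off and derives cpudata->freq
 * from the deltas, which the tracepoint in amd_pstate_update() then reports.
 */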
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
{
	u64 aperf, mperf, tsc;
	unsigned long flags;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();

	if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}

	local_irq_restore(flags);

	cpudata->cur.aperf = aperf;
	cpudata->cur.mperf = mperf;
	cpudata->cur.tsc = tsc;
	cpudata->cur.aperf -= cpudata->prev.aperf;
	cpudata->cur.mperf -= cpudata->prev.mperf;
	cpudata->cur.tsc -= cpudata->prev.tsc;

	cpudata->prev.aperf = aperf;
	cpudata->prev.mperf = mperf;
	cpudata->prev.tsc = tsc;

	cpudata->freq = div64_u64((cpudata->cur.aperf * cpu_khz), cpudata->cur.mperf);

	return true;
}

static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
			      u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags)
{
	u64 prev = READ_ONCE(cpudata->cppc_req_cached);
	u64 value = prev;

	min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
			   cpudata->max_limit_perf);
	max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
			   cpudata->max_limit_perf);
	des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);

	if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
		min_perf = des_perf;
		des_perf = 0;
	}
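
	/*
	 * In guided mode with a dynamic governor the request above is
	 * reinterpreted: the governor's desired level becomes the floor
	 * (min_perf) and desired_perf is cleared, letting the platform
	 * autonomously pick any level between that floor and max_perf.
	 */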

	value &= ~AMD_CPPC_MIN_PERF(~0L);
	value |= AMD_CPPC_MIN_PERF(min_perf);

	value &= ~AMD_CPPC_DES_PERF(~0L);
	value |= AMD_CPPC_DES_PERF(des_perf);

	value &= ~AMD_CPPC_MAX_PERF(~0L);
	value |= AMD_CPPC_MAX_PERF(max_perf);

	if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
		trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
			cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
			cpudata->cpu, (value != prev), fast_switch);
	}

	if (value == prev)
		return;

	WRITE_ONCE(cpudata->cppc_req_cached, value);

	amd_pstate_update_perf(cpudata, min_perf, des_perf,
			       max_perf, fast_switch);
}

static int amd_pstate_verify(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	return 0;
}

static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
{
	u32 max_limit_perf, min_limit_perf;
	struct amd_cpudata *cpudata = policy->driver_data;

	max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
	min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);

	WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
	WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
	WRITE_ONCE(cpudata->max_limit_freq, policy->max);
	WRITE_ONCE(cpudata->min_limit_freq, policy->min);

	return 0;
}

static int amd_pstate_update_freq(struct cpufreq_policy *policy,
				  unsigned int target_freq, bool fast_switch)
{
	struct cpufreq_freqs freqs;
	struct amd_cpudata *cpudata = policy->driver_data;
	unsigned long max_perf, min_perf, des_perf, cap_perf;

	if (!cpudata->max_freq)
		return -ENODEV;

	if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
		amd_pstate_update_min_max_limit(policy);

	cap_perf = READ_ONCE(cpudata->highest_perf);
	min_perf = READ_ONCE(cpudata->lowest_perf);
	max_perf = cap_perf;

	freqs.old = policy->cur;
	freqs.new = target_freq;

	des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
				     cpudata->max_freq);

	WARN_ON(fast_switch && !policy->fast_switch_enabled);
	/*
	 * If fast_switch is desired, then there aren't any registered
	 * transition notifiers. See comment for
	 * cpufreq_enable_fast_switch().
	 */
	if (!fast_switch)
		cpufreq_freq_transition_begin(policy, &freqs);

	amd_pstate_update(cpudata, min_perf, des_perf,
			  max_perf, fast_switch, policy->governor->flags);

	if (!fast_switch)
		cpufreq_freq_transition_end(policy, &freqs, false);

	return 0;
}

static int amd_pstate_target(struct cpufreq_policy *policy,
			     unsigned int target_freq,
			     unsigned int relation)
{
	return amd_pstate_update_freq(policy, target_freq, false);
}

static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
					   unsigned int target_freq)
{
	if (!amd_pstate_update_freq(policy, target_freq, true))
		return target_freq;
	return policy->cur;
}
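
/*
 * ->adjust_perf() is the schedutil fast path: the scheduler hands over
 * utilization-derived values on its own capacity scale, and the ratios
 * target_perf/capacity and _min_perf/capacity are rescaled against cap_perf
 * to obtain CPPC desired and minimum performance levels, bypassing the
 * frequency-based target() path entirely.
 */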
static void amd_pstate_adjust_perf(unsigned int cpu,
				   unsigned long _min_perf,
				   unsigned long target_perf,
				   unsigned long capacity)
{
	unsigned long max_perf, min_perf, des_perf,
		      cap_perf, lowest_nonlinear_perf, max_freq;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct amd_cpudata *cpudata = policy->driver_data;
	unsigned int target_freq;

	if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
		amd_pstate_update_min_max_limit(policy);

	cap_perf = READ_ONCE(cpudata->highest_perf);
	lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
	max_freq = READ_ONCE(cpudata->max_freq);

	des_perf = cap_perf;
	if (target_perf < capacity)
		des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);

	min_perf = READ_ONCE(cpudata->lowest_perf);
	if (_min_perf < capacity)
		min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);

	if (min_perf < lowest_nonlinear_perf)
		min_perf = lowest_nonlinear_perf;

	max_perf = cap_perf;
	if (max_perf < min_perf)
		max_perf = min_perf;

	des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
	target_freq = div_u64(des_perf * max_freq, max_perf);
	policy->cur = target_freq;

	amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true,
			  policy->governor->flags);
	cpufreq_cpu_put(policy);
}

static int amd_get_min_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	/* Switch to khz */
	return cppc_perf.lowest_freq * 1000;
}

static int amd_get_max_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;
	u32 max_perf, max_freq, nominal_freq, nominal_perf;
	u64 boost_ratio;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	nominal_freq = cppc_perf.nominal_freq;
	nominal_perf = READ_ONCE(cpudata->nominal_perf);
	max_perf = READ_ONCE(cpudata->highest_perf);

	boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT,
			      nominal_perf);

	max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT;

	/* Switch to khz */
	return max_freq * 1000;
}
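
/*
 * The boost math above is fixed point with SCHED_CAPACITY_SHIFT (10) bits of
 * fraction. As a purely illustrative example with made-up capability values:
 * nominal_freq 2800 MHz, nominal_perf 140 and highest_perf 196 give
 * boost_ratio = (196 << 10) / 140 = 1433, so max_freq = 2800 * 1433 >> 10 =
 * 3918 MHz (~1.4x nominal), returned as 3918000 kHz.
 */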

static int amd_get_nominal_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	/* Switch to khz */
	return cppc_perf.nominal_freq * 1000;
}

static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;
	u32 lowest_nonlinear_freq, lowest_nonlinear_perf,
	    nominal_freq, nominal_perf;
	u64 lowest_nonlinear_ratio;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	nominal_freq = cppc_perf.nominal_freq;
	nominal_perf = READ_ONCE(cpudata->nominal_perf);

	lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;

	lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
					 nominal_perf);

	lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT;

	/* Switch to khz */
	return lowest_nonlinear_freq * 1000;
}

static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	int ret;

	if (!cpudata->boost_supported) {
		pr_err("Boost mode is not supported by this processor or SBIOS\n");
		return -EINVAL;
	}

	if (state)
		policy->cpuinfo.max_freq = cpudata->max_freq;
	else
		policy->cpuinfo.max_freq = cpudata->nominal_freq * 1000;

	policy->max = policy->cpuinfo.max_freq;

	ret = freq_qos_update_request(&cpudata->req[1],
				      policy->cpuinfo.max_freq);
	if (ret < 0)
		return ret;

	return 0;
}

static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
{
	u32 highest_perf, nominal_perf;

	highest_perf = READ_ONCE(cpudata->highest_perf);
	nominal_perf = READ_ONCE(cpudata->nominal_perf);

	if (highest_perf <= nominal_perf)
		return;

	cpudata->boost_supported = true;
	current_pstate_driver->boost_enabled = true;
}

static void amd_perf_ctl_reset(unsigned int cpu)
{
	wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
}

static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
	struct device *dev;
	struct amd_cpudata *cpudata;

	/*
	 * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
	 * which is ideal for the initialization process.
	 */
	amd_perf_ctl_reset(policy->cpu);
	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
	if (!cpudata)
		return -ENOMEM;

	cpudata->cpu = policy->cpu;

	ret = amd_pstate_init_perf(cpudata);
	if (ret)
		goto free_cpudata1;

	min_freq = amd_get_min_freq(cpudata);
	max_freq = amd_get_max_freq(cpudata);
	nominal_freq = amd_get_nominal_freq(cpudata);
	lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);

	if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
		dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
			min_freq, max_freq);
		ret = -EINVAL;
		goto free_cpudata1;
	}

	policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY;
	policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY;

	policy->min = min_freq;
	policy->max = max_freq;

	policy->cpuinfo.min_freq = min_freq;
	policy->cpuinfo.max_freq = max_freq;

	/* It will be updated by governor */
	policy->cur = policy->cpuinfo.min_freq;

	if (boot_cpu_has(X86_FEATURE_CPPC))
		policy->fast_switch_possible = true;

	ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
				   FREQ_QOS_MIN, policy->cpuinfo.min_freq);
	if (ret < 0) {
		dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
		goto free_cpudata1;
	}

	ret = freq_qos_add_request(&policy->constraints, &cpudata->req[1],
				   FREQ_QOS_MAX, policy->cpuinfo.max_freq);
	if (ret < 0) {
		dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
		goto free_cpudata2;
	}

	/* Initial processor data capability frequencies */
	cpudata->max_freq = max_freq;
	cpudata->min_freq = min_freq;
	cpudata->max_limit_freq = max_freq;
	cpudata->min_limit_freq = min_freq;
	cpudata->nominal_freq = nominal_freq;
	cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;

	policy->driver_data = cpudata;

	amd_pstate_boost_init(cpudata);
	if (!current_pstate_driver->adjust_perf)
		current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;

	return 0;

free_cpudata2:
	freq_qos_remove_request(&cpudata->req[0]);
free_cpudata1:
	kfree(cpudata);
	return ret;
}

static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;

	freq_qos_remove_request(&cpudata->req[1]);
	freq_qos_remove_request(&cpudata->req[0]);
	policy->fast_switch_possible = false;
	kfree(cpudata);

	return 0;
}

static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
{
	int ret;

	ret = amd_pstate_enable(true);
	if (ret)
		pr_err("failed to enable amd-pstate during resume, return %d\n", ret);

	return ret;
}

static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
{
	int ret;

	ret = amd_pstate_enable(false);
	if (ret)
		pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);

	return ret;
}

/* Sysfs attributes */

/*
 * This frequency indicates the maximum hardware frequency.
 * If boost is not active but supported, the frequency will be larger than the
 * one in cpuinfo.
 */
static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
					char *buf)
{
	int max_freq;
	struct amd_cpudata *cpudata = policy->driver_data;

	max_freq = amd_get_max_freq(cpudata);
	if (max_freq < 0)
		return max_freq;

	return sysfs_emit(buf, "%u\n", max_freq);
}

static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
						     char *buf)
{
	int freq;
	struct amd_cpudata *cpudata = policy->driver_data;

	freq = amd_get_lowest_nonlinear_freq(cpudata);
	if (freq < 0)
		return freq;

	return sysfs_emit(buf, "%u\n", freq);
}

/*
 * In some ASICs the highest_perf is not the one in the _CPC table, so we
 * need to expose it to sysfs.
 */
static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
					    char *buf)
{
	u32 perf;
	struct amd_cpudata *cpudata = policy->driver_data;

	perf = READ_ONCE(cpudata->highest_perf);

	return sysfs_emit(buf, "%u\n", perf);
}

static ssize_t show_energy_performance_available_preferences(
				struct cpufreq_policy *policy, char *buf)
{
	int i = 0;
	int offset = 0;
	struct amd_cpudata *cpudata = policy->driver_data;

	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sysfs_emit_at(buf, offset, "%s\n",
				     energy_perf_strings[EPP_INDEX_PERFORMANCE]);

	while (energy_perf_strings[i] != NULL)
		offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);

	offset += sysfs_emit_at(buf, offset, "\n");

	return offset;
}

static ssize_t store_energy_performance_preference(
		struct cpufreq_policy *policy, const char *buf, size_t count)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	char str_preference[21];
	ssize_t ret;

	ret = sscanf(buf, "%20s", str_preference);
	if (ret != 1)
		return -EINVAL;

	ret = match_string(energy_perf_strings, -1, str_preference);
	if (ret < 0)
		return -EINVAL;

	mutex_lock(&amd_pstate_limits_lock);
	ret = amd_pstate_set_energy_pref_index(cpudata, ret);
	mutex_unlock(&amd_pstate_limits_lock);

	return ret ?: count;
}

static ssize_t show_energy_performance_preference(
				struct cpufreq_policy *policy, char *buf)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	int preference;

	preference = amd_pstate_get_energy_pref_index(cpudata);
	if (preference < 0)
		return preference;

	return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
}
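
/*
 * The two attributes above surface per-policy files, e.g.
 * /sys/devices/system/cpu/cpuN/cpufreq/energy_performance_preference, so in
 * active (EPP) mode a hint can be set from userspace with something like
 * "echo balance_power > .../energy_performance_preference".
 */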

static void amd_pstate_driver_cleanup(void)
{
	amd_pstate_enable(false);
	cppc_state = AMD_PSTATE_DISABLE;
	current_pstate_driver = NULL;
}

static int amd_pstate_register_driver(int mode)
{
	int ret;

	if (mode == AMD_PSTATE_PASSIVE || mode == AMD_PSTATE_GUIDED)
		current_pstate_driver = &amd_pstate_driver;
	else if (mode == AMD_PSTATE_ACTIVE)
		current_pstate_driver = &amd_pstate_epp_driver;
	else
		return -EINVAL;

	cppc_state = mode;
	ret = cpufreq_register_driver(current_pstate_driver);
	if (ret) {
		amd_pstate_driver_cleanup();
		return ret;
	}
	return 0;
}

static int amd_pstate_unregister_driver(int dummy)
{
	cpufreq_unregister_driver(current_pstate_driver);
	amd_pstate_driver_cleanup();
	return 0;
}

static int amd_pstate_change_mode_without_dvr_change(int mode)
{
	int cpu = 0;

	cppc_state = mode;

	if (boot_cpu_has(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
		return 0;

	for_each_present_cpu(cpu) {
		cppc_set_auto_sel(cpu, (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
	}

	return 0;
}

static int amd_pstate_change_driver_mode(int mode)
{
	int ret;

	ret = amd_pstate_unregister_driver(0);
	if (ret)
		return ret;

	ret = amd_pstate_register_driver(mode);
	if (ret)
		return ret;

	return 0;
}

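/*
 * Mode transition table indexed [current mode][requested mode]: entries that
 * keep the same cpufreq driver only flip cppc_state (and auto_sel on shared
 * memory systems), entries that cross the active/passive boundary swap the
 * registered driver, and NULL marks a no-op transition.
 */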
static cppc_mode_transition_fn mode_state_machine[AMD_PSTATE_MAX][AMD_PSTATE_MAX] = {
	[AMD_PSTATE_DISABLE] = {
		[AMD_PSTATE_DISABLE] = NULL,
		[AMD_PSTATE_PASSIVE] = amd_pstate_register_driver,
		[AMD_PSTATE_ACTIVE] = amd_pstate_register_driver,
		[AMD_PSTATE_GUIDED] = amd_pstate_register_driver,
	},
	[AMD_PSTATE_PASSIVE] = {
		[AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver,
		[AMD_PSTATE_PASSIVE] = NULL,
		[AMD_PSTATE_ACTIVE] = amd_pstate_change_driver_mode,
		[AMD_PSTATE_GUIDED] = amd_pstate_change_mode_without_dvr_change,
	},
	[AMD_PSTATE_ACTIVE] = {
		[AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver,
		[AMD_PSTATE_PASSIVE] = amd_pstate_change_driver_mode,
		[AMD_PSTATE_ACTIVE] = NULL,
		[AMD_PSTATE_GUIDED] = amd_pstate_change_driver_mode,
	},
	[AMD_PSTATE_GUIDED] = {
		[AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver,
		[AMD_PSTATE_PASSIVE] = amd_pstate_change_mode_without_dvr_change,
		[AMD_PSTATE_ACTIVE] = amd_pstate_change_driver_mode,
		[AMD_PSTATE_GUIDED] = NULL,
	},
};

static ssize_t amd_pstate_show_status(char *buf)
{
	if (!current_pstate_driver)
		return sysfs_emit(buf, "disable\n");

	return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
}

static int amd_pstate_update_status(const char *buf, size_t size)
{
	int mode_idx;

	if (size > strlen("passive") || size < strlen("active"))
		return -EINVAL;

	mode_idx = get_mode_idx_from_str(buf, size);

	if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX)
		return -EINVAL;

	if (mode_state_machine[cppc_state][mode_idx])
		return mode_state_machine[cppc_state][mode_idx](mode_idx);

	return 0;
}
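
/*
 * The mode can be switched at run time through the global status attribute,
 * e.g. "echo passive > /sys/devices/system/cpu/amd_pstate/status"; the store
 * path below trims a trailing newline before handing the string to
 * amd_pstate_update_status().
 */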
static ssize_t status_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	ssize_t ret;

	mutex_lock(&amd_pstate_driver_lock);
	ret = amd_pstate_show_status(buf);
	mutex_unlock(&amd_pstate_driver_lock);

	return ret;
}

static ssize_t status_store(struct device *a, struct device_attribute *b,
			    const char *buf, size_t count)
{
	char *p = memchr(buf, '\n', count);
	int ret;

	mutex_lock(&amd_pstate_driver_lock);
	ret = amd_pstate_update_status(buf, p ? p - buf : count);
	mutex_unlock(&amd_pstate_driver_lock);

	return ret < 0 ? ret : count;
}

cpufreq_freq_attr_ro(amd_pstate_max_freq);
cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);

cpufreq_freq_attr_ro(amd_pstate_highest_perf);
cpufreq_freq_attr_rw(energy_performance_preference);
cpufreq_freq_attr_ro(energy_performance_available_preferences);
static DEVICE_ATTR_RW(status);

static struct freq_attr *amd_pstate_attr[] = {
	&amd_pstate_max_freq,
	&amd_pstate_lowest_nonlinear_freq,
	&amd_pstate_highest_perf,
	NULL,
};

static struct freq_attr *amd_pstate_epp_attr[] = {
	&amd_pstate_max_freq,
	&amd_pstate_lowest_nonlinear_freq,
	&amd_pstate_highest_perf,
	&energy_performance_preference,
	&energy_performance_available_preferences,
	NULL,
};

static struct attribute *pstate_global_attributes[] = {
	&dev_attr_status.attr,
	NULL
};

static const struct attribute_group amd_pstate_global_attr_group = {
	.name = "amd_pstate",
	.attrs = pstate_global_attributes,
};

static bool amd_pstate_acpi_pm_profile_server(void)
{
	switch (acpi_gbl_FADT.preferred_profile) {
	case PM_ENTERPRISE_SERVER:
	case PM_SOHO_SERVER:
	case PM_PERFORMANCE_SERVER:
		return true;
	}
	return false;
}

static bool amd_pstate_acpi_pm_profile_undefined(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_UNSPECIFIED)
		return true;
	if (acpi_gbl_FADT.preferred_profile >= NR_PM_PROFILES)
		return true;
	return false;
}

static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
{
	int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
	struct amd_cpudata *cpudata;
	struct device *dev;
	u64 value;

	/*
	 * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
	 * which is ideal for the initialization process.
	 */
	amd_perf_ctl_reset(policy->cpu);
	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
	if (!cpudata)
		return -ENOMEM;

	cpudata->cpu = policy->cpu;
	cpudata->epp_policy = 0;

	ret = amd_pstate_init_perf(cpudata);
	if (ret)
		goto free_cpudata1;

	min_freq = amd_get_min_freq(cpudata);
	max_freq = amd_get_max_freq(cpudata);
	nominal_freq = amd_get_nominal_freq(cpudata);
	lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
	if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
		dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
			min_freq, max_freq);
		ret = -EINVAL;
		goto free_cpudata1;
	}

	policy->cpuinfo.min_freq = min_freq;
	policy->cpuinfo.max_freq = max_freq;
	/* It will be updated by governor */
	policy->cur = policy->cpuinfo.min_freq;

	/* Initial processor data capability frequencies */
	cpudata->max_freq = max_freq;
	cpudata->min_freq = min_freq;
	cpudata->nominal_freq = nominal_freq;
	cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;

	policy->driver_data = cpudata;

	cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0);

	policy->min = policy->cpuinfo.min_freq;
	policy->max = policy->cpuinfo.max_freq;

	/*
	 * Set the policy to provide a valid fallback value in case
	 * the default cpufreq governor is neither powersave nor performance.
	 */
	if (amd_pstate_acpi_pm_profile_server() ||
	    amd_pstate_acpi_pm_profile_undefined())
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	if (boot_cpu_has(X86_FEATURE_CPPC)) {
		ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
		if (ret)
			return ret;
		WRITE_ONCE(cpudata->cppc_req_cached, value);

		ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &value);
		if (ret)
			return ret;
		WRITE_ONCE(cpudata->cppc_cap1_cached, value);
	}
	amd_pstate_boost_init(cpudata);

	return 0;

free_cpudata1:
	kfree(cpudata);
	return ret;
}

static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;

	if (cpudata) {
		kfree(cpudata);
		policy->driver_data = NULL;
	}

	pr_debug("CPU %d exiting\n", policy->cpu);
	return 0;
}

static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
	u64 value;
	s16 epp;

	max_perf = READ_ONCE(cpudata->highest_perf);
	min_perf = READ_ONCE(cpudata->lowest_perf);
	max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
	min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);

	WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
	WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);

	max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
			   cpudata->max_limit_perf);
	min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
			   cpudata->max_limit_perf);
	value = READ_ONCE(cpudata->cppc_req_cached);

	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
		min_perf = max_perf;

	/* Initial min/max values for CPPC Performance Controls Register */
	value &= ~AMD_CPPC_MIN_PERF(~0L);
	value |= AMD_CPPC_MIN_PERF(min_perf);

	value &= ~AMD_CPPC_MAX_PERF(~0L);
	value |= AMD_CPPC_MAX_PERF(max_perf);

	/* The CPPC EPP feature requires the desired perf field to be zero */
	value &= ~AMD_CPPC_DES_PERF(~0L);
	value |= AMD_CPPC_DES_PERF(0);

	cpudata->epp_policy = cpudata->policy;

	/* Get BIOS pre-defined epp value */
	epp = amd_pstate_get_epp(cpudata, value);
	if (epp < 0) {
		/*
		 * This return value can only be negative for shared memory
		 * systems where EPP register read/write is not supported.
		 */
		return;
	}

	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
		epp = 0;

	/* Set initial EPP value */
	if (boot_cpu_has(X86_FEATURE_CPPC)) {
		value &= ~GENMASK_ULL(31, 24);
		value |= (u64)epp << 24;
	}

	WRITE_ONCE(cpudata->cppc_req_cached, value);
	amd_pstate_set_epp(cpudata, epp);
}

static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	pr_debug("set_policy: cpuinfo.max %u policy->max %u\n",
		 policy->cpuinfo.max_freq, policy->max);

	cpudata->policy = policy->policy;

	amd_pstate_epp_update_limit(policy);

	return 0;
}

static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
{
	struct cppc_perf_ctrls perf_ctrls;
	u64 value, max_perf;
	int ret;

	ret = amd_pstate_enable(true);
	if (ret)
		pr_err("failed to enable amd pstate during resume, return %d\n", ret);

	value = READ_ONCE(cpudata->cppc_req_cached);
	max_perf = READ_ONCE(cpudata->highest_perf);

	if (boot_cpu_has(X86_FEATURE_CPPC)) {
		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
	} else {
		perf_ctrls.max_perf = max_perf;
		perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
		cppc_set_perf(cpudata->cpu, &perf_ctrls);
	}
}

static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;

	pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);

	if (cppc_state == AMD_PSTATE_ACTIVE) {
		amd_pstate_epp_reenable(cpudata);
		cpudata->suspended = false;
	}

	return 0;
}

static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	struct cppc_perf_ctrls perf_ctrls;
	int min_perf;
	u64 value;

	min_perf = READ_ONCE(cpudata->lowest_perf);
	value = READ_ONCE(cpudata->cppc_req_cached);

	mutex_lock(&amd_pstate_limits_lock);
	if (boot_cpu_has(X86_FEATURE_CPPC)) {
		cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;

		/* Set max perf same as min perf */
		value &= ~AMD_CPPC_MAX_PERF(~0L);
		value |= AMD_CPPC_MAX_PERF(min_perf);
		value &= ~AMD_CPPC_MIN_PERF(~0L);
		value |= AMD_CPPC_MIN_PERF(min_perf);
		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
	} else {
		perf_ctrls.desired_perf = 0;
		perf_ctrls.max_perf = min_perf;
		perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
		cppc_set_perf(cpudata->cpu, &perf_ctrls);
	}
	mutex_unlock(&amd_pstate_limits_lock);
}

static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;

	pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);

	if (cpudata->suspended)
		return 0;

	if (cppc_state == AMD_PSTATE_ACTIVE)
		amd_pstate_epp_offline(policy);

	return 0;
}

static int amd_pstate_epp_verify_policy(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_cpu_limits(policy);
	pr_debug("policy_max =%d, policy_min=%d\n", policy->max, policy->min);
	return 0;
}

static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	int ret;

	/* avoid suspending when EPP is not enabled */
	if (cppc_state != AMD_PSTATE_ACTIVE)
		return 0;

	/* set this flag so amd_pstate_epp_cpu_offline() returns early */
	cpudata->suspended = true;

	/* disable CPPC in low-level firmware */
	ret = amd_pstate_enable(false);
	if (ret)
		pr_err("failed to suspend, return %d\n", ret);

	return 0;
}

static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;

	if (cpudata->suspended) {
		mutex_lock(&amd_pstate_limits_lock);

		/* enable amd pstate from suspend state */
		amd_pstate_epp_reenable(cpudata);

		mutex_unlock(&amd_pstate_limits_lock);

		cpudata->suspended = false;
	}

	return 0;
}

static struct cpufreq_driver amd_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
	.verify		= amd_pstate_verify,
	.target		= amd_pstate_target,
	.fast_switch	= amd_pstate_fast_switch,
	.init		= amd_pstate_cpu_init,
	.exit		= amd_pstate_cpu_exit,
	.suspend	= amd_pstate_cpu_suspend,
	.resume		= amd_pstate_cpu_resume,
	.set_boost	= amd_pstate_set_boost,
	.name		= "amd-pstate",
	.attr		= amd_pstate_attr,
};

static struct cpufreq_driver amd_pstate_epp_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= amd_pstate_epp_verify_policy,
	.setpolicy	= amd_pstate_epp_set_policy,
	.init		= amd_pstate_epp_cpu_init,
	.exit		= amd_pstate_epp_cpu_exit,
	.offline	= amd_pstate_epp_cpu_offline,
	.online		= amd_pstate_epp_cpu_online,
	.suspend	= amd_pstate_epp_suspend,
	.resume		= amd_pstate_epp_resume,
	.name		= "amd-pstate-epp",
	.attr		= amd_pstate_epp_attr,
};

static int __init amd_pstate_set_driver(int mode_idx)
{
	if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
		cppc_state = mode_idx;
		if (cppc_state == AMD_PSTATE_DISABLE)
			pr_info("driver is explicitly disabled\n");

		if (cppc_state == AMD_PSTATE_ACTIVE)
			current_pstate_driver = &amd_pstate_epp_driver;

		if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
			current_pstate_driver = &amd_pstate_driver;

		return 0;
	}

	return -EINVAL;
}

static int __init amd_pstate_init(void)
{
	struct device *dev_root;
	int ret;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return -ENODEV;

	if (!acpi_cpc_valid()) {
		pr_warn_once("the _CPC object is not present in SBIOS or ACPI is disabled\n");
		return -ENODEV;
	}

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -EEXIST;

	switch (cppc_state) {
	case AMD_PSTATE_UNDEFINED:
		/* Disable on the following configs by default:
		 * 1. Undefined platforms
		 * 2. Server platforms
		 * 3. Shared memory designs
		 */
		if (amd_pstate_acpi_pm_profile_undefined() ||
		    amd_pstate_acpi_pm_profile_server() ||
		    !boot_cpu_has(X86_FEATURE_CPPC)) {
			pr_info("driver load is disabled, boot with a specific mode to enable this\n");
			return -ENODEV;
		}
		ret = amd_pstate_set_driver(CONFIG_X86_AMD_PSTATE_DEFAULT_MODE);
		if (ret)
			return ret;
		break;
	case AMD_PSTATE_DISABLE:
		return -ENODEV;
	case AMD_PSTATE_PASSIVE:
	case AMD_PSTATE_ACTIVE:
	case AMD_PSTATE_GUIDED:
		break;
	default:
		return -EINVAL;
	}

	/* capability check */
	if (boot_cpu_has(X86_FEATURE_CPPC)) {
		pr_debug("AMD CPPC MSR based functionality is supported\n");
		if (cppc_state != AMD_PSTATE_ACTIVE)
			current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
	} else {
		pr_debug("AMD CPPC shared memory based functionality is supported\n");
		static_call_update(amd_pstate_enable, cppc_enable);
		static_call_update(amd_pstate_init_perf, cppc_init_perf);
		static_call_update(amd_pstate_update_perf, cppc_update_perf);
	}

	/* enable amd pstate feature */
	ret = amd_pstate_enable(true);
	if (ret) {
		pr_err("failed to enable with return %d\n", ret);
		return ret;
	}

	ret = cpufreq_register_driver(current_pstate_driver);
	if (ret)
		pr_err("failed to register with return %d\n", ret);

	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		ret = sysfs_create_group(&dev_root->kobj, &amd_pstate_global_attr_group);
		put_device(dev_root);
		if (ret) {
			pr_err("sysfs attribute export failed with error %d.\n", ret);
			goto global_attr_free;
		}
	}

	return ret;

global_attr_free:
	cpufreq_unregister_driver(current_pstate_driver);
	return ret;
}
device_initcall(amd_pstate_init);

static int __init amd_pstate_param(char *str)
{
	size_t size;
	int mode_idx;

	if (!str)
		return -EINVAL;

	size = strlen(str);
	mode_idx = get_mode_idx_from_str(str, size);

	return amd_pstate_set_driver(mode_idx);
}
early_param("amd_pstate", amd_pstate_param);
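
/*
 * The early parameter above selects the operating mode from the kernel
 * command line, e.g. booting with "amd_pstate=passive", "amd_pstate=active",
 * "amd_pstate=guided" or "amd_pstate=disable"; the accepted strings come
 * from amd_pstate_mode_string[] via get_mode_idx_from_str().
 */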

MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");