// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * amd-pstate.c - AMD Processor P-state Frequency Driver
 *
 * Copyright (C) 2021 Advanced Micro Devices, Inc. All Rights Reserved.
 *
 * Author: Huang Rui <ray.huang@amd.com>
 *
 * AMD P-State introduces a new CPU performance scaling design for AMD
 * processors using the ACPI Collaborative Performance and Power Control (CPPC)
 * feature, which works with the AMD SMU firmware to provide a finer-grained
 * frequency control range. It replaces the legacy ACPI P-States control and
 * allows a flexible, low-latency interface for the Linux kernel to directly
 * communicate performance hints to the hardware.
 *
 * AMD P-State is supported on recent AMD Zen-based CPU series, including some
 * Zen2 and Zen3 processors. _CPC needs to be present in the ACPI tables of an
 * AMD P-State supported system. There are two types of hardware implementations
 * for AMD P-State: 1) Full MSR Solution and 2) Shared Memory Solution.
 * The X86_FEATURE_CPPC CPU feature flag is used to distinguish between them.
 */
22
23 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/init.h>
28 #include <linux/smp.h>
29 #include <linux/sched.h>
30 #include <linux/cpufreq.h>
31 #include <linux/compiler.h>
32 #include <linux/dmi.h>
33 #include <linux/slab.h>
34 #include <linux/acpi.h>
35 #include <linux/io.h>
36 #include <linux/delay.h>
37 #include <linux/uaccess.h>
38 #include <linux/static_call.h>
39 #include <linux/amd-pstate.h>
40 #include <linux/topology.h>
41
42 #include <acpi/processor.h>
43 #include <acpi/cppc_acpi.h>
44
45 #include <asm/msr.h>
46 #include <asm/processor.h>
47 #include <asm/cpufeature.h>
48 #include <asm/cpu_device_id.h>
49 #include "amd-pstate-trace.h"
50
51 #define AMD_PSTATE_TRANSITION_LATENCY 20000
52 #define AMD_PSTATE_TRANSITION_DELAY 1000
53 #define CPPC_HIGHEST_PERF_PERFORMANCE 196
54 #define CPPC_HIGHEST_PERF_DEFAULT 166
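/*
 * Note: the transition latency above is in nanoseconds (20 us) and the
 * transition delay in microseconds (1 ms), following cpufreq conventions.
 * The CPPC_HIGHEST_PERF_* values are fixed perf ceilings used in place of the
 * per-CPU highest-perf reading when preferred core ranking is active (see
 * amd_pstate_highest_perf_set() below).
 */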
55
/*
 * TODO: We need more time to fine tune processors with the shared memory
 * solution together with the community.
 *
 * Some performance drops on CPU benchmarks were reported by SUSE. We are
 * working with them to fine tune the shared memory solution, so it is
 * disabled by default (those processors fall back to acpi-cpufreq), and a
 * module parameter is provided to enable it manually for debugging.
 */
65 static struct cpufreq_driver *current_pstate_driver;
66 static struct cpufreq_driver amd_pstate_driver;
67 static struct cpufreq_driver amd_pstate_epp_driver;
68 static int cppc_state = AMD_PSTATE_UNDEFINED;
69 static bool cppc_enabled;
70 static bool amd_pstate_prefcore = true;
71
/*
 * AMD Energy Preference Performance (EPP)
 * The EPP is used in the CCLK DPM controller to drive the frequency that a
 * core is going to operate during short periods of activity. EPP values will
 * be utilized for different OS profiles (balanced, performance, power savings).
 * The display strings corresponding to each EPP index are held in
 * energy_perf_strings[]:
 *	index		string
 *-------------------------------------
 *	0		default
 *	1		performance
 *	2		balance_performance
 *	3		balance_power
 *	4		power
 */
88 enum energy_perf_value_index {
89 EPP_INDEX_DEFAULT = 0,
90 EPP_INDEX_PERFORMANCE,
91 EPP_INDEX_BALANCE_PERFORMANCE,
92 EPP_INDEX_BALANCE_POWERSAVE,
93 EPP_INDEX_POWERSAVE,
94 };
95
96 static const char * const energy_perf_strings[] = {
97 [EPP_INDEX_DEFAULT] = "default",
98 [EPP_INDEX_PERFORMANCE] = "performance",
99 [EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
100 [EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
101 [EPP_INDEX_POWERSAVE] = "power",
102 NULL
103 };
104
105 static unsigned int epp_values[] = {
106 [EPP_INDEX_DEFAULT] = 0,
107 [EPP_INDEX_PERFORMANCE] = AMD_CPPC_EPP_PERFORMANCE,
108 [EPP_INDEX_BALANCE_PERFORMANCE] = AMD_CPPC_EPP_BALANCE_PERFORMANCE,
109 [EPP_INDEX_BALANCE_POWERSAVE] = AMD_CPPC_EPP_BALANCE_POWERSAVE,
110 [EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE,
111 };
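/*
 * Numeric EPP hints matching the strings above; lower values bias the
 * platform toward performance and higher values toward energy savings,
 * per the AMD_CPPC_EPP_* definitions in <linux/amd-pstate.h>.
 */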
112
113 typedef int (*cppc_mode_transition_fn)(int);
114
static inline int get_mode_idx_from_str(const char *str, size_t size)
116 {
117 int i;
118
119 for (i=0; i < AMD_PSTATE_MAX; i++) {
120 if (!strncmp(str, amd_pstate_mode_string[i], size))
121 return i;
122 }
123 return -EINVAL;
124 }
125
126 static DEFINE_MUTEX(amd_pstate_limits_lock);
127 static DEFINE_MUTEX(amd_pstate_driver_lock);
128
static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
130 {
131 u64 epp;
132 int ret;
133
134 if (boot_cpu_has(X86_FEATURE_CPPC)) {
135 if (!cppc_req_cached) {
136 epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
137 &cppc_req_cached);
138 if (epp)
139 return epp;
140 }
141 epp = (cppc_req_cached >> 24) & 0xFF;
142 } else {
143 ret = cppc_get_epp_perf(cpudata->cpu, &epp);
144 if (ret < 0) {
145 pr_debug("Could not retrieve energy perf value (%d)\n", ret);
146 return -EIO;
147 }
148 }
149
150 return (s16)(epp & 0xff);
151 }
152
static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
154 {
155 s16 epp;
156 int index = -EINVAL;
157
158 epp = amd_pstate_get_epp(cpudata, 0);
159 if (epp < 0)
160 return epp;
161
162 switch (epp) {
163 case AMD_CPPC_EPP_PERFORMANCE:
164 index = EPP_INDEX_PERFORMANCE;
165 break;
166 case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
167 index = EPP_INDEX_BALANCE_PERFORMANCE;
168 break;
169 case AMD_CPPC_EPP_BALANCE_POWERSAVE:
170 index = EPP_INDEX_BALANCE_POWERSAVE;
171 break;
172 case AMD_CPPC_EPP_POWERSAVE:
173 index = EPP_INDEX_POWERSAVE;
174 break;
175 default:
176 break;
177 }
178
179 return index;
180 }
181
static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
			       u32 des_perf, u32 max_perf, bool fast_switch)
184 {
185 if (fast_switch)
186 wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
187 else
188 wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
189 READ_ONCE(cpudata->cppc_req_cached));
190 }
191
192 DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
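/*
 * amd_pstate_update_perf() defaults to the MSR-based pstate_update_perf();
 * amd_pstate_init() re-targets it to cppc_update_perf() via
 * static_call_update() on shared memory (non-X86_FEATURE_CPPC) systems.
 */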
193
static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
					  u32 min_perf, u32 des_perf,
					  u32 max_perf, bool fast_switch)
197 {
198 static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
199 max_perf, fast_switch);
200 }
201
static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
203 {
204 int ret;
205 struct cppc_perf_ctrls perf_ctrls;
206
207 if (boot_cpu_has(X86_FEATURE_CPPC)) {
208 u64 value = READ_ONCE(cpudata->cppc_req_cached);
209
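/* The EPP hint occupies bits [31:24] of MSR_AMD_CPPC_REQ; update only that field */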
210 value &= ~GENMASK_ULL(31, 24);
211 value |= (u64)epp << 24;
212 WRITE_ONCE(cpudata->cppc_req_cached, value);
213
214 ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
215 if (!ret)
216 cpudata->epp_cached = epp;
217 } else {
218 amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
219 cpudata->max_limit_perf, false);
220
221 perf_ctrls.energy_perf = epp;
222 ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
223 if (ret) {
224 pr_debug("failed to set energy perf value (%d)\n", ret);
225 return ret;
226 }
227 cpudata->epp_cached = epp;
228 }
229
230 return ret;
231 }
232
static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
					    int pref_index)
235 {
236 int epp = -EINVAL;
237 int ret;
238
239 if (!pref_index) {
240 pr_debug("EPP pref_index is invalid\n");
241 return -EINVAL;
242 }
243
244 if (epp == -EINVAL)
245 epp = epp_values[pref_index];
246
247 if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
248 pr_debug("EPP cannot be set under performance policy\n");
249 return -EBUSY;
250 }
251
252 ret = amd_pstate_set_epp(cpudata, epp);
253
254 return ret;
255 }
256
static inline int pstate_enable(bool enable)
258 {
259 int ret, cpu;
260 unsigned long logical_proc_id_mask = 0;
261
262 if (enable == cppc_enabled)
263 return 0;
264
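/*
 * MSR_AMD_CPPC_ENABLE appears to be shared within a logical die, so it is
 * written only once per die, tracked via logical_proc_id_mask.
 */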
265 for_each_present_cpu(cpu) {
266 unsigned long logical_id = topology_logical_die_id(cpu);
267
268 if (test_bit(logical_id, &logical_proc_id_mask))
269 continue;
270
271 set_bit(logical_id, &logical_proc_id_mask);
272
273 ret = wrmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_ENABLE,
274 enable);
275 if (ret)
276 return ret;
277 }
278
279 cppc_enabled = enable;
280 return 0;
281 }
282
static int cppc_enable(bool enable)
284 {
285 int cpu, ret = 0;
286 struct cppc_perf_ctrls perf_ctrls;
287
288 if (enable == cppc_enabled)
289 return 0;
290
291 for_each_present_cpu(cpu) {
292 ret = cppc_set_enable(cpu, enable);
293 if (ret)
294 return ret;
295
296 /* Enable autonomous mode for EPP */
297 if (cppc_state == AMD_PSTATE_ACTIVE) {
298 /* Set desired perf as zero to allow EPP firmware control */
299 perf_ctrls.desired_perf = 0;
300 ret = cppc_set_perf(cpu, &perf_ctrls);
301 if (ret)
302 return ret;
303 }
304 }
305
306 cppc_enabled = enable;
307 return ret;
308 }
309
310 DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);
311
static inline int amd_pstate_enable(bool enable)
313 {
314 return static_call(amd_pstate_enable)(enable);
315 }
316
static u32 amd_pstate_highest_perf_set(struct amd_cpudata *cpudata)
318 {
319 struct cpuinfo_x86 *c = &cpu_data(0);
320
321 /*
322 * For AMD CPUs with Family ID 19H and Model ID range 0x70 to 0x7f,
323 * the highest performance level is set to 196.
324 * https://bugzilla.kernel.org/show_bug.cgi?id=218759
325 */
326 if (c->x86 == 0x19 && (c->x86_model >= 0x70 && c->x86_model <= 0x7f))
327 return CPPC_HIGHEST_PERF_PERFORMANCE;
328
329 return CPPC_HIGHEST_PERF_DEFAULT;
330 }
331
static int pstate_init_perf(struct amd_cpudata *cpudata)
333 {
334 u64 cap1;
335 u32 highest_perf;
336
337 int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
338 &cap1);
339 if (ret)
340 return ret;
341
/*
 * For platforms that do not support the preferred core feature,
 * highest_perf may be configured as 166 or 255. To avoid the max
 * frequency being calculated wrongly, we take the
 * AMD_CPPC_HIGHEST_PERF(cap1) value as the default max perf.
 */
347 if (cpudata->hw_prefcore)
348 highest_perf = amd_pstate_highest_perf_set(cpudata);
349 else
350 highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
351
352 WRITE_ONCE(cpudata->highest_perf, highest_perf);
353 WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
354 WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
355 WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
356 WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
357 WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
358 return 0;
359 }
360
static int cppc_init_perf(struct amd_cpudata *cpudata)
362 {
363 struct cppc_perf_caps cppc_perf;
364 u32 highest_perf;
365
366 int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
367 if (ret)
368 return ret;
369
370 if (cpudata->hw_prefcore)
371 highest_perf = amd_pstate_highest_perf_set(cpudata);
372 else
373 highest_perf = cppc_perf.highest_perf;
374
375 WRITE_ONCE(cpudata->highest_perf, highest_perf);
376 WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
377 WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
378 WRITE_ONCE(cpudata->lowest_nonlinear_perf,
379 cppc_perf.lowest_nonlinear_perf);
380 WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
381 WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);
382
383 if (cppc_state == AMD_PSTATE_ACTIVE)
384 return 0;
385
386 ret = cppc_get_auto_sel_caps(cpudata->cpu, &cppc_perf);
387 if (ret) {
388 pr_warn("failed to get auto_sel, ret: %d\n", ret);
389 return 0;
390 }
391
392 ret = cppc_set_auto_sel(cpudata->cpu,
393 (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
394
395 if (ret)
396 pr_warn("failed to set auto_sel, ret: %d\n", ret);
397
398 return ret;
399 }
400
401 DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);
402
static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
404 {
405 return static_call(amd_pstate_init_perf)(cpudata);
406 }
407
static void cppc_update_perf(struct amd_cpudata *cpudata,
			     u32 min_perf, u32 des_perf,
			     u32 max_perf, bool fast_switch)
411 {
412 struct cppc_perf_ctrls perf_ctrls;
413
414 perf_ctrls.max_perf = max_perf;
415 perf_ctrls.min_perf = min_perf;
416 perf_ctrls.desired_perf = des_perf;
417
418 cppc_set_perf(cpudata->cpu, &perf_ctrls);
419 }
420
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
422 {
423 u64 aperf, mperf, tsc;
424 unsigned long flags;
425
426 local_irq_save(flags);
427 rdmsrl(MSR_IA32_APERF, aperf);
428 rdmsrl(MSR_IA32_MPERF, mperf);
429 tsc = rdtsc();
430
431 if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
432 local_irq_restore(flags);
433 return false;
434 }
435
436 local_irq_restore(flags);
437
438 cpudata->cur.aperf = aperf;
439 cpudata->cur.mperf = mperf;
440 cpudata->cur.tsc = tsc;
441 cpudata->cur.aperf -= cpudata->prev.aperf;
442 cpudata->cur.mperf -= cpudata->prev.mperf;
443 cpudata->cur.tsc -= cpudata->prev.tsc;
444
445 cpudata->prev.aperf = aperf;
446 cpudata->prev.mperf = mperf;
447 cpudata->prev.tsc = tsc;
448
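/*
 * APERF counts at the delivered clock while MPERF counts at a constant
 * reference, so the average frequency over the sample window is roughly
 * cpu_khz * delta_aperf / delta_mperf.
 */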
449 cpudata->freq = div64_u64((cpudata->cur.aperf * cpu_khz), cpudata->cur.mperf);
450
451 return true;
452 }
453
static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
			      u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags)
456 {
457 u64 prev = READ_ONCE(cpudata->cppc_req_cached);
458 u64 value = prev;
459
460 min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
461 cpudata->max_limit_perf);
462 max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
463 cpudata->max_limit_perf);
464 des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
465
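/*
 * In guided mode with a dynamically switching governor, hand the request
 * over as the floor and zero the desired perf so the platform can pick a
 * perf level autonomously within [min_perf, max_perf].
 */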
466 if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
467 min_perf = des_perf;
468 des_perf = 0;
469 }
470
471 value &= ~AMD_CPPC_MIN_PERF(~0L);
472 value |= AMD_CPPC_MIN_PERF(min_perf);
473
474 value &= ~AMD_CPPC_DES_PERF(~0L);
475 value |= AMD_CPPC_DES_PERF(des_perf);
476
477 value &= ~AMD_CPPC_MAX_PERF(~0L);
478 value |= AMD_CPPC_MAX_PERF(max_perf);
479
480 if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
481 trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
482 cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
483 cpudata->cpu, (value != prev), fast_switch);
484 }
485
486 if (value == prev)
487 return;
488
489 WRITE_ONCE(cpudata->cppc_req_cached, value);
490
491 amd_pstate_update_perf(cpudata, min_perf, des_perf,
492 max_perf, fast_switch);
493 }
494
static int amd_pstate_verify(struct cpufreq_policy_data *policy)
496 {
497 cpufreq_verify_within_cpu_limits(policy);
498
499 return 0;
500 }
501
static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
503 {
504 u32 max_limit_perf, min_limit_perf;
505 struct amd_cpudata *cpudata = policy->driver_data;
506
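/* Scale the policy frequency limits (kHz) into CPPC perf units */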
507 max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
508 min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
509
510 WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
511 WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
512 WRITE_ONCE(cpudata->max_limit_freq, policy->max);
513 WRITE_ONCE(cpudata->min_limit_freq, policy->min);
514
515 return 0;
516 }
517
static int amd_pstate_update_freq(struct cpufreq_policy *policy,
				  unsigned int target_freq, bool fast_switch)
520 {
521 struct cpufreq_freqs freqs;
522 struct amd_cpudata *cpudata = policy->driver_data;
523 unsigned long max_perf, min_perf, des_perf, cap_perf;
524
525 if (!cpudata->max_freq)
526 return -ENODEV;
527
528 if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
529 amd_pstate_update_min_max_limit(policy);
530
531 cap_perf = READ_ONCE(cpudata->highest_perf);
532 min_perf = READ_ONCE(cpudata->lowest_perf);
533 max_perf = cap_perf;
534
535 freqs.old = policy->cur;
536 freqs.new = target_freq;
537
538 des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
539 cpudata->max_freq);
540
541 WARN_ON(fast_switch && !policy->fast_switch_enabled);
542 /*
543 * If fast_switch is desired, then there aren't any registered
544 * transition notifiers. See comment for
545 * cpufreq_enable_fast_switch().
546 */
547 if (!fast_switch)
548 cpufreq_freq_transition_begin(policy, &freqs);
549
550 amd_pstate_update(cpudata, min_perf, des_perf,
551 max_perf, fast_switch, policy->governor->flags);
552
553 if (!fast_switch)
554 cpufreq_freq_transition_end(policy, &freqs, false);
555
556 return 0;
557 }
558
static int amd_pstate_target(struct cpufreq_policy *policy,
			     unsigned int target_freq,
			     unsigned int relation)
562 {
563 return amd_pstate_update_freq(policy, target_freq, false);
564 }
565
static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
					   unsigned int target_freq)
568 {
569 if (!amd_pstate_update_freq(policy, target_freq, true))
570 return target_freq;
571 return policy->cur;
572 }
573
static void amd_pstate_adjust_perf(unsigned int cpu,
				   unsigned long _min_perf,
				   unsigned long target_perf,
				   unsigned long capacity)
578 {
579 unsigned long max_perf, min_perf, des_perf,
580 cap_perf, lowest_nonlinear_perf, max_freq;
581 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
582 unsigned int target_freq;
583 struct amd_cpudata *cpudata;
584
585 if (!policy)
586 return;
587
588 cpudata = policy->driver_data;
589
590 if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
591 amd_pstate_update_min_max_limit(policy);
592
593
594 cap_perf = READ_ONCE(cpudata->highest_perf);
595 lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
596 max_freq = READ_ONCE(cpudata->max_freq);
597
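/*
 * target_perf and capacity are in the scheduler's capacity scale; map the
 * requested utilization onto the CPPC perf range relative to highest_perf.
 */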
598 des_perf = cap_perf;
599 if (target_perf < capacity)
600 des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);
601
602 min_perf = READ_ONCE(cpudata->lowest_perf);
603 if (_min_perf < capacity)
604 min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);
605
606 if (min_perf < lowest_nonlinear_perf)
607 min_perf = lowest_nonlinear_perf;
608
609 max_perf = cap_perf;
610 if (max_perf < min_perf)
611 max_perf = min_perf;
612
613 des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
614 target_freq = div_u64(des_perf * max_freq, max_perf);
615 policy->cur = target_freq;
616
617 amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true,
618 policy->governor->flags);
619 cpufreq_cpu_put(policy);
620 }
621
static int amd_get_min_freq(struct amd_cpudata *cpudata)
623 {
624 struct cppc_perf_caps cppc_perf;
625
626 int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
627 if (ret)
628 return ret;
629
630 /* Switch to khz */
631 return cppc_perf.lowest_freq * 1000;
632 }
633
static int amd_get_max_freq(struct amd_cpudata *cpudata)
635 {
636 struct cppc_perf_caps cppc_perf;
637 u32 max_perf, max_freq, nominal_freq, nominal_perf;
638 u64 boost_ratio;
639
640 int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
641 if (ret)
642 return ret;
643
644 nominal_freq = cppc_perf.nominal_freq;
645 nominal_perf = READ_ONCE(cpudata->nominal_perf);
646 max_perf = READ_ONCE(cpudata->highest_perf);
647
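/*
 * max_freq = nominal_freq * highest_perf / nominal_perf, computed in fixed
 * point with SCHED_CAPACITY_SHIFT fractional bits.
 */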
648 boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT,
649 nominal_perf);
650
651 max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT;
652
653 /* Switch to khz */
654 return max_freq * 1000;
655 }
656
static int amd_get_nominal_freq(struct amd_cpudata *cpudata)
658 {
659 struct cppc_perf_caps cppc_perf;
660
661 int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
662 if (ret)
663 return ret;
664
665 /* Switch to khz */
666 return cppc_perf.nominal_freq * 1000;
667 }
668
static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata)
670 {
671 struct cppc_perf_caps cppc_perf;
672 u32 lowest_nonlinear_freq, lowest_nonlinear_perf,
673 nominal_freq, nominal_perf;
674 u64 lowest_nonlinear_ratio;
675
676 int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
677 if (ret)
678 return ret;
679
680 nominal_freq = cppc_perf.nominal_freq;
681 nominal_perf = READ_ONCE(cpudata->nominal_perf);
682
683 lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
684
685 lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
686 nominal_perf);
687
688 lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT;
689
690 /* Switch to khz */
691 return lowest_nonlinear_freq * 1000;
692 }
693
static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
695 {
696 struct amd_cpudata *cpudata = policy->driver_data;
697 int ret;
698
699 if (!cpudata->boost_supported) {
700 pr_err("Boost mode is not supported by this processor or SBIOS\n");
701 return -EINVAL;
702 }
703
704 if (state)
705 policy->cpuinfo.max_freq = cpudata->max_freq;
706 else
707 policy->cpuinfo.max_freq = cpudata->nominal_freq;
708
709 policy->max = policy->cpuinfo.max_freq;
710
711 ret = freq_qos_update_request(&cpudata->req[1],
712 policy->cpuinfo.max_freq);
713 if (ret < 0)
714 return ret;
715
716 return 0;
717 }
718
static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
720 {
721 u32 highest_perf, nominal_perf;
722
723 highest_perf = READ_ONCE(cpudata->highest_perf);
724 nominal_perf = READ_ONCE(cpudata->nominal_perf);
725
726 if (highest_perf <= nominal_perf)
727 return;
728
729 cpudata->boost_supported = true;
730 current_pstate_driver->boost_enabled = true;
731 }
732
static void amd_perf_ctl_reset(unsigned int cpu)
734 {
735 wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
736 }
737
/*
 * Setting amd-pstate preferred core enable can't be done directly from cpufreq
 * callbacks due to locking, so queue the work for later.
 */
static void amd_pstste_sched_prefcore_workfn(struct work_struct *work)
743 {
744 sched_set_itmt_support();
745 }
746 static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn);
747
748 /*
749 * Get the highest performance register value.
750 * @cpu: CPU from which to get highest performance.
751 * @highest_perf: Return address.
752 *
753 * Return: 0 for success, -EIO otherwise.
754 */
static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
756 {
757 int ret;
758
759 if (boot_cpu_has(X86_FEATURE_CPPC)) {
760 u64 cap1;
761
762 ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
763 if (ret)
764 return ret;
765 WRITE_ONCE(*highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
766 } else {
767 u64 cppc_highest_perf;
768
769 ret = cppc_get_highest_perf(cpu, &cppc_highest_perf);
770 if (ret)
771 return ret;
772 WRITE_ONCE(*highest_perf, cppc_highest_perf);
773 }
774
775 return (ret);
776 }
777
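/*
 * With preferred core support, the highest-perf reading is expected to be a
 * per-core ranking below 255; a reading of 255 (U8_MAX) means no ranking is
 * provided and the preferred core feature is treated as unsupported.
 */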
778 #define CPPC_MAX_PERF U8_MAX
779
static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
781 {
782 int ret, prio;
783 u32 highest_perf;
784
785 ret = amd_pstate_get_highest_perf(cpudata->cpu, &highest_perf);
786 if (ret)
787 return;
788
789 cpudata->hw_prefcore = true;
/* check if the CPPC preferred core feature is enabled */
791 if (highest_perf < CPPC_MAX_PERF)
792 prio = (int)highest_perf;
793 else {
794 pr_debug("AMD CPPC preferred core is unsupported!\n");
795 cpudata->hw_prefcore = false;
796 return;
797 }
798
799 if (!amd_pstate_prefcore)
800 return;
801
802 /*
803 * The priorities can be set regardless of whether or not
804 * sched_set_itmt_support(true) has been called and it is valid to
805 * update them at any time after it has been called.
806 */
807 sched_set_itmt_core_prio(prio, cpudata->cpu);
808
809 schedule_work(&sched_prefcore_work);
810 }
811
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
813 {
814 int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
815 struct device *dev;
816 struct amd_cpudata *cpudata;
817
/*
 * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
 * which is ideal for the initialization process.
 */
822 amd_perf_ctl_reset(policy->cpu);
823 dev = get_cpu_device(policy->cpu);
824 if (!dev)
825 return -ENODEV;
826
827 cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
828 if (!cpudata)
829 return -ENOMEM;
830
831 cpudata->cpu = policy->cpu;
832
833 amd_pstate_init_prefcore(cpudata);
834
835 ret = amd_pstate_init_perf(cpudata);
836 if (ret)
837 goto free_cpudata1;
838
839 min_freq = amd_get_min_freq(cpudata);
840 max_freq = amd_get_max_freq(cpudata);
841 nominal_freq = amd_get_nominal_freq(cpudata);
842 lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
843
844 if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
845 dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
846 min_freq, max_freq);
847 ret = -EINVAL;
848 goto free_cpudata1;
849 }
850
851 policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY;
852 policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY;
853
854 policy->min = min_freq;
855 policy->max = max_freq;
856
857 policy->cpuinfo.min_freq = min_freq;
858 policy->cpuinfo.max_freq = max_freq;
859
860 /* It will be updated by governor */
861 policy->cur = policy->cpuinfo.min_freq;
862
863 if (boot_cpu_has(X86_FEATURE_CPPC))
864 policy->fast_switch_possible = true;
865
866 ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
867 FREQ_QOS_MIN, policy->cpuinfo.min_freq);
868 if (ret < 0) {
869 dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
870 goto free_cpudata1;
871 }
872
873 ret = freq_qos_add_request(&policy->constraints, &cpudata->req[1],
874 FREQ_QOS_MAX, policy->cpuinfo.max_freq);
875 if (ret < 0) {
876 dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
877 goto free_cpudata2;
878 }
879
880 /* Initial processor data capability frequencies */
881 cpudata->max_freq = max_freq;
882 cpudata->min_freq = min_freq;
883 cpudata->max_limit_freq = max_freq;
884 cpudata->min_limit_freq = min_freq;
885 cpudata->nominal_freq = nominal_freq;
886 cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
887
888 policy->driver_data = cpudata;
889
890 amd_pstate_boost_init(cpudata);
891 if (!current_pstate_driver->adjust_perf)
892 current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
893
894 return 0;
895
896 free_cpudata2:
897 freq_qos_remove_request(&cpudata->req[0]);
898 free_cpudata1:
899 kfree(cpudata);
900 return ret;
901 }
902
static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
904 {
905 struct amd_cpudata *cpudata = policy->driver_data;
906
907 freq_qos_remove_request(&cpudata->req[1]);
908 freq_qos_remove_request(&cpudata->req[0]);
909 policy->fast_switch_possible = false;
910 kfree(cpudata);
911
912 return 0;
913 }
914
static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
916 {
917 int ret;
918
919 ret = amd_pstate_enable(true);
920 if (ret)
921 pr_err("failed to enable amd-pstate during resume, return %d\n", ret);
922
923 return ret;
924 }
925
static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
927 {
928 int ret;
929
930 ret = amd_pstate_enable(false);
931 if (ret)
932 pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);
933
934 return ret;
935 }
936
937 /* Sysfs attributes */
938
/*
 * This frequency indicates the maximum hardware frequency.
 * If boost is not active but supported, the frequency will be larger than the
 * one in cpuinfo.
 */
static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
					char *buf)
946 {
947 int max_freq;
948 struct amd_cpudata *cpudata = policy->driver_data;
949
950 max_freq = amd_get_max_freq(cpudata);
951 if (max_freq < 0)
952 return max_freq;
953
954 return sysfs_emit(buf, "%u\n", max_freq);
955 }
956
static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
						     char *buf)
959 {
960 int freq;
961 struct amd_cpudata *cpudata = policy->driver_data;
962
963 freq = amd_get_lowest_nonlinear_freq(cpudata);
964 if (freq < 0)
965 return freq;
966
967 return sysfs_emit(buf, "%u\n", freq);
968 }
969
/*
 * On some ASICs, the highest_perf is not the one in the _CPC table, so we
 * need to expose it to sysfs.
 */
static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
					    char *buf)
976 {
977 u32 perf;
978 struct amd_cpudata *cpudata = policy->driver_data;
979
980 perf = READ_ONCE(cpudata->highest_perf);
981
982 return sysfs_emit(buf, "%u\n", perf);
983 }
984
static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy,
					   char *buf)
987 {
988 bool hw_prefcore;
989 struct amd_cpudata *cpudata = policy->driver_data;
990
991 hw_prefcore = READ_ONCE(cpudata->hw_prefcore);
992
993 return sysfs_emit(buf, "%s\n", str_enabled_disabled(hw_prefcore));
994 }
995
static ssize_t show_energy_performance_available_preferences(
				struct cpufreq_policy *policy, char *buf)
998 {
999 int i = 0;
1000 int offset = 0;
1001 struct amd_cpudata *cpudata = policy->driver_data;
1002
1003 if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
1004 return sysfs_emit_at(buf, offset, "%s\n",
1005 energy_perf_strings[EPP_INDEX_PERFORMANCE]);
1006
1007 while (energy_perf_strings[i] != NULL)
1008 offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
1009
1010 offset += sysfs_emit_at(buf, offset, "\n");
1011
1012 return offset;
1013 }
1014
static ssize_t store_energy_performance_preference(
		struct cpufreq_policy *policy, const char *buf, size_t count)
1017 {
1018 struct amd_cpudata *cpudata = policy->driver_data;
1019 char str_preference[21];
1020 ssize_t ret;
1021
1022 ret = sscanf(buf, "%20s", str_preference);
1023 if (ret != 1)
1024 return -EINVAL;
1025
1026 ret = match_string(energy_perf_strings, -1, str_preference);
1027 if (ret < 0)
1028 return -EINVAL;
1029
1030 mutex_lock(&amd_pstate_limits_lock);
1031 ret = amd_pstate_set_energy_pref_index(cpudata, ret);
1032 mutex_unlock(&amd_pstate_limits_lock);
1033
1034 return ret ?: count;
1035 }
1036
static ssize_t show_energy_performance_preference(
				struct cpufreq_policy *policy, char *buf)
1039 {
1040 struct amd_cpudata *cpudata = policy->driver_data;
1041 int preference;
1042
1043 preference = amd_pstate_get_energy_pref_index(cpudata);
1044 if (preference < 0)
1045 return preference;
1046
1047 return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
1048 }
1049
static void amd_pstate_driver_cleanup(void)
1051 {
1052 amd_pstate_enable(false);
1053 cppc_state = AMD_PSTATE_DISABLE;
1054 current_pstate_driver = NULL;
1055 }
1056
static int amd_pstate_register_driver(int mode)
1058 {
1059 int ret;
1060
1061 if (mode == AMD_PSTATE_PASSIVE || mode == AMD_PSTATE_GUIDED)
1062 current_pstate_driver = &amd_pstate_driver;
1063 else if (mode == AMD_PSTATE_ACTIVE)
1064 current_pstate_driver = &amd_pstate_epp_driver;
1065 else
1066 return -EINVAL;
1067
1068 cppc_state = mode;
1069
1070 ret = amd_pstate_enable(true);
1071 if (ret) {
1072 pr_err("failed to enable cppc during amd-pstate driver registration, return %d\n",
1073 ret);
1074 amd_pstate_driver_cleanup();
1075 return ret;
1076 }
1077
1078 ret = cpufreq_register_driver(current_pstate_driver);
1079 if (ret) {
1080 amd_pstate_driver_cleanup();
1081 return ret;
1082 }
1083
1084 return 0;
1085 }
1086
static int amd_pstate_unregister_driver(int dummy)
1088 {
1089 cpufreq_unregister_driver(current_pstate_driver);
1090 amd_pstate_driver_cleanup();
1091 return 0;
1092 }
1093
static int amd_pstate_change_mode_without_dvr_change(int mode)
1095 {
1096 int cpu = 0;
1097
1098 cppc_state = mode;
1099
1100 if (boot_cpu_has(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
1101 return 0;
1102
1103 for_each_present_cpu(cpu) {
1104 cppc_set_auto_sel(cpu, (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
1105 }
1106
1107 return 0;
1108 }
1109
static int amd_pstate_change_driver_mode(int mode)
1111 {
1112 int ret;
1113
1114 ret = amd_pstate_unregister_driver(0);
1115 if (ret)
1116 return ret;
1117
1118 ret = amd_pstate_register_driver(mode);
1119 if (ret)
1120 return ret;
1121
1122 return 0;
1123 }
1124
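/*
 * Mode transition table indexed by [current cppc_state][requested mode];
 * a NULL entry means the transition is a no-op.
 */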
1125 static cppc_mode_transition_fn mode_state_machine[AMD_PSTATE_MAX][AMD_PSTATE_MAX] = {
1126 [AMD_PSTATE_DISABLE] = {
1127 [AMD_PSTATE_DISABLE] = NULL,
1128 [AMD_PSTATE_PASSIVE] = amd_pstate_register_driver,
1129 [AMD_PSTATE_ACTIVE] = amd_pstate_register_driver,
1130 [AMD_PSTATE_GUIDED] = amd_pstate_register_driver,
1131 },
1132 [AMD_PSTATE_PASSIVE] = {
1133 [AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver,
1134 [AMD_PSTATE_PASSIVE] = NULL,
1135 [AMD_PSTATE_ACTIVE] = amd_pstate_change_driver_mode,
1136 [AMD_PSTATE_GUIDED] = amd_pstate_change_mode_without_dvr_change,
1137 },
1138 [AMD_PSTATE_ACTIVE] = {
1139 [AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver,
1140 [AMD_PSTATE_PASSIVE] = amd_pstate_change_driver_mode,
1141 [AMD_PSTATE_ACTIVE] = NULL,
1142 [AMD_PSTATE_GUIDED] = amd_pstate_change_driver_mode,
1143 },
1144 [AMD_PSTATE_GUIDED] = {
1145 [AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver,
1146 [AMD_PSTATE_PASSIVE] = amd_pstate_change_mode_without_dvr_change,
1147 [AMD_PSTATE_ACTIVE] = amd_pstate_change_driver_mode,
1148 [AMD_PSTATE_GUIDED] = NULL,
1149 },
1150 };
1151
static ssize_t amd_pstate_show_status(char *buf)
1153 {
1154 if (!current_pstate_driver)
1155 return sysfs_emit(buf, "disable\n");
1156
1157 return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
1158 }
1159
static int amd_pstate_update_status(const char *buf, size_t size)
1161 {
1162 int mode_idx;
1163
1164 if (size > strlen("passive") || size < strlen("active"))
1165 return -EINVAL;
1166
1167 mode_idx = get_mode_idx_from_str(buf, size);
1168
1169 if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX)
1170 return -EINVAL;
1171
1172 if (mode_state_machine[cppc_state][mode_idx])
1173 return mode_state_machine[cppc_state][mode_idx](mode_idx);
1174
1175 return 0;
1176 }
1177
static ssize_t status_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
1180 {
1181 ssize_t ret;
1182
1183 mutex_lock(&amd_pstate_driver_lock);
1184 ret = amd_pstate_show_status(buf);
1185 mutex_unlock(&amd_pstate_driver_lock);
1186
1187 return ret;
1188 }
1189
static ssize_t status_store(struct device *a, struct device_attribute *b,
			    const char *buf, size_t count)
1192 {
1193 char *p = memchr(buf, '\n', count);
1194 int ret;
1195
1196 mutex_lock(&amd_pstate_driver_lock);
1197 ret = amd_pstate_update_status(buf, p ? p - buf : count);
1198 mutex_unlock(&amd_pstate_driver_lock);
1199
1200 return ret < 0 ? ret : count;
1201 }
1202
static ssize_t prefcore_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
1205 {
1206 return sysfs_emit(buf, "%s\n", str_enabled_disabled(amd_pstate_prefcore));
1207 }
1208
1209 cpufreq_freq_attr_ro(amd_pstate_max_freq);
1210 cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
1211
1212 cpufreq_freq_attr_ro(amd_pstate_highest_perf);
1213 cpufreq_freq_attr_ro(amd_pstate_hw_prefcore);
1214 cpufreq_freq_attr_rw(energy_performance_preference);
1215 cpufreq_freq_attr_ro(energy_performance_available_preferences);
1216 static DEVICE_ATTR_RW(status);
1217 static DEVICE_ATTR_RO(prefcore);
1218
1219 static struct freq_attr *amd_pstate_attr[] = {
1220 &amd_pstate_max_freq,
1221 &amd_pstate_lowest_nonlinear_freq,
1222 &amd_pstate_highest_perf,
1223 &amd_pstate_hw_prefcore,
1224 NULL,
1225 };
1226
1227 static struct freq_attr *amd_pstate_epp_attr[] = {
1228 &amd_pstate_max_freq,
1229 &amd_pstate_lowest_nonlinear_freq,
1230 &amd_pstate_highest_perf,
1231 &amd_pstate_hw_prefcore,
1232 &energy_performance_preference,
1233 &energy_performance_available_preferences,
1234 NULL,
1235 };
1236
1237 static struct attribute *pstate_global_attributes[] = {
1238 &dev_attr_status.attr,
1239 &dev_attr_prefcore.attr,
1240 NULL
1241 };
1242
1243 static const struct attribute_group amd_pstate_global_attr_group = {
1244 .name = "amd_pstate",
1245 .attrs = pstate_global_attributes,
1246 };
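/*
 * The global attributes above are typically exposed under
 * /sys/devices/system/cpu/amd_pstate/, e.g.:
 *	# cat /sys/devices/system/cpu/amd_pstate/status
 *	# echo passive > /sys/devices/system/cpu/amd_pstate/status
 * while the per-policy attributes appear under
 * /sys/devices/system/cpu/cpufreq/policy<N>/.
 */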
1247
static bool amd_pstate_acpi_pm_profile_server(void)
1249 {
1250 switch (acpi_gbl_FADT.preferred_profile) {
1251 case PM_ENTERPRISE_SERVER:
1252 case PM_SOHO_SERVER:
1253 case PM_PERFORMANCE_SERVER:
1254 return true;
1255 }
1256 return false;
1257 }
1258
static bool amd_pstate_acpi_pm_profile_undefined(void)
1260 {
1261 if (acpi_gbl_FADT.preferred_profile == PM_UNSPECIFIED)
1262 return true;
1263 if (acpi_gbl_FADT.preferred_profile >= NR_PM_PROFILES)
1264 return true;
1265 return false;
1266 }
1267
static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
1269 {
1270 int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
1271 struct amd_cpudata *cpudata;
1272 struct device *dev;
1273 u64 value;
1274
/*
 * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
 * which is ideal for the initialization process.
 */
1279 amd_perf_ctl_reset(policy->cpu);
1280 dev = get_cpu_device(policy->cpu);
1281 if (!dev)
1282 return -ENODEV;
1283
1284 cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
1285 if (!cpudata)
1286 return -ENOMEM;
1287
1288 cpudata->cpu = policy->cpu;
1289 cpudata->epp_policy = 0;
1290
1291 amd_pstate_init_prefcore(cpudata);
1292
1293 ret = amd_pstate_init_perf(cpudata);
1294 if (ret)
1295 goto free_cpudata1;
1296
1297 min_freq = amd_get_min_freq(cpudata);
1298 max_freq = amd_get_max_freq(cpudata);
1299 nominal_freq = amd_get_nominal_freq(cpudata);
1300 lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
1301 if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
1302 dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
1303 min_freq, max_freq);
1304 ret = -EINVAL;
1305 goto free_cpudata1;
1306 }
1307
1308 policy->cpuinfo.min_freq = min_freq;
1309 policy->cpuinfo.max_freq = max_freq;
1310 /* It will be updated by governor */
1311 policy->cur = policy->cpuinfo.min_freq;
1312
1313 /* Initial processor data capability frequencies */
1314 cpudata->max_freq = max_freq;
1315 cpudata->min_freq = min_freq;
1316 cpudata->nominal_freq = nominal_freq;
1317 cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
1318
1319 policy->driver_data = cpudata;
1320
1321 cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0);
1322
1323 policy->min = policy->cpuinfo.min_freq;
1324 policy->max = policy->cpuinfo.max_freq;
1325
1326 /*
1327 * Set the policy to provide a valid fallback value in case
1328 * the default cpufreq governor is neither powersave nor performance.
1329 */
1330 if (amd_pstate_acpi_pm_profile_server() ||
1331 amd_pstate_acpi_pm_profile_undefined())
1332 policy->policy = CPUFREQ_POLICY_PERFORMANCE;
1333 else
1334 policy->policy = CPUFREQ_POLICY_POWERSAVE;
1335
1336 if (boot_cpu_has(X86_FEATURE_CPPC)) {
1337 ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
1338 if (ret)
1339 return ret;
1340 WRITE_ONCE(cpudata->cppc_req_cached, value);
1341
1342 ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &value);
1343 if (ret)
1344 return ret;
1345 WRITE_ONCE(cpudata->cppc_cap1_cached, value);
1346 }
1347 amd_pstate_boost_init(cpudata);
1348
1349 return 0;
1350
1351 free_cpudata1:
1352 kfree(cpudata);
1353 return ret;
1354 }
1355
static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
1357 {
1358 struct amd_cpudata *cpudata = policy->driver_data;
1359
1360 if (cpudata) {
1361 kfree(cpudata);
1362 policy->driver_data = NULL;
1363 }
1364
1365 pr_debug("CPU %d exiting\n", policy->cpu);
1366 return 0;
1367 }
1368
static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
1370 {
1371 struct amd_cpudata *cpudata = policy->driver_data;
1372 u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
1373 u64 value;
1374 s16 epp;
1375
1376 max_perf = READ_ONCE(cpudata->highest_perf);
1377 min_perf = READ_ONCE(cpudata->lowest_perf);
1378 max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
1379 min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
1380
1381 WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
1382 WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
1383
1384 max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
1385 cpudata->max_limit_perf);
1386 min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
1387 cpudata->max_limit_perf);
1388 value = READ_ONCE(cpudata->cppc_req_cached);
1389
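/*
 * Under the performance policy, raise the floor to nominal perf (bounded
 * by the max limit) so the core is kept at or above its base frequency.
 */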
1390 if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
1391 min_perf = min(cpudata->nominal_perf, max_perf);
1392
1393 /* Initial min/max values for CPPC Performance Controls Register */
1394 value &= ~AMD_CPPC_MIN_PERF(~0L);
1395 value |= AMD_CPPC_MIN_PERF(min_perf);
1396
1397 value &= ~AMD_CPPC_MAX_PERF(~0L);
1398 value |= AMD_CPPC_MAX_PERF(max_perf);
1399
/* The CPPC EPP feature requires the desired perf field to be set to zero */
1401 value &= ~AMD_CPPC_DES_PERF(~0L);
1402 value |= AMD_CPPC_DES_PERF(0);
1403
1404 cpudata->epp_policy = cpudata->policy;
1405
1406 /* Get BIOS pre-defined epp value */
1407 epp = amd_pstate_get_epp(cpudata, value);
1408 if (epp < 0) {
/*
 * This return value can only be negative for shared memory
 * systems where the EPP register read/write is not supported.
 */
1413 return;
1414 }
1415
1416 if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
1417 epp = 0;
1418
1419 /* Set initial EPP value */
1420 if (boot_cpu_has(X86_FEATURE_CPPC)) {
1421 value &= ~GENMASK_ULL(31, 24);
1422 value |= (u64)epp << 24;
1423 }
1424
1425 WRITE_ONCE(cpudata->cppc_req_cached, value);
1426 amd_pstate_set_epp(cpudata, epp);
1427 }
1428
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
1430 {
1431 struct amd_cpudata *cpudata = policy->driver_data;
1432
1433 if (!policy->cpuinfo.max_freq)
1434 return -ENODEV;
1435
1436 pr_debug("set_policy: cpuinfo.max %u policy->max %u\n",
1437 policy->cpuinfo.max_freq, policy->max);
1438
1439 cpudata->policy = policy->policy;
1440
1441 amd_pstate_epp_update_limit(policy);
1442
1443 return 0;
1444 }
1445
static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
1447 {
1448 struct cppc_perf_ctrls perf_ctrls;
1449 u64 value, max_perf;
1450 int ret;
1451
1452 ret = amd_pstate_enable(true);
1453 if (ret)
1454 pr_err("failed to enable amd pstate during resume, return %d\n", ret);
1455
1456 value = READ_ONCE(cpudata->cppc_req_cached);
1457 max_perf = READ_ONCE(cpudata->highest_perf);
1458
1459 if (boot_cpu_has(X86_FEATURE_CPPC)) {
1460 wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
1461 } else {
1462 perf_ctrls.max_perf = max_perf;
1463 perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
1464 cppc_set_perf(cpudata->cpu, &perf_ctrls);
1465 }
1466 }
1467
static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
1469 {
1470 struct amd_cpudata *cpudata = policy->driver_data;
1471
1472 pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
1473
1474 if (cppc_state == AMD_PSTATE_ACTIVE) {
1475 amd_pstate_epp_reenable(cpudata);
1476 cpudata->suspended = false;
1477 }
1478
1479 return 0;
1480 }
1481
static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
1483 {
1484 struct amd_cpudata *cpudata = policy->driver_data;
1485 struct cppc_perf_ctrls perf_ctrls;
1486 int min_perf;
1487 u64 value;
1488
1489 min_perf = READ_ONCE(cpudata->lowest_perf);
1490 value = READ_ONCE(cpudata->cppc_req_cached);
1491
1492 mutex_lock(&amd_pstate_limits_lock);
1493 if (boot_cpu_has(X86_FEATURE_CPPC)) {
1494 cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
1495
1496 /* Set max perf same as min perf */
1497 value &= ~AMD_CPPC_MAX_PERF(~0L);
1498 value |= AMD_CPPC_MAX_PERF(min_perf);
1499 value &= ~AMD_CPPC_MIN_PERF(~0L);
1500 value |= AMD_CPPC_MIN_PERF(min_perf);
1501 wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
1502 } else {
1503 perf_ctrls.desired_perf = 0;
1504 perf_ctrls.max_perf = min_perf;
1505 perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
1506 cppc_set_perf(cpudata->cpu, &perf_ctrls);
1507 }
1508 mutex_unlock(&amd_pstate_limits_lock);
1509 }
1510
static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
1512 {
1513 struct amd_cpudata *cpudata = policy->driver_data;
1514
1515 pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
1516
1517 if (cpudata->suspended)
1518 return 0;
1519
1520 if (cppc_state == AMD_PSTATE_ACTIVE)
1521 amd_pstate_epp_offline(policy);
1522
1523 return 0;
1524 }
1525
static int amd_pstate_epp_verify_policy(struct cpufreq_policy_data *policy)
1527 {
1528 cpufreq_verify_within_cpu_limits(policy);
1529 pr_debug("policy_max =%d, policy_min=%d\n", policy->max, policy->min);
1530 return 0;
1531 }
1532
static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
1534 {
1535 struct amd_cpudata *cpudata = policy->driver_data;
1536 int ret;
1537
1538 /* avoid suspending when EPP is not enabled */
1539 if (cppc_state != AMD_PSTATE_ACTIVE)
1540 return 0;
1541
/* set this flag to avoid setting the core offline */
1543 cpudata->suspended = true;
1544
1545 /* disable CPPC in lowlevel firmware */
1546 ret = amd_pstate_enable(false);
1547 if (ret)
1548 pr_err("failed to suspend, return %d\n", ret);
1549
1550 return 0;
1551 }
1552
static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
1554 {
1555 struct amd_cpudata *cpudata = policy->driver_data;
1556
1557 if (cpudata->suspended) {
1558 mutex_lock(&amd_pstate_limits_lock);
1559
/* re-enable amd-pstate from the suspended state */
1561 amd_pstate_epp_reenable(cpudata);
1562
1563 mutex_unlock(&amd_pstate_limits_lock);
1564
1565 cpudata->suspended = false;
1566 }
1567
1568 return 0;
1569 }
1570
1571 static struct cpufreq_driver amd_pstate_driver = {
1572 .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
1573 .verify = amd_pstate_verify,
1574 .target = amd_pstate_target,
1575 .fast_switch = amd_pstate_fast_switch,
1576 .init = amd_pstate_cpu_init,
1577 .exit = amd_pstate_cpu_exit,
1578 .suspend = amd_pstate_cpu_suspend,
1579 .resume = amd_pstate_cpu_resume,
1580 .set_boost = amd_pstate_set_boost,
1581 .name = "amd-pstate",
1582 .attr = amd_pstate_attr,
1583 };
1584
1585 static struct cpufreq_driver amd_pstate_epp_driver = {
1586 .flags = CPUFREQ_CONST_LOOPS,
1587 .verify = amd_pstate_epp_verify_policy,
1588 .setpolicy = amd_pstate_epp_set_policy,
1589 .init = amd_pstate_epp_cpu_init,
1590 .exit = amd_pstate_epp_cpu_exit,
1591 .offline = amd_pstate_epp_cpu_offline,
1592 .online = amd_pstate_epp_cpu_online,
1593 .suspend = amd_pstate_epp_suspend,
1594 .resume = amd_pstate_epp_resume,
1595 .name = "amd-pstate-epp",
1596 .attr = amd_pstate_epp_attr,
1597 };
1598
static int __init amd_pstate_set_driver(int mode_idx)
1600 {
1601 if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
1602 cppc_state = mode_idx;
1603 if (cppc_state == AMD_PSTATE_DISABLE)
1604 pr_info("driver is explicitly disabled\n");
1605
1606 if (cppc_state == AMD_PSTATE_ACTIVE)
1607 current_pstate_driver = &amd_pstate_epp_driver;
1608
1609 if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
1610 current_pstate_driver = &amd_pstate_driver;
1611
1612 return 0;
1613 }
1614
1615 return -EINVAL;
1616 }
1617
static int __init amd_pstate_init(void)
1619 {
1620 struct device *dev_root;
1621 int ret;
1622
1623 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
1624 return -ENODEV;
1625
1626 if (!acpi_cpc_valid()) {
1627 pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
1628 return -ENODEV;
1629 }
1630
1631 /* don't keep reloading if cpufreq_driver exists */
1632 if (cpufreq_get_current_driver())
1633 return -EEXIST;
1634
1635 switch (cppc_state) {
1636 case AMD_PSTATE_UNDEFINED:
/*
 * Disable on the following configs by default:
 * 1. Undefined platforms
 * 2. Server platforms
 * 3. Shared memory designs
 */
1642 if (amd_pstate_acpi_pm_profile_undefined() ||
1643 amd_pstate_acpi_pm_profile_server() ||
1644 !boot_cpu_has(X86_FEATURE_CPPC)) {
1645 pr_info("driver load is disabled, boot with specific mode to enable this\n");
1646 return -ENODEV;
1647 }
1648 ret = amd_pstate_set_driver(CONFIG_X86_AMD_PSTATE_DEFAULT_MODE);
1649 if (ret)
1650 return ret;
1651 break;
1652 case AMD_PSTATE_DISABLE:
1653 return -ENODEV;
1654 case AMD_PSTATE_PASSIVE:
1655 case AMD_PSTATE_ACTIVE:
1656 case AMD_PSTATE_GUIDED:
1657 break;
1658 default:
1659 return -EINVAL;
1660 }
1661
1662 /* capability check */
1663 if (boot_cpu_has(X86_FEATURE_CPPC)) {
1664 pr_debug("AMD CPPC MSR based functionality is supported\n");
1665 if (cppc_state != AMD_PSTATE_ACTIVE)
1666 current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
1667 } else {
1668 pr_debug("AMD CPPC shared memory based functionality is supported\n");
1669 static_call_update(amd_pstate_enable, cppc_enable);
1670 static_call_update(amd_pstate_init_perf, cppc_init_perf);
1671 static_call_update(amd_pstate_update_perf, cppc_update_perf);
1672 }
1673
1674 /* enable amd pstate feature */
1675 ret = amd_pstate_enable(true);
1676 if (ret) {
1677 pr_err("failed to enable with return %d\n", ret);
1678 return ret;
1679 }
1680
1681 ret = cpufreq_register_driver(current_pstate_driver);
1682 if (ret)
1683 pr_err("failed to register with return %d\n", ret);
1684
1685 dev_root = bus_get_dev_root(&cpu_subsys);
1686 if (dev_root) {
1687 ret = sysfs_create_group(&dev_root->kobj, &amd_pstate_global_attr_group);
1688 put_device(dev_root);
1689 if (ret) {
1690 pr_err("sysfs attribute export failed with error %d.\n", ret);
1691 goto global_attr_free;
1692 }
1693 }
1694
1695 return ret;
1696
1697 global_attr_free:
1698 cpufreq_unregister_driver(current_pstate_driver);
1699 return ret;
1700 }
1701 device_initcall(amd_pstate_init);
1702
static int __init amd_pstate_param(char *str)
1704 {
1705 size_t size;
1706 int mode_idx;
1707
1708 if (!str)
1709 return -EINVAL;
1710
1711 size = strlen(str);
1712 mode_idx = get_mode_idx_from_str(str, size);
1713
1714 return amd_pstate_set_driver(mode_idx);
1715 }
1716
static int __init amd_prefcore_param(char *str)
1718 {
1719 if (!strcmp(str, "disable"))
1720 amd_pstate_prefcore = false;
1721
1722 return 0;
1723 }
1724
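/*
 * Example kernel command line usage (accepted values follow
 * amd_pstate_mode_string):
 *	amd_pstate=disable|passive|active|guided
 *	amd_prefcore=disable
 */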
1725 early_param("amd_pstate", amd_pstate_param);
1726 early_param("amd_prefcore", amd_prefcore_param);
1727
1728 MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
1729 MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
1730