1ec437d71SHuang Rui // SPDX-License-Identifier: GPL-2.0-or-later 2ec437d71SHuang Rui /* 3ec437d71SHuang Rui * amd-pstate.c - AMD Processor P-state Frequency Driver 4ec437d71SHuang Rui * 5ec437d71SHuang Rui * Copyright (C) 2021 Advanced Micro Devices, Inc. All Rights Reserved. 6ec437d71SHuang Rui * 7ec437d71SHuang Rui * Author: Huang Rui <ray.huang@amd.com> 8ec437d71SHuang Rui * 9ec437d71SHuang Rui * AMD P-State introduces a new CPU performance scaling design for AMD 10ec437d71SHuang Rui * processors using the ACPI Collaborative Performance and Power Control (CPPC) 11ec437d71SHuang Rui * feature which works with the AMD SMU firmware providing a finer grained 12ec437d71SHuang Rui * frequency control range. It is to replace the legacy ACPI P-States control, 13ec437d71SHuang Rui * allows a flexible, low-latency interface for the Linux kernel to directly 14ec437d71SHuang Rui * communicate the performance hints to hardware. 15ec437d71SHuang Rui * 16ec437d71SHuang Rui * AMD P-State is supported on recent AMD Zen base CPU series include some of 17ec437d71SHuang Rui * Zen2 and Zen3 processors. _CPC needs to be present in the ACPI tables of AMD 18ec437d71SHuang Rui * P-State supported system. And there are two types of hardware implementations 19ec437d71SHuang Rui * for AMD P-State: 1) Full MSR Solution and 2) Shared Memory Solution. 20ec437d71SHuang Rui * X86_FEATURE_CPPC CPU feature flag is used to distinguish the different types. 
21ec437d71SHuang Rui */ 22ec437d71SHuang Rui 23ec437d71SHuang Rui #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 24ec437d71SHuang Rui 25ec437d71SHuang Rui #include <linux/kernel.h> 26ec437d71SHuang Rui #include <linux/module.h> 27ec437d71SHuang Rui #include <linux/init.h> 28ec437d71SHuang Rui #include <linux/smp.h> 29ec437d71SHuang Rui #include <linux/sched.h> 30ec437d71SHuang Rui #include <linux/cpufreq.h> 31ec437d71SHuang Rui #include <linux/compiler.h> 32ec437d71SHuang Rui #include <linux/dmi.h> 33ec437d71SHuang Rui #include <linux/slab.h> 34ec437d71SHuang Rui #include <linux/acpi.h> 35ec437d71SHuang Rui #include <linux/io.h> 36ec437d71SHuang Rui #include <linux/delay.h> 37ec437d71SHuang Rui #include <linux/uaccess.h> 38ec437d71SHuang Rui #include <linux/static_call.h> 39f1375ec1SMeng Li #include <linux/amd-pstate.h> 40ec437d71SHuang Rui 41ec437d71SHuang Rui #include <acpi/processor.h> 42ec437d71SHuang Rui #include <acpi/cppc_acpi.h> 43ec437d71SHuang Rui 44ec437d71SHuang Rui #include <asm/msr.h> 45ec437d71SHuang Rui #include <asm/processor.h> 46ec437d71SHuang Rui #include <asm/cpufeature.h> 47ec437d71SHuang Rui #include <asm/cpu_device_id.h> 4860e10f89SHuang Rui #include "amd-pstate-trace.h" 49ec437d71SHuang Rui 50ca08e46dSPerry Yuan #define AMD_PSTATE_TRANSITION_LATENCY 20000 51ca08e46dSPerry Yuan #define AMD_PSTATE_TRANSITION_DELAY 1000 52ec437d71SHuang Rui 53e059c184SHuang Rui /* 54e059c184SHuang Rui * TODO: We need more time to fine tune processors with shared memory solution 55e059c184SHuang Rui * with community together. 56e059c184SHuang Rui * 57e059c184SHuang Rui * There are some performance drops on the CPU benchmarks which reports from 58e059c184SHuang Rui * Suse. We are co-working with them to fine tune the shared memory solution. So 59e059c184SHuang Rui * we disable it by default to go acpi-cpufreq on these processors and add a 60e059c184SHuang Rui * module parameter to be able to enable it manually for debugging. 
61e059c184SHuang Rui */ 62ffa5096aSPerry Yuan static struct cpufreq_driver *current_pstate_driver; 63ec437d71SHuang Rui static struct cpufreq_driver amd_pstate_driver; 64ffa5096aSPerry Yuan static struct cpufreq_driver amd_pstate_epp_driver; 65c88ad30eSMario Limonciello static int cppc_state = AMD_PSTATE_UNDEFINED; 66217e6778SWyes Karny static bool cppc_enabled; 6736c5014eSWyes Karny 68ffa5096aSPerry Yuan /* 69ffa5096aSPerry Yuan * AMD Energy Preference Performance (EPP) 70ffa5096aSPerry Yuan * The EPP is used in the CCLK DPM controller to drive 71ffa5096aSPerry Yuan * the frequency that a core is going to operate during 72ffa5096aSPerry Yuan * short periods of activity. EPP values will be utilized for 73ffa5096aSPerry Yuan * different OS profiles (balanced, performance, power savings) 74ffa5096aSPerry Yuan * display strings corresponding to EPP index in the 75ffa5096aSPerry Yuan * energy_perf_strings[] 76ffa5096aSPerry Yuan * index String 77ffa5096aSPerry Yuan *------------------------------------- 78ffa5096aSPerry Yuan * 0 default 79ffa5096aSPerry Yuan * 1 performance 80ffa5096aSPerry Yuan * 2 balance_performance 81ffa5096aSPerry Yuan * 3 balance_power 82ffa5096aSPerry Yuan * 4 power 83ffa5096aSPerry Yuan */ 84ffa5096aSPerry Yuan enum energy_perf_value_index { 85ffa5096aSPerry Yuan EPP_INDEX_DEFAULT = 0, 86ffa5096aSPerry Yuan EPP_INDEX_PERFORMANCE, 87ffa5096aSPerry Yuan EPP_INDEX_BALANCE_PERFORMANCE, 88ffa5096aSPerry Yuan EPP_INDEX_BALANCE_POWERSAVE, 89ffa5096aSPerry Yuan EPP_INDEX_POWERSAVE, 90ffa5096aSPerry Yuan }; 91ffa5096aSPerry Yuan 92ffa5096aSPerry Yuan static const char * const energy_perf_strings[] = { 93ffa5096aSPerry Yuan [EPP_INDEX_DEFAULT] = "default", 94ffa5096aSPerry Yuan [EPP_INDEX_PERFORMANCE] = "performance", 95ffa5096aSPerry Yuan [EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance", 96ffa5096aSPerry Yuan [EPP_INDEX_BALANCE_POWERSAVE] = "balance_power", 97ffa5096aSPerry Yuan [EPP_INDEX_POWERSAVE] = "power", 98ffa5096aSPerry Yuan NULL 
99ffa5096aSPerry Yuan }; 100ffa5096aSPerry Yuan 101ffa5096aSPerry Yuan static unsigned int epp_values[] = { 102ffa5096aSPerry Yuan [EPP_INDEX_DEFAULT] = 0, 103ffa5096aSPerry Yuan [EPP_INDEX_PERFORMANCE] = AMD_CPPC_EPP_PERFORMANCE, 104ffa5096aSPerry Yuan [EPP_INDEX_BALANCE_PERFORMANCE] = AMD_CPPC_EPP_BALANCE_PERFORMANCE, 105ffa5096aSPerry Yuan [EPP_INDEX_BALANCE_POWERSAVE] = AMD_CPPC_EPP_BALANCE_POWERSAVE, 106ffa5096aSPerry Yuan [EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE, 107ffa5096aSPerry Yuan }; 108ffa5096aSPerry Yuan 1093ca7bc81SWyes Karny typedef int (*cppc_mode_transition_fn)(int); 1103ca7bc81SWyes Karny 11136c5014eSWyes Karny static inline int get_mode_idx_from_str(const char *str, size_t size) 11236c5014eSWyes Karny { 11336c5014eSWyes Karny int i; 11436c5014eSWyes Karny 11536c5014eSWyes Karny for (i=0; i < AMD_PSTATE_MAX; i++) { 11636c5014eSWyes Karny if (!strncmp(str, amd_pstate_mode_string[i], size)) 11736c5014eSWyes Karny return i; 11836c5014eSWyes Karny } 11936c5014eSWyes Karny return -EINVAL; 12036c5014eSWyes Karny } 121ec437d71SHuang Rui 122ffa5096aSPerry Yuan static DEFINE_MUTEX(amd_pstate_limits_lock); 123ffa5096aSPerry Yuan static DEFINE_MUTEX(amd_pstate_driver_lock); 124ffa5096aSPerry Yuan 125ffa5096aSPerry Yuan static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached) 126ffa5096aSPerry Yuan { 127ffa5096aSPerry Yuan u64 epp; 128ffa5096aSPerry Yuan int ret; 129ffa5096aSPerry Yuan 130ffa5096aSPerry Yuan if (boot_cpu_has(X86_FEATURE_CPPC)) { 131ffa5096aSPerry Yuan if (!cppc_req_cached) { 132ffa5096aSPerry Yuan epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, 133ffa5096aSPerry Yuan &cppc_req_cached); 134ffa5096aSPerry Yuan if (epp) 135ffa5096aSPerry Yuan return epp; 136ffa5096aSPerry Yuan } 137ffa5096aSPerry Yuan epp = (cppc_req_cached >> 24) & 0xFF; 138ffa5096aSPerry Yuan } else { 139ffa5096aSPerry Yuan ret = cppc_get_epp_perf(cpudata->cpu, &epp); 140ffa5096aSPerry Yuan if (ret < 0) { 141ffa5096aSPerry Yuan pr_debug("Could 
not retrieve energy perf value (%d)\n", ret); 142ffa5096aSPerry Yuan return -EIO; 143ffa5096aSPerry Yuan } 144ffa5096aSPerry Yuan } 145ffa5096aSPerry Yuan 146ffa5096aSPerry Yuan return (s16)(epp & 0xff); 147ffa5096aSPerry Yuan } 148ffa5096aSPerry Yuan 149ffa5096aSPerry Yuan static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata) 150ffa5096aSPerry Yuan { 151ffa5096aSPerry Yuan s16 epp; 152ffa5096aSPerry Yuan int index = -EINVAL; 153ffa5096aSPerry Yuan 154ffa5096aSPerry Yuan epp = amd_pstate_get_epp(cpudata, 0); 155ffa5096aSPerry Yuan if (epp < 0) 156ffa5096aSPerry Yuan return epp; 157ffa5096aSPerry Yuan 158ffa5096aSPerry Yuan switch (epp) { 159ffa5096aSPerry Yuan case AMD_CPPC_EPP_PERFORMANCE: 160ffa5096aSPerry Yuan index = EPP_INDEX_PERFORMANCE; 161ffa5096aSPerry Yuan break; 162ffa5096aSPerry Yuan case AMD_CPPC_EPP_BALANCE_PERFORMANCE: 163ffa5096aSPerry Yuan index = EPP_INDEX_BALANCE_PERFORMANCE; 164ffa5096aSPerry Yuan break; 165ffa5096aSPerry Yuan case AMD_CPPC_EPP_BALANCE_POWERSAVE: 166ffa5096aSPerry Yuan index = EPP_INDEX_BALANCE_POWERSAVE; 167ffa5096aSPerry Yuan break; 168ffa5096aSPerry Yuan case AMD_CPPC_EPP_POWERSAVE: 169ffa5096aSPerry Yuan index = EPP_INDEX_POWERSAVE; 170ffa5096aSPerry Yuan break; 171ffa5096aSPerry Yuan default: 172ffa5096aSPerry Yuan break; 173ffa5096aSPerry Yuan } 174ffa5096aSPerry Yuan 175ffa5096aSPerry Yuan return index; 176ffa5096aSPerry Yuan } 177ffa5096aSPerry Yuan 178ffa5096aSPerry Yuan static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp) 179ffa5096aSPerry Yuan { 180ffa5096aSPerry Yuan int ret; 181ffa5096aSPerry Yuan struct cppc_perf_ctrls perf_ctrls; 182ffa5096aSPerry Yuan 183ffa5096aSPerry Yuan if (boot_cpu_has(X86_FEATURE_CPPC)) { 184ffa5096aSPerry Yuan u64 value = READ_ONCE(cpudata->cppc_req_cached); 185ffa5096aSPerry Yuan 186ffa5096aSPerry Yuan value &= ~GENMASK_ULL(31, 24); 187ffa5096aSPerry Yuan value |= (u64)epp << 24; 188ffa5096aSPerry Yuan WRITE_ONCE(cpudata->cppc_req_cached, value); 
189ffa5096aSPerry Yuan 190ffa5096aSPerry Yuan ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); 191ffa5096aSPerry Yuan if (!ret) 192ffa5096aSPerry Yuan cpudata->epp_cached = epp; 193ffa5096aSPerry Yuan } else { 194ffa5096aSPerry Yuan perf_ctrls.energy_perf = epp; 195ffa5096aSPerry Yuan ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1); 196ffa5096aSPerry Yuan if (ret) { 197ffa5096aSPerry Yuan pr_debug("failed to set energy perf value (%d)\n", ret); 198ffa5096aSPerry Yuan return ret; 199ffa5096aSPerry Yuan } 200ffa5096aSPerry Yuan cpudata->epp_cached = epp; 201ffa5096aSPerry Yuan } 202ffa5096aSPerry Yuan 203ffa5096aSPerry Yuan return ret; 204ffa5096aSPerry Yuan } 205ffa5096aSPerry Yuan 206ffa5096aSPerry Yuan static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata, 207ffa5096aSPerry Yuan int pref_index) 208ffa5096aSPerry Yuan { 209ffa5096aSPerry Yuan int epp = -EINVAL; 210ffa5096aSPerry Yuan int ret; 211ffa5096aSPerry Yuan 212ffa5096aSPerry Yuan if (!pref_index) { 213ffa5096aSPerry Yuan pr_debug("EPP pref_index is invalid\n"); 214ffa5096aSPerry Yuan return -EINVAL; 215ffa5096aSPerry Yuan } 216ffa5096aSPerry Yuan 217ffa5096aSPerry Yuan if (epp == -EINVAL) 218ffa5096aSPerry Yuan epp = epp_values[pref_index]; 219ffa5096aSPerry Yuan 220ffa5096aSPerry Yuan if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) { 221ffa5096aSPerry Yuan pr_debug("EPP cannot be set under performance policy\n"); 222ffa5096aSPerry Yuan return -EBUSY; 223ffa5096aSPerry Yuan } 224ffa5096aSPerry Yuan 225ffa5096aSPerry Yuan ret = amd_pstate_set_epp(cpudata, epp); 226ffa5096aSPerry Yuan 227ffa5096aSPerry Yuan return ret; 228ffa5096aSPerry Yuan } 229ffa5096aSPerry Yuan 230e059c184SHuang Rui static inline int pstate_enable(bool enable) 231ec437d71SHuang Rui { 232217e6778SWyes Karny int ret, cpu; 233217e6778SWyes Karny unsigned long logical_proc_id_mask = 0; 234217e6778SWyes Karny 235217e6778SWyes Karny if (enable == cppc_enabled) 236217e6778SWyes Karny return 0; 
237217e6778SWyes Karny 238217e6778SWyes Karny for_each_present_cpu(cpu) { 239217e6778SWyes Karny unsigned long logical_id = topology_logical_die_id(cpu); 240217e6778SWyes Karny 241217e6778SWyes Karny if (test_bit(logical_id, &logical_proc_id_mask)) 242217e6778SWyes Karny continue; 243217e6778SWyes Karny 244217e6778SWyes Karny set_bit(logical_id, &logical_proc_id_mask); 245217e6778SWyes Karny 246217e6778SWyes Karny ret = wrmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_ENABLE, 247217e6778SWyes Karny enable); 248217e6778SWyes Karny if (ret) 249217e6778SWyes Karny return ret; 250217e6778SWyes Karny } 251217e6778SWyes Karny 252217e6778SWyes Karny cppc_enabled = enable; 253217e6778SWyes Karny return 0; 254ec437d71SHuang Rui } 255ec437d71SHuang Rui 256e059c184SHuang Rui static int cppc_enable(bool enable) 257e059c184SHuang Rui { 258e059c184SHuang Rui int cpu, ret = 0; 259ffa5096aSPerry Yuan struct cppc_perf_ctrls perf_ctrls; 260e059c184SHuang Rui 261217e6778SWyes Karny if (enable == cppc_enabled) 262217e6778SWyes Karny return 0; 263217e6778SWyes Karny 264e059c184SHuang Rui for_each_present_cpu(cpu) { 265e059c184SHuang Rui ret = cppc_set_enable(cpu, enable); 266e059c184SHuang Rui if (ret) 267e059c184SHuang Rui return ret; 268ffa5096aSPerry Yuan 269ffa5096aSPerry Yuan /* Enable autonomous mode for EPP */ 270ffa5096aSPerry Yuan if (cppc_state == AMD_PSTATE_ACTIVE) { 271ffa5096aSPerry Yuan /* Set desired perf as zero to allow EPP firmware control */ 272ffa5096aSPerry Yuan perf_ctrls.desired_perf = 0; 273ffa5096aSPerry Yuan ret = cppc_set_perf(cpu, &perf_ctrls); 274ffa5096aSPerry Yuan if (ret) 275ffa5096aSPerry Yuan return ret; 276ffa5096aSPerry Yuan } 277e059c184SHuang Rui } 278e059c184SHuang Rui 279217e6778SWyes Karny cppc_enabled = enable; 280e059c184SHuang Rui return ret; 281e059c184SHuang Rui } 282e059c184SHuang Rui 283e059c184SHuang Rui DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable); 284e059c184SHuang Rui 285e059c184SHuang Rui static inline int amd_pstate_enable(bool enable) 
286e059c184SHuang Rui { 287e059c184SHuang Rui return static_call(amd_pstate_enable)(enable); 288e059c184SHuang Rui } 289e059c184SHuang Rui 290e059c184SHuang Rui static int pstate_init_perf(struct amd_cpudata *cpudata) 291ec437d71SHuang Rui { 292ec437d71SHuang Rui u64 cap1; 293bedadcfbSPerry Yuan u32 highest_perf; 294ec437d71SHuang Rui 295ec437d71SHuang Rui int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, 296ec437d71SHuang Rui &cap1); 297ec437d71SHuang Rui if (ret) 298ec437d71SHuang Rui return ret; 299ec437d71SHuang Rui 300ec437d71SHuang Rui /* 301ec437d71SHuang Rui * TODO: Introduce AMD specific power feature. 302ec437d71SHuang Rui * 303ec437d71SHuang Rui * CPPC entry doesn't indicate the highest performance in some ASICs. 304ec437d71SHuang Rui */ 305bedadcfbSPerry Yuan highest_perf = amd_get_highest_perf(); 306bedadcfbSPerry Yuan if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1)) 307bedadcfbSPerry Yuan highest_perf = AMD_CPPC_HIGHEST_PERF(cap1); 308bedadcfbSPerry Yuan 309bedadcfbSPerry Yuan WRITE_ONCE(cpudata->highest_perf, highest_perf); 310*4d78331cSWyes Karny WRITE_ONCE(cpudata->max_limit_perf, highest_perf); 311ec437d71SHuang Rui WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1)); 312ec437d71SHuang Rui WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1)); 313ec437d71SHuang Rui WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1)); 314*4d78331cSWyes Karny WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1)); 315ec437d71SHuang Rui return 0; 316ec437d71SHuang Rui } 317ec437d71SHuang Rui 318e059c184SHuang Rui static int cppc_init_perf(struct amd_cpudata *cpudata) 319e059c184SHuang Rui { 320e059c184SHuang Rui struct cppc_perf_caps cppc_perf; 321bedadcfbSPerry Yuan u32 highest_perf; 322e059c184SHuang Rui 323e059c184SHuang Rui int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); 324e059c184SHuang Rui if (ret) 325e059c184SHuang Rui return ret; 326e059c184SHuang Rui 327bedadcfbSPerry Yuan highest_perf = 
amd_get_highest_perf(); 328bedadcfbSPerry Yuan if (highest_perf > cppc_perf.highest_perf) 329bedadcfbSPerry Yuan highest_perf = cppc_perf.highest_perf; 330bedadcfbSPerry Yuan 331bedadcfbSPerry Yuan WRITE_ONCE(cpudata->highest_perf, highest_perf); 332*4d78331cSWyes Karny WRITE_ONCE(cpudata->max_limit_perf, highest_perf); 333e059c184SHuang Rui WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf); 334e059c184SHuang Rui WRITE_ONCE(cpudata->lowest_nonlinear_perf, 335e059c184SHuang Rui cppc_perf.lowest_nonlinear_perf); 336e059c184SHuang Rui WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf); 337*4d78331cSWyes Karny WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf); 338e059c184SHuang Rui 3392dd6d0ebSWyes Karny if (cppc_state == AMD_PSTATE_ACTIVE) 340e059c184SHuang Rui return 0; 3412dd6d0ebSWyes Karny 3422dd6d0ebSWyes Karny ret = cppc_get_auto_sel_caps(cpudata->cpu, &cppc_perf); 3432dd6d0ebSWyes Karny if (ret) { 3442dd6d0ebSWyes Karny pr_warn("failed to get auto_sel, ret: %d\n", ret); 3452dd6d0ebSWyes Karny return 0; 3462dd6d0ebSWyes Karny } 3472dd6d0ebSWyes Karny 3482dd6d0ebSWyes Karny ret = cppc_set_auto_sel(cpudata->cpu, 3492dd6d0ebSWyes Karny (cppc_state == AMD_PSTATE_PASSIVE) ? 
0 : 1); 3502dd6d0ebSWyes Karny 3512dd6d0ebSWyes Karny if (ret) 3522dd6d0ebSWyes Karny pr_warn("failed to set auto_sel, ret: %d\n", ret); 3532dd6d0ebSWyes Karny 3542dd6d0ebSWyes Karny return ret; 355e059c184SHuang Rui } 356e059c184SHuang Rui 357e059c184SHuang Rui DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf); 358e059c184SHuang Rui 359e059c184SHuang Rui static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata) 360e059c184SHuang Rui { 361e059c184SHuang Rui return static_call(amd_pstate_init_perf)(cpudata); 362e059c184SHuang Rui } 363e059c184SHuang Rui 364e059c184SHuang Rui static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf, 365ec437d71SHuang Rui u32 des_perf, u32 max_perf, bool fast_switch) 366ec437d71SHuang Rui { 367ec437d71SHuang Rui if (fast_switch) 368ec437d71SHuang Rui wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached)); 369ec437d71SHuang Rui else 370ec437d71SHuang Rui wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, 371ec437d71SHuang Rui READ_ONCE(cpudata->cppc_req_cached)); 372ec437d71SHuang Rui } 373ec437d71SHuang Rui 374e059c184SHuang Rui static void cppc_update_perf(struct amd_cpudata *cpudata, 375e059c184SHuang Rui u32 min_perf, u32 des_perf, 376e059c184SHuang Rui u32 max_perf, bool fast_switch) 377e059c184SHuang Rui { 378e059c184SHuang Rui struct cppc_perf_ctrls perf_ctrls; 379e059c184SHuang Rui 380e059c184SHuang Rui perf_ctrls.max_perf = max_perf; 381e059c184SHuang Rui perf_ctrls.min_perf = min_perf; 382e059c184SHuang Rui perf_ctrls.desired_perf = des_perf; 383e059c184SHuang Rui 384e059c184SHuang Rui cppc_set_perf(cpudata->cpu, &perf_ctrls); 385e059c184SHuang Rui } 386e059c184SHuang Rui 387e059c184SHuang Rui DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf); 388e059c184SHuang Rui 389e059c184SHuang Rui static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata, 390e059c184SHuang Rui u32 min_perf, u32 des_perf, 391e059c184SHuang Rui u32 max_perf, bool fast_switch) 392e059c184SHuang 
Rui { 393e059c184SHuang Rui static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf, 394e059c184SHuang Rui max_perf, fast_switch); 395e059c184SHuang Rui } 396e059c184SHuang Rui 39723c296fbSJinzhou Su static inline bool amd_pstate_sample(struct amd_cpudata *cpudata) 39823c296fbSJinzhou Su { 39923c296fbSJinzhou Su u64 aperf, mperf, tsc; 40023c296fbSJinzhou Su unsigned long flags; 40123c296fbSJinzhou Su 40223c296fbSJinzhou Su local_irq_save(flags); 40323c296fbSJinzhou Su rdmsrl(MSR_IA32_APERF, aperf); 40423c296fbSJinzhou Su rdmsrl(MSR_IA32_MPERF, mperf); 40523c296fbSJinzhou Su tsc = rdtsc(); 40623c296fbSJinzhou Su 40723c296fbSJinzhou Su if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) { 40823c296fbSJinzhou Su local_irq_restore(flags); 40923c296fbSJinzhou Su return false; 41023c296fbSJinzhou Su } 41123c296fbSJinzhou Su 41223c296fbSJinzhou Su local_irq_restore(flags); 41323c296fbSJinzhou Su 41423c296fbSJinzhou Su cpudata->cur.aperf = aperf; 41523c296fbSJinzhou Su cpudata->cur.mperf = mperf; 41623c296fbSJinzhou Su cpudata->cur.tsc = tsc; 41723c296fbSJinzhou Su cpudata->cur.aperf -= cpudata->prev.aperf; 41823c296fbSJinzhou Su cpudata->cur.mperf -= cpudata->prev.mperf; 41923c296fbSJinzhou Su cpudata->cur.tsc -= cpudata->prev.tsc; 42023c296fbSJinzhou Su 42123c296fbSJinzhou Su cpudata->prev.aperf = aperf; 42223c296fbSJinzhou Su cpudata->prev.mperf = mperf; 42323c296fbSJinzhou Su cpudata->prev.tsc = tsc; 42423c296fbSJinzhou Su 42523c296fbSJinzhou Su cpudata->freq = div64_u64((cpudata->cur.aperf * cpu_khz), cpudata->cur.mperf); 42623c296fbSJinzhou Su 42723c296fbSJinzhou Su return true; 42823c296fbSJinzhou Su } 42923c296fbSJinzhou Su 430ec437d71SHuang Rui static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf, 4312dd6d0ebSWyes Karny u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags) 432ec437d71SHuang Rui { 433ec437d71SHuang Rui u64 prev = READ_ONCE(cpudata->cppc_req_cached); 434ec437d71SHuang Rui u64 value = prev; 
435ec437d71SHuang Rui 436*4d78331cSWyes Karny min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf, 437*4d78331cSWyes Karny cpudata->max_limit_perf); 438*4d78331cSWyes Karny max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf, 439*4d78331cSWyes Karny cpudata->max_limit_perf); 4400e9a8638SPerry Yuan des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf); 4412dd6d0ebSWyes Karny 4422dd6d0ebSWyes Karny if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) { 4432dd6d0ebSWyes Karny min_perf = des_perf; 4442dd6d0ebSWyes Karny des_perf = 0; 4452dd6d0ebSWyes Karny } 4462dd6d0ebSWyes Karny 447ec437d71SHuang Rui value &= ~AMD_CPPC_MIN_PERF(~0L); 448ec437d71SHuang Rui value |= AMD_CPPC_MIN_PERF(min_perf); 449ec437d71SHuang Rui 450ec437d71SHuang Rui value &= ~AMD_CPPC_DES_PERF(~0L); 451ec437d71SHuang Rui value |= AMD_CPPC_DES_PERF(des_perf); 452ec437d71SHuang Rui 453ec437d71SHuang Rui value &= ~AMD_CPPC_MAX_PERF(~0L); 454ec437d71SHuang Rui value |= AMD_CPPC_MAX_PERF(max_perf); 455ec437d71SHuang Rui 45623c296fbSJinzhou Su if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) { 45723c296fbSJinzhou Su trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq, 45823c296fbSJinzhou Su cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc, 45960e10f89SHuang Rui cpudata->cpu, (value != prev), fast_switch); 46023c296fbSJinzhou Su } 46160e10f89SHuang Rui 462ec437d71SHuang Rui if (value == prev) 463ec437d71SHuang Rui return; 464ec437d71SHuang Rui 465ec437d71SHuang Rui WRITE_ONCE(cpudata->cppc_req_cached, value); 466ec437d71SHuang Rui 467ec437d71SHuang Rui amd_pstate_update_perf(cpudata, min_perf, des_perf, 468ec437d71SHuang Rui max_perf, fast_switch); 469ec437d71SHuang Rui } 470ec437d71SHuang Rui 471ec437d71SHuang Rui static int amd_pstate_verify(struct cpufreq_policy_data *policy) 472ec437d71SHuang Rui { 473ec437d71SHuang Rui cpufreq_verify_within_cpu_limits(policy); 474ec437d71SHuang Rui 
475ec437d71SHuang Rui return 0; 476ec437d71SHuang Rui } 477ec437d71SHuang Rui 478*4d78331cSWyes Karny static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy) 479*4d78331cSWyes Karny { 480*4d78331cSWyes Karny u32 max_limit_perf, min_limit_perf; 481*4d78331cSWyes Karny struct amd_cpudata *cpudata = policy->driver_data; 482*4d78331cSWyes Karny 483*4d78331cSWyes Karny max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq); 484*4d78331cSWyes Karny min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq); 485*4d78331cSWyes Karny 486*4d78331cSWyes Karny WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf); 487*4d78331cSWyes Karny WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf); 488*4d78331cSWyes Karny WRITE_ONCE(cpudata->max_limit_freq, policy->max); 489*4d78331cSWyes Karny WRITE_ONCE(cpudata->min_limit_freq, policy->min); 490*4d78331cSWyes Karny 491*4d78331cSWyes Karny return 0; 492*4d78331cSWyes Karny } 493*4d78331cSWyes Karny 4944badf2ebSGautham R. Shenoy static int amd_pstate_update_freq(struct cpufreq_policy *policy, 4954badf2ebSGautham R. 
Shenoy unsigned int target_freq, bool fast_switch) 496ec437d71SHuang Rui { 497ec437d71SHuang Rui struct cpufreq_freqs freqs; 498ec437d71SHuang Rui struct amd_cpudata *cpudata = policy->driver_data; 499ec437d71SHuang Rui unsigned long max_perf, min_perf, des_perf, cap_perf; 500ec437d71SHuang Rui 501ec437d71SHuang Rui if (!cpudata->max_freq) 502ec437d71SHuang Rui return -ENODEV; 503ec437d71SHuang Rui 504*4d78331cSWyes Karny if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq) 505*4d78331cSWyes Karny amd_pstate_update_min_max_limit(policy); 506*4d78331cSWyes Karny 507ec437d71SHuang Rui cap_perf = READ_ONCE(cpudata->highest_perf); 508b185c505SPerry Yuan min_perf = READ_ONCE(cpudata->lowest_perf); 509ec437d71SHuang Rui max_perf = cap_perf; 510ec437d71SHuang Rui 511ec437d71SHuang Rui freqs.old = policy->cur; 512ec437d71SHuang Rui freqs.new = target_freq; 513ec437d71SHuang Rui 514ec437d71SHuang Rui des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf, 515ec437d71SHuang Rui cpudata->max_freq); 516ec437d71SHuang Rui 5174badf2ebSGautham R. Shenoy WARN_ON(fast_switch && !policy->fast_switch_enabled); 5184badf2ebSGautham R. Shenoy /* 5194badf2ebSGautham R. Shenoy * If fast_switch is desired, then there aren't any registered 5204badf2ebSGautham R. Shenoy * transition notifiers. See comment for 5214badf2ebSGautham R. Shenoy * cpufreq_enable_fast_switch(). 5224badf2ebSGautham R. Shenoy */ 5234badf2ebSGautham R. Shenoy if (!fast_switch) 524ec437d71SHuang Rui cpufreq_freq_transition_begin(policy, &freqs); 5254badf2ebSGautham R. Shenoy 526ec437d71SHuang Rui amd_pstate_update(cpudata, min_perf, des_perf, 5274badf2ebSGautham R. Shenoy max_perf, fast_switch, policy->governor->flags); 5284badf2ebSGautham R. Shenoy 5294badf2ebSGautham R. Shenoy if (!fast_switch) 530ec437d71SHuang Rui cpufreq_freq_transition_end(policy, &freqs, false); 531ec437d71SHuang Rui 532ec437d71SHuang Rui return 0; 533ec437d71SHuang Rui } 534ec437d71SHuang Rui 5354badf2ebSGautham R. 
Shenoy static int amd_pstate_target(struct cpufreq_policy *policy, 5364badf2ebSGautham R. Shenoy unsigned int target_freq, 5374badf2ebSGautham R. Shenoy unsigned int relation) 5384badf2ebSGautham R. Shenoy { 5394badf2ebSGautham R. Shenoy return amd_pstate_update_freq(policy, target_freq, false); 5404badf2ebSGautham R. Shenoy } 5414badf2ebSGautham R. Shenoy 5424badf2ebSGautham R. Shenoy static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy, 5434badf2ebSGautham R. Shenoy unsigned int target_freq) 5444badf2ebSGautham R. Shenoy { 5458ebebfc3SGautham R. Shenoy if (!amd_pstate_update_freq(policy, target_freq, true)) 5468ebebfc3SGautham R. Shenoy return target_freq; 5478ebebfc3SGautham R. Shenoy return policy->cur; 5484badf2ebSGautham R. Shenoy } 5494badf2ebSGautham R. Shenoy 5501d215f03SHuang Rui static void amd_pstate_adjust_perf(unsigned int cpu, 5511d215f03SHuang Rui unsigned long _min_perf, 5521d215f03SHuang Rui unsigned long target_perf, 5531d215f03SHuang Rui unsigned long capacity) 5541d215f03SHuang Rui { 5551d215f03SHuang Rui unsigned long max_perf, min_perf, des_perf, 5563bf8c630SWyes Karny cap_perf, lowest_nonlinear_perf, max_freq; 5571d215f03SHuang Rui struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 5581d215f03SHuang Rui struct amd_cpudata *cpudata = policy->driver_data; 5593bf8c630SWyes Karny unsigned int target_freq; 5601d215f03SHuang Rui 561*4d78331cSWyes Karny if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq) 562*4d78331cSWyes Karny amd_pstate_update_min_max_limit(policy); 563*4d78331cSWyes Karny 564*4d78331cSWyes Karny 5651d215f03SHuang Rui cap_perf = READ_ONCE(cpudata->highest_perf); 5661d215f03SHuang Rui lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf); 5673bf8c630SWyes Karny max_freq = READ_ONCE(cpudata->max_freq); 5681d215f03SHuang Rui 5691d215f03SHuang Rui des_perf = cap_perf; 5701d215f03SHuang Rui if (target_perf < capacity) 5711d215f03SHuang Rui des_perf = 
DIV_ROUND_UP(cap_perf * target_perf, capacity); 5721d215f03SHuang Rui 5731d215f03SHuang Rui min_perf = READ_ONCE(cpudata->highest_perf); 5741d215f03SHuang Rui if (_min_perf < capacity) 5751d215f03SHuang Rui min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity); 5761d215f03SHuang Rui 5771d215f03SHuang Rui if (min_perf < lowest_nonlinear_perf) 5781d215f03SHuang Rui min_perf = lowest_nonlinear_perf; 5791d215f03SHuang Rui 5801d215f03SHuang Rui max_perf = cap_perf; 5811d215f03SHuang Rui if (max_perf < min_perf) 5821d215f03SHuang Rui max_perf = min_perf; 5831d215f03SHuang Rui 5843bf8c630SWyes Karny des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf); 5853bf8c630SWyes Karny target_freq = div_u64(des_perf * max_freq, max_perf); 5863bf8c630SWyes Karny policy->cur = target_freq; 5873bf8c630SWyes Karny 5882dd6d0ebSWyes Karny amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true, 5892dd6d0ebSWyes Karny policy->governor->flags); 5904f3085f8SPerry Yuan cpufreq_cpu_put(policy); 5911d215f03SHuang Rui } 5921d215f03SHuang Rui 593ec437d71SHuang Rui static int amd_get_min_freq(struct amd_cpudata *cpudata) 594ec437d71SHuang Rui { 595ec437d71SHuang Rui struct cppc_perf_caps cppc_perf; 596ec437d71SHuang Rui 597ec437d71SHuang Rui int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); 598ec437d71SHuang Rui if (ret) 599ec437d71SHuang Rui return ret; 600ec437d71SHuang Rui 601ec437d71SHuang Rui /* Switch to khz */ 602ec437d71SHuang Rui return cppc_perf.lowest_freq * 1000; 603ec437d71SHuang Rui } 604ec437d71SHuang Rui 605ec437d71SHuang Rui static int amd_get_max_freq(struct amd_cpudata *cpudata) 606ec437d71SHuang Rui { 607ec437d71SHuang Rui struct cppc_perf_caps cppc_perf; 608ec437d71SHuang Rui u32 max_perf, max_freq, nominal_freq, nominal_perf; 609ec437d71SHuang Rui u64 boost_ratio; 610ec437d71SHuang Rui 611ec437d71SHuang Rui int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); 612ec437d71SHuang Rui if (ret) 613ec437d71SHuang Rui return ret; 614ec437d71SHuang Rui 
615ec437d71SHuang Rui nominal_freq = cppc_perf.nominal_freq; 616ec437d71SHuang Rui nominal_perf = READ_ONCE(cpudata->nominal_perf); 617ec437d71SHuang Rui max_perf = READ_ONCE(cpudata->highest_perf); 618ec437d71SHuang Rui 619ec437d71SHuang Rui boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT, 620ec437d71SHuang Rui nominal_perf); 621ec437d71SHuang Rui 622ec437d71SHuang Rui max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT; 623ec437d71SHuang Rui 624ec437d71SHuang Rui /* Switch to khz */ 625ec437d71SHuang Rui return max_freq * 1000; 626ec437d71SHuang Rui } 627ec437d71SHuang Rui 628ec437d71SHuang Rui static int amd_get_nominal_freq(struct amd_cpudata *cpudata) 629ec437d71SHuang Rui { 630ec437d71SHuang Rui struct cppc_perf_caps cppc_perf; 631ec437d71SHuang Rui 632ec437d71SHuang Rui int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); 633ec437d71SHuang Rui if (ret) 634ec437d71SHuang Rui return ret; 635ec437d71SHuang Rui 636ec437d71SHuang Rui /* Switch to khz */ 637ec437d71SHuang Rui return cppc_perf.nominal_freq * 1000; 638ec437d71SHuang Rui } 639ec437d71SHuang Rui 640ec437d71SHuang Rui static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata) 641ec437d71SHuang Rui { 642ec437d71SHuang Rui struct cppc_perf_caps cppc_perf; 643ec437d71SHuang Rui u32 lowest_nonlinear_freq, lowest_nonlinear_perf, 644ec437d71SHuang Rui nominal_freq, nominal_perf; 645ec437d71SHuang Rui u64 lowest_nonlinear_ratio; 646ec437d71SHuang Rui 647ec437d71SHuang Rui int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); 648ec437d71SHuang Rui if (ret) 649ec437d71SHuang Rui return ret; 650ec437d71SHuang Rui 651ec437d71SHuang Rui nominal_freq = cppc_perf.nominal_freq; 652ec437d71SHuang Rui nominal_perf = READ_ONCE(cpudata->nominal_perf); 653ec437d71SHuang Rui 654ec437d71SHuang Rui lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf; 655ec437d71SHuang Rui 656ec437d71SHuang Rui lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT, 
657ec437d71SHuang Rui nominal_perf); 658ec437d71SHuang Rui 659ec437d71SHuang Rui lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT; 660ec437d71SHuang Rui 661ec437d71SHuang Rui /* Switch to khz */ 662ec437d71SHuang Rui return lowest_nonlinear_freq * 1000; 663ec437d71SHuang Rui } 664ec437d71SHuang Rui 66541271016SHuang Rui static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state) 66641271016SHuang Rui { 66741271016SHuang Rui struct amd_cpudata *cpudata = policy->driver_data; 66841271016SHuang Rui int ret; 66941271016SHuang Rui 67041271016SHuang Rui if (!cpudata->boost_supported) { 67141271016SHuang Rui pr_err("Boost mode is not supported by this processor or SBIOS\n"); 67241271016SHuang Rui return -EINVAL; 67341271016SHuang Rui } 67441271016SHuang Rui 67541271016SHuang Rui if (state) 67641271016SHuang Rui policy->cpuinfo.max_freq = cpudata->max_freq; 67741271016SHuang Rui else 67841271016SHuang Rui policy->cpuinfo.max_freq = cpudata->nominal_freq; 67941271016SHuang Rui 68041271016SHuang Rui policy->max = policy->cpuinfo.max_freq; 68141271016SHuang Rui 68241271016SHuang Rui ret = freq_qos_update_request(&cpudata->req[1], 68341271016SHuang Rui policy->cpuinfo.max_freq); 68441271016SHuang Rui if (ret < 0) 68541271016SHuang Rui return ret; 68641271016SHuang Rui 68741271016SHuang Rui return 0; 68841271016SHuang Rui } 68941271016SHuang Rui 69041271016SHuang Rui static void amd_pstate_boost_init(struct amd_cpudata *cpudata) 69141271016SHuang Rui { 69241271016SHuang Rui u32 highest_perf, nominal_perf; 69341271016SHuang Rui 69441271016SHuang Rui highest_perf = READ_ONCE(cpudata->highest_perf); 69541271016SHuang Rui nominal_perf = READ_ONCE(cpudata->nominal_perf); 69641271016SHuang Rui 69741271016SHuang Rui if (highest_perf <= nominal_perf) 69841271016SHuang Rui return; 69941271016SHuang Rui 70041271016SHuang Rui cpudata->boost_supported = true; 701ffa5096aSPerry Yuan current_pstate_driver->boost_enabled = true; 
70241271016SHuang Rui }
70341271016SHuang Rui 
/* Reset the PERF_CTL MSR so the CPU falls back to its P0 frequency. */
704919f4557SWyes Karny static void amd_perf_ctl_reset(unsigned int cpu)
705919f4557SWyes Karny {
706919f4557SWyes Karny 	wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
707919f4557SWyes Karny }
708919f4557SWyes Karny 
/*
 * cpufreq ->init callback for the passive/guided driver: allocate per-CPU
 * data, read CPPC perf capabilities, derive the frequency table bounds and
 * register min/max frequency QoS requests. Errors unwind via the goto
 * labels (free_cpudata2 removes the min-freq QoS request first).
 */
709ec437d71SHuang Rui static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
710ec437d71SHuang Rui {
711ec437d71SHuang Rui 	int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
712ec437d71SHuang Rui 	struct device *dev;
713ec437d71SHuang Rui 	struct amd_cpudata *cpudata;
714ec437d71SHuang Rui 
715919f4557SWyes Karny 	/*
716919f4557SWyes Karny 	 * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
717919f4557SWyes Karny 	 * which is ideal for initialization process.
718919f4557SWyes Karny 	 */
719919f4557SWyes Karny 	amd_perf_ctl_reset(policy->cpu);
720ec437d71SHuang Rui 	dev = get_cpu_device(policy->cpu);
721ec437d71SHuang Rui 	if (!dev)
722ec437d71SHuang Rui 		return -ENODEV;
723ec437d71SHuang Rui 
724ec437d71SHuang Rui 	cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
725ec437d71SHuang Rui 	if (!cpudata)
726ec437d71SHuang Rui 		return -ENOMEM;
727ec437d71SHuang Rui 
728ec437d71SHuang Rui 	cpudata->cpu = policy->cpu;
729ec437d71SHuang Rui 
730ec437d71SHuang Rui 	ret = amd_pstate_init_perf(cpudata);
731ec437d71SHuang Rui 	if (ret)
73241271016SHuang Rui 		goto free_cpudata1;
733ec437d71SHuang Rui 
734ec437d71SHuang Rui 	min_freq = amd_get_min_freq(cpudata);
735ec437d71SHuang Rui 	max_freq = amd_get_max_freq(cpudata);
736ec437d71SHuang Rui 	nominal_freq = amd_get_nominal_freq(cpudata);
737ec437d71SHuang Rui 	lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
738ec437d71SHuang Rui 
739ec437d71SHuang Rui 	if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
740ec437d71SHuang Rui 		dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
741ec437d71SHuang Rui 			min_freq, max_freq);
742ec437d71SHuang Rui 		ret = -EINVAL;
74341271016SHuang Rui 		goto free_cpudata1;
744ec437d71SHuang Rui 	}
745ec437d71SHuang Rui 
746ec437d71SHuang Rui 	policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY;
747ec437d71SHuang Rui 	policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY;
748ec437d71SHuang Rui 
749ec437d71SHuang Rui 	policy->min = min_freq;
750ec437d71SHuang Rui 	policy->max = max_freq;
751ec437d71SHuang Rui 
752ec437d71SHuang Rui 	policy->cpuinfo.min_freq = min_freq;
753ec437d71SHuang Rui 	policy->cpuinfo.max_freq = max_freq;
754ec437d71SHuang Rui 
755ec437d71SHuang Rui 	/* It will be updated by governor */
756ec437d71SHuang Rui 	policy->cur = policy->cpuinfo.min_freq;
757ec437d71SHuang Rui 
	/* Fast switching only with the full MSR solution (X86_FEATURE_CPPC) */
758e059c184SHuang Rui 	if (boot_cpu_has(X86_FEATURE_CPPC))
7591d215f03SHuang Rui 		policy->fast_switch_possible = true;
7601d215f03SHuang Rui 
76141271016SHuang Rui 	ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
76241271016SHuang Rui 				   FREQ_QOS_MIN, policy->cpuinfo.min_freq);
76341271016SHuang Rui 	if (ret < 0) {
76441271016SHuang Rui 		dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
76541271016SHuang Rui 		goto free_cpudata1;
76641271016SHuang Rui 	}
76741271016SHuang Rui 
76841271016SHuang Rui 	ret = freq_qos_add_request(&policy->constraints, &cpudata->req[1],
76941271016SHuang Rui 				   FREQ_QOS_MAX, policy->cpuinfo.max_freq);
77041271016SHuang Rui 	if (ret < 0) {
77141271016SHuang Rui 		dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
77241271016SHuang Rui 		goto free_cpudata2;
77341271016SHuang Rui 	}
77441271016SHuang Rui 
77541271016SHuang Rui 	/* Initial processor data capability frequencies */
776ec437d71SHuang Rui 	cpudata->max_freq = max_freq;
777ec437d71SHuang Rui 	cpudata->min_freq = min_freq;
778*4d78331cSWyes Karny 	cpudata->max_limit_freq = max_freq;
779*4d78331cSWyes Karny 	cpudata->min_limit_freq = min_freq;
780ec437d71SHuang Rui 	cpudata->nominal_freq = nominal_freq;
781ec437d71SHuang Rui 	cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
782ec437d71SHuang Rui 
783ec437d71SHuang Rui 	policy->driver_data = cpudata;
784ec437d71SHuang Rui 
78541271016SHuang Rui 	amd_pstate_boost_init(cpudata);
786abd61c08SPerry Yuan 	if (!current_pstate_driver->adjust_perf)
787abd61c08SPerry Yuan 		current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
78841271016SHuang Rui 
789ec437d71SHuang Rui 	return 0;
790ec437d71SHuang Rui 
79141271016SHuang Rui free_cpudata2:
79241271016SHuang Rui 	freq_qos_remove_request(&cpudata->req[0]);
79341271016SHuang Rui free_cpudata1:
794ec437d71SHuang Rui 	kfree(cpudata);
795ec437d71SHuang Rui 	return ret;
796ec437d71SHuang Rui }
797ec437d71SHuang Rui 
/* cpufreq ->exit callback: drop both QoS requests and free per-CPU data. */
798ec437d71SHuang Rui static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
799ec437d71SHuang Rui {
8004f59540cSPerry Yuan 	struct amd_cpudata *cpudata = policy->driver_data;
801ec437d71SHuang Rui 
80241271016SHuang Rui 	freq_qos_remove_request(&cpudata->req[1]);
80341271016SHuang Rui 	freq_qos_remove_request(&cpudata->req[0]);
8044badf2ebSGautham R. Shenoy 	policy->fast_switch_possible = false;
805ec437d71SHuang Rui 	kfree(cpudata);
806ec437d71SHuang Rui 
807ec437d71SHuang Rui 	return 0;
808ec437d71SHuang Rui }
809ec437d71SHuang Rui 
/* Re-enable CPPC in firmware when coming back from system suspend. */
810b376471fSJinzhou Su static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
811b376471fSJinzhou Su {
812b376471fSJinzhou Su 	int ret;
813b376471fSJinzhou Su 
814b376471fSJinzhou Su 	ret = amd_pstate_enable(true);
815b376471fSJinzhou Su 	if (ret)
816b376471fSJinzhou Su 		pr_err("failed to enable amd-pstate during resume, return %d\n", ret);
817b376471fSJinzhou Su 
818b376471fSJinzhou Su 	return ret;
819b376471fSJinzhou Su }
820b376471fSJinzhou Su 
/* Disable CPPC in firmware before entering system suspend. */
821b376471fSJinzhou Su static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
822b376471fSJinzhou Su {
823b376471fSJinzhou Su 	int ret;
824b376471fSJinzhou Su 
825b376471fSJinzhou Su 	ret = amd_pstate_enable(false);
826b376471fSJinzhou Su 	if (ret)
827b376471fSJinzhou Su 		pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);
828b376471fSJinzhou Su 
829b376471fSJinzhou Su 	return ret;
830b376471fSJinzhou Su }
831b376471fSJinzhou Su 
832ec4e3326SHuang Rui /* Sysfs attributes */
833ec4e3326SHuang Rui 
834ec4e3326SHuang Rui /*
835ec4e3326SHuang Rui  * This frequency is to indicate the maximum hardware frequency.
836ec4e3326SHuang Rui  * If boost is not active but supported, the frequency will be larger than the
837ec4e3326SHuang Rui  * one in cpuinfo.
838ec4e3326SHuang Rui  */
839ec4e3326SHuang Rui static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
840ec4e3326SHuang Rui 					char *buf)
841ec4e3326SHuang Rui {
842ec4e3326SHuang Rui 	int max_freq;
8434f59540cSPerry Yuan 	struct amd_cpudata *cpudata = policy->driver_data;
844ec4e3326SHuang Rui 
845ec4e3326SHuang Rui 	max_freq = amd_get_max_freq(cpudata);
846ec4e3326SHuang Rui 	if (max_freq < 0)
847ec4e3326SHuang Rui 		return max_freq;
848ec4e3326SHuang Rui 
8493ec32b6dSPerry Yuan 	return sysfs_emit(buf, "%u\n", max_freq);
850ec4e3326SHuang Rui }
851ec4e3326SHuang Rui 
/* Expose the lowest nonlinear frequency (khz) through sysfs. */
852ec4e3326SHuang Rui static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
853ec4e3326SHuang Rui 						     char *buf)
854ec4e3326SHuang Rui {
855ec4e3326SHuang Rui 	int freq;
8564f59540cSPerry Yuan 	struct amd_cpudata *cpudata = policy->driver_data;
857ec4e3326SHuang Rui 
858ec4e3326SHuang Rui 	freq = amd_get_lowest_nonlinear_freq(cpudata);
859ec4e3326SHuang Rui 	if (freq < 0)
860ec4e3326SHuang Rui 		return freq;
861ec4e3326SHuang Rui 
8623ec32b6dSPerry Yuan 	return sysfs_emit(buf, "%u\n", freq);
863ec4e3326SHuang Rui }
864ec4e3326SHuang Rui 
8653ad7fde1SHuang Rui /*
8663ad7fde1SHuang Rui  * In some of ASICs, the highest_perf is not the one in the _CPC table, so we
8673ad7fde1SHuang Rui  * need to expose it to sysfs.
8683ad7fde1SHuang Rui  */
8693ad7fde1SHuang Rui static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
8703ad7fde1SHuang Rui 					    char *buf)
8713ad7fde1SHuang Rui {
8723ad7fde1SHuang Rui 	u32 perf;
8733ad7fde1SHuang Rui 	struct amd_cpudata *cpudata = policy->driver_data;
8743ad7fde1SHuang Rui 
8753ad7fde1SHuang Rui 	perf = READ_ONCE(cpudata->highest_perf);
8763ad7fde1SHuang Rui 
8773ec32b6dSPerry Yuan 	return sysfs_emit(buf, "%u\n", perf);
8783ad7fde1SHuang Rui }
8793ad7fde1SHuang Rui 
/* List every supported EPP preference string, space separated. */
880ffa5096aSPerry Yuan static ssize_t show_energy_performance_available_preferences(
881ffa5096aSPerry Yuan 				struct cpufreq_policy *policy, char *buf)
882ffa5096aSPerry Yuan {
883ffa5096aSPerry Yuan 	int i = 0;
884ffa5096aSPerry Yuan 	int offset = 0;
885ffa5096aSPerry Yuan 
886ffa5096aSPerry Yuan 	while (energy_perf_strings[i] != NULL)
887ffa5096aSPerry Yuan 		offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
888ffa5096aSPerry Yuan 
889ffa5096aSPerry Yuan 	sysfs_emit_at(buf, offset, "\n");
890ffa5096aSPerry Yuan 
891ffa5096aSPerry Yuan 	return offset;
892ffa5096aSPerry Yuan }
893ffa5096aSPerry Yuan 
/*
 * Parse the written preference string, map it to an EPP index and apply it
 * under amd_pstate_limits_lock. Returns count on success, -EINVAL on an
 * unrecognized string, or the error from the set operation.
 */
894ffa5096aSPerry Yuan static ssize_t store_energy_performance_preference(
895ffa5096aSPerry Yuan 		struct cpufreq_policy *policy, const char *buf, size_t count)
896ffa5096aSPerry Yuan {
897ffa5096aSPerry Yuan 	struct amd_cpudata *cpudata = policy->driver_data;
898ffa5096aSPerry Yuan 	char str_preference[21];
899ffa5096aSPerry Yuan 	ssize_t ret;
900ffa5096aSPerry Yuan 
901ffa5096aSPerry Yuan 	ret = sscanf(buf, "%20s", str_preference);
902ffa5096aSPerry Yuan 	if (ret != 1)
903ffa5096aSPerry Yuan 		return -EINVAL;
904ffa5096aSPerry Yuan 
905ffa5096aSPerry Yuan 	ret = match_string(energy_perf_strings, -1, str_preference);
906ffa5096aSPerry Yuan 	if (ret < 0)
907ffa5096aSPerry Yuan 		return -EINVAL;
908ffa5096aSPerry Yuan 
909ffa5096aSPerry Yuan 	mutex_lock(&amd_pstate_limits_lock);
910ffa5096aSPerry Yuan 	ret = amd_pstate_set_energy_pref_index(cpudata, ret);
911ffa5096aSPerry Yuan 	mutex_unlock(&amd_pstate_limits_lock);
912ffa5096aSPerry Yuan 
913ffa5096aSPerry Yuan 	return ret ?: count;
914ffa5096aSPerry Yuan }
915ffa5096aSPerry Yuan 
/* Show the currently active EPP preference string. */
916ffa5096aSPerry Yuan static ssize_t show_energy_performance_preference(
917ffa5096aSPerry Yuan 				struct cpufreq_policy *policy, char *buf)
918ffa5096aSPerry Yuan {
919ffa5096aSPerry Yuan 	struct amd_cpudata *cpudata = policy->driver_data;
920ffa5096aSPerry Yuan 	int preference;
921ffa5096aSPerry Yuan 
922ffa5096aSPerry Yuan 	preference = amd_pstate_get_energy_pref_index(cpudata);
923ffa5096aSPerry Yuan 	if (preference < 0)
924ffa5096aSPerry Yuan 		return preference;
925ffa5096aSPerry Yuan 
926ffa5096aSPerry Yuan 	return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
927ffa5096aSPerry Yuan }
928ffa5096aSPerry Yuan 
/* Disable CPPC and reset driver-mode globals after a failed/undone register. */
9293ca7bc81SWyes Karny static void amd_pstate_driver_cleanup(void)
9303ca7bc81SWyes Karny {
9313ca7bc81SWyes Karny 	amd_pstate_enable(false);
9323ca7bc81SWyes Karny 	cppc_state = AMD_PSTATE_DISABLE;
9333ca7bc81SWyes Karny 	current_pstate_driver = NULL;
9343ca7bc81SWyes Karny }
9353ca7bc81SWyes Karny 
/*
 * Select the cpufreq driver matching @mode (passive/guided use
 * amd_pstate_driver, active uses the EPP driver) and register it with the
 * cpufreq core; cleans up on registration failure.
 */
9363ca7bc81SWyes Karny static int amd_pstate_register_driver(int mode)
9373ca7bc81SWyes Karny {
9383ca7bc81SWyes Karny 	int ret;
9393ca7bc81SWyes Karny 
9403ca7bc81SWyes Karny 	if (mode == AMD_PSTATE_PASSIVE || mode == AMD_PSTATE_GUIDED)
9413ca7bc81SWyes Karny 		current_pstate_driver = &amd_pstate_driver;
9423ca7bc81SWyes Karny 	else if (mode == AMD_PSTATE_ACTIVE)
9433ca7bc81SWyes Karny 		current_pstate_driver = &amd_pstate_epp_driver;
9443ca7bc81SWyes Karny 	else
9453ca7bc81SWyes Karny 		return -EINVAL;
9463ca7bc81SWyes Karny 
9473ca7bc81SWyes Karny 	cppc_state = mode;
9483ca7bc81SWyes Karny 	ret = cpufreq_register_driver(current_pstate_driver);
9493ca7bc81SWyes Karny 	if (ret) {
9503ca7bc81SWyes Karny 		amd_pstate_driver_cleanup();
9513ca7bc81SWyes Karny 		return ret;
9523ca7bc81SWyes Karny 	}
9533ca7bc81SWyes Karny 	return 0;
9543ca7bc81SWyes Karny }
9553ca7bc81SWyes Karny 
9563ca7bc81SWyes Karny static int
amd_pstate_unregister_driver(int dummy)
9573ca7bc81SWyes Karny {
9583ca7bc81SWyes Karny 	cpufreq_unregister_driver(current_pstate_driver);
9593ca7bc81SWyes Karny 	amd_pstate_driver_cleanup();
9603ca7bc81SWyes Karny 	return 0;
9613ca7bc81SWyes Karny }
9623ca7bc81SWyes Karny 
/*
 * Switch between passive and guided mode without re-registering the cpufreq
 * driver: only the CPPC autonomous-selection bit changes on shared-memory
 * systems. No-op on full-MSR systems or when entering active mode.
 */
9633ca7bc81SWyes Karny static int amd_pstate_change_mode_without_dvr_change(int mode)
9643ca7bc81SWyes Karny {
9653ca7bc81SWyes Karny 	int cpu = 0;
9663ca7bc81SWyes Karny 
9673ca7bc81SWyes Karny 	cppc_state = mode;
9683ca7bc81SWyes Karny 
9693ca7bc81SWyes Karny 	if (boot_cpu_has(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
9703ca7bc81SWyes Karny 		return 0;
9713ca7bc81SWyes Karny 
9723ca7bc81SWyes Karny 	for_each_present_cpu(cpu) {
9733ca7bc81SWyes Karny 		cppc_set_auto_sel(cpu, (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
9743ca7bc81SWyes Karny 	}
9753ca7bc81SWyes Karny 
9763ca7bc81SWyes Karny 	return 0;
9773ca7bc81SWyes Karny }
9783ca7bc81SWyes Karny 
/* Full mode switch: unregister the current driver, register the new one. */
9793ca7bc81SWyes Karny static int amd_pstate_change_driver_mode(int mode)
9803ca7bc81SWyes Karny {
9813ca7bc81SWyes Karny 	int ret;
9823ca7bc81SWyes Karny 
9833ca7bc81SWyes Karny 	ret = amd_pstate_unregister_driver(0);
9843ca7bc81SWyes Karny 	if (ret)
9853ca7bc81SWyes Karny 		return ret;
9863ca7bc81SWyes Karny 
9873ca7bc81SWyes Karny 	ret = amd_pstate_register_driver(mode);
9883ca7bc81SWyes Karny 	if (ret)
9893ca7bc81SWyes Karny 		return ret;
9903ca7bc81SWyes Karny 
9913ca7bc81SWyes Karny 	return 0;
9923ca7bc81SWyes Karny }
9933ca7bc81SWyes Karny 
/*
 * Transition table indexed by [current mode][requested mode]; NULL entries
 * mean "nothing to do". Chooses between full driver re-registration and the
 * cheaper auto-select toggle depending on the mode pair.
 */
99411fa52feSTom Rix static cppc_mode_transition_fn mode_state_machine[AMD_PSTATE_MAX][AMD_PSTATE_MAX] = {
9953ca7bc81SWyes Karny 	[AMD_PSTATE_DISABLE]         = {
9963ca7bc81SWyes Karny 		[AMD_PSTATE_DISABLE]     = NULL,
9973ca7bc81SWyes Karny 		[AMD_PSTATE_PASSIVE]     = amd_pstate_register_driver,
9983ca7bc81SWyes Karny 		[AMD_PSTATE_ACTIVE]      = amd_pstate_register_driver,
9993ca7bc81SWyes Karny 		[AMD_PSTATE_GUIDED]      = amd_pstate_register_driver,
10003ca7bc81SWyes Karny 	},
10013ca7bc81SWyes Karny 	[AMD_PSTATE_PASSIVE]         = {
10023ca7bc81SWyes Karny 		[AMD_PSTATE_DISABLE]     = amd_pstate_unregister_driver,
10033ca7bc81SWyes Karny 		[AMD_PSTATE_PASSIVE]     = NULL,
10043ca7bc81SWyes Karny 		[AMD_PSTATE_ACTIVE]      = amd_pstate_change_driver_mode,
10053ca7bc81SWyes Karny 		[AMD_PSTATE_GUIDED]      = amd_pstate_change_mode_without_dvr_change,
10063ca7bc81SWyes Karny 	},
10073ca7bc81SWyes Karny 	[AMD_PSTATE_ACTIVE]          = {
10083ca7bc81SWyes Karny 		[AMD_PSTATE_DISABLE]     = amd_pstate_unregister_driver,
10093ca7bc81SWyes Karny 		[AMD_PSTATE_PASSIVE]     = amd_pstate_change_driver_mode,
10103ca7bc81SWyes Karny 		[AMD_PSTATE_ACTIVE]      = NULL,
10113ca7bc81SWyes Karny 		[AMD_PSTATE_GUIDED]      = amd_pstate_change_driver_mode,
10123ca7bc81SWyes Karny 	},
10133ca7bc81SWyes Karny 	[AMD_PSTATE_GUIDED]          = {
10143ca7bc81SWyes Karny 		[AMD_PSTATE_DISABLE]     = amd_pstate_unregister_driver,
10153ca7bc81SWyes Karny 		[AMD_PSTATE_PASSIVE]     = amd_pstate_change_mode_without_dvr_change,
10163ca7bc81SWyes Karny 		[AMD_PSTATE_ACTIVE]      = amd_pstate_change_driver_mode,
10173ca7bc81SWyes Karny 		[AMD_PSTATE_GUIDED]      = NULL,
10183ca7bc81SWyes Karny 	},
10193ca7bc81SWyes Karny };
10203ca7bc81SWyes Karny 
/* Emit the current driver mode name, or "disable" if no driver is active. */
1021abd61c08SPerry Yuan static ssize_t amd_pstate_show_status(char *buf)
1022abd61c08SPerry Yuan {
1023abd61c08SPerry Yuan 	if (!current_pstate_driver)
1024abd61c08SPerry Yuan 		return sysfs_emit(buf, "disable\n");
1025abd61c08SPerry Yuan 
1026abd61c08SPerry Yuan 	return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
1027abd61c08SPerry Yuan }
1028abd61c08SPerry Yuan 
/*
 * Parse a mode name written to the status attribute and run the matching
 * mode_state_machine transition. The length check bounds input between the
 * shortest ("active"/"guided") and longest ("passive"/"disable") mode names.
 */
1029abd61c08SPerry Yuan static int amd_pstate_update_status(const char *buf, size_t size)
1030abd61c08SPerry Yuan {
1031abd61c08SPerry Yuan 	int mode_idx;
1032abd61c08SPerry Yuan 
10333ca7bc81SWyes Karny 	if (size > strlen("passive") || size < strlen("active"))
1034abd61c08SPerry Yuan 		return -EINVAL;
10353ca7bc81SWyes Karny 
1036abd61c08SPerry Yuan 	mode_idx = get_mode_idx_from_str(buf, size);
1037abd61c08SPerry Yuan 
10383ca7bc81SWyes Karny 	if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX)
1039abd61c08SPerry Yuan 		return -EINVAL;
10403ca7bc81SWyes Karny 
10413ca7bc81SWyes Karny 	if (mode_state_machine[cppc_state][mode_idx])
10423ca7bc81SWyes Karny 		return mode_state_machine[cppc_state][mode_idx](mode_idx);
10433ca7bc81SWyes Karny 
1044abd61c08SPerry Yuan 	return 0;
1045abd61c08SPerry Yuan }
1046abd61c08SPerry Yuan 
/* sysfs read of /sys/devices/system/cpu/amd_pstate/status. */
10475e720f8cSThomas Weißschuh static ssize_t status_show(struct device *dev,
10485e720f8cSThomas Weißschuh 			   struct device_attribute *attr, char *buf)
1049abd61c08SPerry Yuan {
1050abd61c08SPerry Yuan 	ssize_t ret;
1051abd61c08SPerry Yuan 
1052abd61c08SPerry Yuan 	mutex_lock(&amd_pstate_driver_lock);
1053abd61c08SPerry Yuan 	ret = amd_pstate_show_status(buf);
1054abd61c08SPerry Yuan 	mutex_unlock(&amd_pstate_driver_lock);
1055abd61c08SPerry Yuan 
1056abd61c08SPerry Yuan 	return ret;
1057abd61c08SPerry Yuan }
1058abd61c08SPerry Yuan 
/* sysfs write of the status attribute; trims a trailing newline if present. */
10595e720f8cSThomas Weißschuh static ssize_t status_store(struct device *a, struct device_attribute *b,
1060abd61c08SPerry Yuan 			    const char *buf, size_t count)
1061abd61c08SPerry Yuan {
1062abd61c08SPerry Yuan 	char *p = memchr(buf, '\n', count);
1063abd61c08SPerry Yuan 	int ret;
1064abd61c08SPerry Yuan 
1065abd61c08SPerry Yuan 	mutex_lock(&amd_pstate_driver_lock);
1066abd61c08SPerry Yuan 	ret = amd_pstate_update_status(buf, p ? p - buf : count);
1067abd61c08SPerry Yuan 	mutex_unlock(&amd_pstate_driver_lock);
1068abd61c08SPerry Yuan 
1069abd61c08SPerry Yuan 	return ret < 0 ? ret : count;
1070abd61c08SPerry Yuan }
1071abd61c08SPerry Yuan 
1072ec4e3326SHuang Rui cpufreq_freq_attr_ro(amd_pstate_max_freq);
1073ec4e3326SHuang Rui cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
1074ec4e3326SHuang Rui 
10753ad7fde1SHuang Rui cpufreq_freq_attr_ro(amd_pstate_highest_perf);
1076ffa5096aSPerry Yuan cpufreq_freq_attr_rw(energy_performance_preference);
1077ffa5096aSPerry Yuan cpufreq_freq_attr_ro(energy_performance_available_preferences);
10785e720f8cSThomas Weißschuh static DEVICE_ATTR_RW(status);
10793ad7fde1SHuang Rui 
/* Per-policy attributes for the passive/guided driver. */
1080ec4e3326SHuang Rui static struct freq_attr *amd_pstate_attr[] = {
1081ec4e3326SHuang Rui 	&amd_pstate_max_freq,
1082ec4e3326SHuang Rui 	&amd_pstate_lowest_nonlinear_freq,
10833ad7fde1SHuang Rui 	&amd_pstate_highest_perf,
1084ec4e3326SHuang Rui 	NULL,
1085ec4e3326SHuang Rui };
1086ec4e3326SHuang Rui 
/* Per-policy attributes for the EPP (active-mode) driver. */
1087ffa5096aSPerry Yuan static struct freq_attr *amd_pstate_epp_attr[] = {
1088ffa5096aSPerry Yuan 	&amd_pstate_max_freq,
1089ffa5096aSPerry Yuan 	&amd_pstate_lowest_nonlinear_freq,
1090ffa5096aSPerry Yuan 	&amd_pstate_highest_perf,
1091ffa5096aSPerry Yuan 	&energy_performance_preference,
1092ffa5096aSPerry Yuan 	&energy_performance_available_preferences,
1093ffa5096aSPerry Yuan 	NULL,
1094ffa5096aSPerry Yuan };
1095ffa5096aSPerry Yuan 
1096abd61c08SPerry Yuan static struct attribute *pstate_global_attributes[] = {
10975e720f8cSThomas Weißschuh 	&dev_attr_status.attr,
1098abd61c08SPerry Yuan 	NULL
1099abd61c08SPerry Yuan };
1100abd61c08SPerry Yuan 
1101abd61c08SPerry Yuan static const struct attribute_group amd_pstate_global_attr_group = {
11023666062bSGreg Kroah-Hartman 	.name = "amd_pstate",
1103abd61c08SPerry Yuan 	.attrs = pstate_global_attributes,
1104abd61c08SPerry Yuan };
1105abd61c08SPerry Yuan 
/* True when the FADT preferred PM profile designates a server platform. */
110632f80b9aSMario Limonciello static bool amd_pstate_acpi_pm_profile_server(void)
110732f80b9aSMario Limonciello {
110832f80b9aSMario Limonciello 	switch (acpi_gbl_FADT.preferred_profile) {
110932f80b9aSMario Limonciello 	case
PM_ENTERPRISE_SERVER: 111032f80b9aSMario Limonciello case PM_SOHO_SERVER: 111132f80b9aSMario Limonciello case PM_PERFORMANCE_SERVER: 111232f80b9aSMario Limonciello return true; 111332f80b9aSMario Limonciello } 111432f80b9aSMario Limonciello return false; 111532f80b9aSMario Limonciello } 111632f80b9aSMario Limonciello 111732f80b9aSMario Limonciello static bool amd_pstate_acpi_pm_profile_undefined(void) 111832f80b9aSMario Limonciello { 111932f80b9aSMario Limonciello if (acpi_gbl_FADT.preferred_profile == PM_UNSPECIFIED) 112032f80b9aSMario Limonciello return true; 112132f80b9aSMario Limonciello if (acpi_gbl_FADT.preferred_profile >= NR_PM_PROFILES) 112232f80b9aSMario Limonciello return true; 112332f80b9aSMario Limonciello return false; 112432f80b9aSMario Limonciello } 112532f80b9aSMario Limonciello 1126ffa5096aSPerry Yuan static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) 1127ffa5096aSPerry Yuan { 1128ffa5096aSPerry Yuan int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret; 1129ffa5096aSPerry Yuan struct amd_cpudata *cpudata; 1130ffa5096aSPerry Yuan struct device *dev; 1131ffa5096aSPerry Yuan u64 value; 1132ffa5096aSPerry Yuan 1133ffa5096aSPerry Yuan /* 1134ffa5096aSPerry Yuan * Resetting PERF_CTL_MSR will put the CPU in P0 frequency, 1135ffa5096aSPerry Yuan * which is ideal for initialization process. 
1136ffa5096aSPerry Yuan */ 1137ffa5096aSPerry Yuan amd_perf_ctl_reset(policy->cpu); 1138ffa5096aSPerry Yuan dev = get_cpu_device(policy->cpu); 1139ffa5096aSPerry Yuan if (!dev) 11407cca9a98SArnd Bergmann return -ENODEV; 1141ffa5096aSPerry Yuan 1142ffa5096aSPerry Yuan cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL); 1143ffa5096aSPerry Yuan if (!cpudata) 1144ffa5096aSPerry Yuan return -ENOMEM; 1145ffa5096aSPerry Yuan 1146ffa5096aSPerry Yuan cpudata->cpu = policy->cpu; 1147ffa5096aSPerry Yuan cpudata->epp_policy = 0; 1148ffa5096aSPerry Yuan 11497cca9a98SArnd Bergmann ret = amd_pstate_init_perf(cpudata); 11507cca9a98SArnd Bergmann if (ret) 1151ffa5096aSPerry Yuan goto free_cpudata1; 1152ffa5096aSPerry Yuan 1153ffa5096aSPerry Yuan min_freq = amd_get_min_freq(cpudata); 1154ffa5096aSPerry Yuan max_freq = amd_get_max_freq(cpudata); 1155ffa5096aSPerry Yuan nominal_freq = amd_get_nominal_freq(cpudata); 1156ffa5096aSPerry Yuan lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata); 1157ffa5096aSPerry Yuan if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) { 1158ffa5096aSPerry Yuan dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n", 1159ffa5096aSPerry Yuan min_freq, max_freq); 1160ffa5096aSPerry Yuan ret = -EINVAL; 1161ffa5096aSPerry Yuan goto free_cpudata1; 1162ffa5096aSPerry Yuan } 1163ffa5096aSPerry Yuan 1164ffa5096aSPerry Yuan policy->cpuinfo.min_freq = min_freq; 1165ffa5096aSPerry Yuan policy->cpuinfo.max_freq = max_freq; 1166ffa5096aSPerry Yuan /* It will be updated by governor */ 1167ffa5096aSPerry Yuan policy->cur = policy->cpuinfo.min_freq; 1168ffa5096aSPerry Yuan 1169ffa5096aSPerry Yuan /* Initial processor data capability frequencies */ 1170ffa5096aSPerry Yuan cpudata->max_freq = max_freq; 1171ffa5096aSPerry Yuan cpudata->min_freq = min_freq; 1172ffa5096aSPerry Yuan cpudata->nominal_freq = nominal_freq; 1173ffa5096aSPerry Yuan cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq; 1174ffa5096aSPerry Yuan 1175ffa5096aSPerry Yuan 
policy->driver_data = cpudata; 1176ffa5096aSPerry Yuan 1177ffa5096aSPerry Yuan cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0); 1178ffa5096aSPerry Yuan 1179ffa5096aSPerry Yuan policy->min = policy->cpuinfo.min_freq; 1180ffa5096aSPerry Yuan policy->max = policy->cpuinfo.max_freq; 1181ffa5096aSPerry Yuan 1182ffa5096aSPerry Yuan /* 118332f80b9aSMario Limonciello * Set the policy to provide a valid fallback value in case 1184ffa5096aSPerry Yuan * the default cpufreq governor is neither powersave nor performance. 1185ffa5096aSPerry Yuan */ 118632f80b9aSMario Limonciello if (amd_pstate_acpi_pm_profile_server() || 118732f80b9aSMario Limonciello amd_pstate_acpi_pm_profile_undefined()) 118832f80b9aSMario Limonciello policy->policy = CPUFREQ_POLICY_PERFORMANCE; 118932f80b9aSMario Limonciello else 1190ffa5096aSPerry Yuan policy->policy = CPUFREQ_POLICY_POWERSAVE; 1191ffa5096aSPerry Yuan 1192ffa5096aSPerry Yuan if (boot_cpu_has(X86_FEATURE_CPPC)) { 1193ffa5096aSPerry Yuan ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value); 1194ffa5096aSPerry Yuan if (ret) 1195ffa5096aSPerry Yuan return ret; 1196ffa5096aSPerry Yuan WRITE_ONCE(cpudata->cppc_req_cached, value); 1197ffa5096aSPerry Yuan 1198ffa5096aSPerry Yuan ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &value); 1199ffa5096aSPerry Yuan if (ret) 1200ffa5096aSPerry Yuan return ret; 1201ffa5096aSPerry Yuan WRITE_ONCE(cpudata->cppc_cap1_cached, value); 1202ffa5096aSPerry Yuan } 1203ffa5096aSPerry Yuan amd_pstate_boost_init(cpudata); 1204ffa5096aSPerry Yuan 1205ffa5096aSPerry Yuan return 0; 1206ffa5096aSPerry Yuan 1207ffa5096aSPerry Yuan free_cpudata1: 1208ffa5096aSPerry Yuan kfree(cpudata); 1209ffa5096aSPerry Yuan return ret; 1210ffa5096aSPerry Yuan } 1211ffa5096aSPerry Yuan 1212ffa5096aSPerry Yuan static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy) 1213ffa5096aSPerry Yuan { 1214ffa5096aSPerry Yuan pr_debug("CPU %d exiting\n", policy->cpu); 1215ffa5096aSPerry Yuan return 0; 1216ffa5096aSPerry Yuan } 
1217ffa5096aSPerry Yuan 1218*4d78331cSWyes Karny static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy) 1219ffa5096aSPerry Yuan { 1220ffa5096aSPerry Yuan struct amd_cpudata *cpudata = policy->driver_data; 1221*4d78331cSWyes Karny u32 max_perf, min_perf, min_limit_perf, max_limit_perf; 1222ffa5096aSPerry Yuan u64 value; 1223ffa5096aSPerry Yuan s16 epp; 1224ffa5096aSPerry Yuan 1225ffa5096aSPerry Yuan max_perf = READ_ONCE(cpudata->highest_perf); 1226ffa5096aSPerry Yuan min_perf = READ_ONCE(cpudata->lowest_perf); 1227*4d78331cSWyes Karny max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq); 1228*4d78331cSWyes Karny min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq); 1229*4d78331cSWyes Karny 1230*4d78331cSWyes Karny max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf, 1231*4d78331cSWyes Karny cpudata->max_limit_perf); 1232*4d78331cSWyes Karny min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf, 1233*4d78331cSWyes Karny cpudata->max_limit_perf); 1234*4d78331cSWyes Karny 1235*4d78331cSWyes Karny WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf); 1236*4d78331cSWyes Karny WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf); 1237ffa5096aSPerry Yuan 1238ffa5096aSPerry Yuan value = READ_ONCE(cpudata->cppc_req_cached); 1239ffa5096aSPerry Yuan 1240ffa5096aSPerry Yuan if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) 1241ffa5096aSPerry Yuan min_perf = max_perf; 1242ffa5096aSPerry Yuan 1243ffa5096aSPerry Yuan /* Initial min/max values for CPPC Performance Controls Register */ 1244ffa5096aSPerry Yuan value &= ~AMD_CPPC_MIN_PERF(~0L); 1245ffa5096aSPerry Yuan value |= AMD_CPPC_MIN_PERF(min_perf); 1246ffa5096aSPerry Yuan 1247ffa5096aSPerry Yuan value &= ~AMD_CPPC_MAX_PERF(~0L); 1248ffa5096aSPerry Yuan value |= AMD_CPPC_MAX_PERF(max_perf); 1249ffa5096aSPerry Yuan 1250ffa5096aSPerry Yuan /* CPPC EPP feature require to set zero to the desire perf bit */ 1251ffa5096aSPerry Yuan 
value &= ~AMD_CPPC_DES_PERF(~0L); 1252ffa5096aSPerry Yuan value |= AMD_CPPC_DES_PERF(0); 1253ffa5096aSPerry Yuan 1254ffa5096aSPerry Yuan cpudata->epp_policy = cpudata->policy; 1255ffa5096aSPerry Yuan 1256ffa5096aSPerry Yuan /* Get BIOS pre-defined epp value */ 1257ffa5096aSPerry Yuan epp = amd_pstate_get_epp(cpudata, value); 12586e9d1212SWyes Karny if (epp < 0) { 12596e9d1212SWyes Karny /** 12606e9d1212SWyes Karny * This return value can only be negative for shared_memory 12616e9d1212SWyes Karny * systems where EPP register read/write not supported. 12626e9d1212SWyes Karny */ 1263*4d78331cSWyes Karny return; 1264ffa5096aSPerry Yuan } 12656e9d1212SWyes Karny 12666e9d1212SWyes Karny if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) 12676e9d1212SWyes Karny epp = 0; 12686e9d1212SWyes Karny 1269ffa5096aSPerry Yuan /* Set initial EPP value */ 1270ffa5096aSPerry Yuan if (boot_cpu_has(X86_FEATURE_CPPC)) { 1271ffa5096aSPerry Yuan value &= ~GENMASK_ULL(31, 24); 1272ffa5096aSPerry Yuan value |= (u64)epp << 24; 1273ffa5096aSPerry Yuan } 1274ffa5096aSPerry Yuan 12756e9d1212SWyes Karny WRITE_ONCE(cpudata->cppc_req_cached, value); 12767cca9a98SArnd Bergmann amd_pstate_set_epp(cpudata, epp); 1277ffa5096aSPerry Yuan } 1278ffa5096aSPerry Yuan 1279ffa5096aSPerry Yuan static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy) 1280ffa5096aSPerry Yuan { 1281ffa5096aSPerry Yuan struct amd_cpudata *cpudata = policy->driver_data; 1282ffa5096aSPerry Yuan 1283ffa5096aSPerry Yuan if (!policy->cpuinfo.max_freq) 1284ffa5096aSPerry Yuan return -ENODEV; 1285ffa5096aSPerry Yuan 1286ffa5096aSPerry Yuan pr_debug("set_policy: cpuinfo.max %u policy->max %u\n", 1287ffa5096aSPerry Yuan policy->cpuinfo.max_freq, policy->max); 1288ffa5096aSPerry Yuan 1289ffa5096aSPerry Yuan cpudata->policy = policy->policy; 1290ffa5096aSPerry Yuan 1291*4d78331cSWyes Karny amd_pstate_epp_update_limit(policy); 1292ffa5096aSPerry Yuan 1293ffa5096aSPerry Yuan return 0; 1294ffa5096aSPerry Yuan } 1295ffa5096aSPerry 
Yuan 
/*
 * Restore CPPC operation for one CPU after suspend/offline: re-enable the
 * feature in firmware, then replay the cached request (full-MSR systems
 * rewrite MSR_AMD_CPPC_REQ; shared-memory systems resubmit max perf and the
 * cached EPP through the ACPI CPPC interface).
 */
1296d4da12f8SPerry Yuan static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
1297d4da12f8SPerry Yuan {
1298d4da12f8SPerry Yuan 	struct cppc_perf_ctrls perf_ctrls;
1299d4da12f8SPerry Yuan 	u64 value, max_perf;
1300d4da12f8SPerry Yuan 	int ret;
1301d4da12f8SPerry Yuan 
1302d4da12f8SPerry Yuan 	ret = amd_pstate_enable(true);
1303d4da12f8SPerry Yuan 	if (ret)
1304d4da12f8SPerry Yuan 		pr_err("failed to enable amd pstate during resume, return %d\n", ret);
1305d4da12f8SPerry Yuan 
1306d4da12f8SPerry Yuan 	value = READ_ONCE(cpudata->cppc_req_cached);
1307d4da12f8SPerry Yuan 	max_perf = READ_ONCE(cpudata->highest_perf);
1308d4da12f8SPerry Yuan 
1309d4da12f8SPerry Yuan 	if (boot_cpu_has(X86_FEATURE_CPPC)) {
1310d4da12f8SPerry Yuan 		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
1311d4da12f8SPerry Yuan 	} else {
1312d4da12f8SPerry Yuan 		perf_ctrls.max_perf = max_perf;
1313d4da12f8SPerry Yuan 		perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
1314d4da12f8SPerry Yuan 		cppc_set_perf(cpudata->cpu, &perf_ctrls);
1315d4da12f8SPerry Yuan 	}
1316d4da12f8SPerry Yuan }
1317d4da12f8SPerry Yuan 
/* cpufreq ->online hook: restore CPPC state when a core comes back online. */
1318d4da12f8SPerry Yuan static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
1319d4da12f8SPerry Yuan {
1320d4da12f8SPerry Yuan 	struct amd_cpudata *cpudata = policy->driver_data;
1321d4da12f8SPerry Yuan 
1322d4da12f8SPerry Yuan 	pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
1323d4da12f8SPerry Yuan 
1324d4da12f8SPerry Yuan 	if (cppc_state == AMD_PSTATE_ACTIVE) {
1325d4da12f8SPerry Yuan 		amd_pstate_epp_reenable(cpudata);
1326d4da12f8SPerry Yuan 		cpudata->suspended = false;
1327d4da12f8SPerry Yuan 	}
1328d4da12f8SPerry Yuan 
1329d4da12f8SPerry Yuan 	return 0;
1330d4da12f8SPerry Yuan }
1331d4da12f8SPerry Yuan 
/*
 * Park an offlined core at its lowest perf level: min == max == lowest_perf
 * so the core cannot be boosted while offline. Serialized against limit
 * updates by amd_pstate_limits_lock.
 */
1332d4da12f8SPerry Yuan static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
1333d4da12f8SPerry Yuan {
1334d4da12f8SPerry Yuan 	struct amd_cpudata *cpudata = policy->driver_data;
1335d4da12f8SPerry Yuan 	struct cppc_perf_ctrls perf_ctrls;
1336d4da12f8SPerry Yuan 	int min_perf;
1337d4da12f8SPerry Yuan 	u64 value;
1338d4da12f8SPerry Yuan 
1339d4da12f8SPerry Yuan 	min_perf = READ_ONCE(cpudata->lowest_perf);
1340d4da12f8SPerry Yuan 	value = READ_ONCE(cpudata->cppc_req_cached);
1341d4da12f8SPerry Yuan 
1342d4da12f8SPerry Yuan 	mutex_lock(&amd_pstate_limits_lock);
1343d4da12f8SPerry Yuan 	if (boot_cpu_has(X86_FEATURE_CPPC)) {
1344d4da12f8SPerry Yuan 		cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
1345d4da12f8SPerry Yuan 
1346d4da12f8SPerry Yuan 		/* Set max perf same as min perf */
1347d4da12f8SPerry Yuan 		value &= ~AMD_CPPC_MAX_PERF(~0L);
1348d4da12f8SPerry Yuan 		value |= AMD_CPPC_MAX_PERF(min_perf);
1349d4da12f8SPerry Yuan 		value &= ~AMD_CPPC_MIN_PERF(~0L);
1350d4da12f8SPerry Yuan 		value |= AMD_CPPC_MIN_PERF(min_perf);
1351d4da12f8SPerry Yuan 		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
1352d4da12f8SPerry Yuan 	} else {
1353d4da12f8SPerry Yuan 		perf_ctrls.desired_perf = 0;
1354d4da12f8SPerry Yuan 		perf_ctrls.max_perf = min_perf;
1355d4da12f8SPerry Yuan 		perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
1356d4da12f8SPerry Yuan 		cppc_set_perf(cpudata->cpu, &perf_ctrls);
1357d4da12f8SPerry Yuan 	}
1358d4da12f8SPerry Yuan 	mutex_unlock(&amd_pstate_limits_lock);
1359d4da12f8SPerry Yuan }
1360d4da12f8SPerry Yuan 
/* cpufreq ->offline hook; skipped while suspended (suspend handles CPPC). */
1361d4da12f8SPerry Yuan static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
1362d4da12f8SPerry Yuan {
1363d4da12f8SPerry Yuan 	struct amd_cpudata *cpudata = policy->driver_data;
1364d4da12f8SPerry Yuan 
1365d4da12f8SPerry Yuan 	pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
1366d4da12f8SPerry Yuan 
1367d4da12f8SPerry Yuan 	if (cpudata->suspended)
1368d4da12f8SPerry Yuan 		return 0;
1369d4da12f8SPerry Yuan 
1370d4da12f8SPerry Yuan 	if (cppc_state == AMD_PSTATE_ACTIVE)
1371d4da12f8SPerry Yuan 		amd_pstate_epp_offline(policy);
1372d4da12f8SPerry Yuan 
1373d4da12f8SPerry Yuan 	return 0;
1374d4da12f8SPerry Yuan }
1375d4da12f8SPerry Yuan 
/* ->verify: constrain requested limits to the CPU's supported range. */
1376ffa5096aSPerry Yuan static int amd_pstate_epp_verify_policy(struct cpufreq_policy_data *policy)
1377ffa5096aSPerry Yuan {
1378ffa5096aSPerry Yuan 	cpufreq_verify_within_cpu_limits(policy);
1379ffa5096aSPerry Yuan 	pr_debug("policy_max =%d, policy_min=%d\n", policy->max, policy->min);
1380ffa5096aSPerry Yuan 	return 0;
1381ffa5096aSPerry Yuan }
1382ffa5096aSPerry Yuan 
/*
 * System suspend: mark the CPU suspended (so ->offline is a no-op) and
 * disable CPPC in firmware. Only acts in active (EPP) mode.
 */
138350ddd2f7SPerry Yuan static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
138450ddd2f7SPerry Yuan {
138550ddd2f7SPerry Yuan 	struct amd_cpudata *cpudata = policy->driver_data;
138650ddd2f7SPerry Yuan 	int ret;
138750ddd2f7SPerry Yuan 
138850ddd2f7SPerry Yuan 	/* avoid suspending when EPP is not enabled */
138950ddd2f7SPerry Yuan 	if (cppc_state != AMD_PSTATE_ACTIVE)
139050ddd2f7SPerry Yuan 		return 0;
139150ddd2f7SPerry Yuan 
139250ddd2f7SPerry Yuan 	/* set this flag to avoid setting core offline*/
139350ddd2f7SPerry Yuan 	cpudata->suspended = true;
139450ddd2f7SPerry Yuan 
139550ddd2f7SPerry Yuan 	/* disable CPPC in lowlevel firmware */
139650ddd2f7SPerry Yuan 	ret = amd_pstate_enable(false);
139750ddd2f7SPerry Yuan 	if (ret)
139850ddd2f7SPerry Yuan 		pr_err("failed to suspend, return %d\n", ret);
139950ddd2f7SPerry Yuan 
140050ddd2f7SPerry Yuan 	return 0;
140150ddd2f7SPerry Yuan }
140250ddd2f7SPerry Yuan 
/* System resume: re-enable CPPC and replay cached state, under the lock. */
140350ddd2f7SPerry Yuan static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
140450ddd2f7SPerry Yuan {
140550ddd2f7SPerry Yuan 	struct amd_cpudata *cpudata = policy->driver_data;
140650ddd2f7SPerry Yuan 
140750ddd2f7SPerry Yuan 	if (cpudata->suspended) {
140850ddd2f7SPerry Yuan 		mutex_lock(&amd_pstate_limits_lock);
140950ddd2f7SPerry Yuan 
141050ddd2f7SPerry Yuan 		/* enable amd pstate from suspend state*/
141150ddd2f7SPerry Yuan 		amd_pstate_epp_reenable(cpudata);
141250ddd2f7SPerry Yuan 
141350ddd2f7SPerry Yuan 		mutex_unlock(&amd_pstate_limits_lock);
141450ddd2f7SPerry Yuan 
141550ddd2f7SPerry Yuan 		cpudata->suspended = false;
141650ddd2f7SPerry Yuan 	}
141750ddd2f7SPerry Yuan 
141850ddd2f7SPerry Yuan 	return 0;
141950ddd2f7SPerry Yuan }
142050ddd2f7SPerry Yuan 1421ec437d71SHuang Rui static struct cpufreq_driver amd_pstate_driver = { 1422ec437d71SHuang Rui .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS, 1423ec437d71SHuang Rui .verify = amd_pstate_verify, 1424ec437d71SHuang Rui .target = amd_pstate_target, 14254badf2ebSGautham R. Shenoy .fast_switch = amd_pstate_fast_switch, 1426ec437d71SHuang Rui .init = amd_pstate_cpu_init, 1427ec437d71SHuang Rui .exit = amd_pstate_cpu_exit, 1428b376471fSJinzhou Su .suspend = amd_pstate_cpu_suspend, 1429b376471fSJinzhou Su .resume = amd_pstate_cpu_resume, 143041271016SHuang Rui .set_boost = amd_pstate_set_boost, 1431ec437d71SHuang Rui .name = "amd-pstate", 1432ec4e3326SHuang Rui .attr = amd_pstate_attr, 1433ec437d71SHuang Rui }; 1434ec437d71SHuang Rui 1435ffa5096aSPerry Yuan static struct cpufreq_driver amd_pstate_epp_driver = { 1436ffa5096aSPerry Yuan .flags = CPUFREQ_CONST_LOOPS, 1437ffa5096aSPerry Yuan .verify = amd_pstate_epp_verify_policy, 1438ffa5096aSPerry Yuan .setpolicy = amd_pstate_epp_set_policy, 1439ffa5096aSPerry Yuan .init = amd_pstate_epp_cpu_init, 1440ffa5096aSPerry Yuan .exit = amd_pstate_epp_cpu_exit, 1441d4da12f8SPerry Yuan .offline = amd_pstate_epp_cpu_offline, 1442d4da12f8SPerry Yuan .online = amd_pstate_epp_cpu_online, 144350ddd2f7SPerry Yuan .suspend = amd_pstate_epp_suspend, 144450ddd2f7SPerry Yuan .resume = amd_pstate_epp_resume, 1445f4aad639SWyes Karny .name = "amd-pstate-epp", 1446ffa5096aSPerry Yuan .attr = amd_pstate_epp_attr, 1447ffa5096aSPerry Yuan }; 1448ffa5096aSPerry Yuan 1449c88ad30eSMario Limonciello static int __init amd_pstate_set_driver(int mode_idx) 1450c88ad30eSMario Limonciello { 1451c88ad30eSMario Limonciello if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) { 1452c88ad30eSMario Limonciello cppc_state = mode_idx; 1453c88ad30eSMario Limonciello if (cppc_state == AMD_PSTATE_DISABLE) 1454c88ad30eSMario Limonciello pr_info("driver is explicitly disabled\n"); 1455c88ad30eSMario Limonciello 
1456c88ad30eSMario Limonciello if (cppc_state == AMD_PSTATE_ACTIVE) 1457c88ad30eSMario Limonciello current_pstate_driver = &amd_pstate_epp_driver; 1458c88ad30eSMario Limonciello 1459c88ad30eSMario Limonciello if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED) 1460c88ad30eSMario Limonciello current_pstate_driver = &amd_pstate_driver; 1461c88ad30eSMario Limonciello 1462c88ad30eSMario Limonciello return 0; 1463c88ad30eSMario Limonciello } 1464c88ad30eSMario Limonciello 1465c88ad30eSMario Limonciello return -EINVAL; 1466c88ad30eSMario Limonciello } 1467c88ad30eSMario Limonciello 1468ec437d71SHuang Rui static int __init amd_pstate_init(void) 1469ec437d71SHuang Rui { 14703666062bSGreg Kroah-Hartman struct device *dev_root; 1471ec437d71SHuang Rui int ret; 1472ec437d71SHuang Rui 1473ec437d71SHuang Rui if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) 1474ec437d71SHuang Rui return -ENODEV; 1475ec437d71SHuang Rui 1476ec437d71SHuang Rui if (!acpi_cpc_valid()) { 1477a2a9d185SPerry Yuan pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n"); 1478ec437d71SHuang Rui return -ENODEV; 1479ec437d71SHuang Rui } 1480ec437d71SHuang Rui 1481ec437d71SHuang Rui /* don't keep reloading if cpufreq_driver exists */ 1482ec437d71SHuang Rui if (cpufreq_get_current_driver()) 1483ec437d71SHuang Rui return -EEXIST; 1484ec437d71SHuang Rui 1485c88ad30eSMario Limonciello switch (cppc_state) { 1486c88ad30eSMario Limonciello case AMD_PSTATE_UNDEFINED: 1487c88ad30eSMario Limonciello /* Disable on the following configs by default: 1488c88ad30eSMario Limonciello * 1. Undefined platforms 1489c88ad30eSMario Limonciello * 2. Server platforms 1490c88ad30eSMario Limonciello * 3. 
Shared memory designs 1491c88ad30eSMario Limonciello */ 1492c88ad30eSMario Limonciello if (amd_pstate_acpi_pm_profile_undefined() || 1493c88ad30eSMario Limonciello amd_pstate_acpi_pm_profile_server() || 1494c88ad30eSMario Limonciello !boot_cpu_has(X86_FEATURE_CPPC)) { 1495c88ad30eSMario Limonciello pr_info("driver load is disabled, boot with specific mode to enable this\n"); 1496c88ad30eSMario Limonciello return -ENODEV; 1497c88ad30eSMario Limonciello } 1498c88ad30eSMario Limonciello ret = amd_pstate_set_driver(CONFIG_X86_AMD_PSTATE_DEFAULT_MODE); 1499c88ad30eSMario Limonciello if (ret) 1500c88ad30eSMario Limonciello return ret; 1501c88ad30eSMario Limonciello break; 1502c88ad30eSMario Limonciello case AMD_PSTATE_DISABLE: 1503c88ad30eSMario Limonciello return -ENODEV; 1504c88ad30eSMario Limonciello case AMD_PSTATE_PASSIVE: 1505c88ad30eSMario Limonciello case AMD_PSTATE_ACTIVE: 1506c88ad30eSMario Limonciello case AMD_PSTATE_GUIDED: 1507c88ad30eSMario Limonciello break; 1508c88ad30eSMario Limonciello default: 1509c88ad30eSMario Limonciello return -EINVAL; 1510c88ad30eSMario Limonciello } 1511c88ad30eSMario Limonciello 1512ec437d71SHuang Rui /* capability check */ 1513e059c184SHuang Rui if (boot_cpu_has(X86_FEATURE_CPPC)) { 1514e059c184SHuang Rui pr_debug("AMD CPPC MSR based functionality is supported\n"); 15152dd6d0ebSWyes Karny if (cppc_state != AMD_PSTATE_ACTIVE) 1516ffa5096aSPerry Yuan current_pstate_driver->adjust_perf = amd_pstate_adjust_perf; 1517202e683dSPerry Yuan } else { 1518202e683dSPerry Yuan pr_debug("AMD CPPC shared memory based functionality is supported\n"); 1519e059c184SHuang Rui static_call_update(amd_pstate_enable, cppc_enable); 1520e059c184SHuang Rui static_call_update(amd_pstate_init_perf, cppc_init_perf); 1521e059c184SHuang Rui static_call_update(amd_pstate_update_perf, cppc_update_perf); 1522ec437d71SHuang Rui } 1523ec437d71SHuang Rui 1524ec437d71SHuang Rui /* enable amd pstate feature */ 1525ec437d71SHuang Rui ret = amd_pstate_enable(true); 
1526ec437d71SHuang Rui if (ret) { 1527ffa5096aSPerry Yuan pr_err("failed to enable with return %d\n", ret); 1528ec437d71SHuang Rui return ret; 1529ec437d71SHuang Rui } 1530ec437d71SHuang Rui 1531ffa5096aSPerry Yuan ret = cpufreq_register_driver(current_pstate_driver); 1532ec437d71SHuang Rui if (ret) 1533ffa5096aSPerry Yuan pr_err("failed to register with return %d\n", ret); 1534ec437d71SHuang Rui 15353666062bSGreg Kroah-Hartman dev_root = bus_get_dev_root(&cpu_subsys); 15363666062bSGreg Kroah-Hartman if (dev_root) { 15373666062bSGreg Kroah-Hartman ret = sysfs_create_group(&dev_root->kobj, &amd_pstate_global_attr_group); 15383666062bSGreg Kroah-Hartman put_device(dev_root); 1539abd61c08SPerry Yuan if (ret) { 1540abd61c08SPerry Yuan pr_err("sysfs attribute export failed with error %d.\n", ret); 1541abd61c08SPerry Yuan goto global_attr_free; 1542abd61c08SPerry Yuan } 15433666062bSGreg Kroah-Hartman } 1544abd61c08SPerry Yuan 1545abd61c08SPerry Yuan return ret; 1546abd61c08SPerry Yuan 1547abd61c08SPerry Yuan global_attr_free: 1548abd61c08SPerry Yuan cpufreq_unregister_driver(current_pstate_driver); 1549ec437d71SHuang Rui return ret; 1550ec437d71SHuang Rui } 1551456ca88dSPerry Yuan device_initcall(amd_pstate_init); 1552ec437d71SHuang Rui 1553202e683dSPerry Yuan static int __init amd_pstate_param(char *str) 1554202e683dSPerry Yuan { 155536c5014eSWyes Karny size_t size; 155636c5014eSWyes Karny int mode_idx; 155736c5014eSWyes Karny 1558202e683dSPerry Yuan if (!str) 1559202e683dSPerry Yuan return -EINVAL; 1560202e683dSPerry Yuan 156136c5014eSWyes Karny size = strlen(str); 156236c5014eSWyes Karny mode_idx = get_mode_idx_from_str(str, size); 156336c5014eSWyes Karny 1564c88ad30eSMario Limonciello return amd_pstate_set_driver(mode_idx); 156536c5014eSWyes Karny } 1566202e683dSPerry Yuan early_param("amd_pstate", amd_pstate_param); 1567202e683dSPerry Yuan 1568ec437d71SHuang Rui MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>"); 1569ec437d71SHuang Rui MODULE_DESCRIPTION("AMD Processor 
P-state Frequency Driver"); 1570