1ec437d71SHuang Rui // SPDX-License-Identifier: GPL-2.0-or-later 2ec437d71SHuang Rui /* 3ec437d71SHuang Rui * amd-pstate.c - AMD Processor P-state Frequency Driver 4ec437d71SHuang Rui * 5ec437d71SHuang Rui * Copyright (C) 2021 Advanced Micro Devices, Inc. All Rights Reserved. 6ec437d71SHuang Rui * 7ec437d71SHuang Rui * Author: Huang Rui <ray.huang@amd.com> 8ec437d71SHuang Rui * 9ec437d71SHuang Rui * AMD P-State introduces a new CPU performance scaling design for AMD 10ec437d71SHuang Rui * processors using the ACPI Collaborative Performance and Power Control (CPPC) 11ec437d71SHuang Rui * feature which works with the AMD SMU firmware providing a finer grained 12ec437d71SHuang Rui * frequency control range. It is to replace the legacy ACPI P-States control, 13ec437d71SHuang Rui * allows a flexible, low-latency interface for the Linux kernel to directly 14ec437d71SHuang Rui * communicate the performance hints to hardware. 15ec437d71SHuang Rui * 16ec437d71SHuang Rui * AMD P-State is supported on recent AMD Zen base CPU series include some of 17ec437d71SHuang Rui * Zen2 and Zen3 processors. _CPC needs to be present in the ACPI tables of AMD 18ec437d71SHuang Rui * P-State supported system. And there are two types of hardware implementations 19ec437d71SHuang Rui * for AMD P-State: 1) Full MSR Solution and 2) Shared Memory Solution. 20ec437d71SHuang Rui * X86_FEATURE_CPPC CPU feature flag is used to distinguish the different types. 
21ec437d71SHuang Rui */ 22ec437d71SHuang Rui 23ec437d71SHuang Rui #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 24ec437d71SHuang Rui 25ec437d71SHuang Rui #include <linux/kernel.h> 26ec437d71SHuang Rui #include <linux/module.h> 27ec437d71SHuang Rui #include <linux/init.h> 28ec437d71SHuang Rui #include <linux/smp.h> 29ec437d71SHuang Rui #include <linux/sched.h> 30ec437d71SHuang Rui #include <linux/cpufreq.h> 31ec437d71SHuang Rui #include <linux/compiler.h> 32ec437d71SHuang Rui #include <linux/dmi.h> 33ec437d71SHuang Rui #include <linux/slab.h> 34ec437d71SHuang Rui #include <linux/acpi.h> 35ec437d71SHuang Rui #include <linux/io.h> 36ec437d71SHuang Rui #include <linux/delay.h> 37ec437d71SHuang Rui #include <linux/uaccess.h> 38ec437d71SHuang Rui #include <linux/static_call.h> 39ec437d71SHuang Rui 40ec437d71SHuang Rui #include <acpi/processor.h> 41ec437d71SHuang Rui #include <acpi/cppc_acpi.h> 42ec437d71SHuang Rui 43ec437d71SHuang Rui #include <asm/msr.h> 44ec437d71SHuang Rui #include <asm/processor.h> 45ec437d71SHuang Rui #include <asm/cpufeature.h> 46ec437d71SHuang Rui #include <asm/cpu_device_id.h> 47*60e10f89SHuang Rui #include "amd-pstate-trace.h" 48ec437d71SHuang Rui 49ec437d71SHuang Rui #define AMD_PSTATE_TRANSITION_LATENCY 0x20000 50ec437d71SHuang Rui #define AMD_PSTATE_TRANSITION_DELAY 500 51ec437d71SHuang Rui 52e059c184SHuang Rui /* 53e059c184SHuang Rui * TODO: We need more time to fine tune processors with shared memory solution 54e059c184SHuang Rui * with community together. 55e059c184SHuang Rui * 56e059c184SHuang Rui * There are some performance drops on the CPU benchmarks which reports from 57e059c184SHuang Rui * Suse. We are co-working with them to fine tune the shared memory solution. So 58e059c184SHuang Rui * we disable it by default to go acpi-cpufreq on these processors and add a 59e059c184SHuang Rui * module parameter to be able to enable it manually for debugging. 
60e059c184SHuang Rui */ 61e059c184SHuang Rui static bool shared_mem = false; 62e059c184SHuang Rui module_param(shared_mem, bool, 0444); 63e059c184SHuang Rui MODULE_PARM_DESC(shared_mem, 64e059c184SHuang Rui "enable amd-pstate on processors with shared memory solution (false = disabled (default), true = enabled)"); 65e059c184SHuang Rui 66ec437d71SHuang Rui static struct cpufreq_driver amd_pstate_driver; 67ec437d71SHuang Rui 68ec437d71SHuang Rui /** 69ec437d71SHuang Rui * struct amd_cpudata - private CPU data for AMD P-State 70ec437d71SHuang Rui * @cpu: CPU number 71ec437d71SHuang Rui * @cppc_req_cached: cached performance request hints 72ec437d71SHuang Rui * @highest_perf: the maximum performance an individual processor may reach, 73ec437d71SHuang Rui * assuming ideal conditions 74ec437d71SHuang Rui * @nominal_perf: the maximum sustained performance level of the processor, 75ec437d71SHuang Rui * assuming ideal operating conditions 76ec437d71SHuang Rui * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power 77ec437d71SHuang Rui * savings are achieved 78ec437d71SHuang Rui * @lowest_perf: the absolute lowest performance level of the processor 79ec437d71SHuang Rui * @max_freq: the frequency that mapped to highest_perf 80ec437d71SHuang Rui * @min_freq: the frequency that mapped to lowest_perf 81ec437d71SHuang Rui * @nominal_freq: the frequency that mapped to nominal_perf 82ec437d71SHuang Rui * @lowest_nonlinear_freq: the frequency that mapped to lowest_nonlinear_perf 83ec437d71SHuang Rui * 84ec437d71SHuang Rui * The amd_cpudata is key private data for each CPU thread in AMD P-State, and 85ec437d71SHuang Rui * represents all the attributes and goals that AMD P-State requests at runtime. 
86ec437d71SHuang Rui */ 87ec437d71SHuang Rui struct amd_cpudata { 88ec437d71SHuang Rui int cpu; 89ec437d71SHuang Rui 90ec437d71SHuang Rui u64 cppc_req_cached; 91ec437d71SHuang Rui 92ec437d71SHuang Rui u32 highest_perf; 93ec437d71SHuang Rui u32 nominal_perf; 94ec437d71SHuang Rui u32 lowest_nonlinear_perf; 95ec437d71SHuang Rui u32 lowest_perf; 96ec437d71SHuang Rui 97ec437d71SHuang Rui u32 max_freq; 98ec437d71SHuang Rui u32 min_freq; 99ec437d71SHuang Rui u32 nominal_freq; 100ec437d71SHuang Rui u32 lowest_nonlinear_freq; 101ec437d71SHuang Rui }; 102ec437d71SHuang Rui 103e059c184SHuang Rui static inline int pstate_enable(bool enable) 104ec437d71SHuang Rui { 105ec437d71SHuang Rui return wrmsrl_safe(MSR_AMD_CPPC_ENABLE, enable); 106ec437d71SHuang Rui } 107ec437d71SHuang Rui 108e059c184SHuang Rui static int cppc_enable(bool enable) 109e059c184SHuang Rui { 110e059c184SHuang Rui int cpu, ret = 0; 111e059c184SHuang Rui 112e059c184SHuang Rui for_each_present_cpu(cpu) { 113e059c184SHuang Rui ret = cppc_set_enable(cpu, enable); 114e059c184SHuang Rui if (ret) 115e059c184SHuang Rui return ret; 116e059c184SHuang Rui } 117e059c184SHuang Rui 118e059c184SHuang Rui return ret; 119e059c184SHuang Rui } 120e059c184SHuang Rui 121e059c184SHuang Rui DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable); 122e059c184SHuang Rui 123e059c184SHuang Rui static inline int amd_pstate_enable(bool enable) 124e059c184SHuang Rui { 125e059c184SHuang Rui return static_call(amd_pstate_enable)(enable); 126e059c184SHuang Rui } 127e059c184SHuang Rui 128e059c184SHuang Rui static int pstate_init_perf(struct amd_cpudata *cpudata) 129ec437d71SHuang Rui { 130ec437d71SHuang Rui u64 cap1; 131ec437d71SHuang Rui 132ec437d71SHuang Rui int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, 133ec437d71SHuang Rui &cap1); 134ec437d71SHuang Rui if (ret) 135ec437d71SHuang Rui return ret; 136ec437d71SHuang Rui 137ec437d71SHuang Rui /* 138ec437d71SHuang Rui * TODO: Introduce AMD specific power feature. 
139ec437d71SHuang Rui * 140ec437d71SHuang Rui * CPPC entry doesn't indicate the highest performance in some ASICs. 141ec437d71SHuang Rui */ 142ec437d71SHuang Rui WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf()); 143ec437d71SHuang Rui 144ec437d71SHuang Rui WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1)); 145ec437d71SHuang Rui WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1)); 146ec437d71SHuang Rui WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1)); 147ec437d71SHuang Rui 148ec437d71SHuang Rui return 0; 149ec437d71SHuang Rui } 150ec437d71SHuang Rui 151e059c184SHuang Rui static int cppc_init_perf(struct amd_cpudata *cpudata) 152e059c184SHuang Rui { 153e059c184SHuang Rui struct cppc_perf_caps cppc_perf; 154e059c184SHuang Rui 155e059c184SHuang Rui int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); 156e059c184SHuang Rui if (ret) 157e059c184SHuang Rui return ret; 158e059c184SHuang Rui 159e059c184SHuang Rui WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf()); 160e059c184SHuang Rui 161e059c184SHuang Rui WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf); 162e059c184SHuang Rui WRITE_ONCE(cpudata->lowest_nonlinear_perf, 163e059c184SHuang Rui cppc_perf.lowest_nonlinear_perf); 164e059c184SHuang Rui WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf); 165e059c184SHuang Rui 166e059c184SHuang Rui return 0; 167e059c184SHuang Rui } 168e059c184SHuang Rui 169e059c184SHuang Rui DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf); 170e059c184SHuang Rui 171e059c184SHuang Rui static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata) 172e059c184SHuang Rui { 173e059c184SHuang Rui return static_call(amd_pstate_init_perf)(cpudata); 174e059c184SHuang Rui } 175e059c184SHuang Rui 176e059c184SHuang Rui static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf, 177ec437d71SHuang Rui u32 des_perf, u32 max_perf, bool fast_switch) 178ec437d71SHuang Rui { 179ec437d71SHuang Rui if 
(fast_switch) 180ec437d71SHuang Rui wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached)); 181ec437d71SHuang Rui else 182ec437d71SHuang Rui wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, 183ec437d71SHuang Rui READ_ONCE(cpudata->cppc_req_cached)); 184ec437d71SHuang Rui } 185ec437d71SHuang Rui 186e059c184SHuang Rui static void cppc_update_perf(struct amd_cpudata *cpudata, 187e059c184SHuang Rui u32 min_perf, u32 des_perf, 188e059c184SHuang Rui u32 max_perf, bool fast_switch) 189e059c184SHuang Rui { 190e059c184SHuang Rui struct cppc_perf_ctrls perf_ctrls; 191e059c184SHuang Rui 192e059c184SHuang Rui perf_ctrls.max_perf = max_perf; 193e059c184SHuang Rui perf_ctrls.min_perf = min_perf; 194e059c184SHuang Rui perf_ctrls.desired_perf = des_perf; 195e059c184SHuang Rui 196e059c184SHuang Rui cppc_set_perf(cpudata->cpu, &perf_ctrls); 197e059c184SHuang Rui } 198e059c184SHuang Rui 199e059c184SHuang Rui DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf); 200e059c184SHuang Rui 201e059c184SHuang Rui static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata, 202e059c184SHuang Rui u32 min_perf, u32 des_perf, 203e059c184SHuang Rui u32 max_perf, bool fast_switch) 204e059c184SHuang Rui { 205e059c184SHuang Rui static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf, 206e059c184SHuang Rui max_perf, fast_switch); 207e059c184SHuang Rui } 208e059c184SHuang Rui 209ec437d71SHuang Rui static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf, 210ec437d71SHuang Rui u32 des_perf, u32 max_perf, bool fast_switch) 211ec437d71SHuang Rui { 212ec437d71SHuang Rui u64 prev = READ_ONCE(cpudata->cppc_req_cached); 213ec437d71SHuang Rui u64 value = prev; 214ec437d71SHuang Rui 215ec437d71SHuang Rui value &= ~AMD_CPPC_MIN_PERF(~0L); 216ec437d71SHuang Rui value |= AMD_CPPC_MIN_PERF(min_perf); 217ec437d71SHuang Rui 218ec437d71SHuang Rui value &= ~AMD_CPPC_DES_PERF(~0L); 219ec437d71SHuang Rui value |= AMD_CPPC_DES_PERF(des_perf); 220ec437d71SHuang Rui 
221ec437d71SHuang Rui value &= ~AMD_CPPC_MAX_PERF(~0L); 222ec437d71SHuang Rui value |= AMD_CPPC_MAX_PERF(max_perf); 223ec437d71SHuang Rui 224*60e10f89SHuang Rui trace_amd_pstate_perf(min_perf, des_perf, max_perf, 225*60e10f89SHuang Rui cpudata->cpu, (value != prev), fast_switch); 226*60e10f89SHuang Rui 227ec437d71SHuang Rui if (value == prev) 228ec437d71SHuang Rui return; 229ec437d71SHuang Rui 230ec437d71SHuang Rui WRITE_ONCE(cpudata->cppc_req_cached, value); 231ec437d71SHuang Rui 232ec437d71SHuang Rui amd_pstate_update_perf(cpudata, min_perf, des_perf, 233ec437d71SHuang Rui max_perf, fast_switch); 234ec437d71SHuang Rui } 235ec437d71SHuang Rui 236ec437d71SHuang Rui static int amd_pstate_verify(struct cpufreq_policy_data *policy) 237ec437d71SHuang Rui { 238ec437d71SHuang Rui cpufreq_verify_within_cpu_limits(policy); 239ec437d71SHuang Rui 240ec437d71SHuang Rui return 0; 241ec437d71SHuang Rui } 242ec437d71SHuang Rui 243ec437d71SHuang Rui static int amd_pstate_target(struct cpufreq_policy *policy, 244ec437d71SHuang Rui unsigned int target_freq, 245ec437d71SHuang Rui unsigned int relation) 246ec437d71SHuang Rui { 247ec437d71SHuang Rui struct cpufreq_freqs freqs; 248ec437d71SHuang Rui struct amd_cpudata *cpudata = policy->driver_data; 249ec437d71SHuang Rui unsigned long max_perf, min_perf, des_perf, cap_perf; 250ec437d71SHuang Rui 251ec437d71SHuang Rui if (!cpudata->max_freq) 252ec437d71SHuang Rui return -ENODEV; 253ec437d71SHuang Rui 254ec437d71SHuang Rui cap_perf = READ_ONCE(cpudata->highest_perf); 255ec437d71SHuang Rui min_perf = READ_ONCE(cpudata->lowest_nonlinear_perf); 256ec437d71SHuang Rui max_perf = cap_perf; 257ec437d71SHuang Rui 258ec437d71SHuang Rui freqs.old = policy->cur; 259ec437d71SHuang Rui freqs.new = target_freq; 260ec437d71SHuang Rui 261ec437d71SHuang Rui des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf, 262ec437d71SHuang Rui cpudata->max_freq); 263ec437d71SHuang Rui 264ec437d71SHuang Rui cpufreq_freq_transition_begin(policy, &freqs); 
265ec437d71SHuang Rui amd_pstate_update(cpudata, min_perf, des_perf, 266ec437d71SHuang Rui max_perf, false); 267ec437d71SHuang Rui cpufreq_freq_transition_end(policy, &freqs, false); 268ec437d71SHuang Rui 269ec437d71SHuang Rui return 0; 270ec437d71SHuang Rui } 271ec437d71SHuang Rui 2721d215f03SHuang Rui static void amd_pstate_adjust_perf(unsigned int cpu, 2731d215f03SHuang Rui unsigned long _min_perf, 2741d215f03SHuang Rui unsigned long target_perf, 2751d215f03SHuang Rui unsigned long capacity) 2761d215f03SHuang Rui { 2771d215f03SHuang Rui unsigned long max_perf, min_perf, des_perf, 2781d215f03SHuang Rui cap_perf, lowest_nonlinear_perf; 2791d215f03SHuang Rui struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 2801d215f03SHuang Rui struct amd_cpudata *cpudata = policy->driver_data; 2811d215f03SHuang Rui 2821d215f03SHuang Rui cap_perf = READ_ONCE(cpudata->highest_perf); 2831d215f03SHuang Rui lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf); 2841d215f03SHuang Rui 2851d215f03SHuang Rui des_perf = cap_perf; 2861d215f03SHuang Rui if (target_perf < capacity) 2871d215f03SHuang Rui des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity); 2881d215f03SHuang Rui 2891d215f03SHuang Rui min_perf = READ_ONCE(cpudata->highest_perf); 2901d215f03SHuang Rui if (_min_perf < capacity) 2911d215f03SHuang Rui min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity); 2921d215f03SHuang Rui 2931d215f03SHuang Rui if (min_perf < lowest_nonlinear_perf) 2941d215f03SHuang Rui min_perf = lowest_nonlinear_perf; 2951d215f03SHuang Rui 2961d215f03SHuang Rui max_perf = cap_perf; 2971d215f03SHuang Rui if (max_perf < min_perf) 2981d215f03SHuang Rui max_perf = min_perf; 2991d215f03SHuang Rui 3001d215f03SHuang Rui des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf); 3011d215f03SHuang Rui 3021d215f03SHuang Rui amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true); 3031d215f03SHuang Rui } 3041d215f03SHuang Rui 305ec437d71SHuang Rui static int amd_get_min_freq(struct 
amd_cpudata *cpudata) 306ec437d71SHuang Rui { 307ec437d71SHuang Rui struct cppc_perf_caps cppc_perf; 308ec437d71SHuang Rui 309ec437d71SHuang Rui int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); 310ec437d71SHuang Rui if (ret) 311ec437d71SHuang Rui return ret; 312ec437d71SHuang Rui 313ec437d71SHuang Rui /* Switch to khz */ 314ec437d71SHuang Rui return cppc_perf.lowest_freq * 1000; 315ec437d71SHuang Rui } 316ec437d71SHuang Rui 317ec437d71SHuang Rui static int amd_get_max_freq(struct amd_cpudata *cpudata) 318ec437d71SHuang Rui { 319ec437d71SHuang Rui struct cppc_perf_caps cppc_perf; 320ec437d71SHuang Rui u32 max_perf, max_freq, nominal_freq, nominal_perf; 321ec437d71SHuang Rui u64 boost_ratio; 322ec437d71SHuang Rui 323ec437d71SHuang Rui int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); 324ec437d71SHuang Rui if (ret) 325ec437d71SHuang Rui return ret; 326ec437d71SHuang Rui 327ec437d71SHuang Rui nominal_freq = cppc_perf.nominal_freq; 328ec437d71SHuang Rui nominal_perf = READ_ONCE(cpudata->nominal_perf); 329ec437d71SHuang Rui max_perf = READ_ONCE(cpudata->highest_perf); 330ec437d71SHuang Rui 331ec437d71SHuang Rui boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT, 332ec437d71SHuang Rui nominal_perf); 333ec437d71SHuang Rui 334ec437d71SHuang Rui max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT; 335ec437d71SHuang Rui 336ec437d71SHuang Rui /* Switch to khz */ 337ec437d71SHuang Rui return max_freq * 1000; 338ec437d71SHuang Rui } 339ec437d71SHuang Rui 340ec437d71SHuang Rui static int amd_get_nominal_freq(struct amd_cpudata *cpudata) 341ec437d71SHuang Rui { 342ec437d71SHuang Rui struct cppc_perf_caps cppc_perf; 343ec437d71SHuang Rui 344ec437d71SHuang Rui int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); 345ec437d71SHuang Rui if (ret) 346ec437d71SHuang Rui return ret; 347ec437d71SHuang Rui 348ec437d71SHuang Rui /* Switch to khz */ 349ec437d71SHuang Rui return cppc_perf.nominal_freq * 1000; 350ec437d71SHuang Rui } 351ec437d71SHuang Rui 
/* Lowest frequency (kHz) with nonlinear power savings, scaled from perf ratio. */
static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;
	u32 lowest_nonlinear_freq, lowest_nonlinear_perf,
	    nominal_freq, nominal_perf;
	u64 lowest_nonlinear_ratio;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	nominal_freq = cppc_perf.nominal_freq;
	nominal_perf = READ_ONCE(cpudata->nominal_perf);

	lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;

	/* fixed-point ratio with SCHED_CAPACITY_SHIFT fractional bits */
	lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
					 nominal_perf);

	lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT;

	/* Switch to khz */
	return lowest_nonlinear_freq * 1000;
}

/*
 * cpufreq ->init callback: allocate per-CPU data, read the CPPC perf
 * capabilities and derive the frequency limits for this policy.
 */
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
	struct device *dev;
	struct amd_cpudata *cpudata;

	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
	if (!cpudata)
		return -ENOMEM;

	cpudata->cpu = policy->cpu;

	ret = amd_pstate_init_perf(cpudata);
	if (ret)
		goto free_cpudata;

	min_freq = amd_get_min_freq(cpudata);
	max_freq = amd_get_max_freq(cpudata);
	nominal_freq = amd_get_nominal_freq(cpudata);
	lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);

	/* the helpers above return negative error codes on failure */
	if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
		dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
			min_freq, max_freq);
		ret = -EINVAL;
		goto free_cpudata;
	}

	policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY;
	policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY;

	policy->min = min_freq;
	policy->max = max_freq;

	policy->cpuinfo.min_freq = min_freq;
	policy->cpuinfo.max_freq = max_freq;

	/* It will be updated by governor */
	policy->cur = policy->cpuinfo.min_freq;

	/* fast switch (MSR write from the scheduler path) needs the MSR solution */
	if (boot_cpu_has(X86_FEATURE_CPPC))
		policy->fast_switch_possible = true;

	/* Initial processor data capability frequencies */
	cpudata->max_freq = max_freq;
	cpudata->min_freq = min_freq;
	cpudata->nominal_freq = nominal_freq;
	cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;

	policy->driver_data = cpudata;

	return 0;

free_cpudata:
	kfree(cpudata);
	return ret;
}

/* cpufreq ->exit callback: release the per-CPU data allocated in ->init. */
static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata;

	cpudata = policy->driver_data;

	kfree(cpudata);

	return 0;
}

static struct cpufreq_driver amd_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
	.verify		= amd_pstate_verify,
	.target		= amd_pstate_target,
	.init		= amd_pstate_cpu_init,
	.exit		= amd_pstate_cpu_exit,
	.name		= "amd-pstate",
};

/*
 * Module init: probe for AMD CPPC support, pick the MSR or shared-memory
 * backend via static calls, enable CPPC and register the cpufreq driver.
 */
static int __init amd_pstate_init(void)
{
	int ret;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return -ENODEV;

	if (!acpi_cpc_valid()) {
		pr_debug("the _CPC object is not present in SBIOS\n");
		return -ENODEV;
	}

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -EEXIST;

	/* capability check */
	if (boot_cpu_has(X86_FEATURE_CPPC)) {
		pr_debug("AMD CPPC MSR based functionality is supported\n");
		amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf;
	} else if (shared_mem) {
		/* switch the static calls over to the shared-memory backend */
		static_call_update(amd_pstate_enable, cppc_enable);
		static_call_update(amd_pstate_init_perf, cppc_init_perf);
		static_call_update(amd_pstate_update_perf, cppc_update_perf);
	} else {
		pr_info("This processor supports shared memory solution, you can enable it with amd_pstate.shared_mem=1\n");
		return -ENODEV;
	}

	/* enable amd pstate feature */
	ret = amd_pstate_enable(true);
	if (ret) {
		pr_err("failed to enable amd-pstate with return %d\n", ret);
		return ret;
	}

	ret = cpufreq_register_driver(&amd_pstate_driver);
	if (ret)
		pr_err("failed to register amd_pstate_driver with return %d\n",
		       ret);

	return ret;
}

/* Module exit: unregister the driver, then disable CPPC in hardware. */
static void __exit amd_pstate_exit(void)
{
	cpufreq_unregister_driver(&amd_pstate_driver);

	amd_pstate_enable(false);
}

module_init(amd_pstate_init);
module_exit(amd_pstate_exit);

MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
MODULE_LICENSE("GPL");