// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/platform_device.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_AMD_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};

#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)
#define HYGON_MSR_RANGE		(0x7)

#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)

struct acpi_cpufreq_data {
	unsigned int resume;
	unsigned int cpu_feature;
	unsigned int acpi_perf_cpu;
	cpumask_var_t freqdomain_cpus;
	void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
	u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
};

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
{
	return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
}

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;

static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}

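/*
 * Set or clear the vendor-specific boost-disable bit on the CPU this runs
 * on; returns -EINVAL if the CPU vendor is not handled here.
 */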
static int boost_set_msr(bool enable)
{
	u32 msr_addr;
	u64 msr_mask, val;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	default:
		return -EINVAL;
	}

	rdmsrl(msr_addr, val);

	if (enable)
		val &= ~msr_mask;
	else
		val |= msr_mask;

	wrmsrl(msr_addr, val);
	return 0;
}

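/* Callback for on_each_cpu_mask(): apply the boost setting on this CPU. */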
static void boost_set_msr_each(void *p_en)
{
	bool enable = (bool) p_en;

	boost_set_msr(enable);
}

static int set_boost(struct cpufreq_policy *policy, int val)
{
	on_each_cpu_mask(policy->cpus, boost_set_msr_each,
			 (void *)(long)val, 1);
	pr_debug("CPU %*pbl: Core Boosting %s.\n",
		 cpumask_pr_args(policy->cpus), str_enabled_disabled(val));

	return 0;
}

static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	if (unlikely(!data))
		return -ENODEV;

	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
}

cpufreq_freq_attr_ro(freqdomain_cpus);

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
			 size_t count)
{
	int ret;
	unsigned int val = 0;

	if (!acpi_cpufreq_driver.set_boost)
		return -EINVAL;

	ret = kstrtouint(buf, 10, &val);
	if (ret || val > 1)
		return -EINVAL;

	cpus_read_lock();
	set_boost(policy, val);
	cpus_read_unlock();

	return count;
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled);
}

cpufreq_freq_attr_rw(cpb);
#endif

static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}

static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}

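/*
 * The extract_*() helpers translate a raw value read from the P-state
 * status register (an I/O port or an MSR, depending on the platform)
 * into the matching frequency from the policy's frequency table.
 */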
static unsigned extract_io(struct cpufreq_policy *policy, u32 value)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	int i;

	perf = to_perf_data(data);

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return policy->freq_table[i].frequency;
	}
	return 0;
}

static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct cpufreq_frequency_table *pos;
	struct acpi_processor_performance *perf;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		msr &= HYGON_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

	perf = to_perf_data(data);

	cpufreq_for_each_entry(pos, policy->freq_table)
		if (msr == perf->states[pos->driver_data].status)
			return pos->frequency;
	return policy->freq_table[0].frequency;
}

static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		return extract_msr(policy, val);
	case SYSTEM_IO_CAPABLE:
		return extract_io(policy, val);
	default:
		return 0;
	}
}

static u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
{
	u32 val, dummy __always_unused;

	rdmsr(MSR_IA32_PERF_CTL, val, dummy);
	return val;
}

static void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
{
	u32 lo, hi;

	rdmsr(MSR_IA32_PERF_CTL, lo, hi);
	lo = (lo & ~INTEL_MSR_RANGE) | (val & INTEL_MSR_RANGE);
	wrmsr(MSR_IA32_PERF_CTL, lo, hi);
}

static u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
{
	u32 val, dummy __always_unused;

	rdmsr(MSR_AMD_PERF_CTL, val, dummy);
	return val;
}

static void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val)
{
	wrmsr(MSR_AMD_PERF_CTL, val, 0);
}

static u32 cpu_freq_read_io(struct acpi_pct_register *reg)
{
	u32 val;

	acpi_os_read_port(reg->address, &val, reg->bit_width);
	return val;
}

static void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val)
{
	acpi_os_write_port(reg->address, val, reg->bit_width);
}

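/*
 * A drv_cmd packages the P-state control register, the value to write and
 * the vendor-specific accessor, so a read or write request can be shipped
 * to the target CPU(s) via the smp_call_function*() helpers below.
 */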
struct drv_cmd {
	struct acpi_pct_register *reg;
	u32 val;
	union {
		void (*write)(struct acpi_pct_register *reg, u32 val);
		u32 (*read)(struct acpi_pct_register *reg);
	} func;
};

/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;

	cmd->val = cmd->func.read(cmd->reg);
}

static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.func.read = data->cpu_freq_read,
	};
	int err;

	err = smp_call_function_any(mask, do_drv_read, &cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
	return cmd.val;
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;

	cmd->func.write(cmd->reg, cmd->val);
}

static void drv_write(struct acpi_cpufreq_data *data,
		      const struct cpumask *mask, u32 val)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.val = val,
		.func.write = data->cpu_freq_write,
	};
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, mask))
		do_drv_write(&cmd);

	smp_call_function_many(mask, do_drv_write, &cmd, 1);
	put_cpu();
}

static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
{
	u32 val;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	val = drv_read(data, mask);

	pr_debug("%s = %u\n", __func__, val);

	return val;
}

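/*
 * ->get() callback: read the current P-state of @cpu from hardware and
 * translate it into a frequency from the frequency table.
 */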
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data;
	struct cpufreq_policy *policy;
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("%s (%d)\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (unlikely(!policy))
		return 0;

	data = policy->driver_data;
	if (unlikely(!data || !policy->freq_table))
		return 0;

	cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
	freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}

static unsigned int check_freqs(struct cpufreq_policy *policy,
				const struct cpumask *mask, unsigned int freq)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(policy, get_cur_val(mask, data));
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}

static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int index)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	const struct cpumask *mask;
	unsigned int next_perf_state = 0; /* Index into perf table */
	int result = 0;

	if (unlikely(!data)) {
		return -ENODEV;
	}

	perf = to_perf_data(data);
	next_perf_state = policy->freq_table[index].driver_data;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				next_perf_state);
			return 0;
		}
	}

	/*
	 * The core won't allow CPUs to go away until the governor has been
	 * stopped, so we can rely on the stability of policy->cpus.
	 */
	mask = policy->shared_type == CPUFREQ_SHARED_TYPE_ANY ?
		cpumask_of(policy->cpu) : policy->cpus;

	drv_write(data, mask, perf->states[next_perf_state].control);

	if (acpi_pstate_strict) {
		if (!check_freqs(policy, mask,
				 policy->freq_table[index].frequency)) {
			pr_debug("%s (%d)\n", __func__, policy->cpu);
			result = -EAGAIN;
		}
	}

	if (!result)
		perf->state = next_perf_state;

	return result;
}

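/*
 * Fast switching runs in scheduler context on a CPU of the policy, so the
 * control register is written directly here instead of via drv_write().
 */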
static unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	struct cpufreq_frequency_table *entry;
	unsigned int next_perf_state, next_freq, index;

	/*
	 * Find the closest frequency above target_freq.
	 */
	if (policy->cached_target_freq == target_freq)
		index = policy->cached_resolved_idx;
	else
		index = cpufreq_table_find_index_dl(policy, target_freq,
						    false);

	entry = &policy->freq_table[index];
	next_freq = entry->frequency;
	next_perf_state = entry->driver_data;

	perf = to_perf_data(data);
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume))
			data->resume = 0;
		else
			return next_freq;
	}

	data->cpu_freq_write(&perf->control_register,
			     perf->states[next_perf_state].control);
	perf->state = next_perf_state;
	return next_freq;
}

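/*
 * The current speed cannot be read back through an I/O-port based status
 * register, so estimate it: pick the P-state closest to the measured TSC
 * frequency (cpu_khz), or assume P0 when cpu_khz is not available.
 */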
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf;

	perf = to_perf_data(data);
	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}

static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}

static int cpufreq_boost_down_prep(unsigned int cpu)
{
	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting.
	 */
	return boost_set_msr(1);
}

/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;
	pr_debug("%s\n", __func__);

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}

#ifdef CONFIG_SMP
/*
 * Some BIOSes do SW_ANY coordination internally, either setting it up in
 * hardware or handling it in firmware, without informing the OS about it.
 * If this goes undetected, it has the side effect of making the CPU run at
 * a different speed than the OS intended. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};

static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
	/* Intel Xeon Processor 7100 Series Specification Update
	 * https://www.intel.com/Assets/PDF/specupdate/314554.pdf
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
	 * Both Processor Cores to Lock Up. */
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
		    (c->x86_stepping == 8)) {
			pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
			return -ENODEV;
		}
	}
	return 0;
}
#endif

#ifdef CONFIG_ACPI_CPPC_LIB
/*
 * get_max_boost_ratio: Computes the max_boost_ratio as the ratio
 * between the highest_perf and the nominal_perf.
 *
 * Returns the max_boost_ratio for @cpu.  Also returns the CPPC nominal
 * frequency via @nominal_freq if the latter is a non-NULL pointer.
 */
static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
{
	struct cppc_perf_caps perf_caps;
	u64 highest_perf, nominal_perf;
	int ret;

	if (acpi_pstate_strict)
		return 0;

	ret = cppc_get_perf_caps(cpu, &perf_caps);
	if (ret) {
		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
			 cpu, ret);
		return 0;
	}

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		highest_perf = amd_get_highest_perf();
	else
		highest_perf = perf_caps.highest_perf;

	nominal_perf = perf_caps.nominal_perf;

	if (nominal_freq)
		*nominal_freq = perf_caps.nominal_freq;

	if (!highest_perf || !nominal_perf) {
		pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
		return 0;
	}

	if (highest_perf < nominal_perf) {
		pr_debug("CPU%d: nominal performance above highest\n", cpu);
		return 0;
	}

	return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
}

#else
static inline u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
{
	return 0;
}
#endif

static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *freq_table;
	struct acpi_processor_performance *perf;
	struct acpi_cpufreq_data *data;
	unsigned int cpu = policy->cpu;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	u64 max_boost_ratio, nominal_freq = 0;
	unsigned int valid_states = 0;
	unsigned int result = 0;
	unsigned int i;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("%s\n", __func__);

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free;
	}

	perf = per_cpu_ptr(acpi_perf_data, cpu);
	data->acpi_perf_cpu = cpu;
	policy->driver_data = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(perf, cpu);
	if (result)
		goto err_free_mask;

	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && boot_cpu_data.x86 < 0x19 &&
	    !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		cpumask_copy(data->freqdomain_cpus,
			     topology_sibling_cpumask(cpu));
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once("overriding BIOS provided _PSD data\n");
	}
#endif

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		data->cpu_freq_read = cpu_freq_read_io;
		data->cpu_freq_write = cpu_freq_write_io;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			data->cpu_freq_read = cpu_freq_read_intel;
			data->cpu_freq_write = cpu_freq_write_intel;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			data->cpu_freq_read = cpu_freq_read_amd;
			data->cpu_freq_write = cpu_freq_write_amd;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			 (u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
			     GFP_KERNEL);
	if (!freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		pr_info_once("P-state transition latency capped at 20 uS\n");
	}

	/* table init */
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    freq_table[valid_states-1].frequency / 1000)
			continue;

		freq_table[valid_states].driver_data = i;
		freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	freq_table[valid_states].frequency = CPUFREQ_TABLE_END;

	max_boost_ratio = get_max_boost_ratio(cpu, &nominal_freq);
	if (max_boost_ratio) {
		unsigned int freq = nominal_freq;

		/*
		 * The loop above sorts the freq_table entries in the
		 * descending order. If ACPI CPPC has not advertised
		 * the nominal frequency (this is possible in CPPC
		 * revisions prior to 3), then use the first entry in
		 * the pstate table as a proxy for nominal frequency.
		 */
		if (!freq)
			freq = freq_table[0].frequency;

		policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
	} else {
		/*
		 * If the maximum "boost" frequency is unknown, ask the arch
		 * scale-invariance code to use the "nominal" performance for
		 * CPU utilization scaling so as to prevent the schedutil
		 * governor from selecting inadequate CPU frequencies.
		 */
		arch_set_max_freq_ratio(true);
	}

	policy->freq_table = freq_table;
	perf->state = 0;

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/*
		 * The core will not set policy->cur, because
		 * cpufreq_driver->get is NULL, so we need to set it here.
		 * However, we have to guess it, because the current speed is
		 * unknown and not detectable via IO ports.
		 */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	policy->fast_switch_possible = !acpi_pstate_strict &&
		!(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY);

	if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
		pr_warn(FW_WARN "P-state 0 is not max freq\n");

	if (acpi_cpufreq_driver.set_boost) {
		set_boost(policy, acpi_cpufreq_driver.boost_enabled);
		policy->boost_enabled = acpi_cpufreq_driver.boost_enabled;
	}

	return result;

err_unreg:
	acpi_processor_unregister_performance(cpu);
err_free_mask:
	free_cpumask_var(data->freqdomain_cpus);
err_free:
	kfree(data);
	policy->driver_data = NULL;

	return result;
}

static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("%s\n", __func__);

	cpufreq_boost_down_prep(policy->cpu);
	policy->fast_switch_possible = false;
	policy->driver_data = NULL;
	acpi_processor_unregister_performance(data->acpi_perf_cpu);
	free_cpumask_var(data->freqdomain_cpus);
	kfree(policy->freq_table);
	kfree(data);

	return 0;
}

static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("%s\n", __func__);

	data->resume = 1;

	return 0;
}

static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&freqdomain_cpus,
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	&cpb,
#endif
	NULL,
};

static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= acpi_cpufreq_target,
	.fast_switch	= acpi_cpufreq_fast_switch,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.attr		= acpi_cpufreq_attr,
};

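/*
 * Advertise boost control (the ->set_boost() callback) only if the CPU
 * reports a boost capability: Core Performance Boost (CPB) on AMD/Hygon or
 * Intel Dynamic Acceleration (IDA).
 */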
static void __init acpi_cpufreq_boost_init(void)
{
	if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
		pr_debug("Boost capabilities not present in the processor\n");
		return;
	}

	acpi_cpufreq_driver.set_boost = set_boost;
	acpi_cpufreq_driver.boost_enabled = boost_state(0);
}

static int __init acpi_cpufreq_probe(struct platform_device *pdev)
{
	int ret;

	if (acpi_disabled)
		return -ENODEV;

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -ENODEV;

	pr_debug("%s\n", __func__);

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/* this is a sysfs file with a strange name and an even stranger
	 * semantic - per CPU instantiation, but system global effect.
	 * Let's enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
	if (!check_amd_hwpstate_cpu(0)) {
		struct freq_attr **attr;

		pr_debug("CPB unsupported, do not expose it\n");

		for (attr = acpi_cpufreq_attr; *attr; attr++)
			if (*attr == &cpb) {
				*attr = NULL;
				break;
			}
	}
#endif
	acpi_cpufreq_boost_init();

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret) {
		free_acpi_perf_data();
	}
	return ret;
}

static void acpi_cpufreq_remove(struct platform_device *pdev)
{
	pr_debug("%s\n", __func__);

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}

static struct platform_driver acpi_cpufreq_platdrv = {
	.driver = {
		.name	= "acpi-cpufreq",
	},
	.remove_new	= acpi_cpufreq_remove,
};

static int __init acpi_cpufreq_init(void)
{
	return platform_driver_probe(&acpi_cpufreq_platdrv, acpi_cpufreq_probe);
}

static void __exit acpi_cpufreq_exit(void)
{
	platform_driver_unregister(&acpi_cpufreq_platdrv);
}

module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

MODULE_ALIAS("platform:acpi-cpufreq");