/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

#define PFX "acpi-cpufreq: "

enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_AMD_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};

#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)

#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)

struct acpi_cpufreq_data {
	struct acpi_processor_performance *acpi_data;
	struct cpufreq_frequency_table *freq_table;
	unsigned int resume;
	unsigned int cpu_feature;
	cpumask_var_t freqdomain_cpus;
};

static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;
static struct msr __percpu *msrs;

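/*
 * Read the vendor-specific boost-disable bit for @cpu and report whether
 * boost is currently enabled. Both MSR_IA32_MISC_ENABLE_TURBO_DISABLE and
 * MSR_K7_HWCR_CPB_DIS are "disable" bits, hence the inverted tests below.
 * CPUs from other vendors are reported as not boost-capable.
 */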
static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_AMD:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}

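/*
 * Set or clear the boost-disable bit on every CPU in @cpumask via a bulk
 * read-modify-write: read all MSRs into the per-cpu 'msrs' buffer, flip
 * the mask bit in each cached copy, then write them all back. Note that
 * @enable clears the disable bit, and vice versa.
 */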
static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
{
	u32 cpu;
	u32 msr_addr;
	u64 msr_mask;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	default:
		return;
	}

	rdmsr_on_cpus(cpumask, msr_addr, msrs);

	for_each_cpu(cpu, cpumask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (enable)
			reg->q &= ~msr_mask;
		else
			reg->q |= msr_mask;
	}

	wrmsr_on_cpus(cpumask, msr_addr, msrs);
}

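/*
 * Apply the requested boost state to all online CPUs. The CPU hotplug
 * lock is held across boost_set_msrs() so that CPUs cannot come or go
 * while the MSRs are being rewritten; CPUs onlined later are brought in
 * sync by boost_notify() further down.
 */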
static int _store_boost(int val)
{
	get_online_cpus();
	boost_set_msrs(val, cpu_online_mask);
	put_online_cpus();
	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");

	return 0;
}

static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
}

cpufreq_freq_attr_ro(freqdomain_cpus);

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_boost(const char *buf, size_t count)
{
	int ret;
	unsigned long val = 0;

	if (!acpi_cpufreq_driver.boost_supported)
		return -EINVAL;

	ret = kstrtoul(buf, 10, &val);
	if (ret || (val > 1))
		return -EINVAL;

	_store_boost((int) val);

	return count;
}

static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
			 size_t count)
{
	return store_boost(buf, count);
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled);
}

cpufreq_freq_attr_rw(cpb);
#endif

static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}

static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}

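/*
 * Map a raw status value read from the I/O port back to a frequency by
 * linear search over the ACPI performance states. Returns 0 if the value
 * matches no known state.
 */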
static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
{
	struct acpi_processor_performance *perf;
	int i;

	perf = data->acpi_data;

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return data->freq_table[i].frequency;
	}
	return 0;
}

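/*
 * Map a raw PERF_STATUS/PERF_CTL MSR value back to a table frequency.
 * Only the low bits identify the P-state: 16 bits on Intel, and the
 * 3-bit hardware P-state number on AMD (hence AMD_MSR_RANGE). If no
 * state matches, the first table entry is returned as a fallback.
 */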
static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
	struct cpufreq_frequency_table *pos;
	struct acpi_processor_performance *perf;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

	perf = data->acpi_data;

	cpufreq_for_each_entry(pos, data->freq_table)
		if (msr == perf->states[pos->driver_data].status)
			return pos->frequency;
	return data->freq_table[0].frequency;
}

static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		return extract_msr(val, data);
	case SYSTEM_IO_CAPABLE:
		return extract_io(val, data);
	default:
		return 0;
	}
}

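/*
 * A drv_cmd bundles everything needed to read or write a P-state control
 * value on a remote CPU: the access method (MSR vs. I/O port), the
 * register address, the target cpumask and the value itself. It is passed
 * to the do_drv_{read,write} callbacks via the smp_call_function_*()
 * cross-call helpers below.
 */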
struct msr_addr {
	u32 reg;
};

struct io_addr {
	u16 port;
	u8 bit_width;
};

struct drv_cmd {
	unsigned int type;
	const struct cpumask *mask;
	union {
		struct msr_addr msr;
		struct io_addr io;
	} addr;
	u32 val;
};

/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 h;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, cmd->val, h);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
				&cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 lo, hi;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, lo, hi);
		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
		wrmsr(cmd->addr.msr.reg, lo, hi);
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		wrmsr(cmd->addr.msr.reg, cmd->val, 0);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
				cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}

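/*
 * drv_read() runs do_drv_read() on any one online CPU in cmd->mask (one
 * CPU suffices for reading the shared domain state), while drv_write()
 * must run do_drv_write() on every CPU in the mask. Since
 * smp_call_function_many() skips the calling CPU, drv_write() handles
 * the local CPU explicitly when it is part of the mask.
 */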
static void drv_read(struct drv_cmd *cmd)
{
	int err;

	cmd->val = 0;

	err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
}

static void drv_write(struct drv_cmd *cmd)
{
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, cmd->mask))
		do_drv_write(cmd);
	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
	put_cpu();
}

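/*
 * Read the current raw P-state control value for the domain containing
 * @mask, using the access method recorded in that domain's per-cpu data.
 * Returns 0 for an empty mask or an unrecognized access method.
 */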
static u32 get_cur_val(const struct cpumask *mask)
{
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		break;
	default:
		return 0;
	}

	cmd.mask = mask;
	drv_read(&cmd);

	pr_debug("get_cur_val = %u\n", cmd.val);

	return cmd.val;
}

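/*
 * ->get() callback for the MSR case: read the hardware state and convert
 * it to a frequency. If it disagrees with the cached P-state, the BIOS
 * (or firmware) has changed the frequency behind our back, so flag a
 * forced write on the next ->target_index() call via data->resume.
 */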
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);

	if (unlikely(data == NULL ||
		     data->acpi_data == NULL || data->freq_table == NULL)) {
		return 0;
	}

	cached_freq = data->freq_table[data->acpi_data->state].frequency;
	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on the next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}

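/*
 * Used only in acpi_pstate_strict mode: poll the hardware for up to
 * 100 * 10us = 1ms, waiting for the requested frequency to take effect.
 * Returns 1 on success, 0 if the target frequency was never observed.
 */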
static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
				struct acpi_cpufreq_data *data)
{
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(get_cur_val(mask), data);
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}

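/*
 * ->target_index() callback: switch the policy's P-state domain to the
 * frequency-table entry @index. The write is skipped if the domain is
 * already in the requested state, unless data->resume forces a rewrite
 * (first call after init or system resume).
 */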
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int index)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;
	unsigned int next_perf_state = 0; /* Index into perf table */
	int result = 0;

	if (unlikely(data == NULL ||
	     data->acpi_data == NULL || data->freq_table == NULL)) {
		return -ENODEV;
	}

	perf = data->acpi_data;
	next_perf_state = data->freq_table[index].driver_data;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				next_perf_state);
			goto out;
		}
	}

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	default:
		result = -ENODEV;
		goto out;
	}

	/* cpufreq holds the hotplug lock, so we are safe from here on */
	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
		cmd.mask = policy->cpus;
	else
		cmd.mask = cpumask_of(policy->cpu);

	drv_write(&cmd);

	if (acpi_pstate_strict) {
		if (!check_freqs(cmd.mask, data->freq_table[index].frequency,
					data)) {
			pr_debug("acpi_cpufreq_target failed (%d)\n",
				policy->cpu);
			result = -EAGAIN;
		}
	}

	if (!result)
		perf->state = next_perf_state;

out:
	return result;
}

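/*
 * Used for the SYSTEM_IO case, where the current frequency cannot be read
 * back: guess the current P-state by picking the table entry closest to
 * the measured cpu_khz. The states are assumed to be ordered from fastest
 * to slowest, as is conventional for ACPI _PSS entries.
 */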
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf = data->acpi_data;

	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}

static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}

static int boost_notify(struct notifier_block *nb, unsigned long action,
		      void *hcpu)
{
	unsigned cpu = (long)hcpu;
	const struct cpumask *cpumask;

	cpumask = get_cpu_mask(cpu);

	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting. On
	 * the CPU_UP path we simply keep the boost-disable flag in
	 * sync with the current global state.
	 */

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		boost_set_msrs(1, cpumask);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block boost_nb = {
	.notifier_call          = boost_notify,
};

/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;

	pr_debug("acpi_cpufreq_early_init\n");

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}

#ifdef CONFIG_SMP
/*
 * Some BIOSes do SW_ANY coordination internally, either set it up in
 * hardware or handle it in firmware, without informing the OS. If this
 * goes undetected, it has the side effect of making the CPU run at a
 * different speed than the OS intended. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};

static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
	/*
	 * Intel Xeon Processor 7100 Series Specification Update
	 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
	 * Both Processor Cores to Lock Up.
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
		    (c->x86_mask == 8)) {
			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
			    "Xeon(R) 7100 Errata AL30, processors may "
			    "lock up on frequency changes: disabling "
			    "acpi-cpufreq.\n");
			return -ENODEV;
		}
	}
	return 0;
}
#endif

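/*
 * Per-policy init: register with the ACPI processor performance library,
 * work out the coordination domain (_PSD, with quirks applied below),
 * pick the access method for the control register, build the cpufreq
 * frequency table from the _PSS states and determine the transition
 * latency.
 */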
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	unsigned int valid_states = 0;
	unsigned int cpu = policy->cpu;
	struct acpi_cpufreq_data *data;
	unsigned int result = 0;
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	struct acpi_processor_performance *perf;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("acpi_cpufreq_cpu_init\n");

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free;
	}

	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
	per_cpu(acfreq_data, cpu) = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(data->acpi_data, cpu);
	if (result)
		goto err_free_mask;

	perf = data->acpi_data;
	policy->shared_type = perf->shared_type;

	/*
	 * Let policy->cpus reflect the domain dependency only when
	 * software coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		cpumask_copy(data->freqdomain_cpus,
			     topology_sibling_cpumask(cpu));
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once(PFX "overriding BIOS provided _PSD data\n");
	}
#endif

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			(u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	data->freq_table = kzalloc(sizeof(*data->freq_table) *
		    (perf->state_count+1), GFP_KERNEL);
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20 us) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		printk_once(KERN_INFO
			    "P-state transition latency capped at 20 us\n");
	}

	/* table init */
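	/*
	 * _PSS states are expected in decreasing-frequency order; any entry
	 * that is not strictly slower than the previous valid one (a
	 * duplicate or out-of-order value) is skipped, so the resulting
	 * table stays free of duplicates.
	 */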
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    data->freq_table[valid_states-1].frequency / 1000)
			continue;

		data->freq_table[valid_states].driver_data = i;
		data->freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
	perf->state = 0;

	result = cpufreq_table_validate_and_show(policy, data->freq_table);
	if (result)
		goto err_freqfree;

	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/*
		 * The core will not set policy->cur, because
		 * cpufreq_driver->get is NULL, so we need to set it here.
		 * However, we have to guess it, because the current speed is
		 * unknown and not detectable via IO ports.
		 */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d us\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);

	/*
	 * The first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	return result;

err_freqfree:
	kfree(data->freq_table);
err_unreg:
	acpi_processor_unregister_performance(perf, cpu);
err_free_mask:
	free_cpumask_var(data->freqdomain_cpus);
err_free:
	kfree(data);
	per_cpu(acfreq_data, cpu) = NULL;

	return result;
}

static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_cpu_exit\n");

	if (data) {
		per_cpu(acfreq_data, policy->cpu) = NULL;
		acpi_processor_unregister_performance(data->acpi_data,
						      policy->cpu);
		free_cpumask_var(data->freqdomain_cpus);
		kfree(data->freq_table);
		kfree(data);
	}

	return 0;
}

static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_resume\n");

	data->resume = 1;

	return 0;
}

static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&freqdomain_cpus,
	NULL,	/* this is a placeholder for cpb, do not remove */
	NULL,
};

static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= acpi_cpufreq_target,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.attr		= acpi_cpufreq_attr,
	.set_boost      = _store_boost,
};

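/*
 * If the CPU advertises boost support (X86_FEATURE_CPB on AMD,
 * X86_FEATURE_IDA, i.e. Intel Dynamic Acceleration, on Intel), allocate
 * the MSR snapshot buffer, sync every online CPU to the boot CPU's boost
 * state, and register the hotplug notifier that keeps late-arriving CPUs
 * consistent.
 */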
static void __init acpi_cpufreq_boost_init(void)
{
	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
		msrs = msrs_alloc();

		if (!msrs)
			return;

		acpi_cpufreq_driver.boost_supported = true;
		acpi_cpufreq_driver.boost_enabled = boost_state(0);

		cpu_notifier_register_begin();

		/* Force all MSRs to the same value */
		boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
			       cpu_online_mask);

		__register_cpu_notifier(&boost_nb);

		cpu_notifier_register_done();
	}
}

static void acpi_cpufreq_boost_exit(void)
{
	if (msrs) {
		unregister_cpu_notifier(&boost_nb);

		msrs_free(msrs);
		msrs = NULL;
	}
}

static int __init acpi_cpufreq_init(void)
{
	int ret;

	if (acpi_disabled)
		return -ENODEV;

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -EEXIST;

	pr_debug("acpi_cpufreq_init\n");

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/*
	 * This is a sysfs file with a strange name and even stranger
	 * semantics - per-CPU instantiation, but system-global effect.
	 * Let's enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which will
	 * probably be removed at some point in the future.
	 */
	if (check_amd_hwpstate_cpu(0)) {
		struct freq_attr **iter;

		pr_debug("adding sysfs entry for cpb\n");

		for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
			;

		/* make sure there is a terminator behind it */
		if (iter[1] == NULL)
			*iter = &cpb;
	}
#endif
	acpi_cpufreq_boost_init();

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret) {
		free_acpi_perf_data();
		acpi_cpufreq_boost_exit();
	}
	return ret;
}

static void __exit acpi_cpufreq_exit(void)
{
	pr_debug("acpi_cpufreq_exit\n");

	acpi_cpufreq_boost_exit();

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}

module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

static const struct x86_cpu_id acpi_cpufreq_ids[] = {
	X86_FEATURE_MATCH(X86_FEATURE_ACPI),
	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);

static const struct acpi_device_id processor_device_ids[] = {
	{ACPI_PROCESSOR_OBJECT_HID, },
	{ACPI_PROCESSOR_DEVICE_HID, },
	{},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

MODULE_ALIAS("acpi");