/*
 * This file provides the ACPI based P-state support. This
 * module works with the generic cpufreq infrastructure. Most of
 * the code is based on the i386 version
 * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c)
 *
 * Copyright (C) 2005 Intel Corp
 *      Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pal.h>

#include <linux/acpi.h>
#include <acpi/processor.h>

MODULE_AUTHOR("Venkatesh Pallipadi");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");


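/*
 * Per-CPU driver state: the ACPI performance data, the cpufreq frequency
 * table built from it, and a flag that forces the first ->target() call
 * after resume to actually reprogram the P-state.
 */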
struct cpufreq_acpi_io {
	struct acpi_processor_performance	acpi_data;
	struct cpufreq_frequency_table		*freq_table;
	unsigned int				resume;
};

static struct cpufreq_acpi_io	*acpi_io_data[NR_CPUS];

static struct cpufreq_driver acpi_cpufreq_driver;


static int
processor_set_pstate (
	u32	value)
{
	s64 retval;

	pr_debug("processor_set_pstate\n");

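	/* Ask PAL firmware to switch to the requested P-state index */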
	retval = ia64_pal_set_pstate((u64)value);

	if (retval) {
		pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n",
		        value, retval);
		return -ENODEV;
	}
	return (int)retval;
}


static int
processor_get_pstate (
	u32	*value)
{
	u64	pstate_index = 0;
	s64	retval;

	pr_debug("processor_get_pstate\n");

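	/* Read the instantaneous P-state index from PAL firmware */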
	retval = ia64_pal_get_pstate(&pstate_index,
	                             PAL_GET_PSTATE_TYPE_INSTANT);
	*value = (u32) pstate_index;

	if (retval)
		pr_debug("Failed to get current freq with "
			"error 0x%lx, idx 0x%x\n", retval, *value);

	return (int)retval;
}


/* To be used only after data->acpi_data is initialized */
static unsigned
extract_clock (
	struct cpufreq_acpi_io *data,
	unsigned value,
	unsigned int cpu)
{
	unsigned long i;

	pr_debug("extract_clock\n");

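	/* Match the PAL status value against the ACPI _PSS entries */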
	for (i = 0; i < data->acpi_data.state_count; i++) {
		if (value == data->acpi_data.states[i].status)
			return data->acpi_data.states[i].core_frequency;
	}
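	/* No match: fall back to the last (lowest-frequency) entry */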
	return data->acpi_data.states[i-1].core_frequency;
}


static unsigned int
processor_get_freq (
	struct cpufreq_acpi_io	*data,
	unsigned int		cpu)
{
	int			ret = 0;
	u32			value = 0;
	cpumask_t		saved_mask;
	unsigned long		clock_freq;

	pr_debug("processor_get_freq\n");

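	/* Temporarily bind to the target CPU so the PAL call runs there */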
	saved_mask = current->cpus_allowed;
	set_cpus_allowed_ptr(current, cpumask_of(cpu));
	if (smp_processor_id() != cpu)
		goto migrate_end;

	/* processor_get_pstate gets the instantaneous frequency */
	ret = processor_get_pstate(&value);

	if (ret) {
		set_cpus_allowed_ptr(current, &saved_mask);
		printk(KERN_WARNING "get performance failed with error %d\n",
		       ret);
		ret = 0;
		goto migrate_end;
	}
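	/* extract_clock() returns MHz; cpufreq reports frequencies in kHz */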
	clock_freq = extract_clock(data, value, cpu);
	ret = (clock_freq*1000);

migrate_end:
	set_cpus_allowed_ptr(current, &saved_mask);
	return ret;
}


static int
processor_set_freq (
	struct cpufreq_acpi_io	*data,
	struct cpufreq_policy   *policy,
	int			state)
{
	int			ret = 0;
	u32			value = 0;
	struct cpufreq_freqs    cpufreq_freqs;
	cpumask_t		saved_mask;
	int			retval;

	pr_debug("processor_set_freq\n");

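	/* Bind to the policy's CPU so the PAL call takes effect there */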
	saved_mask = current->cpus_allowed;
	set_cpus_allowed_ptr(current, cpumask_of(policy->cpu));
	if (smp_processor_id() != policy->cpu) {
		retval = -EAGAIN;
		goto migrate_end;
	}

	if (state == data->acpi_data.state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n", state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n", state);
			retval = 0;
			goto migrate_end;
		}
	}

	pr_debug("Transitioning from P%d to P%d\n",
		data->acpi_data.state, state);

	/* cpufreq frequency struct */
	cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
	cpufreq_freqs.new = data->freq_table[state].frequency;

	/* notify cpufreq */
	cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_PRECHANGE);

	/*
	 * First we write the target state's 'control' value to the
	 * control_register.
	 */

	value = (u32) data->acpi_data.states[state].control;

	pr_debug("Transitioning to state: 0x%08x\n", value);

	ret = processor_set_pstate(value);
	if (ret) {
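		/*
		 * The transition failed: finish the announced transition,
		 * then announce and complete a reverse transition so
		 * cpufreq's notion of the current frequency stays correct.
		 */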
		unsigned int tmp = cpufreq_freqs.new;
		cpufreq_notify_transition(policy, &cpufreq_freqs,
				CPUFREQ_POSTCHANGE);
		cpufreq_freqs.new = cpufreq_freqs.old;
		cpufreq_freqs.old = tmp;
		cpufreq_notify_transition(policy, &cpufreq_freqs,
				CPUFREQ_PRECHANGE);
		cpufreq_notify_transition(policy, &cpufreq_freqs,
				CPUFREQ_POSTCHANGE);
		printk(KERN_WARNING "Transition failed with error %d\n", ret);
		retval = -ENODEV;
		goto migrate_end;
	}

	cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_POSTCHANGE);

	data->acpi_data.state = state;

	retval = 0;

migrate_end:
	set_cpus_allowed_ptr(current, &saved_mask);
	return (retval);
}


static unsigned int
acpi_cpufreq_get (
	unsigned int		cpu)
{
	struct cpufreq_acpi_io *data = acpi_io_data[cpu];

	pr_debug("acpi_cpufreq_get\n");

	return processor_get_freq(data, cpu);
}


static int
acpi_cpufreq_target (
	struct cpufreq_policy   *policy,
	unsigned int target_freq,
	unsigned int relation)
{
	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
	unsigned int next_state = 0;
	unsigned int result = 0;

	pr_debug("acpi_cpufreq_target\n");

	result = cpufreq_frequency_table_target(policy,
			data->freq_table, target_freq, relation, &next_state);
	if (result)
		return (result);

	result = processor_set_freq(data, policy, next_state);

	return (result);
}


static int
acpi_cpufreq_verify (
	struct cpufreq_policy   *policy)
{
	unsigned int result = 0;
	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];

	pr_debug("acpi_cpufreq_verify\n");

	result = cpufreq_frequency_table_verify(policy,
			data->freq_table);

	return (result);
}


static int
acpi_cpufreq_cpu_init (
	struct cpufreq_policy   *policy)
{
	unsigned int		i;
	unsigned int		cpu = policy->cpu;
	struct cpufreq_acpi_io	*data;
	unsigned int		result = 0;

	pr_debug("acpi_cpufreq_cpu_init\n");

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return (-ENOMEM);

	acpi_io_data[cpu] = data;

	result = acpi_processor_register_performance(&data->acpi_data, cpu);

	if (result)
		goto err_free;

	/* capability check */
	if (data->acpi_data.state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if ((data->acpi_data.control_register.space_id !=
					ACPI_ADR_SPACE_FIXED_HARDWARE) ||
	    (data->acpi_data.status_register.space_id !=
					ACPI_ADR_SPACE_FIXED_HARDWARE)) {
		pr_debug("Unsupported address space [%d, %d]\n",
			(u32) (data->acpi_data.control_register.space_id),
			(u32) (data->acpi_data.status_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	/* alloc freq_table */
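	/* state_count entries plus one CPUFREQ_TABLE_END terminator */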
	data->freq_table = kmalloc(sizeof(*data->freq_table) *
	                           (data->acpi_data.state_count + 1),
	                           GFP_KERNEL);
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
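	/* _PSS latencies are in microseconds; cpufreq expects nanoseconds */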
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < data->acpi_data.state_count; i++) {
		if ((data->acpi_data.states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency) {
			policy->cpuinfo.transition_latency =
			    data->acpi_data.states[i].transition_latency * 1000;
		}
	}
	policy->cur = processor_get_freq(data, policy->cpu);

	/* table init */
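	/* core_frequency is in MHz, the table is in kHz; the extra last
	 * slot terminates the table */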
	for (i = 0; i <= data->acpi_data.state_count; i++) {
		data->freq_table[i].driver_data = i;
		if (i < data->acpi_data.state_count) {
			data->freq_table[i].frequency =
			      data->acpi_data.states[i].core_frequency * 1000;
		} else {
			data->freq_table[i].frequency = CPUFREQ_TABLE_END;
		}
	}

	result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
	if (result)
		goto err_freqfree;

	/* notify BIOS that we exist */
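	/* (SMI handshake so the firmware hands P-state control to the OS) */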
	acpi_processor_notify_smm(THIS_MODULE);

	printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management "
	       "activated.\n", cpu);

	for (i = 0; i < data->acpi_data.state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
			(i == data->acpi_data.state ? '*' : ' '), i,
			(u32) data->acpi_data.states[i].core_frequency,
			(u32) data->acpi_data.states[i].power,
			(u32) data->acpi_data.states[i].transition_latency,
			(u32) data->acpi_data.states[i].bus_master_latency,
			(u32) data->acpi_data.states[i].status,
			(u32) data->acpi_data.states[i].control);

	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);

	/* the first call to ->target() should result in us actually
	 * writing something to the appropriate registers. */
	data->resume = 1;

	return (result);

 err_freqfree:
	kfree(data->freq_table);
 err_unreg:
	acpi_processor_unregister_performance(&data->acpi_data, cpu);
 err_free:
	kfree(data);
	acpi_io_data[cpu] = NULL;

	return (result);
}


static int
acpi_cpufreq_cpu_exit (
	struct cpufreq_policy   *policy)
{
	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];

	pr_debug("acpi_cpufreq_cpu_exit\n");

	if (data) {
		cpufreq_frequency_table_put_attr(policy->cpu);
		acpi_io_data[policy->cpu] = NULL;
		acpi_processor_unregister_performance(&data->acpi_data,
		                                      policy->cpu);
		kfree(data);
	}

	return (0);
}


static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};


static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= acpi_cpufreq_verify,
	.target		= acpi_cpufreq_target,
	.get		= acpi_cpufreq_get,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.name		= "acpi-cpufreq",
	.attr		= acpi_cpufreq_attr,
};


static int __init
acpi_cpufreq_init (void)
{
	pr_debug("acpi_cpufreq_init\n");

	return cpufreq_register_driver(&acpi_cpufreq_driver);
}


static void __exit
acpi_cpufreq_exit (void)
{
	pr_debug("acpi_cpufreq_exit\n");

	cpufreq_unregister_driver(&acpi_cpufreq_driver);
}


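/* Register late so the ACPI processor driver has set up _PSS data first */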
late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);