xref: /openbmc/linux/drivers/acpi/processor_throttling.c (revision baa7eb025ab14f3cba2e35c0a8648f9c9f01d24f)
/*
 * processor_throttling.c - Throttling submodule of the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  			- Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#ifdef CONFIG_ACPI_PROCFS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_throttling");

/* ignore_tpc:
 *  0 -> acpi processor driver doesn't ignore _TPC values
 *  1 -> acpi processor driver ignores _TPC values
 */
static int ignore_tpc;
module_param(ignore_tpc, int, 0644);
MODULE_PARM_DESC(ignore_tpc, "Disable broken BIOS _TPC throttling support");
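/*
 * Note (assuming the usual processor-module build): when this driver is
 * built in, the parameter can typically be set on the kernel command line
 * as processor.ignore_tpc=1; when built as a module, pass ignore_tpc=1
 * at load time.
 */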

struct throttling_tstate {
	unsigned int cpu;		/* cpu nr */
	int target_state;		/* target T-state */
};

#define THROTTLING_PRECHANGE       (1)
#define THROTTLING_POSTCHANGE      (2)
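
/*
 * PRECHANGE is delivered before a T-state switch so the target state can
 * be clamped against the thermal, user and _TPC limits; POSTCHANGE is
 * delivered afterwards so each affected CPU records the new state.
 */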

static int acpi_processor_get_throttling(struct acpi_processor *pr);
int acpi_processor_set_throttling(struct acpi_processor *pr,
						int state, bool force);

static int acpi_processor_update_tsd_coord(void)
{
	int count, count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr, *match_pr;
	struct acpi_tsd_package *pdomain, *match_pdomain;
	struct acpi_processor_throttling *pthrottling, *match_pthrottling;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _TSD data from all CPUs, let's set up T-state
	 * coordination among all CPUs.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/* Basic validity check for domain info */
		pthrottling = &(pr->throttling);

		/*
		 * If the _TSD package for any one CPU is invalid, the
		 * coordination among all CPUs is treated as invalid.
		 */
		if (!pthrottling->tsd_valid_flag) {
			retval = -EINVAL;
			break;
		}
	}
	if (retval)
		goto err_ret;

	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;
		pthrottling = &pr->throttling;

		pdomain = &(pthrottling->domain_info);
		cpumask_set_cpu(i, pthrottling->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		/*
		 * If the number of processors in the TSD domain is 1, it is
		 * unnecessary to parse the coordination for this CPU.
		 */
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		count = 1;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pthrottling = &(match_pr->throttling);
			match_pdomain = &(match_pthrottling->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain.
			 * If two TSD packages have the same domain, they
			 * should have the same num_processors and
			 * coordination type. Otherwise they are regarded
			 * as illegal.
			 */
			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pthrottling->shared_cpu_map);
			count++;
		}
		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pthrottling = &(match_pr->throttling);
			match_pdomain = &(match_pthrottling->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/*
			 * CPUs in the same domain get the same
			 * shared_cpu_map.
			 */
			cpumask_copy(match_pthrottling->shared_cpu_map,
				     pthrottling->shared_cpu_map);
		}
	}

err_ret:
	free_cpumask_var(covered_cpus);

	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/*
		 * Assume no coordination on any error parsing domain info.
		 * The coordination type will be forced to SW_ALL.
		 */
		if (retval) {
			pthrottling = &(pr->throttling);
			cpumask_clear(pthrottling->shared_cpu_map);
			cpumask_set_cpu(i, pthrottling->shared_cpu_map);
			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
		}
	}

	return retval;
}

/*
 * Update the T-state coordination after the _TSD
 * data for all cpus is obtained.
 */
void acpi_processor_throttling_init(void)
{
	if (acpi_processor_update_tsd_coord())
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"Assume no T-state coordination\n"));

	return;
}

static int acpi_processor_throttling_notifier(unsigned long event, void *data)
{
	struct throttling_tstate *p_tstate = data;
	struct acpi_processor *pr;
	unsigned int cpu;
	int target_state;
	struct acpi_processor_limit *p_limit;
	struct acpi_processor_throttling *p_throttling;

	cpu = p_tstate->cpu;
	pr = per_cpu(processors, cpu);
	if (!pr) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
		return 0;
	}
	if (!pr->flags.throttling) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
				"unsupported on CPU %d\n", cpu));
		return 0;
	}
	target_state = p_tstate->target_state;
	p_throttling = &(pr->throttling);
	switch (event) {
	case THROTTLING_PRECHANGE:
		/*
		 * The prechange event is used to choose a proper T-state,
		 * one that meets the thermal, user and _TPC limits.
		 */
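		/*
		 * A higher T-state number means deeper throttling, so the
		 * deepest (largest) of the requested, thermal, user and
		 * _TPC states wins.  For example, a request for T1 with a
		 * thermal limit of T2 and a _TPC limit of T3 resolves to T3.
		 */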
		p_limit = &pr->limit;
		if (p_limit->thermal.tx > target_state)
			target_state = p_limit->thermal.tx;
		if (p_limit->user.tx > target_state)
			target_state = p_limit->user.tx;
		if (pr->throttling_platform_limit > target_state)
			target_state = pr->throttling_platform_limit;
		if (target_state >= p_throttling->state_count) {
			printk(KERN_WARNING
				"Exceeded the limit of T-state\n");
			target_state = p_throttling->state_count - 1;
		}
		p_tstate->target_state = target_state;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event: "
				"target T-state of CPU %d is T%d\n",
				cpu, target_state));
		break;
	case THROTTLING_POSTCHANGE:
		/*
		 * The postchange event is only used to update the
		 * T-state flag of acpi_processor_throttling.
		 */
		p_throttling->state = target_state;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event: "
				"CPU %d is switched to T%d\n",
				cpu, target_state));
		break;
	default:
		printk(KERN_WARNING
			"Unsupported throttling notifier event\n");
		break;
	}

	return 0;
}

/*
 * _TPC - Throttling Present Capabilities
 */
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	acpi_status status = 0;
	unsigned long long tpc = 0;

	if (!pr)
		return -EINVAL;

	if (ignore_tpc)
		goto end;

	status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
		}
		return -ENODEV;
	}

end:
	pr->throttling_platform_limit = (int)tpc;
	return 0;
}

int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
{
	int result = 0;
	int throttling_limit;
	int current_state;
	struct acpi_processor_limit *limit;
	int target_state;

	if (ignore_tpc)
		return 0;

	result = acpi_processor_get_platform_limit(pr);
	if (result) {
		/* Throttling Limit is unsupported */
		return result;
	}

	throttling_limit = pr->throttling_platform_limit;
	if (throttling_limit >= pr->throttling.state_count) {
		/* Incorrect Throttling Limit */
		return -EINVAL;
	}

	current_state = pr->throttling.state;
	if (current_state > throttling_limit) {
		/*
		 * The current state already meets the _TPC limit, but it
		 * is reasonable for OSPM to move to a lower T-state (less
		 * throttling) for better performance.  The thermal and
		 * user limit conditions must still be honored.
		 */
		limit = &pr->limit;
		target_state = throttling_limit;
		if (limit->thermal.tx > target_state)
			target_state = limit->thermal.tx;
		if (limit->user.tx > target_state)
			target_state = limit->user.tx;
	} else if (current_state == throttling_limit) {
		/*
		 * Unnecessary to change the throttling state
		 */
		return 0;
	} else {
		/*
		 * If the current state is below the _TPC limit, it is
		 * forced to the throttling state defined by
		 * throttling_platform_limit.
		 * Because the previous state already met the thermal and
		 * user limit conditions, it is unnecessary to check them
		 * again.
		 */
		target_state = throttling_limit;
	}
	return acpi_processor_set_throttling(pr, target_state, false);
}

/*
 * _PTC - Processor Throttling Control (and status) register location
 */
static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *ptc = NULL;
	union acpi_object obj = { 0 };
	struct acpi_processor_throttling *throttling;

	status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
		}
		return -ENODEV;
	}

	ptc = (union acpi_object *)buffer.pointer;
	if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
	    || (ptc->package.count != 2)) {
		printk(KERN_ERR PREFIX "Invalid _PTC data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */

	obj = ptc->package.elements[0];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX
		       "Invalid _PTC data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->throttling.control_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	/*
	 * status_register
	 */

	obj = ptc->package.elements[1];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
		result = -EFAULT;
		goto end;
	}

	memcpy(&pr->throttling.status_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	throttling = &pr->throttling;

	if ((throttling->control_register.bit_width +
		throttling->control_register.bit_offset) > 32) {
		printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
		result = -EFAULT;
		goto end;
	}

	if ((throttling->status_register.bit_width +
		throttling->status_register.bit_offset) > 32) {
		printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
		result = -EFAULT;
		goto end;
	}

end:
	kfree(buffer.pointer);

	return result;
}

/*
 * _TSS - Throttling Supported States
 */
static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
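	/*
	 * "NNNNN" matches the five integers in each _TSS entry:
	 * { FreqPercent, Power, TransitionLatency, Control, Status },
	 * i.e. the fields of struct acpi_processor_tx_tss.
	 */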
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *tss = NULL;
	int i;

	status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
		}
		return -ENODEV;
	}

	tss = buffer.pointer;
	if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _TSS data\n");
		result = -EFAULT;
		goto end;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
			  tss->package.count));

	pr->throttling.state_count = tss->package.count;
	pr->throttling.states_tss =
	    kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
		    GFP_KERNEL);
	if (!pr->throttling.states_tss) {
		result = -ENOMEM;
		goto end;
	}

	for (i = 0; i < pr->throttling.state_count; i++) {

		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[i]);

		state.length = sizeof(struct acpi_processor_tx_tss);
		state.pointer = tx;

		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

		status = acpi_extract_package(&(tss->package.elements[i]),
					      &format, &state);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
			result = -EFAULT;
			kfree(pr->throttling.states_tss);
			goto end;
		}

		if (!tx->freqpercentage) {
			printk(KERN_ERR PREFIX
			       "Invalid _TSS data: freq is zero\n");
			result = -EFAULT;
			kfree(pr->throttling.states_tss);
			goto end;
		}
	}

end:
	kfree(buffer.pointer);

	return result;
}

/*
 * _TSD - T-State Dependencies
 */
static int acpi_processor_get_tsd(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *tsd = NULL;
	struct acpi_tsd_package *pdomain;
	struct acpi_processor_throttling *pthrottling;

	pthrottling = &pr->throttling;
	pthrottling->tsd_valid_flag = 0;

	status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
		}
		return -ENODEV;
	}

	tsd = buffer.pointer;
	if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (tsd->package.count != 1) {
		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
		result = -EFAULT;
		goto end;
	}

	pdomain = &(pr->throttling.domain_info);

	state.length = sizeof(struct acpi_tsd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(tsd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
		printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
		printk(KERN_ERR PREFIX "Unknown _TSD:revision\n");
		result = -EFAULT;
		goto end;
	}

	pthrottling = &pr->throttling;
	pthrottling->tsd_valid_flag = 1;
	pthrottling->shared_type = pdomain->coord_type;
	cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
	/*
	 * If the coordination type is not defined in the ACPI spec,
	 * tsd_valid_flag will be cleared and the coordination type
	 * will be forced to DOMAIN_COORD_TYPE_SW_ALL.
	 */
	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
		pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
		pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pthrottling->tsd_valid_flag = 0;
		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
	}

end:
	kfree(buffer.pointer);
	return result;
}

/* --------------------------------------------------------------------------
                              Throttling Control
   -------------------------------------------------------------------------- */
static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
{
	int state = 0;
	u32 value = 0;
	u32 duty_mask = 0;
	u32 duty_value = 0;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	pr->throttling.state = 0;

	duty_mask = pr->throttling.state_count - 1;

	duty_mask <<= pr->throttling.duty_offset;

	local_irq_disable();

	value = inl(pr->throttling.address);

	/*
	 * Compute the current throttling state when throttling is enabled
	 * (bit 4 is on).
	 */
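	/*
	 * Worked example (hypothetical values): with duty_width = 3 the
	 * driver has state_count = 8, so duty_mask = 7 << duty_offset.
	 * If the register reads 0x1C with duty_offset = 1, bit 4 (0x10)
	 * is set, duty_value = (0x1C & 0xE) >> 1 = 6, and the resulting
	 * state is 8 - 6 = T2.
	 */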
	if (value & 0x10) {
		duty_value = value & duty_mask;
		duty_value >>= pr->throttling.duty_offset;

		if (duty_value)
			state = pr->throttling.state_count - duty_value;
	}

	pr->throttling.state = state;

	local_irq_enable();

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Throttling state is T%d (%d%% throttling applied)\n",
			  state, pr->throttling.states[state].performance));

	return 0;
}

#ifdef CONFIG_X86
static int acpi_throttling_rdmsr(struct acpi_processor *pr,
					u64 *value)
{
	struct cpuinfo_x86 *c;
	u64 msr_high, msr_low;
	unsigned int cpu;
	u64 msr = 0;
	int ret = -1;

	cpu = pr->id;
	c = &cpu_data(cpu);

	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
		!cpu_has(c, X86_FEATURE_ACPI)) {
		printk(KERN_ERR PREFIX
			"HARDWARE addr space, NOT supported yet\n");
	} else {
		msr_low = 0;
		msr_high = 0;
		rdmsr_safe(MSR_IA32_THERM_CONTROL,
			(u32 *)&msr_low, (u32 *)&msr_high);
		msr = (msr_high << 32) | msr_low;
		*value = (u64) msr;
		ret = 0;
	}
	return ret;
}

static int acpi_throttling_wrmsr(struct acpi_processor *pr, u64 value)
{
	struct cpuinfo_x86 *c;
	unsigned int cpu;
	int ret = -1;
	u64 msr;

	cpu = pr->id;
	c = &cpu_data(cpu);

	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
		!cpu_has(c, X86_FEATURE_ACPI)) {
		printk(KERN_ERR PREFIX
			"HARDWARE addr space, NOT supported yet\n");
	} else {
		msr = value;
		wrmsr_safe(MSR_IA32_THERM_CONTROL,
			msr & 0xffffffff, msr >> 32);
		ret = 0;
	}
	return ret;
}
#else
static int acpi_throttling_rdmsr(struct acpi_processor *pr,
				u64 *value)
{
	printk(KERN_ERR PREFIX
		"HARDWARE addr space, NOT supported yet\n");
	return -1;
}

static int acpi_throttling_wrmsr(struct acpi_processor *pr, u64 value)
{
	printk(KERN_ERR PREFIX
		"HARDWARE addr space, NOT supported yet\n");
	return -1;
}
#endif

static int acpi_read_throttling_status(struct acpi_processor *pr,
					u64 *value)
{
	u32 bit_width, bit_offset;
	u64 ptc_value;
	u64 ptc_mask;
	struct acpi_processor_throttling *throttling;
	int ret = -1;

	throttling = &pr->throttling;
	switch (throttling->status_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		ptc_value = 0;
		bit_width = throttling->status_register.bit_width;
		bit_offset = throttling->status_register.bit_offset;

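		/*
		 * Read bit_offset + bit_width bits from the port, then keep
		 * only the T-state field.  For example (hypothetical
		 * values), bit_offset = 1 and bit_width = 4 on a port
		 * reading 0x1A give (0x1A >> 1) & 0xF = 0xD.
		 */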
		acpi_os_read_port((acpi_io_address) throttling->status_register.
				  address, (u32 *) &ptc_value,
				  (u32) (bit_width + bit_offset));
		ptc_mask = (1 << bit_width) - 1;
		*value = (u64) ((ptc_value >> bit_offset) & ptc_mask);
		ret = 0;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		ret = acpi_throttling_rdmsr(pr, value);
		break;
	default:
		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
		       (u32) (throttling->status_register.space_id));
	}
	return ret;
}

static int acpi_write_throttling_state(struct acpi_processor *pr,
				u64 value)
{
	u32 bit_width, bit_offset;
	u64 ptc_value;
	u64 ptc_mask;
	struct acpi_processor_throttling *throttling;
	int ret = -1;

	throttling = &pr->throttling;
	switch (throttling->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		bit_width = throttling->control_register.bit_width;
		bit_offset = throttling->control_register.bit_offset;
		ptc_mask = (1 << bit_width) - 1;
		ptc_value = value & ptc_mask;

		acpi_os_write_port((acpi_io_address) throttling->
					control_register.address,
					(u32) (ptc_value << bit_offset),
					(u32) (bit_width + bit_offset));
		ret = 0;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		ret = acpi_throttling_wrmsr(pr, value);
		break;
	default:
		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
		       (u32) (throttling->control_register.space_id));
	}
	return ret;
}

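/*
 * Map a _TSS control value back to its T-state index; returns -1 when
 * no state matches.
 */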
static int acpi_get_throttling_state(struct acpi_processor *pr,
				u64 value)
{
	int i;

	for (i = 0; i < pr->throttling.state_count; i++) {
		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[i]);
		if (tx->control == value)
			return i;
	}
	return -1;
}

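/*
 * Look up the _TSS control value that must be written to enter the
 * given T-state; returns -1 when the state index is out of range.
 */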
static int acpi_get_throttling_value(struct acpi_processor *pr,
			int state, u64 *value)
{
	int ret = -1;

	if (state >= 0 && state < pr->throttling.state_count) {
		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[state]);
		*value = tx->control;
		ret = 0;
	}
	return ret;
}

static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
{
	int state = 0;
	int ret;
	u64 value;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	pr->throttling.state = 0;

	value = 0;
	ret = acpi_read_throttling_status(pr, &value);
	if (ret >= 0) {
		state = acpi_get_throttling_state(pr, value);
		if (state == -1) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				"Invalid throttling state, reset\n"));
			state = 0;
			ret = acpi_processor_set_throttling(pr, state, true);
			if (ret)
				return ret;
		}
		pr->throttling.state = state;
	}

	return 0;
}

static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
	cpumask_var_t saved_mask;
	int ret;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Migrate the current task to the CPU indicated by pr.
	 */
	cpumask_copy(saved_mask, &current->cpus_allowed);
	/* FIXME: use work_on_cpu() */
	set_cpus_allowed_ptr(current, cpumask_of(pr->id));
	ret = pr->throttling.acpi_processor_get_throttling(pr);
	/* restore the previous state */
	set_cpus_allowed_ptr(current, saved_mask);
	free_cpumask_var(saved_mask);

	return ret;
}

static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
{
	int i, step;

	if (!pr->throttling.address) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
		return -EINVAL;
	} else if (!pr->throttling.duty_width) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
		return -EINVAL;
	}
	/* TBD: Support duty_cycle values that span bit 4. */
	else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
		printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
		return -EINVAL;
	}

	pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;

	/*
	 * Compute state values. Note that throttling displays a linear power
	 * performance relationship (at 50% performance the CPU will consume
	 * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
	 */
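	/*
	 * For example, duty_width = 3 gives state_count = 8 and
	 * step = 1000 / 8 = 125, so T0 = 1000 (100.0%), T1 = 875 (87.5%),
	 * ... T7 = 125 (12.5%).
	 */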

	step = (1000 / pr->throttling.state_count);

	for (i = 0; i < pr->throttling.state_count; i++) {
		pr->throttling.states[i].performance = 1000 - step * i;
		pr->throttling.states[i].power = 1000 - step * i;
	}
	return 0;
}

static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
					      int state, bool force)
{
	u32 value = 0;
	u32 duty_mask = 0;
	u32 duty_value = 0;

	if (!pr)
		return -EINVAL;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if (!force && (state == pr->throttling.state))
		return 0;

	if (state < pr->throttling_platform_limit)
		return -EPERM;
	/*
	 * Calculate the duty_value and duty_mask.
	 */
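	/*
	 * E.g., entering T2 with state_count = 8 and duty_offset = 1
	 * (hypothetical values) gives duty_value = (8 - 2) << 1 = 0xC,
	 * while duty_mask = ~(7 << 1) clears the old duty field.
	 */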
	if (state) {
		duty_value = pr->throttling.state_count - state;

		duty_value <<= pr->throttling.duty_offset;

		/* Used to clear all duty_value bits */
		duty_mask = pr->throttling.state_count - 1;

		duty_mask <<= acpi_gbl_FADT.duty_offset;
		duty_mask = ~duty_mask;
	}

	local_irq_disable();

	/*
	 * Disable throttling by writing a 0 to bit 4.  Note that we must
	 * turn it off before we can change the duty_value.
	 */
	value = inl(pr->throttling.address);
	if (value & 0x10) {
		value &= 0xFFFFFFEF;
		outl(value, pr->throttling.address);
	}

	/*
	 * Write the new duty_value and then enable throttling.  Note
	 * that a state value of 0 leaves throttling disabled.
	 */
	if (state) {
		value &= duty_mask;
		value |= duty_value;
		outl(value, pr->throttling.address);

		value |= 0x00000010;
		outl(value, pr->throttling.address);
	}

	pr->throttling.state = state;

	local_irq_enable();

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Throttling state set to T%d (%d%%)\n", state,
			  (pr->throttling.states[state].performance ? pr->
			   throttling.states[state].performance / 10 : 0)));

	return 0;
}

static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
					     int state, bool force)
{
	int ret;
	u64 value;

	if (!pr)
		return -EINVAL;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if (!force && (state == pr->throttling.state))
		return 0;

	if (state < pr->throttling_platform_limit)
		return -EPERM;

	value = 0;
	ret = acpi_get_throttling_value(pr, state, &value);
	if (ret >= 0) {
		acpi_write_throttling_state(pr, value);
		pr->throttling.state = state;
	}

	return 0;
}

int acpi_processor_set_throttling(struct acpi_processor *pr,
						int state, bool force)
{
	cpumask_var_t saved_mask;
	int ret = 0;
	unsigned int i;
	struct acpi_processor *match_pr;
	struct acpi_processor_throttling *p_throttling;
	struct throttling_tstate t_state;
	cpumask_var_t online_throttling_cpus;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
		return -ENOMEM;

	if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
		free_cpumask_var(saved_mask);
		return -ENOMEM;
	}

	cpumask_copy(saved_mask, &current->cpus_allowed);
	t_state.target_state = state;
	p_throttling = &(pr->throttling);
	cpumask_and(online_throttling_cpus, cpu_online_mask,
		    p_throttling->shared_cpu_map);
	/*
	 * The throttling notifier is called for every affected CPU in
	 * order to settle on one proper T-state.
	 * The notifier event is THROTTLING_PRECHANGE.
	 */
	for_each_cpu(i, online_throttling_cpus) {
		t_state.cpu = i;
		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
							&t_state);
	}
	/*
	 * acpi_processor_set_throttling is called to switch the T-state.
	 * If the coordination type is SW_ALL or HW_ALL, it must be called
	 * for every affected CPU. Otherwise it can be called only for the
	 * CPU indicated by pr.
	 */
	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
		/* FIXME: use work_on_cpu() */
		set_cpus_allowed_ptr(current, cpumask_of(pr->id));
		ret = p_throttling->acpi_processor_set_throttling(pr,
						t_state.target_state, force);
	} else {
		/*
		 * When the T-state coordination is SW_ALL or HW_ALL,
		 * it is necessary to set the T-state for every affected
		 * CPU.
		 */
		for_each_cpu(i, online_throttling_cpus) {
			match_pr = per_cpu(processors, i);
			/*
			 * If the pointer is invalid, report an error
			 * message and continue.
			 */
			if (!match_pr) {
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"Invalid Pointer for CPU %d\n", i));
				continue;
			}
			/*
			 * If throttling control is unsupported on CPU i,
			 * report an error message and continue.
			 */
			if (!match_pr->flags.throttling) {
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"Throttling control is unsupported "
					"on CPU %d\n", i));
				continue;
			}
			t_state.cpu = i;
			/* FIXME: use work_on_cpu() */
			set_cpus_allowed_ptr(current, cpumask_of(i));
			ret = match_pr->throttling.
				acpi_processor_set_throttling(
				match_pr, t_state.target_state, force);
		}
	}
	/*
	 * After set_throttling has been called, the throttling notifier
	 * is called for every affected CPU to update the T-states.
	 * The notifier event is THROTTLING_POSTCHANGE.
	 */
	for_each_cpu(i, online_throttling_cpus) {
		t_state.cpu = i;
		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
							&t_state);
	}
	/* restore the previous state */
	/* FIXME: use work_on_cpu() */
	set_cpus_allowed_ptr(current, saved_mask);
	free_cpumask_var(online_throttling_cpus);
	free_cpumask_var(saved_mask);
	return ret;
}

int acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
	int result = 0;
	struct acpi_processor_throttling *pthrottling;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
			  pr->throttling.address,
			  pr->throttling.duty_offset,
			  pr->throttling.duty_width));

	/*
	 * Evaluate _PTC, _TSS and _TPC.
	 * They must all be present or none of them can be used.
	 */
	if (acpi_processor_get_throttling_control(pr) ||
		acpi_processor_get_throttling_states(pr) ||
		acpi_processor_get_platform_limit(pr)) {
		pr->throttling.acpi_processor_get_throttling =
		    &acpi_processor_get_throttling_fadt;
		pr->throttling.acpi_processor_set_throttling =
		    &acpi_processor_set_throttling_fadt;
		if (acpi_processor_get_fadt_info(pr))
			return 0;
	} else {
		pr->throttling.acpi_processor_get_throttling =
		    &acpi_processor_get_throttling_ptc;
		pr->throttling.acpi_processor_set_throttling =
		    &acpi_processor_set_throttling_ptc;
	}

	/*
	 * If the _TSD package for a CPU can't be parsed successfully, it
	 * means that this CPU will have no coordination with other CPUs.
	 */
	if (acpi_processor_get_tsd(pr)) {
		pthrottling = &pr->throttling;
		pthrottling->tsd_valid_flag = 0;
		cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
	}

	/*
	 * PIIX4 Errata: We don't support throttling on the original PIIX4.
	 * This shouldn't be an issue as few (if any) mobile systems ever
	 * used this part.
	 */
	if (errata.piix4.throttle) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Throttling not supported on PIIX4 A- or B-step\n"));
		return 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
			  pr->throttling.state_count));

	pr->flags.throttling = 1;

	/*
	 * Disable throttling (if enabled).  We'll let subsequent policy (e.g.
	 * thermal) decide to lower performance if it so chooses, but for now
	 * we'll crank up the speed.
	 */

	result = acpi_processor_get_throttling(pr);
	if (result)
		goto end;

	if (pr->throttling.state) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Disabling throttling (was T%d)\n",
				  pr->throttling.state));
		result = acpi_processor_set_throttling(pr, 0, false);
		if (result)
			goto end;
	}

end:
	if (result)
		pr->flags.throttling = 0;

	return result;
}

#ifdef CONFIG_ACPI_PROCFS
/* proc interface */
static int acpi_processor_throttling_seq_show(struct seq_file *seq,
					      void *offset)
{
	struct acpi_processor *pr = seq->private;
	int i = 0;
	int result = 0;

	if (!pr)
		goto end;

	if (!(pr->throttling.state_count > 0)) {
		seq_puts(seq, "<not supported>\n");
		goto end;
	}

	result = acpi_processor_get_throttling(pr);

	if (result) {
		seq_puts(seq,
			 "Could not determine current throttling state.\n");
		goto end;
	}

	seq_printf(seq, "state count:             %d\n"
		   "active state:            T%d\n"
		   "state available: T%d to T%d\n",
		   pr->throttling.state_count, pr->throttling.state,
		   pr->throttling_platform_limit,
		   pr->throttling.state_count - 1);

	seq_puts(seq, "states:\n");
	if (pr->throttling.acpi_processor_get_throttling ==
			acpi_processor_get_throttling_fadt) {
		for (i = 0; i < pr->throttling.state_count; i++)
			seq_printf(seq, "   %cT%d:                  %02d%%\n",
				   (i == pr->throttling.state ? '*' : ' '), i,
				   (pr->throttling.states[i].performance ? pr->
				    throttling.states[i].performance / 10 : 0));
	} else {
		for (i = 0; i < pr->throttling.state_count; i++)
			seq_printf(seq, "   %cT%d:                  %02d%%\n",
				   (i == pr->throttling.state ? '*' : ' '), i,
				   (int)pr->throttling.states_tss[i].
				   freqpercentage);
	}

end:
	return 0;
}

static int acpi_processor_throttling_open_fs(struct inode *inode,
					     struct file *file)
{
	return single_open(file, acpi_processor_throttling_seq_show,
			   PDE(inode)->data);
}

static ssize_t acpi_processor_write_throttling(struct file *file,
					       const char __user *buffer,
					       size_t count, loff_t *data)
{
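	/*
	 * Accepts a T-state written as either "N" or "TN", optionally
	 * followed by a newline, e.g. (path abbreviated)
	 * "echo T2 > /proc/acpi/processor/.../throttling".
	 */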
	int result = 0;
	struct seq_file *m = file->private_data;
	struct acpi_processor *pr = m->private;
	char state_string[5] = "";
	char *charp = NULL;
	size_t state_val = 0;
	char tmpbuf[5] = "";

	if (!pr || (count > sizeof(state_string) - 1))
		return -EINVAL;

	if (copy_from_user(state_string, buffer, count))
		return -EFAULT;

	state_string[count] = '\0';
	if ((count > 0) && (state_string[count-1] == '\n'))
		state_string[count-1] = '\0';

	charp = state_string;
	if ((state_string[0] == 't') || (state_string[0] == 'T'))
		charp++;

	state_val = simple_strtoul(charp, NULL, 0);
	if (state_val >= pr->throttling.state_count)
		return -EINVAL;

	snprintf(tmpbuf, 5, "%zu", state_val);

	if (strcmp(tmpbuf, charp) != 0)
		return -EINVAL;

	result = acpi_processor_set_throttling(pr, state_val, false);
	if (result)
		return result;

	return count;
}

const struct file_operations acpi_processor_throttling_fops = {
	.owner = THIS_MODULE,
	.open = acpi_processor_throttling_open_fs,
	.read = seq_read,
	.write = acpi_processor_write_throttling,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif
1329