1 /*
2  * processor_throttling.c - Throttling submodule of the ACPI processor driver
3  *
4  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6  *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
7  *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8  *  			- Added processor hotplug support
9  *
10  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2 of the License, or (at
15  *  your option) any later version.
16  *
17  *  This program is distributed in the hope that it will be useful, but
18  *  WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  *  General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License along
23  *  with this program; if not, write to the Free Software Foundation, Inc.,
24  *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
25  *
26  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27  */
28 
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/slab.h>
32 #include <linux/init.h>
33 #include <linux/sched.h>
34 #include <linux/cpufreq.h>
35 
36 #include <asm/io.h>
37 #include <asm/uaccess.h>
38 
39 #include <acpi/acpi_bus.h>
40 #include <acpi/acpi_drivers.h>
41 #include <acpi/processor.h>
42 
43 #define PREFIX "ACPI: "
44 
45 #define ACPI_PROCESSOR_CLASS            "processor"
46 #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
47 ACPI_MODULE_NAME("processor_throttling");
48 
49 /* ignore_tpc:
50  *  0 -> acpi processor driver doesn't ignore _TPC values
51  *  1 -> acpi processor driver ignores _TPC values
52  */
53 static int ignore_tpc;
54 module_param(ignore_tpc, int, 0644);
55 MODULE_PARM_DESC(ignore_tpc, "Disable broken BIOS _TPC throttling support");
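
/*
 * Example (illustrative): on a system whose BIOS exports a broken _TPC
 * method, booting with "processor.ignore_tpc=1" (or writing 1 to the
 * module parameter at run time) makes the driver skip the _TPC limit.
 */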
56 
57 struct throttling_tstate {
58 	unsigned int cpu;		/* cpu nr */
59 	int target_state;		/* target T-state */
60 };
61 
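/*
 * PRECHANGE lets the notifier clamp the requested T-state against the
 * thermal, user and _TPC limits before the switch is made; POSTCHANGE
 * records the T-state that was actually programmed.
 */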
62 #define THROTTLING_PRECHANGE       (1)
63 #define THROTTLING_POSTCHANGE      (2)
64 
65 static int acpi_processor_get_throttling(struct acpi_processor *pr);
66 int acpi_processor_set_throttling(struct acpi_processor *pr,
67 						int state, bool force);
68 
69 static int acpi_processor_update_tsd_coord(void)
70 {
71 	int count, count_target;
72 	int retval = 0;
73 	unsigned int i, j;
74 	cpumask_var_t covered_cpus;
75 	struct acpi_processor *pr, *match_pr;
76 	struct acpi_tsd_package *pdomain, *match_pdomain;
77 	struct acpi_processor_throttling *pthrottling, *match_pthrottling;
78 
79 	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
80 		return -ENOMEM;
81 
82 	/*
83 	 * Now that we have _TSD data from all CPUs, let's set up T-state
84 	 * coordination between all CPUs.
85 	 */
86 	for_each_possible_cpu(i) {
87 		pr = per_cpu(processors, i);
88 		if (!pr)
89 			continue;
90 
91 		/* Basic validity check for domain info */
92 		pthrottling = &(pr->throttling);
93 
94 		/*
95 		 * If the _TSD package for any one CPU is invalid, the
96 		 * coordination among all CPUs is treated as invalid.
97 		 * This is crude, but it keeps the logic simple.
98 		 */
99 		if (!pthrottling->tsd_valid_flag) {
100 			retval = -EINVAL;
101 			break;
102 		}
103 	}
104 	if (retval)
105 		goto err_ret;
106 
107 	for_each_possible_cpu(i) {
108 		pr = per_cpu(processors, i);
109 		if (!pr)
110 			continue;
111 
112 		if (cpumask_test_cpu(i, covered_cpus))
113 			continue;
114 		pthrottling = &pr->throttling;
115 
116 		pdomain = &(pthrottling->domain_info);
117 		cpumask_set_cpu(i, pthrottling->shared_cpu_map);
118 		cpumask_set_cpu(i, covered_cpus);
119 		/*
120 		 * If the number of processors in the _TSD domain is 1, there
121 		 * is no coordination to parse for this CPU.
122 		 */
123 		if (pdomain->num_processors <= 1)
124 			continue;
125 
126 		/* Validate the Domain info */
127 		count_target = pdomain->num_processors;
128 		count = 1;
129 
130 		for_each_possible_cpu(j) {
131 			if (i == j)
132 				continue;
133 
134 			match_pr = per_cpu(processors, j);
135 			if (!match_pr)
136 				continue;
137 
138 			match_pthrottling = &(match_pr->throttling);
139 			match_pdomain = &(match_pthrottling->domain_info);
140 			if (match_pdomain->domain != pdomain->domain)
141 				continue;
142 
143 			/* Here i and j are in the same domain.
144 			 * If two _TSD packages have the same domain, they
145 			 * should have the same num_processors and
146 			 * coordination type. Otherwise the domain info is
147 			 * regarded as invalid.
148 			 */
149 			if (match_pdomain->num_processors != count_target) {
150 				retval = -EINVAL;
151 				goto err_ret;
152 			}
153 
154 			if (pdomain->coord_type != match_pdomain->coord_type) {
155 				retval = -EINVAL;
156 				goto err_ret;
157 			}
158 
159 			cpumask_set_cpu(j, covered_cpus);
160 			cpumask_set_cpu(j, pthrottling->shared_cpu_map);
161 			count++;
162 		}
163 		for_each_possible_cpu(j) {
164 			if (i == j)
165 				continue;
166 
167 			match_pr = per_cpu(processors, j);
168 			if (!match_pr)
169 				continue;
170 
171 			match_pthrottling = &(match_pr->throttling);
172 			match_pdomain = &(match_pthrottling->domain_info);
173 			if (match_pdomain->domain != pdomain->domain)
174 				continue;
175 
176 			/*
177 			 * CPUs that are in the same domain get the
178 			 * same shared_cpu_map.
179 			 */
180 			cpumask_copy(match_pthrottling->shared_cpu_map,
181 				     pthrottling->shared_cpu_map);
182 		}
183 	}
184 
185 err_ret:
186 	free_cpumask_var(covered_cpus);
187 
188 	for_each_possible_cpu(i) {
189 		pr = per_cpu(processors, i);
190 		if (!pr)
191 			continue;
192 
193 		/*
194 		 * Assume no coordination on any error parsing domain info.
195 		 * The coordination type will be forced to SW_ALL.
196 		 */
197 		if (retval) {
198 			pthrottling = &(pr->throttling);
199 			cpumask_clear(pthrottling->shared_cpu_map);
200 			cpumask_set_cpu(i, pthrottling->shared_cpu_map);
201 			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
202 		}
203 	}
204 
205 	return retval;
206 }
207 
208 /*
209  * Update the T-state coordination after the _TSD
210  * data for all CPUs has been obtained.
211  */
212 void acpi_processor_throttling_init(void)
213 {
214 	if (acpi_processor_update_tsd_coord())
215 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
216 			"Assume no T-state coordination\n"));
217 
218 	return;
219 }
220 
221 static int acpi_processor_throttling_notifier(unsigned long event, void *data)
222 {
223 	struct throttling_tstate *p_tstate = data;
224 	struct acpi_processor *pr;
225 	unsigned int cpu;
226 	int target_state;
227 	struct acpi_processor_limit *p_limit;
228 	struct acpi_processor_throttling *p_throttling;
229 
230 	cpu = p_tstate->cpu;
231 	pr = per_cpu(processors, cpu);
232 	if (!pr) {
233 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
234 		return 0;
235 	}
236 	if (!pr->flags.throttling) {
237 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
238 				"unsupported on CPU %d\n", cpu));
239 		return 0;
240 	}
241 	target_state = p_tstate->target_state;
242 	p_throttling = &(pr->throttling);
243 	switch (event) {
244 	case THROTTLING_PRECHANGE:
245 		/*
246 		 * The prechange event is used to choose a proper T-state
247 		 * that meets the thermal, user and _TPC limits.
248 		 */
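		/*
		 * E.g. (illustrative): a request for T1 with a thermal limit
		 * of T2, a user limit of T1 and a _TPC limit of T3 is raised
		 * to T3, the deepest (most throttled) of the four values.
		 */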
249 		p_limit = &pr->limit;
250 		if (p_limit->thermal.tx > target_state)
251 			target_state = p_limit->thermal.tx;
252 		if (p_limit->user.tx > target_state)
253 			target_state = p_limit->user.tx;
254 		if (pr->throttling_platform_limit > target_state)
255 			target_state = pr->throttling_platform_limit;
256 		if (target_state >= p_throttling->state_count) {
257 			printk(KERN_WARNING
258 				"Exceeded the limit of T-states\n");
259 			target_state = p_throttling->state_count - 1;
260 		}
261 		p_tstate->target_state = target_state;
262 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event: "
263 				"target T-state of CPU %d is T%d\n",
264 				cpu, target_state));
265 		break;
266 	case THROTTLING_POSTCHANGE:
267 		/*
268 		 * Postchange event is only used to update the
269 		 * T-state flag of acpi_processor_throttling.
270 		 */
271 		p_throttling->state = target_state;
272 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event: "
273 				"CPU %d is switched to T%d\n",
274 				cpu, target_state));
275 		break;
276 	default:
277 		printk(KERN_WARNING
278 			"Unsupported Throttling notifier event\n");
279 		break;
280 	}
281 
282 	return 0;
283 }
284 
285 /*
286  * _TPC - Throttling Present Capabilities
287  */
288 static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
289 {
290 	acpi_status status = 0;
291 	unsigned long long tpc = 0;
292 
293 	if (!pr)
294 		return -EINVAL;
295 
296 	if (ignore_tpc)
297 		goto end;
298 
299 	status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
300 	if (ACPI_FAILURE(status)) {
301 		if (status != AE_NOT_FOUND) {
302 			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
303 		}
304 		return -ENODEV;
305 	}
306 
307 end:
308 	pr->throttling_platform_limit = (int)tpc;
309 	return 0;
310 }
311 
312 int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
313 {
314 	int result = 0;
315 	int throttling_limit;
316 	int current_state;
317 	struct acpi_processor_limit *limit;
318 	int target_state;
319 
320 	if (ignore_tpc)
321 		return 0;
322 
323 	result = acpi_processor_get_platform_limit(pr);
324 	if (result) {
325 		/* Throttling Limit is unsupported */
326 		return result;
327 	}
328 
329 	throttling_limit = pr->throttling_platform_limit;
330 	if (throttling_limit >= pr->throttling.state_count) {
331 		/* Incorrect throttling limit */
332 		return -EINVAL;
333 	}
334 
335 	current_state = pr->throttling.state;
336 	if (current_state > throttling_limit) {
337 		/*
338 		 * The current state already meets the requirement of the
339 		 * _TPC limit, but it is reasonable for OSPM to change to a
340 		 * lower T-state for better performance. Of course the
341 		 * thermal and user limits still have to be
342 		 * considered.
343 		 */
344 		limit = &pr->limit;
345 		target_state = throttling_limit;
346 		if (limit->thermal.tx > target_state)
347 			target_state = limit->thermal.tx;
348 		if (limit->user.tx > target_state)
349 			target_state = limit->user.tx;
350 	} else if (current_state == throttling_limit) {
351 		/*
352 		 * Unnecessary to change the throttling state
353 		 */
354 		return 0;
355 	} else {
356 		/*
357 		 * If the current state is lower than the _TPC limit, it is
358 		 * forced to switch to the throttling state defined by
359 		 * throttling_platform_limit.
360 		 * Because the previous state already met the thermal and
361 		 * user limits, there is no need to check them again.
362 		 */
363 		target_state = throttling_limit;
364 	}
365 	return acpi_processor_set_throttling(pr, target_state, false);
366 }
367 
368 /*
369  * This function is used to reevaluate whether the T-states are valid
370  * after a CPU is onlined/offlined.
371  * Note that it does not reevaluate the following T-state
372  * properties:
373  *	1. Control method
374  *	2. Number of supported T-states
375  *	3. _TSD domain
376  */
377 void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
378 					unsigned long action)
379 {
380 	int result = 0;
381 
382 	if (action == CPU_DEAD) {
383 		/* When a CPU goes offline, its T-state throttling
384 		 * is invalidated.
385 		 */
386 		pr->flags.throttling = 0;
387 		return;
388 	}
389 	/* The following rechecks whether the T-states are valid for
390 	 * the newly onlined CPU.
391 	 */
392 	if (!pr->throttling.state_count) {
393 		/* If the number of T-states is invalid, throttling
394 		 * is invalidated.
395 		 */
396 		pr->flags.throttling = 0;
397 		return;
398 	}
399 	pr->flags.throttling = 1;
400 
401 	/* Disable throttling (if enabled).  We'll let subsequent
402 	 * policy (e.g. thermal) decide to lower performance if it
403 	 * so chooses, but for now we'll crank up the speed.
404 	 */
405 
406 	result = acpi_processor_get_throttling(pr);
407 	if (result)
408 		goto end;
409 
410 	if (pr->throttling.state) {
411 		result = acpi_processor_set_throttling(pr, 0, false);
412 		if (result)
413 			goto end;
414 	}
415 
416 end:
417 	if (result)
418 		pr->flags.throttling = 0;
419 }
420 /*
421  * _PTC - Processor Throttling Control (and status) register location
422  */
423 static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
424 {
425 	int result = 0;
426 	acpi_status status = 0;
427 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
428 	union acpi_object *ptc = NULL;
429 	union acpi_object obj = { 0 };
430 	struct acpi_processor_throttling *throttling;
431 
432 	status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
433 	if (ACPI_FAILURE(status)) {
434 		if (status != AE_NOT_FOUND) {
435 			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
436 		}
437 		return -ENODEV;
438 	}
439 
440 	ptc = (union acpi_object *)buffer.pointer;
441 	if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
442 	    || (ptc->package.count != 2)) {
443 		printk(KERN_ERR PREFIX "Invalid _PTC data\n");
444 		result = -EFAULT;
445 		goto end;
446 	}
447 
448 	/*
449 	 * control_register
450 	 */
451 
452 	obj = ptc->package.elements[0];
453 
454 	if ((obj.type != ACPI_TYPE_BUFFER)
455 	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
456 	    || (obj.buffer.pointer == NULL)) {
457 		printk(KERN_ERR PREFIX
458 		       "Invalid _PTC data (control_register)\n");
459 		result = -EFAULT;
460 		goto end;
461 	}
462 	memcpy(&pr->throttling.control_register, obj.buffer.pointer,
463 	       sizeof(struct acpi_ptc_register));
464 
465 	/*
466 	 * status_register
467 	 */
468 
469 	obj = ptc->package.elements[1];
470 
471 	if ((obj.type != ACPI_TYPE_BUFFER)
472 	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
473 	    || (obj.buffer.pointer == NULL)) {
474 		printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
475 		result = -EFAULT;
476 		goto end;
477 	}
478 
479 	memcpy(&pr->throttling.status_register, obj.buffer.pointer,
480 	       sizeof(struct acpi_ptc_register));
481 
482 	throttling = &pr->throttling;
483 
484 	if ((throttling->control_register.bit_width +
485 		throttling->control_register.bit_offset) > 32) {
486 		printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
487 		result = -EFAULT;
488 		goto end;
489 	}
490 
491 	if ((throttling->status_register.bit_width +
492 		throttling->status_register.bit_offset) > 32) {
493 		printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
494 		result = -EFAULT;
495 		goto end;
496 	}
497 
498       end:
499 	kfree(buffer.pointer);
500 
501 	return result;
502 }
503 
504 /*
505  * _TSS - Throttling Supported States
506  */
507 static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
508 {
509 	int result = 0;
510 	acpi_status status = AE_OK;
511 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
512 	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
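	/*
	 * Each _TSS entry is a package of five integers: the frequency as a
	 * percentage of maximum, the power in milliwatts, the transition
	 * latency in microseconds, and the control and status values used
	 * with the _PTC registers (hence the "NNNNN" format above).
	 */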
513 	struct acpi_buffer state = { 0, NULL };
514 	union acpi_object *tss = NULL;
515 	int i;
516 
517 	status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
518 	if (ACPI_FAILURE(status)) {
519 		if (status != AE_NOT_FOUND) {
520 			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
521 		}
522 		return -ENODEV;
523 	}
524 
525 	tss = buffer.pointer;
526 	if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
527 		printk(KERN_ERR PREFIX "Invalid _TSS data\n");
528 		result = -EFAULT;
529 		goto end;
530 	}
531 
532 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
533 			  tss->package.count));
534 
535 	pr->throttling.state_count = tss->package.count;
536 	pr->throttling.states_tss =
537 	    kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
538 		    GFP_KERNEL);
539 	if (!pr->throttling.states_tss) {
540 		result = -ENOMEM;
541 		goto end;
542 	}
543 
544 	for (i = 0; i < pr->throttling.state_count; i++) {
545 
546 		struct acpi_processor_tx_tss *tx =
547 		    (struct acpi_processor_tx_tss *)&(pr->throttling.
548 						      states_tss[i]);
549 
550 		state.length = sizeof(struct acpi_processor_tx_tss);
551 		state.pointer = tx;
552 
553 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));
554 
555 		status = acpi_extract_package(&(tss->package.elements[i]),
556 					      &format, &state);
557 		if (ACPI_FAILURE(status)) {
558 			ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
559 			result = -EFAULT;
560 			kfree(pr->throttling.states_tss);
561 			goto end;
562 		}
563 
564 		if (!tx->freqpercentage) {
565 			printk(KERN_ERR PREFIX
566 			       "Invalid _TSS data: freq is zero\n");
567 			result = -EFAULT;
568 			kfree(pr->throttling.states_tss);
569 			goto end;
570 		}
571 	}
572 
573       end:
574 	kfree(buffer.pointer);
575 
576 	return result;
577 }
578 
579 /*
580  * _TSD - T-State Dependencies
581  */
582 static int acpi_processor_get_tsd(struct acpi_processor *pr)
583 {
584 	int result = 0;
585 	acpi_status status = AE_OK;
586 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
587 	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
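	/*
	 * A _TSD entry is a package of five integers: NumEntries, Revision,
	 * Domain, CoordType and NumProcessors, matching the layout of
	 * struct acpi_tsd_package (hence the "NNNNN" format above).
	 */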
588 	struct acpi_buffer state = { 0, NULL };
589 	union acpi_object *tsd = NULL;
590 	struct acpi_tsd_package *pdomain;
591 	struct acpi_processor_throttling *pthrottling;
592 
593 	pthrottling = &pr->throttling;
594 	pthrottling->tsd_valid_flag = 0;
595 
596 	status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
597 	if (ACPI_FAILURE(status)) {
598 		if (status != AE_NOT_FOUND) {
599 			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
600 		}
601 		return -ENODEV;
602 	}
603 
604 	tsd = buffer.pointer;
605 	if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
606 		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
607 		result = -EFAULT;
608 		goto end;
609 	}
610 
611 	if (tsd->package.count != 1) {
612 		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
613 		result = -EFAULT;
614 		goto end;
615 	}
616 
617 	pdomain = &(pr->throttling.domain_info);
618 
619 	state.length = sizeof(struct acpi_tsd_package);
620 	state.pointer = pdomain;
621 
622 	status = acpi_extract_package(&(tsd->package.elements[0]),
623 				      &format, &state);
624 	if (ACPI_FAILURE(status)) {
625 		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
626 		result = -EFAULT;
627 		goto end;
628 	}
629 
630 	if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
631 		printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n");
632 		result = -EFAULT;
633 		goto end;
634 	}
635 
636 	if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
637 		printk(KERN_ERR PREFIX "Unknown _TSD:revision\n");
638 		result = -EFAULT;
639 		goto end;
640 	}
641 
642 	pthrottling = &pr->throttling;
643 	pthrottling->tsd_valid_flag = 1;
644 	pthrottling->shared_type = pdomain->coord_type;
645 	cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
646 	/*
647 	 * If the coordination type is not defined in the ACPI spec,
648 	 * the tsd_valid_flag will be cleared and the coordination type
649 	 * will be forced to DOMAIN_COORD_TYPE_SW_ALL.
650 	 */
651 	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
652 		pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
653 		pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
654 		pthrottling->tsd_valid_flag = 0;
655 		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
656 	}
657 
658       end:
659 	kfree(buffer.pointer);
660 	return result;
661 }
662 
663 /* --------------------------------------------------------------------------
664                               Throttling Control
665    -------------------------------------------------------------------------- */
666 static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
667 {
668 	int state = 0;
669 	u32 value = 0;
670 	u32 duty_mask = 0;
671 	u32 duty_value = 0;
672 
673 	if (!pr)
674 		return -EINVAL;
675 
676 	if (!pr->flags.throttling)
677 		return -ENODEV;
678 
679 	pr->throttling.state = 0;
680 
681 	duty_mask = pr->throttling.state_count - 1;
682 
683 	duty_mask <<= pr->throttling.duty_offset;
684 
685 	local_irq_disable();
686 
687 	value = inl(pr->throttling.address);
688 
689 	/*
690 	 * Compute the current throttling state when throttling is enabled
691 	 * (bit 4 is on).
692 	 */
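	/*
	 * E.g. (illustrative): with duty_offset = 1 and eight T-states,
	 * duty_mask is 0x0E; a raw duty field of 6 decodes to state
	 * T(8 - 6) = T2, i.e. 75% of nominal performance.
	 */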
693 	if (value & 0x10) {
694 		duty_value = value & duty_mask;
695 		duty_value >>= pr->throttling.duty_offset;
696 
697 		if (duty_value)
698 			state = pr->throttling.state_count - duty_value;
699 	}
700 
701 	pr->throttling.state = state;
702 
703 	local_irq_enable();
704 
705 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
706 			  "Throttling state is T%d (%d%% throttling applied)\n",
707 			  state, pr->throttling.states[state].performance));
708 
709 	return 0;
710 }
711 
712 #ifdef CONFIG_X86
713 static int acpi_throttling_rdmsr(u64 *value)
714 {
715 	u64 msr_high, msr_low;
716 	u64 msr = 0;
717 	int ret = -1;
718 
719 	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
720 		!this_cpu_has(X86_FEATURE_ACPI)) {
721 		printk(KERN_ERR PREFIX
722 			"HARDWARE addr space, NOT supported yet\n");
723 	} else {
724 		msr_low = 0;
725 		msr_high = 0;
726 		rdmsr_safe(MSR_IA32_THERM_CONTROL,
727 			(u32 *)&msr_low, (u32 *)&msr_high);
728 		msr = (msr_high << 32) | msr_low;
729 		*value = (u64) msr;
730 		ret = 0;
731 	}
732 	return ret;
733 }
734 
735 static int acpi_throttling_wrmsr(u64 value)
736 {
737 	int ret = -1;
738 	u64 msr;
739 
740 	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
741 		!this_cpu_has(X86_FEATURE_ACPI)) {
742 		printk(KERN_ERR PREFIX
743 			"HARDWARE addr space, NOT supported yet\n");
744 	} else {
745 		msr = value;
746 		wrmsr_safe(MSR_IA32_THERM_CONTROL,
747 			msr & 0xffffffff, msr >> 32);
748 		ret = 0;
749 	}
750 	return ret;
751 }
752 #else
753 static int acpi_throttling_rdmsr(u64 *value)
754 {
755 	printk(KERN_ERR PREFIX
756 		"HARDWARE addr space, NOT supported yet\n");
757 	return -1;
758 }
759 
760 static int acpi_throttling_wrmsr(u64 value)
761 {
762 	printk(KERN_ERR PREFIX
763 		"HARDWARE addr space, NOT supported yet\n");
764 	return -1;
765 }
766 #endif
767 
768 static int acpi_read_throttling_status(struct acpi_processor *pr,
769 					u64 *value)
770 {
771 	u32 bit_width, bit_offset;
772 	u32 ptc_value;
773 	u64 ptc_mask;
774 	struct acpi_processor_throttling *throttling;
775 	int ret = -1;
776 
777 	throttling = &pr->throttling;
778 	switch (throttling->status_register.space_id) {
779 	case ACPI_ADR_SPACE_SYSTEM_IO:
780 		bit_width = throttling->status_register.bit_width;
781 		bit_offset = throttling->status_register.bit_offset;
782 
783 		acpi_os_read_port((acpi_io_address) throttling->status_register.
784 				  address, &ptc_value,
785 				  (u32) (bit_width + bit_offset));
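		/*
		 * E.g. (illustrative): with bit_offset = 4 and bit_width = 4,
		 * ptc_mask below is 0x0F and the T-state value sits in bits
		 * 7:4 of the port value that was just read.
		 */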
786 		ptc_mask = (1 << bit_width) - 1;
787 		*value = (u64) ((ptc_value >> bit_offset) & ptc_mask);
788 		ret = 0;
789 		break;
790 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
791 		ret = acpi_throttling_rdmsr(value);
792 		break;
793 	default:
794 		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
795 		       (u32) (throttling->status_register.space_id));
796 	}
797 	return ret;
798 }
799 
800 static int acpi_write_throttling_state(struct acpi_processor *pr,
801 				u64 value)
802 {
803 	u32 bit_width, bit_offset;
804 	u64 ptc_value;
805 	u64 ptc_mask;
806 	struct acpi_processor_throttling *throttling;
807 	int ret = -1;
808 
809 	throttling = &pr->throttling;
810 	switch (throttling->control_register.space_id) {
811 	case ACPI_ADR_SPACE_SYSTEM_IO:
812 		bit_width = throttling->control_register.bit_width;
813 		bit_offset = throttling->control_register.bit_offset;
814 		ptc_mask = (1 << bit_width) - 1;
815 		ptc_value = value & ptc_mask;
816 
817 		acpi_os_write_port((acpi_io_address) throttling->
818 					control_register.address,
819 					(u32) (ptc_value << bit_offset),
820 					(u32) (bit_width + bit_offset));
821 		ret = 0;
822 		break;
823 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
824 		ret = acpi_throttling_wrmsr(value);
825 		break;
826 	default:
827 		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
828 		       (u32) (throttling->control_register.space_id));
829 	}
830 	return ret;
831 }
832 
833 static int acpi_get_throttling_state(struct acpi_processor *pr,
834 				u64 value)
835 {
836 	int i;
837 
838 	for (i = 0; i < pr->throttling.state_count; i++) {
839 		struct acpi_processor_tx_tss *tx =
840 		    (struct acpi_processor_tx_tss *)&(pr->throttling.
841 						      states_tss[i]);
842 		if (tx->control == value)
843 			return i;
844 	}
845 	return -1;
846 }
847 
848 static int acpi_get_throttling_value(struct acpi_processor *pr,
849 			int state, u64 *value)
850 {
851 	int ret = -1;
852 
853 	if (state >= 0 && state < pr->throttling.state_count) {
854 		struct acpi_processor_tx_tss *tx =
855 		    (struct acpi_processor_tx_tss *)&(pr->throttling.
856 						      states_tss[state]);
857 		*value = tx->control;
858 		ret = 0;
859 	}
860 	return ret;
861 }
862 
863 static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
864 {
865 	int state = 0;
866 	int ret;
867 	u64 value;
868 
869 	if (!pr)
870 		return -EINVAL;
871 
872 	if (!pr->flags.throttling)
873 		return -ENODEV;
874 
875 	pr->throttling.state = 0;
876 
877 	value = 0;
878 	ret = acpi_read_throttling_status(pr, &value);
879 	if (ret >= 0) {
880 		state = acpi_get_throttling_state(pr, value);
881 		if (state == -1) {
882 			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
883 				"Invalid throttling state, reset\n"));
884 			state = 0;
885 			ret = acpi_processor_set_throttling(pr, state, true);
886 			if (ret)
887 				return ret;
888 		}
889 		pr->throttling.state = state;
890 	}
891 
892 	return 0;
893 }
894 
895 static int acpi_processor_get_throttling(struct acpi_processor *pr)
896 {
897 	cpumask_var_t saved_mask;
898 	int ret;
899 
900 	if (!pr)
901 		return -EINVAL;
902 
903 	if (!pr->flags.throttling)
904 		return -ENODEV;
905 
906 	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
907 		return -ENOMEM;
908 
909 	/*
910 	 * Migrate the current task to the CPU pointed to by pr->id.
911 	 */
912 	cpumask_copy(saved_mask, &current->cpus_allowed);
913 	/* FIXME: use work_on_cpu() */
914 	if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
915 		/* Can't migrate to the target pr->id CPU. Exit */
916 		free_cpumask_var(saved_mask);
917 		return -ENODEV;
918 	}
919 	ret = pr->throttling.acpi_processor_get_throttling(pr);
920 	/* restore the previous state */
921 	set_cpus_allowed_ptr(current, saved_mask);
922 	free_cpumask_var(saved_mask);
923 
924 	return ret;
925 }
926 
927 static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
928 {
929 	int i, step;
930 
931 	if (!pr->throttling.address) {
932 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
933 		return -EINVAL;
934 	} else if (!pr->throttling.duty_width) {
935 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
936 		return -EINVAL;
937 	}
938 	/* TBD: Support duty_cycle values that span bit 4. */
939 	else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
940 		printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
941 		return -EINVAL;
942 	}
943 
944 	pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;
945 
946 	/*
947 	 * Compute state values. Note that throttling displays a linear power
948 	 * performance relationship (at 50% performance the CPU will consume
949 	 * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
950 	 */
951 
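	/*
	 * E.g. (illustrative): duty_width = 3 gives 8 T-states and a step
	 * of 125, so T0 = 100.0%, T1 = 87.5%, ..., T7 = 12.5% of both
	 * performance and power.
	 */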
952 	step = (1000 / pr->throttling.state_count);
953 
954 	for (i = 0; i < pr->throttling.state_count; i++) {
955 		pr->throttling.states[i].performance = 1000 - step * i;
956 		pr->throttling.states[i].power = 1000 - step * i;
957 	}
958 	return 0;
959 }
960 
961 static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
962 					      int state, bool force)
963 {
964 	u32 value = 0;
965 	u32 duty_mask = 0;
966 	u32 duty_value = 0;
967 
968 	if (!pr)
969 		return -EINVAL;
970 
971 	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
972 		return -EINVAL;
973 
974 	if (!pr->flags.throttling)
975 		return -ENODEV;
976 
977 	if (!force && (state == pr->throttling.state))
978 		return 0;
979 
980 	if (state < pr->throttling_platform_limit)
981 		return -EPERM;
982 	/*
983 	 * Calculate the duty_value and duty_mask.
984 	 */
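	/*
	 * E.g. (illustrative): with 8 T-states and duty_offset = 1, T3 gives
	 * duty_value = (8 - 3) << 1 = 0x0A and duty_mask = ~(7 << 1), which
	 * clears the old duty field before the new value is OR-ed in below.
	 */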
985 	if (state) {
986 		duty_value = pr->throttling.state_count - state;
987 
988 		duty_value <<= pr->throttling.duty_offset;
989 
990 		/* Used to clear all duty_value bits */
991 		duty_mask = pr->throttling.state_count - 1;
992 
993 		duty_mask <<= acpi_gbl_FADT.duty_offset;
994 		duty_mask = ~duty_mask;
995 	}
996 
997 	local_irq_disable();
998 
999 	/*
1000 	 * Disable throttling by writing a 0 to bit 4.  Note that we must
1001 	 * turn it off before we can change the duty_value.
1002 	 */
1003 	value = inl(pr->throttling.address);
1004 	if (value & 0x10) {
1005 		value &= 0xFFFFFFEF;
1006 		outl(value, pr->throttling.address);
1007 	}
1008 
1009 	/*
1010 	 * Write the new duty_value and then enable throttling.  Note
1011 	 * that a state value of 0 leaves throttling disabled.
1012 	 */
1013 	if (state) {
1014 		value &= duty_mask;
1015 		value |= duty_value;
1016 		outl(value, pr->throttling.address);
1017 
1018 		value |= 0x00000010;
1019 		outl(value, pr->throttling.address);
1020 	}
1021 
1022 	pr->throttling.state = state;
1023 
1024 	local_irq_enable();
1025 
1026 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1027 			  "Throttling state set to T%d (%d%%)\n", state,
1028 			  (pr->throttling.states[state].performance ? pr->
1029 			   throttling.states[state].performance / 10 : 0)));
1030 
1031 	return 0;
1032 }
1033 
1034 static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
1035 					     int state, bool force)
1036 {
1037 	int ret;
1038 	u64 value;
1039 
1040 	if (!pr)
1041 		return -EINVAL;
1042 
1043 	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
1044 		return -EINVAL;
1045 
1046 	if (!pr->flags.throttling)
1047 		return -ENODEV;
1048 
1049 	if (!force && (state == pr->throttling.state))
1050 		return 0;
1051 
1052 	if (state < pr->throttling_platform_limit)
1053 		return -EPERM;
1054 
1055 	value = 0;
1056 	ret = acpi_get_throttling_value(pr, state, &value);
1057 	if (ret >= 0) {
1058 		acpi_write_throttling_state(pr, value);
1059 		pr->throttling.state = state;
1060 	}
1061 
1062 	return 0;
1063 }
1064 
1065 int acpi_processor_set_throttling(struct acpi_processor *pr,
1066 						int state, bool force)
1067 {
1068 	cpumask_var_t saved_mask;
1069 	int ret = 0;
1070 	unsigned int i;
1071 	struct acpi_processor *match_pr;
1072 	struct acpi_processor_throttling *p_throttling;
1073 	struct throttling_tstate t_state;
1074 	cpumask_var_t online_throttling_cpus;
1075 
1076 	if (!pr)
1077 		return -EINVAL;
1078 
1079 	if (!pr->flags.throttling)
1080 		return -ENODEV;
1081 
1082 	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
1083 		return -EINVAL;
1084 
1085 	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
1086 		return -ENOMEM;
1087 
1088 	if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
1089 		free_cpumask_var(saved_mask);
1090 		return -ENOMEM;
1091 	}
1092 
1093 	if (cpu_is_offline(pr->id)) {
1094 		/* The CPU pointed to by pr->id is offline; no need to change
1095 		 * the throttling state. Free the allocated cpumasks on exit.
1096 		 */
1097 		ret = -ENODEV;
1098 		goto exit;
1099 	}
1100 
1101 	cpumask_copy(saved_mask, &current->cpus_allowed);
1102 	t_state.target_state = state;
1103 	p_throttling = &(pr->throttling);
1104 	cpumask_and(online_throttling_cpus, cpu_online_mask,
1105 		    p_throttling->shared_cpu_map);
1106 	/*
1107 	 * The throttling notifier will be called for every
1108 	 * affected CPU in order to pick a proper T-state.
1109 	 * The notifier event is THROTTLING_PRECHANGE.
1110 	 */
1111 	for_each_cpu(i, online_throttling_cpus) {
1112 		t_state.cpu = i;
1113 		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
1114 							&t_state);
1115 	}
1116 	/*
1117 	 * The per-processor set_throttling callback is invoked to switch
1118 	 * the T-state. If the coordination type is SW_ALL or HW_ALL,
1119 	 * it must be called for every affected CPU. Otherwise
1120 	 * it is called only for the CPU pointed to by pr.
1121 	 */
1122 	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
1123 		/* FIXME: use work_on_cpu() */
1124 		if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
1125 			/* Can't migrate to the pr->id CPU. Exit */
1126 			ret = -ENODEV;
1127 			goto exit;
1128 		}
1129 		ret = p_throttling->acpi_processor_set_throttling(pr,
1130 						t_state.target_state, force);
1131 	} else {
1132 		/*
1133 		 * When the T-state coordination is SW_ALL or HW_ALL,
1134 		 * the T-state must be set for every affected
1135 		 * CPU.
1136 		 */
1137 		for_each_cpu(i, online_throttling_cpus) {
1138 			match_pr = per_cpu(processors, i);
1139 			/*
1140 			 * If the pointer is invalid, we will report the
1141 			 * error message and continue.
1142 			 */
1143 			if (!match_pr) {
1144 				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1145 					"Invalid Pointer for CPU %d\n", i));
1146 				continue;
1147 			}
1148 			/*
1149 			 * If the throttling control is unsupported on CPU i,
1150 			 * we will report the error message and continue.
1151 			 */
1152 			if (!match_pr->flags.throttling) {
1153 				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1154 					"Throttling Control is unsupported "
1155 					"on CPU %d\n", i));
1156 				continue;
1157 			}
1158 			t_state.cpu = i;
1159 			/* FIXME: use work_on_cpu() */
1160 			if (set_cpus_allowed_ptr(current, cpumask_of(i)))
1161 				continue;
1162 			ret = match_pr->throttling.
1163 				acpi_processor_set_throttling(
1164 				match_pr, t_state.target_state, force);
1165 		}
1166 	}
1167 	/*
1168 	 * After the set_throttling is called, the
1169 	 * throttling notifier is called for every
1170 	 * affected CPU to update the T-states.
1171 	 * The notifier event is THROTTLING_POSTCHANGE.
1172 	 */
1173 	for_each_cpu(i, online_throttling_cpus) {
1174 		t_state.cpu = i;
1175 		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
1176 							&t_state);
1177 	}
1178 	/* restore the previous state */
1179 	/* FIXME: use work_on_cpu() */
1180 	set_cpus_allowed_ptr(current, saved_mask);
1181 exit:
1182 	free_cpumask_var(online_throttling_cpus);
1183 	free_cpumask_var(saved_mask);
1184 	return ret;
1185 }
1186 
1187 int acpi_processor_get_throttling_info(struct acpi_processor *pr)
1188 {
1189 	int result = 0;
1190 	struct acpi_processor_throttling *pthrottling;
1191 
1192 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1193 			  "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
1194 			  pr->throttling.address,
1195 			  pr->throttling.duty_offset,
1196 			  pr->throttling.duty_width));
1197 
1198 	/*
1199 	 * Evaluate _PTC, _TSS and _TPC.
1200 	 * They must all be present or none of them can be used.
1201 	 */
1202 	if (acpi_processor_get_throttling_control(pr) ||
1203 		acpi_processor_get_throttling_states(pr) ||
1204 		acpi_processor_get_platform_limit(pr)) {
1206 		pr->throttling.acpi_processor_get_throttling =
1207 		    &acpi_processor_get_throttling_fadt;
1208 		pr->throttling.acpi_processor_set_throttling =
1209 		    &acpi_processor_set_throttling_fadt;
1210 		if (acpi_processor_get_fadt_info(pr))
1211 			return 0;
1212 	} else {
1213 		pr->throttling.acpi_processor_get_throttling =
1214 		    &acpi_processor_get_throttling_ptc;
1215 		pr->throttling.acpi_processor_set_throttling =
1216 		    &acpi_processor_set_throttling_ptc;
1217 	}
1218 
1219 	/*
1220 	 * If the _TSD package for a CPU can't be parsed successfully, it means
1221 	 * that this CPU will have no coordination with other CPUs.
1222 	 */
1223 	if (acpi_processor_get_tsd(pr)) {
1224 		pthrottling = &pr->throttling;
1225 		pthrottling->tsd_valid_flag = 0;
1226 		cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
1227 		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
1228 	}
1229 
1230 	/*
1231 	 * PIIX4 Errata: We don't support throttling on the original PIIX4.
1232 	 * This shouldn't be an issue as few (if any) mobile systems ever
1233 	 * used this part.
1234 	 */
1235 	if (errata.piix4.throttle) {
1236 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1237 				  "Throttling not supported on PIIX4 A- or B-step\n"));
1238 		return 0;
1239 	}
1240 
1241 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
1242 			  pr->throttling.state_count));
1243 
1244 	pr->flags.throttling = 1;
1245 
1246 	/*
1247 	 * Disable throttling (if enabled).  We'll let subsequent policy (e.g.
1248 	 * thermal) decide to lower performance if it so chooses, but for now
1249 	 * we'll crank up the speed.
1250 	 */
1251 
1252 	result = acpi_processor_get_throttling(pr);
1253 	if (result)
1254 		goto end;
1255 
1256 	if (pr->throttling.state) {
1257 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1258 				  "Disabling throttling (was T%d)\n",
1259 				  pr->throttling.state));
1260 		result = acpi_processor_set_throttling(pr, 0, false);
1261 		if (result)
1262 			goto end;
1263 	}
1264 
1265       end:
1266 	if (result)
1267 		pr->flags.throttling = 0;
1268 
1269 	return result;
1270 }
1271 
1272