// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_throttling.c - Throttling submodule of the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/io.h>
#include <linux/uaccess.h>

/* ignore_tpc:
 *  0 -> acpi processor driver doesn't ignore _TPC values
 *  1 -> acpi processor driver ignores _TPC values
 */
static int ignore_tpc;
module_param(ignore_tpc, int, 0644);
MODULE_PARM_DESC(ignore_tpc, "Disable broken BIOS _TPC throttling support");
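
/*
 * Example (a sketch, assuming this file is built into the "processor"
 * module, so the parameter appears under that name): broken _TPC support
 * can be disabled at boot with processor.ignore_tpc=1 on the kernel
 * command line, or at run time via
 * /sys/module/processor/parameters/ignore_tpc (writable, perms 0644).
 */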

struct throttling_tstate {
	unsigned int cpu;		/* cpu nr */
	int target_state;		/* target T-state */
};

struct acpi_processor_throttling_arg {
	struct acpi_processor *pr;
	int target_state;
	bool force;
};

#define THROTTLING_PRECHANGE       (1)
#define THROTTLING_POSTCHANGE      (2)

static int acpi_processor_get_throttling(struct acpi_processor *pr);
static int __acpi_processor_set_throttling(struct acpi_processor *pr,
					   int state, bool force, bool direct);

static int acpi_processor_update_tsd_coord(void)
{
	int count, count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr, *match_pr;
	struct acpi_tsd_package *pdomain, *match_pdomain;
	struct acpi_processor_throttling *pthrottling, *match_pthrottling;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _TSD data from all CPUs, let's set up T-state
	 * coordination among them.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/* Basic validity check for domain info */
		pthrottling = &(pr->throttling);

		/*
		 * If the _TSD package for any CPU is invalid, the
		 * coordination among all CPUs is treated as invalid.
		 * Crude, but safe.
		 */
		if (!pthrottling->tsd_valid_flag) {
			retval = -EINVAL;
			break;
		}
	}
	if (retval)
		goto err_ret;

	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;
		pthrottling = &pr->throttling;

		pdomain = &(pthrottling->domain_info);
		cpumask_set_cpu(i, pthrottling->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		/*
		 * If the number of processors in the TSD domain is 1, it is
		 * unnecessary to parse the coordination for this CPU.
		 */
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		count = 1;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pthrottling = &(match_pr->throttling);
			match_pdomain = &(match_pthrottling->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain.
			 * If two TSD packages have the same domain, they
			 * should have the same num_processors and
			 * coordination type.  Otherwise they are regarded
			 * as invalid.
			 */
			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pthrottling->shared_cpu_map);
			count++;
		}
		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pthrottling = &(match_pr->throttling);
			match_pdomain = &(match_pthrottling->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/*
			 * CPUs in the same domain share the same
			 * shared_cpu_map.
			 */
			cpumask_copy(match_pthrottling->shared_cpu_map,
				     pthrottling->shared_cpu_map);
		}
	}

err_ret:
	free_cpumask_var(covered_cpus);

	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/*
		 * Assume no coordination on any error parsing domain info.
		 * The coordination type will be forced to SW_ALL.
		 */
		if (retval) {
			pthrottling = &(pr->throttling);
			cpumask_clear(pthrottling->shared_cpu_map);
			cpumask_set_cpu(i, pthrottling->shared_cpu_map);
			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
		}
	}

	return retval;
}

/*
 * Update the T-state coordination after the _TSD
 * data for all cpus is obtained.
 */
void acpi_processor_throttling_init(void)
{
	if (acpi_processor_update_tsd_coord())
		pr_debug("Assume no T-state coordination\n");
}

static int acpi_processor_throttling_notifier(unsigned long event, void *data)
{
	struct throttling_tstate *p_tstate = data;
	struct acpi_processor *pr;
	unsigned int cpu;
	int target_state;
	struct acpi_processor_limit *p_limit;
	struct acpi_processor_throttling *p_throttling;

	cpu = p_tstate->cpu;
	pr = per_cpu(processors, cpu);
	if (!pr) {
		pr_debug("Invalid pr pointer\n");
		return 0;
	}
	if (!pr->flags.throttling) {
		acpi_handle_debug(pr->handle,
				  "Throttling control unsupported on CPU %d\n",
				  cpu);
		return 0;
	}
	target_state = p_tstate->target_state;
	p_throttling = &(pr->throttling);
	switch (event) {
	case THROTTLING_PRECHANGE:
		/*
		 * The prechange event is used to choose a proper T-state,
		 * one that satisfies the thermal, user and _TPC limits.
		 */
		p_limit = &pr->limit;
		if (p_limit->thermal.tx > target_state)
			target_state = p_limit->thermal.tx;
		if (p_limit->user.tx > target_state)
			target_state = p_limit->user.tx;
		if (pr->throttling_platform_limit > target_state)
			target_state = pr->throttling_platform_limit;
		if (target_state >= p_throttling->state_count) {
			pr_warn("Exceeded the T-state limit\n");
			target_state = p_throttling->state_count - 1;
		}
		p_tstate->target_state = target_state;
		acpi_handle_debug(pr->handle,
				  "PreChange Event: target T-state of CPU %d is T%d\n",
				  cpu, target_state);
		break;
	case THROTTLING_POSTCHANGE:
		/*
		 * Postchange event is only used to update the
		 * T-state flag of acpi_processor_throttling.
		 */
		p_throttling->state = target_state;
		acpi_handle_debug(pr->handle,
				  "PostChange Event: CPU %d is switched to T%d\n",
				  cpu, target_state);
		break;
	default:
		pr_warn("Unsupported Throttling notifier event\n");
		break;
	}

	return 0;
}

/*
 * _TPC - Throttling Present Capabilities
 */
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	acpi_status status = 0;
	unsigned long long tpc = 0;

	if (!pr)
		return -EINVAL;

	if (ignore_tpc)
		goto end;

	status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND)
			acpi_evaluation_failure_warn(pr->handle, "_TPC", status);

		return -ENODEV;
	}

end:
	pr->throttling_platform_limit = (int)tpc;
	return 0;
}
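
/*
 * Note: throttling_platform_limit is used as a floor on the T-state index
 * below: a _TPC value of N means T0..T(N-1) are unavailable and the OS
 * must select TN or a deeper throttling state (see the -EPERM checks in
 * the set_throttling handlers).
 */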

int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
{
	int result = 0;
	int throttling_limit;
	int current_state;
	struct acpi_processor_limit *limit;
	int target_state;

	if (ignore_tpc)
		return 0;

	result = acpi_processor_get_platform_limit(pr);
	if (result) {
		/* Throttling Limit is unsupported */
		return result;
	}

	throttling_limit = pr->throttling_platform_limit;
	if (throttling_limit >= pr->throttling.state_count) {
		/* Invalid throttling limit */
		return -EINVAL;
	}

	current_state = pr->throttling.state;
	if (current_state > throttling_limit) {
		/*
		 * The current state already satisfies the _TPC limit, but
		 * it is reasonable for the OSPM to move from a high to a
		 * low T-state for better performance, as long as the
		 * thermal and user limits are still honored.
		 */
		limit = &pr->limit;
		target_state = throttling_limit;
		if (limit->thermal.tx > target_state)
			target_state = limit->thermal.tx;
		if (limit->user.tx > target_state)
			target_state = limit->user.tx;
	} else if (current_state == throttling_limit) {
		/*
		 * Unnecessary to change the throttling state
		 */
		return 0;
	} else {
		/*
		 * If the current state is lower than the _TPC limit, it is
		 * forced to the throttling state defined by
		 * throttling_platform_limit.  Because the previous state
		 * already met the thermal and user limits, there is no
		 * need to check them again.
		 */
		target_state = throttling_limit;
	}
	return acpi_processor_set_throttling(pr, target_state, false);
}

/*
 * This function is used to reevaluate whether the T-state is valid
 * after one CPU is onlined/offlined.
 * Note that it does not reevaluate the following properties of the
 * T-state:
 *	1. The control method.
 *	2. The number of supported T-states.
 *	3. The TSD domain.
 */
void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
					bool is_dead)
{
	int result = 0;

	if (is_dead) {
		/* When a CPU goes offline, its T-state throttling
		 * is invalidated.
		 */
		pr->flags.throttling = 0;
		return;
	}
	/* The following rechecks whether the T-state is valid for
	 * the onlined CPU.
	 */
	if (!pr->throttling.state_count) {
		/* If the number of T-states is invalid, throttling
		 * is invalidated.
		 */
		pr->flags.throttling = 0;
		return;
	}
	pr->flags.throttling = 1;

	/* Disable throttling (if enabled).  We'll let subsequent
	 * policy (e.g. thermal) decide to lower performance if it
	 * so chooses, but for now we'll crank up the speed.
	 */

	result = acpi_processor_get_throttling(pr);
	if (result)
		goto end;

	if (pr->throttling.state) {
		result = acpi_processor_set_throttling(pr, 0, false);
		if (result)
			goto end;
	}

end:
	if (result)
		pr->flags.throttling = 0;
}

/*
 * _PTC - Processor Throttling Control (and status) register location
 */
static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *ptc = NULL;
	union acpi_object obj;
	struct acpi_processor_throttling *throttling;

	status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND)
			acpi_evaluation_failure_warn(pr->handle, "_PTC", status);

		return -ENODEV;
	}

	ptc = (union acpi_object *)buffer.pointer;
	if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
	    || (ptc->package.count != 2)) {
		pr_err("Invalid _PTC data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */

	obj = ptc->package.elements[0];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		pr_err("Invalid _PTC data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->throttling.control_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	/*
	 * status_register
	 */

	obj = ptc->package.elements[1];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		pr_err("Invalid _PTC data (status_register)\n");
		result = -EFAULT;
		goto end;
	}

	memcpy(&pr->throttling.status_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	throttling = &pr->throttling;

	if ((throttling->control_register.bit_width +
		throttling->control_register.bit_offset) > 32) {
		pr_err("Invalid _PTC control register\n");
		result = -EFAULT;
		goto end;
	}

	if ((throttling->status_register.bit_width +
		throttling->status_register.bit_offset) > 32) {
		pr_err("Invalid _PTC status register\n");
		result = -EFAULT;
		goto end;
	}

end:
	kfree(buffer.pointer);

	return result;
}
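
/*
 * For reference (informational, based on the checks above): _PTC is a
 * two-element package of buffers, each containing a generic address
 * structure (struct acpi_ptc_register).  Element 0 describes the control
 * register and element 1 the status register; each field must fit within
 * 32 bits of its register.
 */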

/*
 * _TSS - Throttling Supported States
 */
static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *tss = NULL;
	int i;

	status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND)
			acpi_evaluation_failure_warn(pr->handle, "_TSS", status);

		return -ENODEV;
	}

	tss = buffer.pointer;
	if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
		pr_err("Invalid _TSS data\n");
		result = -EFAULT;
		goto end;
	}

	acpi_handle_debug(pr->handle, "Found %d throttling states\n",
			  tss->package.count);

	pr->throttling.state_count = tss->package.count;
	pr->throttling.states_tss =
	    kmalloc_array(tss->package.count,
			  sizeof(struct acpi_processor_tx_tss),
			  GFP_KERNEL);
	if (!pr->throttling.states_tss) {
		result = -ENOMEM;
		goto end;
	}

	for (i = 0; i < pr->throttling.state_count; i++) {

		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[i]);

		state.length = sizeof(struct acpi_processor_tx_tss);
		state.pointer = tx;

		acpi_handle_debug(pr->handle, "Extracting state %d\n", i);

		status = acpi_extract_package(&(tss->package.elements[i]),
					      &format, &state);
		if (ACPI_FAILURE(status)) {
			acpi_handle_warn(pr->handle, "Invalid _TSS data: %s\n",
					 acpi_format_exception(status));
			result = -EFAULT;
			kfree(pr->throttling.states_tss);
			goto end;
		}

		if (!tx->freqpercentage) {
			pr_err("Invalid _TSS data: freq is zero\n");
			result = -EFAULT;
			kfree(pr->throttling.states_tss);
			goto end;
		}
	}

end:
	kfree(buffer.pointer);

	return result;
}
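
/*
 * For reference (informational): each _TSS entry extracted above is a
 * five-integer package matching the "NNNNN" format, i.e.
 * { FreqPercentage, Power, TransitionLatency, Control, Status }.  A
 * hypothetical 50% T-state might look like
 * Package () { 50, 500, 100, 0x05, 0x05 } (illustrative values only).
 */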

/*
 * _TSD - T-State Dependencies
 */
static int acpi_processor_get_tsd(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *tsd = NULL;
	struct acpi_tsd_package *pdomain;
	struct acpi_processor_throttling *pthrottling;

	pthrottling = &pr->throttling;
	pthrottling->tsd_valid_flag = 0;

	status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND)
			acpi_evaluation_failure_warn(pr->handle, "_TSD", status);

		return -ENODEV;
	}

	tsd = buffer.pointer;
	if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
		pr_err("Invalid _TSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (tsd->package.count != 1) {
		pr_err("Invalid _TSD data\n");
		result = -EFAULT;
		goto end;
	}

	pdomain = &(pr->throttling.domain_info);

	state.length = sizeof(struct acpi_tsd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(tsd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_err("Invalid _TSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
		pr_err("Unknown _TSD:num_entries\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
		pr_err("Unknown _TSD:revision\n");
		result = -EFAULT;
		goto end;
	}

	pthrottling = &pr->throttling;
	pthrottling->tsd_valid_flag = 1;
	pthrottling->shared_type = pdomain->coord_type;
	cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
	/*
	 * If the coordination type is not defined in the ACPI spec,
	 * tsd_valid_flag will be cleared and the coordination type
	 * will be forced to DOMAIN_COORD_TYPE_SW_ALL.
	 */
	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
		pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
		pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pthrottling->tsd_valid_flag = 0;
		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
	}

end:
	kfree(buffer.pointer);
	return result;
}
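
/*
 * For reference (informational): the single _TSD entry extracted above is
 * a five-integer package, { NumEntries, Revision, Domain, CoordType,
 * NumProcessors }, where CoordType selects SW_ALL, SW_ANY or HW_ALL
 * coordination for all processors sharing the same Domain value.
 */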

/* --------------------------------------------------------------------------
                              Throttling Control
   -------------------------------------------------------------------------- */
static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
{
	int state = 0;
	u32 value = 0;
	u32 duty_mask = 0;
	u32 duty_value = 0;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	/*
	 * We don't care about error returns - we just try to mark
	 * these reserved so that nobody else is confused into thinking
	 * that this region might be unused..
	 *
	 * (In particular, allocating the IO range for Cardbus)
	 */
	request_region(pr->throttling.address, 6, "ACPI CPU throttle");

	pr->throttling.state = 0;

	duty_mask = pr->throttling.state_count - 1;

	duty_mask <<= pr->throttling.duty_offset;

	local_irq_disable();

	value = inl(pr->throttling.address);

	/*
	 * Compute the current throttling state when throttling is enabled
	 * (bit 4 is on).
	 */
	if (value & 0x10) {
		duty_value = value & duty_mask;
		duty_value >>= pr->throttling.duty_offset;

		if (duty_value)
			state = pr->throttling.state_count - duty_value;
	}

	pr->throttling.state = state;

	local_irq_enable();

	acpi_handle_debug(pr->handle,
			  "Throttling state is T%d (%d%% throttling applied)\n",
			  state, pr->throttling.states[state].performance);

	return 0;
}
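
/*
 * Worked example of the duty-cycle decoding above (hypothetical register
 * values): with duty_width = 3 and duty_offset = 1, state_count = 8 and
 * duty_mask = 0x7 << 1 = 0xE.  Reading value = 0x1A (bit 4 set, duty
 * field = 5) gives duty_value = 5 and state = 8 - 5 = T3.
 */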

#ifdef CONFIG_X86
static int acpi_throttling_rdmsr(u64 *value)
{
	u64 msr_high, msr_low;
	u64 msr = 0;
	int ret = -1;

	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
		!this_cpu_has(X86_FEATURE_ACPI)) {
		pr_err("HARDWARE addr space, NOT supported yet\n");
	} else {
		msr_low = 0;
		msr_high = 0;
		rdmsr_safe(MSR_IA32_THERM_CONTROL,
			(u32 *)&msr_low, (u32 *) &msr_high);
		msr = (msr_high << 32) | msr_low;
		*value = (u64) msr;
		ret = 0;
	}
	return ret;
}

static int acpi_throttling_wrmsr(u64 value)
{
	int ret = -1;
	u64 msr;

	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
		!this_cpu_has(X86_FEATURE_ACPI)) {
		pr_err("HARDWARE addr space, NOT supported yet\n");
	} else {
		msr = value;
		wrmsr_safe(MSR_IA32_THERM_CONTROL,
			msr & 0xffffffff, msr >> 32);
		ret = 0;
	}
	return ret;
}
#else
static int acpi_throttling_rdmsr(u64 *value)
{
	pr_err("HARDWARE addr space, NOT supported yet\n");
	return -1;
}

static int acpi_throttling_wrmsr(u64 value)
{
	pr_err("HARDWARE addr space, NOT supported yet\n");
	return -1;
}
#endif
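
/*
 * Informational note on the accessors above: on Intel CPUs the
 * FIXED_HARDWARE address space maps to MSR_IA32_THERM_CONTROL, where bit 4
 * enables on-demand clock modulation and the bits below it select the duty
 * cycle; see the Intel SDM for the authoritative layout.
 */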

static int acpi_read_throttling_status(struct acpi_processor *pr,
					u64 *value)
{
	u32 bit_width, bit_offset;
	u32 ptc_value;
	u64 ptc_mask;
	struct acpi_processor_throttling *throttling;
	int ret = -1;

	throttling = &pr->throttling;
	switch (throttling->status_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		bit_width = throttling->status_register.bit_width;
		bit_offset = throttling->status_register.bit_offset;

		acpi_os_read_port((acpi_io_address) throttling->status_register.
				  address, &ptc_value,
				  (u32) (bit_width + bit_offset));
		ptc_mask = (1 << bit_width) - 1;
		*value = (u64) ((ptc_value >> bit_offset) & ptc_mask);
		ret = 0;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		ret = acpi_throttling_rdmsr(value);
		break;
	default:
		pr_err("Unknown addr space %d\n",
		       (u32) (throttling->status_register.space_id));
	}
	return ret;
}
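
/*
 * Example of the SYSTEM_IO decoding above (hypothetical values): with
 * bit_width = 4 and bit_offset = 0, a port read returning 0x3 yields
 * ptc_mask = 0xF and *value = 3, which acpi_get_throttling_state() then
 * matches against the _TSS control values.
 */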

static int acpi_write_throttling_state(struct acpi_processor *pr,
				u64 value)
{
	u32 bit_width, bit_offset;
	u64 ptc_value;
	u64 ptc_mask;
	struct acpi_processor_throttling *throttling;
	int ret = -1;

	throttling = &pr->throttling;
	switch (throttling->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		bit_width = throttling->control_register.bit_width;
		bit_offset = throttling->control_register.bit_offset;
		ptc_mask = (1 << bit_width) - 1;
		ptc_value = value & ptc_mask;

		acpi_os_write_port((acpi_io_address) throttling->
					control_register.address,
					(u32) (ptc_value << bit_offset),
					(u32) (bit_width + bit_offset));
		ret = 0;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		ret = acpi_throttling_wrmsr(value);
		break;
	default:
		pr_err("Unknown addr space %d\n",
		       (u32) (throttling->control_register.space_id));
	}
	return ret;
}

static int acpi_get_throttling_state(struct acpi_processor *pr,
				u64 value)
{
	int i;

	for (i = 0; i < pr->throttling.state_count; i++) {
		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[i]);
		if (tx->control == value)
			return i;
	}
	return -1;
}

static int acpi_get_throttling_value(struct acpi_processor *pr,
			int state, u64 *value)
{
	int ret = -1;

	if (state >= 0 && state < pr->throttling.state_count) {
		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[state]);
		*value = tx->control;
		ret = 0;
	}
	return ret;
}

static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
{
	int state = 0;
	int ret;
	u64 value;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	pr->throttling.state = 0;

	value = 0;
	ret = acpi_read_throttling_status(pr, &value);
	if (ret >= 0) {
		state = acpi_get_throttling_state(pr, value);
		if (state == -1) {
			acpi_handle_debug(pr->handle,
					  "Invalid throttling state, reset\n");
			state = 0;
			ret = __acpi_processor_set_throttling(pr, state, true,
							      true);
			if (ret)
				return ret;
		}
		pr->throttling.state = state;
	}

	return 0;
}

static long __acpi_processor_get_throttling(void *data)
{
	struct acpi_processor *pr = data;

	return pr->throttling.acpi_processor_get_throttling(pr);
}

static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	/*
	 * This is either called from the CPU hotplug callback of
	 * processor_driver or via the ACPI probe function. In the latter
	 * case the CPU is not guaranteed to be online. Both call sites are
	 * protected against CPU hotplug.
	 */
	if (!cpu_online(pr->id))
		return -ENODEV;

	return call_on_cpu(pr->id, __acpi_processor_get_throttling, pr, false);
}

static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
{
	int i, step;

	if (!pr->throttling.address) {
		acpi_handle_debug(pr->handle, "No throttling register\n");
		return -EINVAL;
	} else if (!pr->throttling.duty_width) {
		acpi_handle_debug(pr->handle, "No throttling states\n");
		return -EINVAL;
	}
	/* TBD: Support duty_cycle values that span bit 4. */
	else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
		pr_warn("duty_cycle spans bit 4\n");
		return -EINVAL;
	}

	pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;

	/*
	 * Compute state values. Note that throttling displays a linear power
	 * performance relationship (at 50% performance the CPU will consume
	 * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
	 */

	step = (1000 / pr->throttling.state_count);

	for (i = 0; i < pr->throttling.state_count; i++) {
		pr->throttling.states[i].performance = 1000 - step * i;
		pr->throttling.states[i].power = 1000 - step * i;
	}
	return 0;
}
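
/*
 * Example of the table built above: with duty_width = 3, state_count = 8
 * and step = 125, so states[0].performance = 1000 (100%), states[1] = 875
 * (87.5%), ..., states[7] = 125 (12.5%), all in 1/10th of a percent.
 */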

static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
					      int state, bool force)
{
	u32 value = 0;
	u32 duty_mask = 0;
	u32 duty_value = 0;

	if (!pr)
		return -EINVAL;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if (!force && (state == pr->throttling.state))
		return 0;

	if (state < pr->throttling_platform_limit)
		return -EPERM;
	/*
	 * Calculate the duty_value and duty_mask.
	 */
	if (state) {
		duty_value = pr->throttling.state_count - state;

		duty_value <<= pr->throttling.duty_offset;

		/* Used to clear all duty_value bits */
		duty_mask = pr->throttling.state_count - 1;

		duty_mask <<= acpi_gbl_FADT.duty_offset;
		duty_mask = ~duty_mask;
	}

	local_irq_disable();

	/*
	 * Disable throttling by writing a 0 to bit 4.  Note that throttling
	 * must be turned off before the duty_value can be changed.
	 */
	value = inl(pr->throttling.address);
	if (value & 0x10) {
		value &= 0xFFFFFFEF;
		outl(value, pr->throttling.address);
	}

	/*
	 * Write the new duty_value and then enable throttling.  Note
	 * that a state value of 0 leaves throttling disabled.
	 */
	if (state) {
		value &= duty_mask;
		value |= duty_value;
		outl(value, pr->throttling.address);

		value |= 0x00000010;
		outl(value, pr->throttling.address);
	}

	pr->throttling.state = state;

	local_irq_enable();

	acpi_handle_debug(pr->handle,
			  "Throttling state set to T%d (%d%%)\n", state,
			  (pr->throttling.states[state].performance ? pr->
			   throttling.states[state].performance / 10 : 0));

	return 0;
}
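
/*
 * Example of the duty programming above (hypothetical values): entering T3
 * with state_count = 8 and duty_offset = 1 gives duty_value = (8 - 3) << 1
 * = 0xA.  Throttling is first disabled (bit 4 cleared), the duty field is
 * rewritten, and bit 4 is set again to re-enable it.
 */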

static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
					     int state, bool force)
{
	int ret;
	u64 value;

	if (!pr)
		return -EINVAL;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if (!force && (state == pr->throttling.state))
		return 0;

	if (state < pr->throttling_platform_limit)
		return -EPERM;

	value = 0;
	ret = acpi_get_throttling_value(pr, state, &value);
	if (ret >= 0) {
		acpi_write_throttling_state(pr, value);
		pr->throttling.state = state;
	}

	return 0;
}

static long acpi_processor_throttling_fn(void *data)
{
	struct acpi_processor_throttling_arg *arg = data;
	struct acpi_processor *pr = arg->pr;

	return pr->throttling.acpi_processor_set_throttling(pr,
			arg->target_state, arg->force);
}

static int __acpi_processor_set_throttling(struct acpi_processor *pr,
					   int state, bool force, bool direct)
{
	int ret = 0;
	unsigned int i;
	struct acpi_processor *match_pr;
	struct acpi_processor_throttling *p_throttling;
	struct acpi_processor_throttling_arg arg;
	struct throttling_tstate t_state;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (cpu_is_offline(pr->id)) {
		/*
		 * The CPU referenced by pr->id is offline, so there is no
		 * point in changing its throttling state.
		 */
		return -ENODEV;
	}

	t_state.target_state = state;
	p_throttling = &(pr->throttling);

	/*
	 * The throttling notifier will be called for every
	 * affected cpu in order to get one proper T-state.
	 * The notifier event is THROTTLING_PRECHANGE.
	 */
	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
		t_state.cpu = i;
		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
							&t_state);
	}
	/*
	 * acpi_processor_set_throttling() is called to switch the T-state.
	 * If the coordination type is SW_ALL or HW_ALL, it must be called
	 * for every affected CPU.  Otherwise it only needs to be called
	 * for the CPU pointed to by pr.
	 */
	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
		arg.pr = pr;
		arg.target_state = state;
		arg.force = force;
		ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, &arg,
				  direct);
	} else {
		/*
		 * When the T-state coordination is SW_ALL or HW_ALL,
		 * the T-state must be set on every affected CPU.
		 */
		for_each_cpu_and(i, cpu_online_mask,
		    p_throttling->shared_cpu_map) {
			match_pr = per_cpu(processors, i);
			/*
			 * If the pointer is invalid, we will report the
			 * error message and continue.
			 */
			if (!match_pr) {
				acpi_handle_debug(pr->handle,
					"Invalid Pointer for CPU %d\n", i);
				continue;
			}
			/*
			 * If the throttling control is unsupported on CPU i,
			 * we will report the error message and continue.
			 */
			if (!match_pr->flags.throttling) {
				acpi_handle_debug(pr->handle,
					"Throttling control unsupported on CPU %d\n", i);
				continue;
			}

			arg.pr = match_pr;
			arg.target_state = state;
			arg.force = force;
			ret = call_on_cpu(pr->id, acpi_processor_throttling_fn,
					  &arg, direct);
		}
	}
	/*
	 * After set_throttling has been called, the throttling notifier
	 * is called for every affected CPU to update the T-state flag.
	 * The notifier event is THROTTLING_POSTCHANGE.
	 */
	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
		t_state.cpu = i;
		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
							&t_state);
	}

	return ret;
}

int acpi_processor_set_throttling(struct acpi_processor *pr, int state,
				  bool force)
{
	return __acpi_processor_set_throttling(pr, state, force, false);
}
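
/*
 * Typical usage (a sketch): a caller such as the processor thermal code
 * requests a deeper T-state with acpi_processor_set_throttling(pr, 2,
 * false); the prechange notifier may then raise the target to honor the
 * thermal, user and _TPC limits before the per-CPU callback programs the
 * hardware.
 */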

int acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
	int result = 0;
	struct acpi_processor_throttling *pthrottling;

	acpi_handle_debug(pr->handle,
			  "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
			  pr->throttling.address,
			  pr->throttling.duty_offset,
			  pr->throttling.duty_width);

	/*
	 * Evaluate _PTC, _TSS and _TPC
	 * They must all be present or none of them can be used.
	 */
	if (acpi_processor_get_throttling_control(pr) ||
		acpi_processor_get_throttling_states(pr) ||
		acpi_processor_get_platform_limit(pr)) {
		pr->throttling.acpi_processor_get_throttling =
		    &acpi_processor_get_throttling_fadt;
		pr->throttling.acpi_processor_set_throttling =
		    &acpi_processor_set_throttling_fadt;
		if (acpi_processor_get_fadt_info(pr))
			return 0;
	} else {
		pr->throttling.acpi_processor_get_throttling =
		    &acpi_processor_get_throttling_ptc;
		pr->throttling.acpi_processor_set_throttling =
		    &acpi_processor_set_throttling_ptc;
	}

	/*
	 * If TSD package for one CPU can't be parsed successfully, it means
	 * that this CPU will have no coordination with other CPUs.
	 */
	if (acpi_processor_get_tsd(pr)) {
		pthrottling = &pr->throttling;
		pthrottling->tsd_valid_flag = 0;
		cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
	}

	/*
	 * PIIX4 Errata: We don't support throttling on the original PIIX4.
	 * This shouldn't be an issue as few (if any) mobile systems ever
	 * used this part.
	 */
	if (errata.piix4.throttle) {
		acpi_handle_debug(pr->handle,
				  "Throttling not supported on PIIX4 A- or B-step\n");
		return 0;
	}

	acpi_handle_debug(pr->handle, "Found %d throttling states\n",
			  pr->throttling.state_count);

	pr->flags.throttling = 1;

	/*
	 * Disable throttling (if enabled).  We'll let subsequent policy (e.g.
	 * thermal) decide to lower performance if it so chooses, but for now
	 * we'll crank up the speed.
	 */

	result = acpi_processor_get_throttling(pr);
	if (result)
		goto end;

	if (pr->throttling.state) {
		acpi_handle_debug(pr->handle,
				  "Disabling throttling (was T%d)\n",
				  pr->throttling.state);
		result = acpi_processor_set_throttling(pr, 0, false);
		if (result)
			goto end;
	}

end:
	if (result)
		pr->flags.throttling = 0;

	return result;
}