// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_thermal.c - Passive cooling submodule of the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  			- Added processor hotplug support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <linux/uaccess.h>

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_thermal");

#ifdef CONFIG_CPU_FREQ

/* If a passive cooling situation is detected, primarily CPUfreq is used, as it
 * offers (in most cases) voltage scaling in addition to frequency scaling, and
 * thus a cubic (instead of linear) reduction of energy. Also, we allow for
 * _any_ cpufreq driver and not only the acpi-cpufreq driver.
 */
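/*
 * Rough intuition for the "cubic" claim above: dynamic CPU power scales
 * roughly as C * V^2 * f, so a driver that lowers the voltage along with
 * the frequency cuts power approximately with the cube of the frequency,
 * rather than linearly as pure clock throttling would.
 */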

#define CPUFREQ_THERMAL_MIN_STEP 0
#define CPUFREQ_THERMAL_MAX_STEP 3

static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
static unsigned int acpi_thermal_cpufreq_is_init;

#define reduction_pctg(cpu) \
	per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))

/*
 * Emulate "per package data" using per cpu data (which should really be
 * provided elsewhere).
 *
 * Note that we can lose a CPU on cpu hotunplug; in that case we forget its
 * state temporarily. Fortunately that's not a big issue here (I hope).
 */
static int phys_package_first_cpu(int cpu)
{
	int i;
	int id = topology_physical_package_id(cpu);

	for_each_online_cpu(i)
		if (topology_physical_package_id(i) == id)
			return i;
	return 0;
}

static int cpu_has_cpufreq(unsigned int cpu)
{
	struct cpufreq_policy policy;

	if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu))
		return 0;
	return 1;
}

static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned long max_freq = 0;

	if (event != CPUFREQ_ADJUST)
		goto out;

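	/*
	 * Each thermal reduction step lowers the allowed maximum frequency
	 * by a further 20% of cpuinfo.max_freq, so steps 0..3 correspond to
	 * 100%, 80%, 60% and 40% of the hardware maximum.
	 */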
	max_freq = (policy->cpuinfo.max_freq *
		    (100 - reduction_pctg(policy->cpu) * 20)) / 100;

	cpufreq_verify_within_limits(policy, 0, max_freq);

out:
	return 0;
}

static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
	.notifier_call = acpi_thermal_cpufreq_notifier,
};

static int cpufreq_get_max_state(unsigned int cpu)
{
	if (!cpu_has_cpufreq(cpu))
		return 0;

	return CPUFREQ_THERMAL_MAX_STEP;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
	if (!cpu_has_cpufreq(cpu))
		return 0;

	return reduction_pctg(cpu);
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	int i;

	if (!cpu_has_cpufreq(cpu))
		return 0;

	reduction_pctg(cpu) = state;

	/*
	 * Update all the CPUs in the same package because they all
	 * contribute to the temperature and often share the same
	 * frequency.
	 */
	for_each_online_cpu(i) {
		if (topology_physical_package_id(i) ==
		    topology_physical_package_id(cpu))
			cpufreq_update_policy(i);
	}
	return 0;
}

void acpi_thermal_cpufreq_init(void)
{
	int i;

	i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
				      CPUFREQ_POLICY_NOTIFIER);
	if (!i)
		acpi_thermal_cpufreq_is_init = 1;
}

void acpi_thermal_cpufreq_exit(void)
{
	if (acpi_thermal_cpufreq_is_init)
		cpufreq_unregister_notifier(&acpi_thermal_cpufreq_notifier_block,
					    CPUFREQ_POLICY_NOTIFIER);

	acpi_thermal_cpufreq_is_init = 0;
}

#else				/* ! CONFIG_CPU_FREQ */
static int cpufreq_get_max_state(unsigned int cpu)
{
	return 0;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
	return 0;
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	return 0;
}

#endif

/* thermal cooling device callbacks */
static int acpi_processor_max_state(struct acpi_processor *pr)
{
	int max_state = 0;

	/*
	 * There are four cpufreq thermal reduction states
	 * (cpufreq_thermal_reduction_pctg): 0, 1, 2 and 3.
	 */
	max_state += cpufreq_get_max_state(pr->id);
	if (pr->flags.throttling)
		max_state += (pr->throttling.state_count - 1);

	return max_state;
}

static int
processor_get_max_state(struct thermal_cooling_device *cdev,
			unsigned long *state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	*state = acpi_processor_max_state(pr);
	return 0;
}

static int
processor_get_cur_state(struct thermal_cooling_device *cdev,
			unsigned long *cur_state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	*cur_state = cpufreq_get_cur_state(pr->id);
	if (pr->flags.throttling)
		*cur_state += pr->throttling.state;
	return 0;
}

static int
processor_set_cur_state(struct thermal_cooling_device *cdev,
			unsigned long state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;
	int result = 0;
	int max_pstate;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	max_pstate = cpufreq_get_max_state(pr->id);

	if (state > acpi_processor_max_state(pr))
		return -EINVAL;

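	/*
	 * States up to max_pstate are satisfied by cpufreq frequency
	 * reduction alone; higher states keep cpufreq at its deepest
	 * reduction step and add T-state throttling on top.
	 */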
	if (state <= max_pstate) {
		if (pr->flags.throttling && pr->throttling.state)
			result = acpi_processor_set_throttling(pr, 0, false);
		cpufreq_set_cur_state(pr->id, state);
	} else {
		cpufreq_set_cur_state(pr->id, max_pstate);
		result = acpi_processor_set_throttling(pr,
				state - max_pstate, false);
	}
	return result;
}

const struct thermal_cooling_device_ops processor_cooling_ops = {
	.get_max_state = processor_get_max_state,
	.get_cur_state = processor_get_cur_state,
	.set_cur_state = processor_set_cur_state,
};