1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * PSCI CPU idle driver.
4  *
5  * Copyright (C) 2019 ARM Ltd.
6  * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
7  */
8 
9 #define pr_fmt(fmt) "CPUidle PSCI: " fmt
10 
11 #include <linux/cpuhotplug.h>
12 #include <linux/cpu_cooling.h>
13 #include <linux/cpuidle.h>
14 #include <linux/cpumask.h>
15 #include <linux/cpu_pm.h>
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/of.h>
19 #include <linux/of_device.h>
20 #include <linux/psci.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/slab.h>
23 
24 #include <asm/cpuidle.h>
25 
26 #include "cpuidle-psci.h"
27 #include "dt_idle_states.h"
28 
/*
 * Per-CPU idle bookkeeping.
 *
 * @psci_states: PSCI power-state parameters indexed by cpuidle state index;
 *		 entry 0 corresponds to WFI and is left zeroed.
 * @dev: device attached via psci_dt_attach_cpu() when the hierarchical
 *	 (OSI) topology is used; NULL otherwise.
 */
struct psci_cpuidle_data {
	u32 *psci_states;
	struct device *dev;
};
33 
/* Per-CPU idle data, read on every idle entry — hence READ_MOSTLY. */
static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
/* Domain idle state set via psci_set_domain_state(); 0 means none selected. */
static DEFINE_PER_CPU(u32, domain_state);
/* Written and read only during init, hence __initdata. */
static bool psci_cpuidle_use_cpuhp __initdata;
37 
/* Record the domain idle state for the current CPU (0 clears it). */
void psci_set_domain_state(u32 state)
{
	__this_cpu_write(domain_state, state);
}
42 
/* Return the domain idle state recorded for the current CPU, 0 if none. */
static inline u32 psci_get_domain_state(void)
{
	return __this_cpu_read(domain_state);
}
47 
/*
 * Enter an idle state through the common CPU PM wrapper, which handles
 * the cpu_pm notifiers around psci_cpu_suspend_enter().
 */
static inline int psci_enter_state(int idx, u32 state)
{
	return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter, idx, state);
}
52 
/*
 * Idle-state ->enter callback used for the deepest state when the CPU is
 * part of a hierarchical PM domain topology (installed by
 * psci_dt_cpu_init_topology()). Returns the entered state index, or -1 on
 * failure.
 */
static int psci_enter_domain_idle_state(struct cpuidle_device *dev,
					struct cpuidle_driver *drv, int idx)
{
	struct psci_cpuidle_data *data = this_cpu_ptr(&psci_cpuidle_data);
	u32 *states = data->psci_states;
	struct device *pd_dev = data->dev;
	u32 state;
	int ret;

	ret = cpu_pm_enter();
	if (ret)
		return -1;

	/* Do runtime PM to manage a hierarchical CPU topology. */
	pm_runtime_put_sync_suspend(pd_dev);

	/* Prefer a domain state, if one was selected for this CPU. */
	state = psci_get_domain_state();
	if (!state)
		state = states[idx];

	ret = psci_cpu_suspend_enter(state) ? -1 : idx;

	pm_runtime_get_sync(pd_dev);

	cpu_pm_exit();

	/* Clear the domain state to start fresh when back from idle. */
	psci_set_domain_state(0);
	return ret;
}
83 
84 static int psci_idle_cpuhp_up(unsigned int cpu)
85 {
86 	struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
87 
88 	if (pd_dev)
89 		pm_runtime_get_sync(pd_dev);
90 
91 	return 0;
92 }
93 
94 static int psci_idle_cpuhp_down(unsigned int cpu)
95 {
96 	struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
97 
98 	if (pd_dev) {
99 		pm_runtime_put_sync(pd_dev);
100 		/* Clear domain state to start fresh at next online. */
101 		psci_set_domain_state(0);
102 	}
103 
104 	return 0;
105 }
106 
107 static void __init psci_idle_init_cpuhp(void)
108 {
109 	int err;
110 
111 	if (!psci_cpuidle_use_cpuhp)
112 		return;
113 
114 	err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
115 					"cpuidle/psci:online",
116 					psci_idle_cpuhp_up,
117 					psci_idle_cpuhp_down);
118 	if (err)
119 		pr_warn("Failed %d while setup cpuhp state\n", err);
120 }
121 
/*
 * Default idle-state ->enter callback: enter the PSCI state parsed for
 * this CPU at index @idx.
 */
static int psci_enter_idle_state(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int idx)
{
	u32 *state = __this_cpu_read(psci_cpuidle_data.psci_states);

	return psci_enter_state(idx, state[idx]);
}
129 
/* Template driver, duplicated per CPU in psci_idle_init_cpu(). */
static struct cpuidle_driver psci_idle_driver __initdata = {
	.name = "psci_idle",
	.owner = THIS_MODULE,
	/*
	 * PSCI idle states rely on architectural WFI to
	 * be represented as state index 0.
	 */
	.states[0] = {
		.enter                  = psci_enter_idle_state,
		.exit_latency           = 1,
		.target_residency       = 1,
		.power_usage		= UINT_MAX,
		.name                   = "WFI",
		.desc                   = "ARM WFI",
	}
};
146 
/* DT match table handed to dt_init_idle_driver() to parse idle states. */
static const struct of_device_id psci_idle_state_match[] __initconst = {
	{ .compatible = "arm,idle-state",
	  .data = psci_enter_idle_state },
	{ },
};
152 
153 int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
154 {
155 	int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
156 
157 	if (err) {
158 		pr_warn("%pOF missing arm,psci-suspend-param property\n", np);
159 		return err;
160 	}
161 
162 	if (!psci_power_state_is_valid(*state)) {
163 		pr_warn("Invalid PSCI power state %#x\n", *state);
164 		return -EINVAL;
165 	}
166 
167 	return 0;
168 }
169 
170 static int __init psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
171 					    struct psci_cpuidle_data *data,
172 					    unsigned int state_count, int cpu)
173 {
174 	/* Currently limit the hierarchical topology to be used in OSI mode. */
175 	if (!psci_has_osi_support())
176 		return 0;
177 
178 	data->dev = psci_dt_attach_cpu(cpu);
179 	if (IS_ERR_OR_NULL(data->dev))
180 		return PTR_ERR_OR_ZERO(data->dev);
181 
182 	/*
183 	 * Using the deepest state for the CPU to trigger a potential selection
184 	 * of a shared state for the domain, assumes the domain states are all
185 	 * deeper states.
186 	 */
187 	drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
188 	psci_cpuidle_use_cpuhp = true;
189 
190 	return 0;
191 }
192 
193 static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
194 					struct device_node *cpu_node,
195 					unsigned int state_count, int cpu)
196 {
197 	int i, ret = 0;
198 	u32 *psci_states;
199 	struct device_node *state_node;
200 	struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);
201 
202 	state_count++; /* Add WFI state too */
203 	psci_states = kcalloc(state_count, sizeof(*psci_states), GFP_KERNEL);
204 	if (!psci_states)
205 		return -ENOMEM;
206 
207 	for (i = 1; i < state_count; i++) {
208 		state_node = of_get_cpu_state_node(cpu_node, i - 1);
209 		if (!state_node)
210 			break;
211 
212 		ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
213 		of_node_put(state_node);
214 
215 		if (ret)
216 			goto free_mem;
217 
218 		pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
219 	}
220 
221 	if (i != state_count) {
222 		ret = -ENODEV;
223 		goto free_mem;
224 	}
225 
226 	/* Initialize optional data, used for the hierarchical topology. */
227 	ret = psci_dt_cpu_init_topology(drv, data, state_count, cpu);
228 	if (ret < 0)
229 		goto free_mem;
230 
231 	/* Idle states parsed correctly, store them in the per-cpu struct. */
232 	data->psci_states = psci_states;
233 	return 0;
234 
235 free_mem:
236 	kfree(psci_states);
237 	return ret;
238 }
239 
240 static __init int psci_cpu_init_idle(struct cpuidle_driver *drv,
241 				     unsigned int cpu, unsigned int state_count)
242 {
243 	struct device_node *cpu_node;
244 	int ret;
245 
246 	/*
247 	 * If the PSCI cpu_suspend function hook has not been initialized
248 	 * idle states must not be enabled, so bail out
249 	 */
250 	if (!psci_ops.cpu_suspend)
251 		return -EOPNOTSUPP;
252 
253 	cpu_node = of_cpu_device_node_get(cpu);
254 	if (!cpu_node)
255 		return -ENODEV;
256 
257 	ret = psci_dt_cpu_init_idle(drv, cpu_node, state_count, cpu);
258 
259 	of_node_put(cpu_node);
260 
261 	return ret;
262 }
263 
264 static int __init psci_idle_init_cpu(int cpu)
265 {
266 	struct cpuidle_driver *drv;
267 	struct device_node *cpu_node;
268 	const char *enable_method;
269 	int ret = 0;
270 
271 	cpu_node = of_cpu_device_node_get(cpu);
272 	if (!cpu_node)
273 		return -ENODEV;
274 
275 	/*
276 	 * Check whether the enable-method for the cpu is PSCI, fail
277 	 * if it is not.
278 	 */
279 	enable_method = of_get_property(cpu_node, "enable-method", NULL);
280 	if (!enable_method || (strcmp(enable_method, "psci")))
281 		ret = -ENODEV;
282 
283 	of_node_put(cpu_node);
284 	if (ret)
285 		return ret;
286 
287 	drv = kmemdup(&psci_idle_driver, sizeof(*drv), GFP_KERNEL);
288 	if (!drv)
289 		return -ENOMEM;
290 
291 	drv->cpumask = (struct cpumask *)cpumask_of(cpu);
292 
293 	/*
294 	 * Initialize idle states data, starting at index 1, since
295 	 * by default idle state 0 is the quiescent state reached
296 	 * by the cpu by executing the wfi instruction.
297 	 *
298 	 * If no DT idle states are detected (ret == 0) let the driver
299 	 * initialization fail accordingly since there is no reason to
300 	 * initialize the idle driver if only wfi is supported, the
301 	 * default archictectural back-end already executes wfi
302 	 * on idle entry.
303 	 */
304 	ret = dt_init_idle_driver(drv, psci_idle_state_match, 1);
305 	if (ret <= 0) {
306 		ret = ret ? : -ENODEV;
307 		goto out_kfree_drv;
308 	}
309 
310 	/*
311 	 * Initialize PSCI idle states.
312 	 */
313 	ret = psci_cpu_init_idle(drv, cpu, ret);
314 	if (ret) {
315 		pr_err("CPU %d failed to PSCI idle\n", cpu);
316 		goto out_kfree_drv;
317 	}
318 
319 	ret = cpuidle_register(drv, NULL);
320 	if (ret)
321 		goto out_kfree_drv;
322 
323 	cpuidle_cooling_register(drv);
324 
325 	return 0;
326 
327 out_kfree_drv:
328 	kfree(drv);
329 	return ret;
330 }
331 
332 /*
333  * psci_idle_init - Initializes PSCI cpuidle driver
334  *
335  * Initializes PSCI cpuidle driver for all CPUs, if any CPU fails
336  * to register cpuidle driver then rollback to cancel all CPUs
337  * registration.
338  */
339 static int __init psci_idle_init(void)
340 {
341 	int cpu, ret;
342 	struct cpuidle_driver *drv;
343 	struct cpuidle_device *dev;
344 
345 	for_each_possible_cpu(cpu) {
346 		ret = psci_idle_init_cpu(cpu);
347 		if (ret)
348 			goto out_fail;
349 	}
350 
351 	psci_idle_init_cpuhp();
352 	return 0;
353 
354 out_fail:
355 	while (--cpu >= 0) {
356 		dev = per_cpu(cpuidle_devices, cpu);
357 		drv = cpuidle_get_cpu_driver(dev);
358 		cpuidle_unregister(drv);
359 		kfree(drv);
360 	}
361 
362 	return ret;
363 }
364 device_initcall(psci_idle_init);
365