/*
 * DT idle states parsing code.
 *
 * Copyright (C) 2014 ARM Ltd.
 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) "DT idle-states: " fmt

#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include "dt_idle_states.h"

static int init_state_node(struct cpuidle_state *idle_state,
			   const struct of_device_id *matches,
			   struct device_node *state_node)
{
	int err;
	const struct of_device_id *match_id;
	const char *desc;

	match_id = of_match_node(matches, state_node);
	if (!match_id)
		return -ENODEV;
	/*
	 * CPUidle drivers are expected to initialize the const void *data
	 * pointer of the passed-in struct of_device_id array to the idle
	 * state enter function.
	 */
	idle_state->enter = match_id->data;
	/*
	 * Since this is not a "coupled" state, it's safe to assume interrupts
	 * won't be enabled when it exits, allowing the tick to be frozen
	 * safely. The enter() callback can therefore also serve as the
	 * enter_s2idle() callback.
	 */
	idle_state->enter_s2idle = match_id->data;

	err = of_property_read_u32(state_node, "wakeup-latency-us",
				   &idle_state->exit_latency);
	if (err) {
		u32 entry_latency, exit_latency;

		err = of_property_read_u32(state_node, "entry-latency-us",
					   &entry_latency);
		if (err) {
			pr_debug(" * %pOF missing entry-latency-us property\n",
				 state_node);
			return -EINVAL;
		}

		err = of_property_read_u32(state_node, "exit-latency-us",
					   &exit_latency);
		if (err) {
			pr_debug(" * %pOF missing exit-latency-us property\n",
				 state_node);
			return -EINVAL;
		}
		/*
		 * If wakeup-latency-us is missing, default to entry+exit
		 * latencies as defined in idle states bindings
		 */
		idle_state->exit_latency = entry_latency + exit_latency;
	}

	err = of_property_read_u32(state_node, "min-residency-us",
				   &idle_state->target_residency);
	if (err) {
		pr_debug(" * %pOF missing min-residency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_string(state_node, "idle-state-name", &desc);
	if (err)
		desc = state_node->name;

	idle_state->flags = 0;
	if (of_property_read_bool(state_node, "local-timer-stop"))
		idle_state->flags |= CPUIDLE_FLAG_TIMER_STOP;
	/*
	 * TODO:
	 *	replace with kstrdup and pointer assignment when name
	 *	and desc become string pointers
	 */
	strncpy(idle_state->name, state_node->name, CPUIDLE_NAME_LEN - 1);
	strncpy(idle_state->desc, desc, CPUIDLE_DESC_LEN - 1);
	return 0;
}
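
/*
 * For reference, a sketch of the devicetree shape that init_state_node()
 * expects (the property values below are made-up examples, and the
 * "arm,idle-state" compatible is just one possible binding; the compatible
 * string only has to match an entry in the driver's matches table):
 *
 *	idle-states {
 *		CPU_SLEEP: cpu-sleep {
 *			compatible = "arm,idle-state";
 *			local-timer-stop;
 *			entry-latency-us = <100>;
 *			exit-latency-us = <250>;
 *			min-residency-us = <1000>;
 *			idle-state-name = "cpu-sleep";
 *		};
 *	};
 *
 * CPU nodes then list the states they support by phandle in their
 * "cpu-idle-states" property, which is what dt_init_idle_driver() below
 * walks.
 */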

/*
 * Check that the idle state is uniform across all CPUs in the CPUidle driver
 * cpumask
 */
static bool idle_state_valid(struct device_node *state_node, unsigned int idx,
			     const cpumask_t *cpumask)
{
	int cpu;
	struct device_node *cpu_node, *curr_state_node;
	bool valid = true;

	/*
	 * Compare idle state phandles for index idx on all CPUs in the
	 * CPUidle driver cpumask. Start from the next logical cpu following
	 * cpumask_first(cpumask), since that's the CPU state_node was
	 * retrieved from. If a mismatch is found, bail out straight away,
	 * since we have certainly hit a firmware misconfiguration.
	 */
	for (cpu = cpumask_next(cpumask_first(cpumask), cpumask);
	     cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) {
		cpu_node = of_cpu_device_node_get(cpu);
		curr_state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
						   idx);
		if (state_node != curr_state_node)
			valid = false;

		of_node_put(curr_state_node);
		of_node_put(cpu_node);
		if (!valid)
			break;
	}

	return valid;
}

/**
 * dt_init_idle_driver() - Parse the DT idle states and initialize the
 *			   idle driver states array
 * @drv:	  Pointer to CPU idle driver to be initialized
 * @matches:	  Array of of_device_id match structures to search in for
 *		  compatible idle state nodes. The data pointer for each valid
 *		  struct of_device_id entry in the matches array must point to
 *		  a function with the following signature, which corresponds to
 *		  the CPUidle state enter function signature:
 *
 *		  int (*)(struct cpuidle_device *dev,
 *			  struct cpuidle_driver *drv,
 *			  int index);
 *
 * @start_idx:    First idle state index to be initialized
 *
 * If DT idle states are detected and are valid, the state count and states
 * array entries in the cpuidle driver are initialized accordingly, starting
 * from index start_idx.
 *
 * Return: number of valid DT idle states parsed, <0 on failure
 */
int dt_init_idle_driver(struct cpuidle_driver *drv,
			const struct of_device_id *matches,
			unsigned int start_idx)
{
	struct cpuidle_state *idle_state;
	struct device_node *state_node, *cpu_node;
	int i, err = 0;
	const cpumask_t *cpumask;
	unsigned int state_idx = start_idx;

	if (state_idx >= CPUIDLE_STATE_MAX)
		return -EINVAL;
	/*
	 * Get the idle states for the first logical cpu in the driver mask
	 * (or cpu_possible_mask if the driver cpumask is not set) and check
	 * through idle_state_valid() that they are uniform across CPUs;
	 * a mismatch means we have hit a firmware misconfiguration.
	 */
	cpumask = drv->cpumask ? : cpu_possible_mask;
	cpu_node = of_cpu_device_node_get(cpumask_first(cpumask));

	for (i = 0; ; i++) {
		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
		if (!state_node)
			break;

		if (!of_device_is_available(state_node)) {
			of_node_put(state_node);
			continue;
		}

		if (!idle_state_valid(state_node, i, cpumask)) {
			pr_warn("%pOF idle state not valid, bailing out\n",
				state_node);
			err = -EINVAL;
			break;
		}

		if (state_idx == CPUIDLE_STATE_MAX) {
			pr_warn("State index reached static CPU idle driver states array size\n");
			break;
		}

		idle_state = &drv->states[state_idx++];
		err = init_state_node(idle_state, matches, state_node);
		if (err) {
			pr_err("Parsing idle state node %pOF failed with err %d\n",
			       state_node, err);
			err = -EINVAL;
			break;
		}
		of_node_put(state_node);
	}

	of_node_put(state_node);
	of_node_put(cpu_node);
	if (err)
		return err;
	/*
	 * Update the driver state count only if some valid DT idle states
	 * were detected.
	 */
	if (i)
		drv->state_count = state_idx;

	/*
	 * Return the number of present and valid DT idle states, which can
	 * also be 0 on platforms with missing DT idle states or legacy DT
	 * configuration predating the DT idle states bindings.
	 */
	return i;
}
EXPORT_SYMBOL_GPL(dt_init_idle_driver);
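
/*
 * Illustrative usage (a sketch, not a definitive recipe): a platform CPUidle
 * driver provides an enter callback with the signature documented above,
 * points the of_device_id data pointer at it and then calls
 * dt_init_idle_driver() to fill in the DT-defined states. The "foo" names,
 * the "foo,idle-state" compatible, foo_firmware_suspend() and foo_idle_driver
 * below are hypothetical; real users (for instance the generic ARM CPUidle
 * driver) follow the same pattern with their own compatible strings and
 * firmware calls. Passing start_idx = 1 leaves drv->states[0] (typically a
 * default architectural idle state the driver sets up statically) untouched.
 *
 *	static int foo_enter_idle_state(struct cpuidle_device *dev,
 *					struct cpuidle_driver *drv,
 *					int idx)
 *	{
 *		return foo_firmware_suspend(idx) ? -1 : idx;
 *	}
 *
 *	static const struct of_device_id foo_idle_state_match[] = {
 *		{ .compatible = "foo,idle-state",
 *		  .data = foo_enter_idle_state },
 *		{ },
 *	};
 *
 *	ret = dt_init_idle_driver(&foo_idle_driver, foo_idle_state_match, 1);
 *	if (ret <= 0)
 *		return ret ? : -ENODEV;
 */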