1 /*
2  * linux/arch/arm/mach-omap2/cpuidle34xx.c
3  *
4  * OMAP3 CPU IDLE Routines
5  *
6  * Copyright (C) 2008 Texas Instruments, Inc.
7  * Rajendra Nayak <rnayak@ti.com>
8  *
9  * Copyright (C) 2007 Texas Instruments, Inc.
10  * Karthik Dasu <karthik-dp@ti.com>
11  *
12  * Copyright (C) 2006 Nokia Corporation
13  * Tony Lindgren <tony@atomide.com>
14  *
15  * Copyright (C) 2005 Texas Instruments, Inc.
16  * Richard Woodruff <r-woodruff2@ti.com>
17  *
18  * Based on pm.c for omap2
19  *
20  * This program is free software; you can redistribute it and/or modify
21  * it under the terms of the GNU General Public License version 2 as
22  * published by the Free Software Foundation.
23  */
24 
25 #include <linux/sched.h>
26 #include <linux/cpuidle.h>
27 #include <linux/export.h>
28 #include <linux/cpu_pm.h>
29 
30 #include "powerdomain.h"
31 #include "clockdomain.h"
32 
33 #include "pm.h"
34 #include "control.h"
35 #include "common.h"
36 
37 /* Mach specific information to be recorded in the C-state driver_data */
/* Mach specific information to be recorded in the C-state driver_data */
struct omap3_idle_statedata {
	/* Target MPU powerdomain power state (PWRDM_POWER_*) for this C-state */
	u8 mpu_state;
	/* Target CORE powerdomain power state (PWRDM_POWER_*) for this C-state */
	u8 core_state;
	/*
	 * Shallowest power state PER is allowed to enter while in this
	 * C-state; omap3_enter_idle_bm() raises PER's next state to at
	 * least this value before entering idle.
	 */
	u8 per_min_state;
	/* OMAP_CPUIDLE_CX_* flag bits */
	u8 flags;
};
44 
/* Powerdomain handles, looked up once at init time by omap3_idle_init() */
static struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;

/*
 * Possible flag bits for struct omap3_idle_statedata.flags:
 *
 * OMAP_CPUIDLE_CX_NO_CLKDM_IDLE: don't allow the MPU clockdomain to go
 *    inactive.  This in turn prevents the MPU DPLL from entering autoidle
 *    mode, so wakeup latency is greatly reduced, at the cost of additional
 *    energy consumption.  This also prevents the CORE clockdomain from
 *    entering idle.
 */
#define OMAP_CPUIDLE_CX_NO_CLKDM_IDLE		BIT(0)
57 
58 /*
59  * Prevent PER OFF if CORE is not in RETention or OFF as this would
60  * disable PER wakeups completely.
61  */
/*
 * Per-C-state power-state targets, indexed by C-state number (C1 is
 * index 0).  NOTE: the order of entries here must match the order of
 * omap3_idle_driver.states[] below, since both __omap3_enter_idle()
 * and next_valid_state() index this array by the cpuidle state index.
 *
 * Prevent PER OFF if CORE is not in RETention or OFF as this would
 * disable PER wakeups completely.
 */
static struct omap3_idle_statedata omap3_idle_data[] = {
	{
		/* C1: MPU ON + CORE ON */
		.mpu_state = PWRDM_POWER_ON,
		.core_state = PWRDM_POWER_ON,
		/* In C1 do not allow PER state lower than CORE state */
		.per_min_state = PWRDM_POWER_ON,
		.flags = OMAP_CPUIDLE_CX_NO_CLKDM_IDLE,
	},
	{
		/* C2: MPU ON + CORE ON, clockdomains allowed to idle */
		.mpu_state = PWRDM_POWER_ON,
		.core_state = PWRDM_POWER_ON,
		.per_min_state = PWRDM_POWER_RET,
	},
	{
		/* C3: MPU RET + CORE ON */
		.mpu_state = PWRDM_POWER_RET,
		.core_state = PWRDM_POWER_ON,
		.per_min_state = PWRDM_POWER_RET,
	},
	{
		/* C4: MPU OFF + CORE ON */
		.mpu_state = PWRDM_POWER_OFF,
		.core_state = PWRDM_POWER_ON,
		.per_min_state = PWRDM_POWER_RET,
	},
	{
		/* C5: MPU RET + CORE RET */
		.mpu_state = PWRDM_POWER_RET,
		.core_state = PWRDM_POWER_RET,
		.per_min_state = PWRDM_POWER_OFF,
	},
	{
		/* C6: MPU OFF + CORE RET */
		.mpu_state = PWRDM_POWER_OFF,
		.core_state = PWRDM_POWER_RET,
		.per_min_state = PWRDM_POWER_OFF,
	},
	{
		/* C7: MPU OFF + CORE OFF */
		.mpu_state = PWRDM_POWER_OFF,
		.core_state = PWRDM_POWER_OFF,
		.per_min_state = PWRDM_POWER_OFF,
	},
};
101 
102 /* Private functions */
103 
/*
 * __omap3_enter_idle - program the power/clock domains and enter idle
 * @dev: cpuidle device (unused here; required by the enter callback ABI)
 * @drv: cpuidle driver (unused here; required by the enter callback ABI)
 * @index: index into omap3_idle_data[] / drv->states[]
 *
 * Programs the MPU and CORE powerdomain next-power-states for the
 * selected C-state, runs the SRAM idle sequence (WFI), and undoes any
 * clockdomain idle restriction afterwards.  Returns @index (the state
 * actually entered from cpuidle's point of view).
 *
 * NOTE(review): the exact ordering here (fiq disable -> pending-irq
 * check -> domain programming -> cpu_pm notifiers -> omap_sram_idle())
 * is hardware power-sequencing; do not reorder.
 */
static int __omap3_enter_idle(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	struct omap3_idle_statedata *cx = &omap3_idle_data[index];

	local_fiq_disable();

	/* Bail out early if a wakeup is already pending or work is queued */
	if (omap_irq_pending() || need_resched())
		goto return_sleep_time;

	/* Deny idle for C1 */
	if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE) {
		clkdm_deny_idle(mpu_pd->pwrdm_clkdms[0]);
	} else {
		pwrdm_set_next_pwrst(mpu_pd, cx->mpu_state);
		pwrdm_set_next_pwrst(core_pd, cx->core_state);
	}

	/*
	 * Call idle CPU PM enter notifier chain so that
	 * VFP context is saved.
	 */
	if (cx->mpu_state == PWRDM_POWER_OFF)
		cpu_pm_enter();

	/* Execute ARM wfi */
	omap_sram_idle();

	/*
	 * Call idle CPU PM enter notifier chain to restore
	 * VFP context.  Only done when the MPU powerdomain actually
	 * reached OFF (i.e. context was really lost).
	 */
	if (cx->mpu_state == PWRDM_POWER_OFF &&
	    pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
		cpu_pm_exit();

	/* Re-allow idle for C1 */
	if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE)
		clkdm_allow_idle(mpu_pd->pwrdm_clkdms[0]);

return_sleep_time:
	local_fiq_enable();

	return index;
}
150 
151 /**
152  * omap3_enter_idle - Programs OMAP3 to enter the specified state
153  * @dev: cpuidle device
154  * @drv: cpuidle driver
155  * @index: the index of state to be entered
156  *
157  * Called from the CPUidle framework to program the device to the
158  * specified target state selected by the governor.
159  */
/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.  The wrapper lets
 * the cpuidle core account the time spent in __omap3_enter_idle().
 */
static inline int omap3_enter_idle(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	return cpuidle_wrap_enter(dev, drv, index, __omap3_enter_idle);
}
166 
167 /**
168  * next_valid_state - Find next valid C-state
169  * @dev: cpuidle device
170  * @drv: cpuidle driver
171  * @index: Index of currently selected c-state
172  *
173  * If the state corresponding to index is valid, index is returned back
174  * to the caller. Else, this function searches for a lower c-state which is
175  * still valid (as defined in omap3_power_states[]) and returns its index.
176  *
177  * A state is valid if the 'valid' field is enabled and
178  * if it satisfies the enable_off_mode condition.
179  */
180 static int next_valid_state(struct cpuidle_device *dev,
181 			    struct cpuidle_driver *drv, int index)
182 {
183 	struct omap3_idle_statedata *cx = &omap3_idle_data[index];
184 	u32 mpu_deepest_state = PWRDM_POWER_RET;
185 	u32 core_deepest_state = PWRDM_POWER_RET;
186 	int idx;
187 	int next_index = 0; /* C1 is the default value */
188 
189 	if (enable_off_mode) {
190 		mpu_deepest_state = PWRDM_POWER_OFF;
191 		/*
192 		 * Erratum i583: valable for ES rev < Es1.2 on 3630.
193 		 * CORE OFF mode is not supported in a stable form, restrict
194 		 * instead the CORE state to RET.
195 		 */
196 		if (!IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
197 			core_deepest_state = PWRDM_POWER_OFF;
198 	}
199 
200 	/* Check if current state is valid */
201 	if ((cx->mpu_state >= mpu_deepest_state) &&
202 	    (cx->core_state >= core_deepest_state))
203 		return index;
204 
205 	/*
206 	 * Drop to next valid state.
207 	 * Start search from the next (lower) state.
208 	 */
209 	for (idx = index - 1; idx >= 0; idx--) {
210 		cx = &omap3_idle_data[idx];
211 		if ((cx->mpu_state >= mpu_deepest_state) &&
212 		    (cx->core_state >= core_deepest_state)) {
213 			next_index = idx;
214 			break;
215 		}
216 	}
217 
218 	return next_index;
219 }
220 
221 /**
222  * omap3_enter_idle_bm - Checks for any bus activity
223  * @dev: cpuidle device
224  * @drv: cpuidle driver
225  * @index: array index of target state to be programmed
226  *
227  * This function checks for any pending activity and then programs
228  * the device to the specified or a safer state.
229  */
230 static int omap3_enter_idle_bm(struct cpuidle_device *dev,
231 			       struct cpuidle_driver *drv,
232 			       int index)
233 {
234 	int new_state_idx, ret;
235 	u8 per_next_state, per_saved_state;
236 	struct omap3_idle_statedata *cx;
237 
238 	/*
239 	 * Use only C1 if CAM is active.
240 	 * CAM does not have wakeup capability in OMAP3.
241 	 */
242 	if (pwrdm_read_pwrst(cam_pd) == PWRDM_POWER_ON)
243 		new_state_idx = drv->safe_state_index;
244 	else
245 		new_state_idx = next_valid_state(dev, drv, index);
246 
247 	/*
248 	 * FIXME: we currently manage device-specific idle states
249 	 *        for PER and CORE in combination with CPU-specific
250 	 *        idle states.  This is wrong, and device-specific
251 	 *        idle management needs to be separated out into
252 	 *        its own code.
253 	 */
254 
255 	/* Program PER state */
256 	cx = &omap3_idle_data[new_state_idx];
257 
258 	per_next_state = pwrdm_read_next_pwrst(per_pd);
259 	per_saved_state = per_next_state;
260 	if (per_next_state < cx->per_min_state) {
261 		per_next_state = cx->per_min_state;
262 		pwrdm_set_next_pwrst(per_pd, per_next_state);
263 	}
264 
265 	ret = omap3_enter_idle(dev, drv, new_state_idx);
266 
267 	/* Restore original PER state if it was modified */
268 	if (per_next_state != per_saved_state)
269 		pwrdm_set_next_pwrst(per_pd, per_saved_state);
270 
271 	return ret;
272 }
273 
/* Per-CPU cpuidle device; OMAP3 is single-core, so only CPU0 is used */
static DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);

/*
 * C-state table.  Must stay in the same order as omap3_idle_data[]
 * above (state_count is derived from that array).  exit_latency values
 * are written as "sleep + wakeup" latency sums, in microseconds.
 */
static struct cpuidle_driver omap3_idle_driver = {
	.name =		"omap3_idle",
	.owner =	THIS_MODULE,
	.states = {
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 2 + 2,
			.target_residency = 5,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "C1",
			.desc		  = "MPU ON + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 10 + 10,
			.target_residency = 30,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "C2",
			.desc		  = "MPU ON + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 50 + 50,
			.target_residency = 300,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "C3",
			.desc		  = "MPU RET + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 1500 + 1800,
			.target_residency = 4000,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "C4",
			.desc		  = "MPU OFF + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 2500 + 7500,
			.target_residency = 12000,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "C5",
			.desc		  = "MPU RET + CORE RET",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 3000 + 8500,
			.target_residency = 15000,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "C6",
			.desc		  = "MPU OFF + CORE RET",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 10000 + 30000,
			.target_residency = 30000,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "C7",
			.desc		  = "MPU OFF + CORE OFF",
		},
	},
	.state_count = ARRAY_SIZE(omap3_idle_data),
	.safe_state_index = 0,
};
340 
341 /* Public functions */
342 
343 /**
344  * omap3_idle_init - Init routine for OMAP3 idle
345  *
346  * Registers the OMAP3 specific cpuidle driver to the cpuidle
347  * framework with the valid set of states.
348  */
349 int __init omap3_idle_init(void)
350 {
351 	struct cpuidle_device *dev;
352 
353 	mpu_pd = pwrdm_lookup("mpu_pwrdm");
354 	core_pd = pwrdm_lookup("core_pwrdm");
355 	per_pd = pwrdm_lookup("per_pwrdm");
356 	cam_pd = pwrdm_lookup("cam_pwrdm");
357 
358 	if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
359 		return -ENODEV;
360 
361 	cpuidle_register_driver(&omap3_idle_driver);
362 
363 	dev = &per_cpu(omap3_idle_dev, smp_processor_id());
364 	dev->cpu = 0;
365 
366 	if (cpuidle_register_device(dev)) {
367 		printk(KERN_ERR "%s: CPUidle register device failed\n",
368 		       __func__);
369 		return -EIO;
370 	}
371 
372 	return 0;
373 }
374