1 /*
2  * linux/arch/arm/mach-omap2/cpuidle34xx.c
3  *
4  * OMAP3 CPU IDLE Routines
5  *
6  * Copyright (C) 2008 Texas Instruments, Inc.
7  * Rajendra Nayak <rnayak@ti.com>
8  *
9  * Copyright (C) 2007 Texas Instruments, Inc.
10  * Karthik Dasu <karthik-dp@ti.com>
11  *
12  * Copyright (C) 2006 Nokia Corporation
13  * Tony Lindgren <tony@atomide.com>
14  *
15  * Copyright (C) 2005 Texas Instruments, Inc.
16  * Richard Woodruff <r-woodruff2@ti.com>
17  *
18  * Based on pm.c for omap2
19  *
20  * This program is free software; you can redistribute it and/or modify
21  * it under the terms of the GNU General Public License version 2 as
22  * published by the Free Software Foundation.
23  */
24 
25 #include <linux/sched.h>
26 #include <linux/cpuidle.h>
27 #include <linux/export.h>
28 #include <linux/cpu_pm.h>
29 #include <asm/cpuidle.h>
30 
31 #include "powerdomain.h"
32 #include "clockdomain.h"
33 
34 #include "pm.h"
35 #include "control.h"
36 #include "common.h"
37 #include "soc.h"
38 
/* Mach specific information to be recorded in the C-state driver_data */
struct omap3_idle_statedata {
	u8 mpu_state;		/* target MPU powerdomain state (PWRDM_POWER_*) */
	u8 core_state;		/* target CORE powerdomain state (PWRDM_POWER_*) */
	u8 per_min_state;	/* shallowest PER state allowed while in this C-state */
	u8 flags;		/* OMAP_CPUIDLE_CX_* flag bits, see below */
};
46 
/* Powerdomain handles, looked up once in omap3_idle_init() */
static struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;
48 
49 /*
50  * Possible flag bits for struct omap3_idle_statedata.flags:
51  *
52  * OMAP_CPUIDLE_CX_NO_CLKDM_IDLE: don't allow the MPU clockdomain to go
53  *    inactive.  This in turn prevents the MPU DPLL from entering autoidle
54  *    mode, so wakeup latency is greatly reduced, at the cost of additional
55  *    energy consumption.  This also prevents the CORE clockdomain from
56  *    entering idle.
57  */
58 #define OMAP_CPUIDLE_CX_NO_CLKDM_IDLE		BIT(0)
59 
/*
 * Per-C-state target power states, indexed by the cpuidle state index
 * (entries correspond 1:1 to the C1..C7 states in the drivers below).
 *
 * Prevent PER OFF if CORE is not in RETention or OFF as this would
 * disable PER wakeups completely.
 */
static struct omap3_idle_statedata omap3_idle_data[] = {
	{
		/* C1: MPU ON + CORE ON, MPU clockdomain kept out of idle */
		.mpu_state = PWRDM_POWER_ON,
		.core_state = PWRDM_POWER_ON,
		/* In C1 do not allow PER state lower than CORE state */
		.per_min_state = PWRDM_POWER_ON,
		.flags = OMAP_CPUIDLE_CX_NO_CLKDM_IDLE,
	},
	{
		/* C2: MPU ON + CORE ON */
		.mpu_state = PWRDM_POWER_ON,
		.core_state = PWRDM_POWER_ON,
		.per_min_state = PWRDM_POWER_RET,
	},
	{
		/* C3: MPU RET + CORE ON */
		.mpu_state = PWRDM_POWER_RET,
		.core_state = PWRDM_POWER_ON,
		.per_min_state = PWRDM_POWER_RET,
	},
	{
		/* C4: MPU OFF + CORE ON */
		.mpu_state = PWRDM_POWER_OFF,
		.core_state = PWRDM_POWER_ON,
		.per_min_state = PWRDM_POWER_RET,
	},
	{
		/* C5: MPU RET + CORE RET */
		.mpu_state = PWRDM_POWER_RET,
		.core_state = PWRDM_POWER_RET,
		.per_min_state = PWRDM_POWER_OFF,
	},
	{
		/* C6: MPU OFF + CORE RET */
		.mpu_state = PWRDM_POWER_OFF,
		.core_state = PWRDM_POWER_RET,
		.per_min_state = PWRDM_POWER_OFF,
	},
	{
		/* C7: MPU OFF + CORE OFF */
		.mpu_state = PWRDM_POWER_OFF,
		.core_state = PWRDM_POWER_OFF,
		.per_min_state = PWRDM_POWER_OFF,
	},
};
103 
/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 *
 * Returns @index unconditionally, even when entry is aborted early
 * because an interrupt is pending or the scheduler has work to do.
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv,
			    int index)
{
	struct omap3_idle_statedata *cx = &omap3_idle_data[index];

	/* Abort early: WFI would wake up immediately anyway */
	if (omap_irq_pending() || need_resched())
		goto return_sleep_time;

	/*
	 * Deny idle for C1: keeping the MPU clockdomain active prevents
	 * the MPU DPLL from entering autoidle, trading energy for a much
	 * shorter wakeup latency (see OMAP_CPUIDLE_CX_NO_CLKDM_IDLE).
	 * For all other states, program the requested next power states.
	 */
	if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE) {
		clkdm_deny_idle(mpu_pd->pwrdm_clkdms[0]);
	} else {
		pwrdm_set_next_pwrst(mpu_pd, cx->mpu_state);
		pwrdm_set_next_pwrst(core_pd, cx->core_state);
	}

	/*
	 * Call idle CPU PM enter notifier chain so that
	 * VFP context is saved.
	 */
	if (cx->mpu_state == PWRDM_POWER_OFF)
		cpu_pm_enter();

	/* Execute ARM wfi */
	omap_sram_idle();

	/*
	 * Call idle CPU PM exit notifier chain to restore
	 * VFP context -- but only if the MPU actually reached OFF.
	 */
	if (cx->mpu_state == PWRDM_POWER_OFF &&
	    pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
		cpu_pm_exit();

	/* Re-allow idle for C1 */
	if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE)
		clkdm_allow_idle(mpu_pd->pwrdm_clkdms[0]);

return_sleep_time:

	return index;
}
153 
154 /**
155  * next_valid_state - Find next valid C-state
156  * @dev: cpuidle device
157  * @drv: cpuidle driver
158  * @index: Index of currently selected c-state
159  *
160  * If the state corresponding to index is valid, index is returned back
161  * to the caller. Else, this function searches for a lower c-state which is
162  * still valid (as defined in omap3_power_states[]) and returns its index.
163  *
164  * A state is valid if the 'valid' field is enabled and
165  * if it satisfies the enable_off_mode condition.
166  */
167 static int next_valid_state(struct cpuidle_device *dev,
168 			    struct cpuidle_driver *drv, int index)
169 {
170 	struct omap3_idle_statedata *cx = &omap3_idle_data[index];
171 	u32 mpu_deepest_state = PWRDM_POWER_RET;
172 	u32 core_deepest_state = PWRDM_POWER_RET;
173 	int idx;
174 	int next_index = 0; /* C1 is the default value */
175 
176 	if (enable_off_mode) {
177 		mpu_deepest_state = PWRDM_POWER_OFF;
178 		/*
179 		 * Erratum i583: valable for ES rev < Es1.2 on 3630.
180 		 * CORE OFF mode is not supported in a stable form, restrict
181 		 * instead the CORE state to RET.
182 		 */
183 		if (!IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
184 			core_deepest_state = PWRDM_POWER_OFF;
185 	}
186 
187 	/* Check if current state is valid */
188 	if ((cx->mpu_state >= mpu_deepest_state) &&
189 	    (cx->core_state >= core_deepest_state))
190 		return index;
191 
192 	/*
193 	 * Drop to next valid state.
194 	 * Start search from the next (lower) state.
195 	 */
196 	for (idx = index - 1; idx >= 0; idx--) {
197 		cx = &omap3_idle_data[idx];
198 		if ((cx->mpu_state >= mpu_deepest_state) &&
199 		    (cx->core_state >= core_deepest_state)) {
200 			next_index = idx;
201 			break;
202 		}
203 	}
204 
205 	return next_index;
206 }
207 
208 /**
209  * omap3_enter_idle_bm - Checks for any bus activity
210  * @dev: cpuidle device
211  * @drv: cpuidle driver
212  * @index: array index of target state to be programmed
213  *
214  * This function checks for any pending activity and then programs
215  * the device to the specified or a safer state.
216  */
217 static int omap3_enter_idle_bm(struct cpuidle_device *dev,
218 			       struct cpuidle_driver *drv,
219 			       int index)
220 {
221 	int new_state_idx, ret;
222 	u8 per_next_state, per_saved_state;
223 	struct omap3_idle_statedata *cx;
224 
225 	/*
226 	 * Use only C1 if CAM is active.
227 	 * CAM does not have wakeup capability in OMAP3.
228 	 */
229 	if (pwrdm_read_pwrst(cam_pd) == PWRDM_POWER_ON)
230 		new_state_idx = drv->safe_state_index;
231 	else
232 		new_state_idx = next_valid_state(dev, drv, index);
233 
234 	/*
235 	 * FIXME: we currently manage device-specific idle states
236 	 *        for PER and CORE in combination with CPU-specific
237 	 *        idle states.  This is wrong, and device-specific
238 	 *        idle management needs to be separated out into
239 	 *        its own code.
240 	 */
241 
242 	/* Program PER state */
243 	cx = &omap3_idle_data[new_state_idx];
244 
245 	per_next_state = pwrdm_read_next_pwrst(per_pd);
246 	per_saved_state = per_next_state;
247 	if (per_next_state < cx->per_min_state) {
248 		per_next_state = cx->per_min_state;
249 		pwrdm_set_next_pwrst(per_pd, per_next_state);
250 	}
251 
252 	ret = omap3_enter_idle(dev, drv, new_state_idx);
253 
254 	/* Restore original PER state if it was modified */
255 	if (per_next_state != per_saved_state)
256 		pwrdm_set_next_pwrst(per_pd, per_saved_state);
257 
258 	return ret;
259 }
260 
/*
 * Default C-state table, used when the SoC is not an OMAP3430
 * (see omap3_idle_init()).  exit_latency and target_residency are in
 * microseconds; each exit_latency is written as the sum of two
 * components -- presumably sleep latency + wakeup latency (TODO:
 * confirm against the original measurements).
 */
static struct cpuidle_driver omap3_idle_driver = {
	.name             = "omap3_idle",
	.owner            = THIS_MODULE,
	.states = {
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 2 + 2,
			.target_residency = 5,
			.name		  = "C1",
			.desc		  = "MPU ON + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 10 + 10,
			.target_residency = 30,
			.name		  = "C2",
			.desc		  = "MPU ON + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 50 + 50,
			.target_residency = 300,
			.name		  = "C3",
			.desc		  = "MPU RET + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 1500 + 1800,
			.target_residency = 4000,
			.name		  = "C4",
			.desc		  = "MPU OFF + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 2500 + 7500,
			.target_residency = 12000,
			.name		  = "C5",
			.desc		  = "MPU RET + CORE RET",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 3000 + 8500,
			.target_residency = 15000,
			.name		  = "C6",
			.desc		  = "MPU OFF + CORE RET",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 10000 + 30000,
			.target_residency = 30000,
			.name		  = "C7",
			.desc		  = "MPU OFF + CORE OFF",
		},
	},
	/* Must match the number of states above and omap3_idle_data[] */
	.state_count = ARRAY_SIZE(omap3_idle_data),
	.safe_state_index = 0,
};
318 
/*
 * OMAP3430-specific C-state table (see omap3_idle_init()).
 *
 * Numbers based on measurements made in October 2009 for PM optimized kernel
 * with CPU freq enabled on device Nokia N900. Assumes OPP2 (main idle OPP,
 * and worst case latencies).  exit_latency and target_residency are in
 * microseconds; each exit_latency is written as the sum of two
 * components -- presumably sleep latency + wakeup latency (TODO:
 * confirm against the original measurements).
 */
static struct cpuidle_driver omap3430_idle_driver = {
	.name             = "omap3430_idle",
	.owner            = THIS_MODULE,
	.states = {
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 110 + 162,
			.target_residency = 5,
			.name		  = "C1",
			.desc		  = "MPU ON + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 106 + 180,
			.target_residency = 309,
			.name		  = "C2",
			.desc		  = "MPU ON + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 107 + 410,
			.target_residency = 46057,
			.name		  = "C3",
			.desc		  = "MPU RET + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 121 + 3374,
			.target_residency = 46057,
			.name		  = "C4",
			.desc		  = "MPU OFF + CORE ON",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 855 + 1146,
			.target_residency = 46057,
			.name		  = "C5",
			.desc		  = "MPU RET + CORE RET",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 7580 + 4134,
			.target_residency = 484329,
			.name		  = "C6",
			.desc		  = "MPU OFF + CORE RET",
		},
		{
			.enter		  = omap3_enter_idle_bm,
			.exit_latency	  = 7505 + 15274,
			.target_residency = 484329,
			.name		  = "C7",
			.desc		  = "MPU OFF + CORE OFF",
		},
	},
	/* Must match the number of states above and omap3_idle_data[] */
	.state_count = ARRAY_SIZE(omap3_idle_data),
	.safe_state_index = 0,
};
381 
382 /* Public functions */
383 
384 /**
385  * omap3_idle_init - Init routine for OMAP3 idle
386  *
387  * Registers the OMAP3 specific cpuidle driver to the cpuidle
388  * framework with the valid set of states.
389  */
390 int __init omap3_idle_init(void)
391 {
392 	mpu_pd = pwrdm_lookup("mpu_pwrdm");
393 	core_pd = pwrdm_lookup("core_pwrdm");
394 	per_pd = pwrdm_lookup("per_pwrdm");
395 	cam_pd = pwrdm_lookup("cam_pwrdm");
396 
397 	if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
398 		return -ENODEV;
399 
400 	if (cpu_is_omap3430())
401 		return cpuidle_register(&omap3430_idle_driver, NULL);
402 	else
403 		return cpuidle_register(&omap3_idle_driver, NULL);
404 }
405