xref: /openbmc/linux/arch/arm/mach-omap2/pm34xx.c (revision 1540f214065982e6cbc6b8da1fe65a15e358f7c5)
1 /*
2  * OMAP3 Power Management Routines
3  *
4  * Copyright (C) 2006-2008 Nokia Corporation
5  * Tony Lindgren <tony@atomide.com>
6  * Jouni Hogander
7  *
8  * Copyright (C) 2007 Texas Instruments, Inc.
9  * Rajendra Nayak <rnayak@ti.com>
10  *
11  * Copyright (C) 2005 Texas Instruments, Inc.
12  * Richard Woodruff <r-woodruff2@ti.com>
13  *
14  * Based on pm.c for omap1
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License version 2 as
18  * published by the Free Software Foundation.
19  */
20 
21 #include <linux/pm.h>
22 #include <linux/suspend.h>
23 #include <linux/interrupt.h>
24 #include <linux/module.h>
25 #include <linux/list.h>
26 #include <linux/err.h>
27 #include <linux/gpio.h>
28 #include <linux/clk.h>
29 #include <linux/delay.h>
30 #include <linux/slab.h>
31 #include <linux/console.h>
32 
33 #include <plat/sram.h>
34 #include "clockdomain.h"
35 #include <plat/powerdomain.h>
36 #include <plat/serial.h>
37 #include <plat/sdrc.h>
38 #include <plat/prcm.h>
39 #include <plat/gpmc.h>
40 #include <plat/dma.h>
41 
42 #include <asm/tlbflush.h>
43 
44 #include "cm2xxx_3xxx.h"
45 #include "cm-regbits-34xx.h"
46 #include "prm-regbits-34xx.h"
47 
48 #include "prm2xxx_3xxx.h"
49 #include "pm.h"
50 #include "sdrc.h"
51 #include "control.h"
52 
53 #ifdef CONFIG_SUSPEND
54 static suspend_state_t suspend_state = PM_SUSPEND_ON;
55 static inline bool is_suspending(void)
56 {
57 	return (suspend_state != PM_SUSPEND_ON);
58 }
59 #else
60 static inline bool is_suspending(void)
61 {
62 	return false;
63 }
64 #endif
65 
66 /* Scratchpad offsets */
67 #define OMAP343X_TABLE_ADDRESS_OFFSET	   0xc4
68 #define OMAP343X_TABLE_VALUE_OFFSET	   0xc0
69 #define OMAP343X_CONTROL_REG_VALUE_OFFSET  0xc8
70 
71 /* pm34xx errata defined in pm.h */
72 u16 pm34xx_errata;
73 
74 struct power_state {
75 	struct powerdomain *pwrdm;
76 	u32 next_state;
77 #ifdef CONFIG_SUSPEND
78 	u32 saved_state;
79 #endif
80 	struct list_head node;
81 };
82 
83 static LIST_HEAD(pwrst_list);
84 
85 static void (*_omap_sram_idle)(u32 *addr, int save_state);
86 
87 static int (*_omap_save_secure_sram)(u32 *addr);
88 
89 static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
90 static struct powerdomain *core_pwrdm, *per_pwrdm;
91 static struct powerdomain *cam_pwrdm;
92 
93 static inline void omap3_per_save_context(void)
94 {
95 	omap_gpio_save_context();
96 }
97 
98 static inline void omap3_per_restore_context(void)
99 {
100 	omap_gpio_restore_context();
101 }
102 
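/*
 * Enable the I/O wake-up daisy chain (EN_IO_CHAIN) on ES3.1+ silicon
 * and poll until the chain reports active (ST_IO_CHAIN); give up with
 * an error message after roughly 1000 polls.
 */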
103 static void omap3_enable_io_chain(void)
104 {
105 	int timeout = 0;
106 
107 	if (omap_rev() >= OMAP3430_REV_ES3_1) {
108 		omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
109 				     PM_WKEN);
110 		/* Do a readback to ensure the write has completed */
111 		omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN);
112 
113 		while (!(omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN) &
114 			 OMAP3430_ST_IO_CHAIN_MASK)) {
115 			timeout++;
116 			if (timeout > 1000) {
117 				printk(KERN_ERR "Wake up daisy chain "
118 				       "activation failed.\n");
119 				return;
120 			}
121 			omap2_prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK,
122 					     WKUP_MOD, PM_WKEN);
123 		}
124 	}
125 }
126 
127 static void omap3_disable_io_chain(void)
128 {
129 	if (omap_rev() >= OMAP3430_REV_ES3_1)
130 		omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
131 				       PM_WKEN);
132 }
133 
134 static void omap3_core_save_context(void)
135 {
136 	u32 control_padconf_off;
137 
138 	/* Save the padconf registers */
139 	control_padconf_off = omap_ctrl_readl(OMAP343X_CONTROL_PADCONF_OFF);
140 	control_padconf_off |= START_PADCONF_SAVE;
141 	omap_ctrl_writel(control_padconf_off, OMAP343X_CONTROL_PADCONF_OFF);
142 	/* wait for the save to complete */
143 	while (!(omap_ctrl_readl(OMAP343X_CONTROL_GENERAL_PURPOSE_STATUS)
144 			& PADCONF_SAVE_DONE))
145 		udelay(1);
146 
147 	/*
148 	 * Force write last pad into memory, as this can fail in some
149 	 * cases according to errata 1.157, 1.185
150 	 */
151 	omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
152 		OMAP343X_CONTROL_MEM_WKUP + 0x2a0);
153 
154 	/* Save the Interrupt controller context */
155 	omap_intc_save_context();
156 	/* Save the GPMC context */
157 	omap3_gpmc_save_context();
158 	/* Save the system control module context, padconf already saved above */
159 	omap3_control_save_context();
160 	omap_dma_global_context_save();
161 }
162 
163 static void omap3_core_restore_context(void)
164 {
165 	/* Restore the control module context, padconf restored by h/w */
166 	omap3_control_restore_context();
167 	/* Restore the GPMC context */
168 	omap3_gpmc_restore_context();
169 	/* Restore the interrupt controller context */
170 	omap_intc_restore_context();
171 	omap_dma_global_context_restore();
172 }
173 
174 /*
175  * FIXME: This function should be called before entering off-mode after
176  * OMAP3 secure services have been accessed. Currently it is only called
177  * once during the boot sequence, but this works because we are not using
178  * secure services.
179  */
180 static void omap3_save_secure_ram_context(u32 target_mpu_state)
181 {
182 	u32 ret;
183 
184 	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
185 		/*
186 		 * MPU next state must be set to POWER_ON temporarily,
187 		 * otherwise the WFI executed inside the ROM code
188 		 * will hang the system.
189 		 */
190 		pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
191 		ret = _omap_save_secure_sram((u32 *)
192 				__pa(omap3_secure_ram_storage));
193 		pwrdm_set_next_pwrst(mpu_pwrdm, target_mpu_state);
194 		/* The following is for error tracking; it should not happen */
195 		if (ret) {
196 			printk(KERN_ERR "save_secure_sram() returns %08x\n",
197 				ret);
198 			while (1)
199 				;
200 		}
201 	}
202 }
203 
204 /*
205  * PRCM Interrupt Handler Helper Function
206  *
207  * The purpose of this function is to clear any wake-up events latched
208  * in the PRCM PM_WKST_x registers. It is possible that a wake-up event
209  * may occur whilst attempting to clear a PM_WKST_x register and thus
210  * set another bit in this register. A while loop is used to ensure
211  * that any peripheral wake-up events occurring while attempting to
212  * clear the PM_WKST_x are detected and cleared.
213  */
214 static int prcm_clear_mod_irqs(s16 module, u8 regs)
215 {
216 	u32 wkst, fclk, iclk, clken;
217 	u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1;
218 	u16 fclk_off = (regs == 3) ? OMAP3430ES2_CM_FCLKEN3 : CM_FCLKEN1;
219 	u16 iclk_off = (regs == 3) ? CM_ICLKEN3 : CM_ICLKEN1;
220 	u16 grpsel_off = (regs == 3) ?
221 		OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL;
222 	int c = 0;
223 
224 	wkst = omap2_prm_read_mod_reg(module, wkst_off);
225 	wkst &= omap2_prm_read_mod_reg(module, grpsel_off);
226 	if (wkst) {
227 		iclk = omap2_cm_read_mod_reg(module, iclk_off);
228 		fclk = omap2_cm_read_mod_reg(module, fclk_off);
229 		while (wkst) {
230 			clken = wkst;
231 			omap2_cm_set_mod_reg_bits(clken, module, iclk_off);
232 			/*
233 			 * For USBHOST, we don't know whether HOST1 or
234 			 * HOST2 woke us up, so enable both f-clocks
235 			 */
236 			if (module == OMAP3430ES2_USBHOST_MOD)
237 				clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT;
238 			omap2_cm_set_mod_reg_bits(clken, module, fclk_off);
239 			omap2_prm_write_mod_reg(wkst, module, wkst_off);
240 			wkst = omap2_prm_read_mod_reg(module, wkst_off);
241 			c++;
242 		}
243 		omap2_cm_write_mod_reg(iclk, module, iclk_off);
244 		omap2_cm_write_mod_reg(fclk, module, fclk_off);
245 	}
246 
247 	return c;
248 }
249 
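/*
 * Clear latched wake-up events in the WKUP, CORE and PER modules; on
 * ES2.0+ silicon also the third CORE register set and USBHOST.
 * Returns the total number of wake-up events cleared.
 */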
250 static int _prcm_int_handle_wakeup(void)
251 {
252 	int c;
253 
254 	c = prcm_clear_mod_irqs(WKUP_MOD, 1);
255 	c += prcm_clear_mod_irqs(CORE_MOD, 1);
256 	c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1);
257 	if (omap_rev() > OMAP3430_REV_ES1_0) {
258 		c += prcm_clear_mod_irqs(CORE_MOD, 3);
259 		c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1);
260 	}
261 
262 	return c;
263 }
264 
265 /*
266  * PRCM Interrupt Handler
267  *
268  * The PRM_IRQSTATUS_MPU register indicates if there are any pending
269  * interrupts from the PRCM for the MPU. These bits must be cleared in
270  * order to clear the PRCM interrupt. The PRCM interrupt handler is
271  * implemented to simply clear the PRM_IRQSTATUS_MPU in order to clear
272  * the PRCM interrupt. Please note that bit 0 of the PRM_IRQSTATUS_MPU
273  * register indicates that a wake-up event is pending for the MPU and
274  * this bit can only be cleared if all the wake-up events latched
275  * in the various PM_WKST_x registers have been cleared. The interrupt
276  * handler is implemented using a do-while loop so that if a wake-up
277  * event occurred during the processing of the prcm interrupt handler
278  * (setting a bit in the corresponding PM_WKST_x register and thus
279  * preventing us from clearing bit 0 of the PRM_IRQSTATUS_MPU register)
280  * this would be handled.
281  */
282 static irqreturn_t prcm_interrupt_handler(int irq, void *dev_id)
283 {
284 	u32 irqenable_mpu, irqstatus_mpu;
285 	int c = 0;
286 
287 	irqenable_mpu = omap2_prm_read_mod_reg(OCP_MOD,
288 					 OMAP3_PRM_IRQENABLE_MPU_OFFSET);
289 	irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD,
290 					 OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
291 	irqstatus_mpu &= irqenable_mpu;
292 
293 	do {
294 		if (irqstatus_mpu & (OMAP3430_WKUP_ST_MASK |
295 				     OMAP3430_IO_ST_MASK)) {
296 			c = _prcm_int_handle_wakeup();
297 
298 			/*
299 			 * Is the MPU PRCM interrupt handler racing with the
300 			 * IVA2 PRCM interrupt handler ?
301 			 */
302 			WARN(c == 0, "prcm: WARNING: PRCM indicated MPU wakeup "
303 			     "but no wakeup sources are marked\n");
304 		} else {
305 			/* XXX we need to expand our PRCM interrupt handler */
306 			WARN(1, "prcm: WARNING: PRCM interrupt received, but "
307 			     "no code to handle it (%08x)\n", irqstatus_mpu);
308 		}
309 
310 		omap2_prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
311 					OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
312 
313 		irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD,
314 					OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
315 		irqstatus_mpu &= irqenable_mpu;
316 
317 	} while (irqstatus_mpu);
318 
319 	return IRQ_HANDLED;
320 }
321 
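/* Write val back into the CP15 System Control Register (c1, c0, 0) */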
322 static void restore_control_register(u32 val)
323 {
324 	__asm__ __volatile__ ("mcr p15, 0, %0, c1, c0, 0" : : "r" (val));
325 }
326 
327 /* Function to restore the table entry that was modified for enabling MMU */
328 static void restore_table_entry(void)
329 {
330 	void __iomem *scratchpad_address;
331 	u32 previous_value, control_reg_value;
332 	u32 *address;
333 
334 	scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD);
335 
336 	/* Get address of entry that was modified */
337 	address = (u32 *)__raw_readl(scratchpad_address +
338 				     OMAP343X_TABLE_ADDRESS_OFFSET);
339 	/* Get the previous value which needs to be restored */
340 	previous_value = __raw_readl(scratchpad_address +
341 				     OMAP343X_TABLE_VALUE_OFFSET);
342 	address = __va(address);
343 	*address = previous_value;
344 	flush_tlb_all();
345 	control_reg_value = __raw_readl(scratchpad_address
346 					+ OMAP343X_CONTROL_REG_VALUE_OFFSET);
347 	/* This will enable caches and prediction */
348 	restore_control_register(control_reg_value);
349 }
350 
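/*
 * omap_sram_idle - program the next power states, run the SRAM suspend
 * code and restore any context lost while the domains were off. Called
 * from both the idle loop and the suspend path.
 */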
351 void omap_sram_idle(void)
352 {
353 	/* Variable to tell what needs to be saved and restored
354 	 * in omap_sram_idle */
355 	/* save_state = 0 => Nothing to save and restore */
356 	/* save_state = 1 => Only L1 and logic lost */
357 	/* save_state = 2 => Only L2 lost */
358 	/* save_state = 3 => L1, L2 and logic lost */
359 	int save_state = 0;
360 	int mpu_next_state = PWRDM_POWER_ON;
361 	int per_next_state = PWRDM_POWER_ON;
362 	int core_next_state = PWRDM_POWER_ON;
363 	int core_prev_state, per_prev_state;
364 	u32 sdrc_pwr = 0;
365 
366 	if (!_omap_sram_idle)
367 		return;
368 
369 	pwrdm_clear_all_prev_pwrst(mpu_pwrdm);
370 	pwrdm_clear_all_prev_pwrst(neon_pwrdm);
371 	pwrdm_clear_all_prev_pwrst(core_pwrdm);
372 	pwrdm_clear_all_prev_pwrst(per_pwrdm);
373 
374 	mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
375 	switch (mpu_next_state) {
376 	case PWRDM_POWER_ON:
377 	case PWRDM_POWER_RET:
378 		/* No need to save context */
379 		save_state = 0;
380 		break;
381 	case PWRDM_POWER_OFF:
382 		save_state = 3;
383 		break;
384 	default:
385 		/* Invalid state */
386 		printk(KERN_ERR "Invalid mpu state in sram_idle\n");
387 		return;
388 	}
389 	pwrdm_pre_transition();
390 
391 	/* NEON control */
392 	if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
393 		pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state);
394 
395 	/* Enable IO-PAD and IO-CHAIN wakeups */
396 	per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
397 	core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
398 	if (omap3_has_io_wakeup() &&
399 	    (per_next_state < PWRDM_POWER_ON ||
400 	     core_next_state < PWRDM_POWER_ON)) {
401 		omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
402 		omap3_enable_io_chain();
403 	}
404 
405 	/* Block console output in case it is on one of the OMAP UARTs */
406 	if (!is_suspending())
407 		if (per_next_state < PWRDM_POWER_ON ||
408 		    core_next_state < PWRDM_POWER_ON)
409 			if (try_acquire_console_sem())
410 				goto console_still_active;
411 
412 	/* PER */
413 	if (per_next_state < PWRDM_POWER_ON) {
414 		omap_uart_prepare_idle(2);
415 		omap_uart_prepare_idle(3);
416 		omap2_gpio_prepare_for_idle(per_next_state);
417 		if (per_next_state == PWRDM_POWER_OFF)
418 				omap3_per_save_context();
419 	}
420 
421 	/* CORE */
422 	if (core_next_state < PWRDM_POWER_ON) {
423 		omap_uart_prepare_idle(0);
424 		omap_uart_prepare_idle(1);
425 		if (core_next_state == PWRDM_POWER_OFF) {
426 			omap3_core_save_context();
427 			omap3_cm_save_context();
428 		}
429 	}
430 
431 	omap3_intc_prepare_idle();
432 
433 	/*
434 	 * On EMU/HS devices the ROM code restores an SDRC value
435 	 * from the scratchpad which has automatic self-refresh on timeout
436 	 * of AUTO_CNT = 1 enabled. This takes care of erratum ID i443.
437 	 * Hence store/restore the SDRC_POWER register here.
438 	 */
439 	if (omap_rev() >= OMAP3430_REV_ES3_0 &&
440 	    omap_type() != OMAP2_DEVICE_TYPE_GP &&
441 	    core_next_state == PWRDM_POWER_OFF)
442 		sdrc_pwr = sdrc_read_reg(SDRC_POWER);
443 
444 	/*
445 	 * omap3_arm_context is the location where ARM registers
446 	 * get saved. The restore path then reads from this
447 	 * location and restores them back.
448 	 */
449 	_omap_sram_idle(omap3_arm_context, save_state);
450 	cpu_init();
451 
452 	/* Restore normal SDRC POWER settings */
453 	if (omap_rev() >= OMAP3430_REV_ES3_0 &&
454 	    omap_type() != OMAP2_DEVICE_TYPE_GP &&
455 	    core_next_state == PWRDM_POWER_OFF)
456 		sdrc_write_reg(sdrc_pwr, SDRC_POWER);
457 
458 	/* Restore table entry modified during MMU restoration */
459 	if (pwrdm_read_prev_pwrst(mpu_pwrdm) == PWRDM_POWER_OFF)
460 		restore_table_entry();
461 
462 	/* CORE */
463 	if (core_next_state < PWRDM_POWER_ON) {
464 		core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
465 		if (core_prev_state == PWRDM_POWER_OFF) {
466 			omap3_core_restore_context();
467 			omap3_cm_restore_context();
468 			omap3_sram_restore_context();
469 			omap2_sms_restore_context();
470 		}
471 		omap_uart_resume_idle(0);
472 		omap_uart_resume_idle(1);
473 		if (core_next_state == PWRDM_POWER_OFF)
474 			omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
475 					       OMAP3430_GR_MOD,
476 					       OMAP3_PRM_VOLTCTRL_OFFSET);
477 	}
478 	omap3_intc_resume_idle();
479 
480 	/* PER */
481 	if (per_next_state < PWRDM_POWER_ON) {
482 		per_prev_state = pwrdm_read_prev_pwrst(per_pwrdm);
483 		omap2_gpio_resume_after_idle();
484 		if (per_prev_state == PWRDM_POWER_OFF)
485 			omap3_per_restore_context();
486 		omap_uart_resume_idle(2);
487 		omap_uart_resume_idle(3);
488 	}
489 
490 	if (!is_suspending())
491 		release_console_sem();
492 
493 console_still_active:
494 	/* Disable IO-PAD and IO-CHAIN wakeup */
495 	if (omap3_has_io_wakeup() &&
496 	    (per_next_state < PWRDM_POWER_ON ||
497 	     core_next_state < PWRDM_POWER_ON)) {
498 		omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
499 					     PM_WKEN);
500 		omap3_disable_io_chain();
501 	}
502 
503 	pwrdm_post_transition();
504 
505 	omap2_clkdm_allow_idle(mpu_pwrdm->pwrdm_clkdms[0]);
506 }
507 
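/*
 * Sleeping from the idle loop is allowed only when the sleep_while_idle
 * knob is set and the UARTs are ready to sleep.
 */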
508 int omap3_can_sleep(void)
509 {
510 	if (!sleep_while_idle)
511 		return 0;
512 	if (!omap_uart_can_sleep())
513 		return 0;
514 	return 1;
515 }
516 
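/* Default pm_idle hook: enter omap_sram_idle() when nothing is pending */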
517 static void omap3_pm_idle(void)
518 {
519 	local_irq_disable();
520 	local_fiq_disable();
521 
522 	if (!omap3_can_sleep())
523 		goto out;
524 
525 	if (omap_irq_pending() || need_resched())
526 		goto out;
527 
528 	omap_sram_idle();
529 
530 out:
531 	local_fiq_enable();
532 	local_irq_enable();
533 }
534 
535 #ifdef CONFIG_SUSPEND
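/*
 * Program every powerdomain to its suspend target state, run
 * omap_sram_idle() and then verify which domains actually reached
 * their target before restoring the saved next-power-states.
 */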
536 static int omap3_pm_suspend(void)
537 {
538 	struct power_state *pwrst;
539 	int state, ret = 0;
540 
541 	if (wakeup_timer_seconds || wakeup_timer_milliseconds)
542 		omap2_pm_wakeup_on_timer(wakeup_timer_seconds,
543 					 wakeup_timer_milliseconds);
544 
545 	/* Read current next_pwrsts */
546 	list_for_each_entry(pwrst, &pwrst_list, node)
547 		pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
548 	/* Set ones wanted by suspend */
549 	list_for_each_entry(pwrst, &pwrst_list, node) {
550 		if (omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state))
551 			goto restore;
552 		if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm))
553 			goto restore;
554 	}
555 
556 	omap_uart_prepare_suspend();
557 	omap3_intc_suspend();
558 
559 	omap_sram_idle();
560 
561 restore:
562 	/* Restore next_pwrsts */
563 	list_for_each_entry(pwrst, &pwrst_list, node) {
564 		state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
565 		if (state > pwrst->next_state) {
566 			printk(KERN_INFO "Powerdomain (%s) didn't enter "
567 			       "target state %d\n",
568 			       pwrst->pwrdm->name, pwrst->next_state);
569 			ret = -1;
570 		}
571 		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
572 	}
573 	if (ret)
574 		printk(KERN_ERR "Could not enter target state in pm_suspend\n");
575 	else
576 		printk(KERN_INFO "Successfully put all powerdomains "
577 		       "to target state\n");
578 
579 	return ret;
580 }
581 
582 static int omap3_pm_enter(suspend_state_t unused)
583 {
584 	int ret = 0;
585 
586 	switch (suspend_state) {
587 	case PM_SUSPEND_STANDBY:
588 	case PM_SUSPEND_MEM:
589 		ret = omap3_pm_suspend();
590 		break;
591 	default:
592 		ret = -EINVAL;
593 	}
594 
595 	return ret;
596 }
597 
598 /* Hooks to enable / disable UART interrupts during suspend */
599 static int omap3_pm_begin(suspend_state_t state)
600 {
601 	disable_hlt();
602 	suspend_state = state;
603 	omap_uart_enable_irqs(0);
604 	return 0;
605 }
606 
607 static void omap3_pm_end(void)
608 {
609 	suspend_state = PM_SUSPEND_ON;
610 	omap_uart_enable_irqs(1);
611 	enable_hlt();
612 	return;
613 }
614 
615 static struct platform_suspend_ops omap_pm_ops = {
616 	.begin		= omap3_pm_begin,
617 	.end		= omap3_pm_end,
618 	.enter		= omap3_pm_enter,
619 	.valid		= suspend_valid_only_mem,
620 };
621 #endif /* CONFIG_SUSPEND */
622 
623 
624 /**
625  * omap3_iva_idle(): ensure IVA is in idle so it can be put into
626  *                   retention
627  *
628  * In cases where IVA2 is activated by bootcode, it may prevent
629  * full-chip retention or off-mode because it is not idle.  This
630  * function forces the IVA2 into idle state so it can go
631  * into retention/off and thus allow full-chip retention/off.
632  *
633  **/
634 static void __init omap3_iva_idle(void)
635 {
636 	/* ensure IVA2 clock is disabled */
637 	omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);
638 
639 	/* if no clock activity, nothing else to do */
640 	if (!(omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSTST) &
641 	      OMAP3430_CLKACTIVITY_IVA2_MASK))
642 		return;
643 
644 	/* Reset IVA2 */
645 	omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
646 			  OMAP3430_RST2_IVA2_MASK |
647 			  OMAP3430_RST3_IVA2_MASK,
648 			  OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
649 
650 	/* Enable IVA2 clock */
651 	omap2_cm_write_mod_reg(OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK,
652 			 OMAP3430_IVA2_MOD, CM_FCLKEN);
653 
654 	/* Set IVA2 boot mode to 'idle' */
655 	omap_ctrl_writel(OMAP3_IVA2_BOOTMOD_IDLE,
656 			 OMAP343X_CONTROL_IVA2_BOOTMOD);
657 
658 	/* Un-reset IVA2 */
659 	omap2_prm_write_mod_reg(0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
660 
661 	/* Disable IVA2 clock */
662 	omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);
663 
664 	/* Reset IVA2 */
665 	omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
666 			  OMAP3430_RST2_IVA2_MASK |
667 			  OMAP3430_RST3_IVA2_MASK,
668 			  OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
669 }
670 
671 static void __init omap3_d2d_idle(void)
672 {
673 	u16 mask, padconf;
674 
675 	/* In a stand-alone OMAP3430 where there is no stacked
676 	 * modem, the D2D Idle Ack and D2D MStandby signals must be pulled
677 	 * high. Set CONTROL_PADCONF_SAD2D_IDLEACK and
678 	 * CONTROL_PADCONF_SAD2D_MSTDBY to have a pull-up. */
679 	mask = (1 << 4) | (1 << 3); /* pull-up, enabled */
680 	padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_MSTANDBY);
681 	padconf |= mask;
682 	omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_MSTANDBY);
683 
684 	padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_IDLEACK);
685 	padconf |= mask;
686 	omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_IDLEACK);
687 
688 	/* reset modem */
689 	omap2_prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK |
690 			  OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST_MASK,
691 			  CORE_MOD, OMAP2_RM_RSTCTRL);
692 	omap2_prm_write_mod_reg(0, CORE_MOD, OMAP2_RM_RSTCTRL);
693 }
694 
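/*
 * One-time PRM/CM setup: clear wake-up dependencies, enable interface
 * clock and DPLL autoidle, and program the default wake-up sources and
 * MPU group selections.
 */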
695 static void __init prcm_setup_regs(void)
696 {
697 	u32 omap3630_auto_uart4_mask = cpu_is_omap3630() ?
698 					OMAP3630_AUTO_UART4_MASK : 0;
699 	u32 omap3630_en_uart4_mask = cpu_is_omap3630() ?
700 					OMAP3630_EN_UART4_MASK : 0;
701 	u32 omap3630_grpsel_uart4_mask = cpu_is_omap3630() ?
702 					OMAP3630_GRPSEL_UART4_MASK : 0;
703 
704 
705 	/* XXX Reset all wkdeps. This should be done when initializing
706 	 * powerdomains */
707 	omap2_prm_write_mod_reg(0, OMAP3430_IVA2_MOD, PM_WKDEP);
708 	omap2_prm_write_mod_reg(0, MPU_MOD, PM_WKDEP);
709 	omap2_prm_write_mod_reg(0, OMAP3430_DSS_MOD, PM_WKDEP);
710 	omap2_prm_write_mod_reg(0, OMAP3430_NEON_MOD, PM_WKDEP);
711 	omap2_prm_write_mod_reg(0, OMAP3430_CAM_MOD, PM_WKDEP);
712 	omap2_prm_write_mod_reg(0, OMAP3430_PER_MOD, PM_WKDEP);
713 	if (omap_rev() > OMAP3430_REV_ES1_0) {
714 		omap2_prm_write_mod_reg(0, OMAP3430ES2_SGX_MOD, PM_WKDEP);
715 		omap2_prm_write_mod_reg(0, OMAP3430ES2_USBHOST_MOD, PM_WKDEP);
716 	} else
717 		omap2_prm_write_mod_reg(0, GFX_MOD, PM_WKDEP);
718 
719 	/*
720 	 * Enable interface clock autoidle for all modules.
721 	 * Note that in the long run this should be done by clockfw
722 	 */
723 	omap2_cm_write_mod_reg(
724 		OMAP3430_AUTO_MODEM_MASK |
725 		OMAP3430ES2_AUTO_MMC3_MASK |
726 		OMAP3430ES2_AUTO_ICR_MASK |
727 		OMAP3430_AUTO_AES2_MASK |
728 		OMAP3430_AUTO_SHA12_MASK |
729 		OMAP3430_AUTO_DES2_MASK |
730 		OMAP3430_AUTO_MMC2_MASK |
731 		OMAP3430_AUTO_MMC1_MASK |
732 		OMAP3430_AUTO_MSPRO_MASK |
733 		OMAP3430_AUTO_HDQ_MASK |
734 		OMAP3430_AUTO_MCSPI4_MASK |
735 		OMAP3430_AUTO_MCSPI3_MASK |
736 		OMAP3430_AUTO_MCSPI2_MASK |
737 		OMAP3430_AUTO_MCSPI1_MASK |
738 		OMAP3430_AUTO_I2C3_MASK |
739 		OMAP3430_AUTO_I2C2_MASK |
740 		OMAP3430_AUTO_I2C1_MASK |
741 		OMAP3430_AUTO_UART2_MASK |
742 		OMAP3430_AUTO_UART1_MASK |
743 		OMAP3430_AUTO_GPT11_MASK |
744 		OMAP3430_AUTO_GPT10_MASK |
745 		OMAP3430_AUTO_MCBSP5_MASK |
746 		OMAP3430_AUTO_MCBSP1_MASK |
747 		OMAP3430ES1_AUTO_FAC_MASK | /* This is es1 only */
748 		OMAP3430_AUTO_MAILBOXES_MASK |
749 		OMAP3430_AUTO_OMAPCTRL_MASK |
750 		OMAP3430ES1_AUTO_FSHOSTUSB_MASK |
751 		OMAP3430_AUTO_HSOTGUSB_MASK |
752 		OMAP3430_AUTO_SAD2D_MASK |
753 		OMAP3430_AUTO_SSI_MASK,
754 		CORE_MOD, CM_AUTOIDLE1);
755 
756 	omap2_cm_write_mod_reg(
757 		OMAP3430_AUTO_PKA_MASK |
758 		OMAP3430_AUTO_AES1_MASK |
759 		OMAP3430_AUTO_RNG_MASK |
760 		OMAP3430_AUTO_SHA11_MASK |
761 		OMAP3430_AUTO_DES1_MASK,
762 		CORE_MOD, CM_AUTOIDLE2);
763 
764 	if (omap_rev() > OMAP3430_REV_ES1_0) {
765 		omap2_cm_write_mod_reg(
766 			OMAP3430_AUTO_MAD2D_MASK |
767 			OMAP3430ES2_AUTO_USBTLL_MASK,
768 			CORE_MOD, CM_AUTOIDLE3);
769 	}
770 
771 	omap2_cm_write_mod_reg(
772 		OMAP3430_AUTO_WDT2_MASK |
773 		OMAP3430_AUTO_WDT1_MASK |
774 		OMAP3430_AUTO_GPIO1_MASK |
775 		OMAP3430_AUTO_32KSYNC_MASK |
776 		OMAP3430_AUTO_GPT12_MASK |
777 		OMAP3430_AUTO_GPT1_MASK,
778 		WKUP_MOD, CM_AUTOIDLE);
779 
780 	omap2_cm_write_mod_reg(
781 		OMAP3430_AUTO_DSS_MASK,
782 		OMAP3430_DSS_MOD,
783 		CM_AUTOIDLE);
784 
785 	omap2_cm_write_mod_reg(
786 		OMAP3430_AUTO_CAM_MASK,
787 		OMAP3430_CAM_MOD,
788 		CM_AUTOIDLE);
789 
790 	omap2_cm_write_mod_reg(
791 		omap3630_auto_uart4_mask |
792 		OMAP3430_AUTO_GPIO6_MASK |
793 		OMAP3430_AUTO_GPIO5_MASK |
794 		OMAP3430_AUTO_GPIO4_MASK |
795 		OMAP3430_AUTO_GPIO3_MASK |
796 		OMAP3430_AUTO_GPIO2_MASK |
797 		OMAP3430_AUTO_WDT3_MASK |
798 		OMAP3430_AUTO_UART3_MASK |
799 		OMAP3430_AUTO_GPT9_MASK |
800 		OMAP3430_AUTO_GPT8_MASK |
801 		OMAP3430_AUTO_GPT7_MASK |
802 		OMAP3430_AUTO_GPT6_MASK |
803 		OMAP3430_AUTO_GPT5_MASK |
804 		OMAP3430_AUTO_GPT4_MASK |
805 		OMAP3430_AUTO_GPT3_MASK |
806 		OMAP3430_AUTO_GPT2_MASK |
807 		OMAP3430_AUTO_MCBSP4_MASK |
808 		OMAP3430_AUTO_MCBSP3_MASK |
809 		OMAP3430_AUTO_MCBSP2_MASK,
810 		OMAP3430_PER_MOD,
811 		CM_AUTOIDLE);
812 
813 	if (omap_rev() > OMAP3430_REV_ES1_0) {
814 		omap2_cm_write_mod_reg(
815 			OMAP3430ES2_AUTO_USBHOST_MASK,
816 			OMAP3430ES2_USBHOST_MOD,
817 			CM_AUTOIDLE);
818 	}
819 
820 	omap_ctrl_writel(OMAP3430_AUTOIDLE_MASK, OMAP2_CONTROL_SYSCONFIG);
821 
822 	/*
823 	 * Set all plls to autoidle. This is needed until autoidle is
824 	 * enabled by clockfw
825 	 */
826 	omap2_cm_write_mod_reg(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
827 			 OMAP3430_IVA2_MOD, CM_AUTOIDLE2);
828 	omap2_cm_write_mod_reg(1 << OMAP3430_AUTO_MPU_DPLL_SHIFT,
829 			 MPU_MOD,
830 			 CM_AUTOIDLE2);
831 	omap2_cm_write_mod_reg((1 << OMAP3430_AUTO_PERIPH_DPLL_SHIFT) |
832 			 (1 << OMAP3430_AUTO_CORE_DPLL_SHIFT),
833 			 PLL_MOD,
834 			 CM_AUTOIDLE);
835 	omap2_cm_write_mod_reg(1 << OMAP3430ES2_AUTO_PERIPH2_DPLL_SHIFT,
836 			 PLL_MOD,
837 			 CM_AUTOIDLE2);
838 
839 	/*
840 	 * Enable control of the external oscillator through
841 	 * sys_clkreq. In the long run the clock framework should
842 	 * take care of this.
843 	 */
844 	omap2_prm_rmw_mod_reg_bits(OMAP_AUTOEXTCLKMODE_MASK,
845 			     1 << OMAP_AUTOEXTCLKMODE_SHIFT,
846 			     OMAP3430_GR_MOD,
847 			     OMAP3_PRM_CLKSRC_CTRL_OFFSET);
848 
849 	/* Set up wake-up sources */
850 	omap2_prm_write_mod_reg(OMAP3430_EN_IO_MASK | OMAP3430_EN_GPIO1_MASK |
851 			  OMAP3430_EN_GPT1_MASK | OMAP3430_EN_GPT12_MASK,
852 			  WKUP_MOD, PM_WKEN);
853 	/* No need to write EN_IO, that is always enabled */
854 	omap2_prm_write_mod_reg(OMAP3430_GRPSEL_GPIO1_MASK |
855 			  OMAP3430_GRPSEL_GPT1_MASK |
856 			  OMAP3430_GRPSEL_GPT12_MASK,
857 			  WKUP_MOD, OMAP3430_PM_MPUGRPSEL);
858 	/* For some reason IO doesn't generate a wakeup event even if
859 	 * it is selected to the MPU wakeup group */
860 	omap2_prm_write_mod_reg(OMAP3430_IO_EN_MASK | OMAP3430_WKUP_EN_MASK,
861 			  OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
862 
863 	/* Enable PM_WKEN to support DSS LPR */
864 	omap2_prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
865 				OMAP3430_DSS_MOD, PM_WKEN);
866 
867 	/* Enable wakeups in PER */
868 	omap2_prm_write_mod_reg(omap3630_en_uart4_mask |
869 			  OMAP3430_EN_GPIO2_MASK | OMAP3430_EN_GPIO3_MASK |
870 			  OMAP3430_EN_GPIO4_MASK | OMAP3430_EN_GPIO5_MASK |
871 			  OMAP3430_EN_GPIO6_MASK | OMAP3430_EN_UART3_MASK |
872 			  OMAP3430_EN_MCBSP2_MASK | OMAP3430_EN_MCBSP3_MASK |
873 			  OMAP3430_EN_MCBSP4_MASK,
874 			  OMAP3430_PER_MOD, PM_WKEN);
875 	/* and allow them to wake up MPU */
876 	omap2_prm_write_mod_reg(omap3630_grpsel_uart4_mask |
877 			  OMAP3430_GRPSEL_GPIO2_MASK |
878 			  OMAP3430_GRPSEL_GPIO3_MASK |
879 			  OMAP3430_GRPSEL_GPIO4_MASK |
880 			  OMAP3430_GRPSEL_GPIO5_MASK |
881 			  OMAP3430_GRPSEL_GPIO6_MASK |
882 			  OMAP3430_GRPSEL_UART3_MASK |
883 			  OMAP3430_GRPSEL_MCBSP2_MASK |
884 			  OMAP3430_GRPSEL_MCBSP3_MASK |
885 			  OMAP3430_GRPSEL_MCBSP4_MASK,
886 			  OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);
887 
888 	/* Don't attach IVA interrupts */
889 	omap2_prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
890 	omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
891 	omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
892 	omap2_prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL);
893 
894 	/* Clear any pending 'reset' flags */
895 	omap2_prm_write_mod_reg(0xffffffff, MPU_MOD, OMAP2_RM_RSTST);
896 	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP2_RM_RSTST);
897 	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, OMAP2_RM_RSTST);
898 	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, OMAP2_RM_RSTST);
899 	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, OMAP2_RM_RSTST);
900 	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, OMAP2_RM_RSTST);
901 	omap2_prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, OMAP2_RM_RSTST);
902 
903 	/* Clear any pending PRCM interrupts */
904 	omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
905 
906 	omap3_iva_idle();
907 	omap3_d2d_idle();
908 }
909 
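/*
 * Switch the target low-power state of every registered powerdomain
 * between OFF and RET, keeping CORE out of OFF when erratum i583
 * applies.
 */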
910 void omap3_pm_off_mode_enable(int enable)
911 {
912 	struct power_state *pwrst;
913 	u32 state;
914 
915 	if (enable)
916 		state = PWRDM_POWER_OFF;
917 	else
918 		state = PWRDM_POWER_RET;
919 
920 #ifdef CONFIG_CPU_IDLE
921 	/*
922 	 * Erratum i583: on 3630 silicon prior to ES1.2 we cannot enable
923 	 * OFF mode in a stable form, so restrict those revisions to RET
924 	 * instead.
925 	 */
926 	if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
927 		omap3_cpuidle_update_states(state, PWRDM_POWER_RET);
928 	else
929 		omap3_cpuidle_update_states(state, state);
930 #endif
931 
932 	list_for_each_entry(pwrst, &pwrst_list, node) {
933 		if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) &&
934 				pwrst->pwrdm == core_pwrdm &&
935 				state == PWRDM_POWER_OFF) {
936 			pwrst->next_state = PWRDM_POWER_RET;
937 			WARN_ONCE(1,
938 				"%s: Core OFF disabled due to errata i583\n",
939 				__func__);
940 		} else {
941 			pwrst->next_state = state;
942 		}
943 		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
944 	}
945 }
946 
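/* Return the suspend target state recorded for a given powerdomain */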
947 int omap3_pm_get_suspend_state(struct powerdomain *pwrdm)
948 {
949 	struct power_state *pwrst;
950 
951 	list_for_each_entry(pwrst, &pwrst_list, node) {
952 		if (pwrst->pwrdm == pwrdm)
953 			return pwrst->next_state;
954 	}
955 	return -EINVAL;
956 }
957 
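/* Override the suspend target state recorded for a given powerdomain */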
958 int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state)
959 {
960 	struct power_state *pwrst;
961 
962 	list_for_each_entry(pwrst, &pwrst_list, node) {
963 		if (pwrst->pwrdm == pwrdm) {
964 			pwrst->next_state = state;
965 			return 0;
966 		}
967 	}
968 	return -EINVAL;
969 }
970 
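/*
 * Called once per powerdomain at init: record it on pwrst_list with
 * RET as the default target state and enable hardware save-and-restore
 * where the domain supports it.
 */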
971 static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
972 {
973 	struct power_state *pwrst;
974 
975 	if (!pwrdm->pwrsts)
976 		return 0;
977 
978 	pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
979 	if (!pwrst)
980 		return -ENOMEM;
981 	pwrst->pwrdm = pwrdm;
982 	pwrst->next_state = PWRDM_POWER_RET;
983 	list_add(&pwrst->node, &pwrst_list);
984 
985 	if (pwrdm_has_hdwr_sar(pwrdm))
986 		pwrdm_enable_hdwr_sar(pwrdm);
987 
988 	return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
989 }
990 
991 /*
992  * Enable hw-supervised mode for all clockdomains where it is
993  * supported. Initiate a sleep transition for the other clockdomains
994  * if they are not used.
995  */
996 static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
997 {
998 	if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
999 		omap2_clkdm_allow_idle(clkdm);
1000 	else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
1001 		 atomic_read(&clkdm->usecount) == 0)
1002 		omap2_clkdm_sleep(clkdm);
1003 	return 0;
1004 }
1005 
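/*
 * Copy the low-level CPU suspend code into SRAM so it can run while
 * SDRAM is unavailable; HS/EMU devices also get the secure RAM save
 * routine.
 */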
1006 void omap_push_sram_idle(void)
1007 {
1008 	_omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend,
1009 					omap34xx_cpu_suspend_sz);
1010 	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
1011 		_omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
1012 				save_secure_ram_context_sz);
1013 }
1014 
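/* Record which PM errata apply to the running silicon revision */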
1015 static void __init pm_errata_configure(void)
1016 {
1017 	if (cpu_is_omap3630()) {
1018 		pm34xx_errata |= PM_RTA_ERRATUM_i608;
1019 		/* Enable the l2 cache toggling in sleep logic */
1020 		enable_omap3630_toggle_l2_on_restore();
1021 		if (omap_rev() < OMAP3630_REV_ES1_2)
1022 			pm34xx_errata |= PM_SDRC_WAKEUP_ERRATUM_i583;
1023 	}
1024 }
1025 
1026 static int __init omap3_pm_init(void)
1027 {
1028 	struct power_state *pwrst, *tmp;
1029 	struct clockdomain *neon_clkdm, *per_clkdm, *mpu_clkdm, *core_clkdm;
1030 	int ret;
1031 
1032 	if (!cpu_is_omap34xx())
1033 		return -ENODEV;
1034 
1035 	pm_errata_configure();
1036 
1037 	printk(KERN_INFO "Power Management for TI OMAP3.\n");
1038 
1039 	/* XXX prcm_setup_regs needs to be before enabling hw
1040 	 * supervised mode for powerdomains */
1041 	prcm_setup_regs();
1042 
1043 	ret = request_irq(INT_34XX_PRCM_MPU_IRQ,
1044 			  (irq_handler_t)prcm_interrupt_handler,
1045 			  IRQF_DISABLED, "prcm", NULL);
1046 	if (ret) {
1047 		printk(KERN_ERR "request_irq failed to register for 0x%x\n",
1048 		       INT_34XX_PRCM_MPU_IRQ);
1049 		goto err1;
1050 	}
1051 
1052 	ret = pwrdm_for_each(pwrdms_setup, NULL);
1053 	if (ret) {
1054 		printk(KERN_ERR "Failed to setup powerdomains\n");
1055 		goto err2;
1056 	}
1057 
1058 	(void) clkdm_for_each(clkdms_setup, NULL);
1059 
1060 	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
1061 	if (mpu_pwrdm == NULL) {
1062 		printk(KERN_ERR "Failed to get mpu_pwrdm\n");
1063 		goto err2;
1064 	}
1065 
1066 	neon_pwrdm = pwrdm_lookup("neon_pwrdm");
1067 	per_pwrdm = pwrdm_lookup("per_pwrdm");
1068 	core_pwrdm = pwrdm_lookup("core_pwrdm");
1069 	cam_pwrdm = pwrdm_lookup("cam_pwrdm");
1070 
1071 	neon_clkdm = clkdm_lookup("neon_clkdm");
1072 	mpu_clkdm = clkdm_lookup("mpu_clkdm");
1073 	per_clkdm = clkdm_lookup("per_clkdm");
1074 	core_clkdm = clkdm_lookup("core_clkdm");
1075 
1076 	omap_push_sram_idle();
1077 #ifdef CONFIG_SUSPEND
1078 	suspend_set_ops(&omap_pm_ops);
1079 #endif /* CONFIG_SUSPEND */
1080 
1081 	pm_idle = omap3_pm_idle;
1082 	omap3_idle_init();
1083 
1084 	/*
1085 	 * RTA is disabled during initialization as per erratum i608.
1086 	 * It is safer to disable RTA in the bootloader, but we would like
1087 	 * to be doubly sure here and prevent any mishaps.
1088 	 */
1089 	if (IS_PM34XX_ERRATUM(PM_RTA_ERRATUM_i608))
1090 		omap3630_ctrl_disable_rta();
1091 
1092 	clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
1093 	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
1094 		omap3_secure_ram_storage =
1095 			kmalloc(0x803F, GFP_KERNEL);
1096 		if (!omap3_secure_ram_storage)
1097 			printk(KERN_ERR "Memory allocation failed when "
1098 					"allocating for secure sram context\n");
1099 
1100 		local_irq_disable();
1101 		local_fiq_disable();
1102 
1103 		omap_dma_global_context_save();
1104 		omap3_save_secure_ram_context(PWRDM_POWER_ON);
1105 		omap_dma_global_context_restore();
1106 
1107 		local_irq_enable();
1108 		local_fiq_enable();
1109 	}
1110 
1111 	omap3_save_scratchpad_contents();
1112 err1:
1113 	return ret;
1114 err2:
1115 	free_irq(INT_34XX_PRCM_MPU_IRQ, NULL);
1116 	list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) {
1117 		list_del(&pwrst->node);
1118 		kfree(pwrst);
1119 	}
1120 	return ret;
1121 }
1122 
1123 late_initcall(omap3_pm_init);
1124