/*
 * OMAP3 Power Management Routines
 *
 * Copyright (C) 2006-2008 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 * Jouni Hogander
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * Based on pm.c for omap1
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/console.h>

#include <plat/sram.h>
#include "clockdomain.h"
#include "powerdomain.h"
#include <plat/serial.h>
#include <plat/sdrc.h>
#include <plat/prcm.h>
#include <plat/gpmc.h>
#include <plat/dma.h>

#include <asm/tlbflush.h>

#include "cm2xxx_3xxx.h"
#include "cm-regbits-34xx.h"
#include "prm-regbits-34xx.h"

#include "prm2xxx_3xxx.h"
#include "pm.h"
#include "sdrc.h"
#include "control.h"

#ifdef CONFIG_SUSPEND
static suspend_state_t suspend_state = PM_SUSPEND_ON;
static inline bool is_suspending(void)
{
	return (suspend_state != PM_SUSPEND_ON);
}
#else
static inline bool is_suspending(void)
{
	return false;
}
#endif

/* Scratchpad offsets */
#define OMAP343X_TABLE_ADDRESS_OFFSET	   0xc4
#define OMAP343X_TABLE_VALUE_OFFSET	   0xc0
#define OMAP343X_CONTROL_REG_VALUE_OFFSET  0xc8

/* pm34xx errata defined in pm.h */
u16 pm34xx_errata;

struct power_state {
	struct powerdomain *pwrdm;
	u32 next_state;
#ifdef CONFIG_SUSPEND
	u32 saved_state;
#endif
	struct list_head node;
};

static LIST_HEAD(pwrst_list);

static void (*_omap_sram_idle)(u32 *addr, int save_state);

static int (*_omap_save_secure_sram)(u32 *addr);

static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
static struct powerdomain *core_pwrdm, *per_pwrdm;
static struct powerdomain *cam_pwrdm;

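/*
 * PER powerdomain context save/restore: the GPIO banks in PER are the
 * only PER context saved and restored by software here.
 */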
static inline void omap3_per_save_context(void)
{
	omap_gpio_save_context();
}

static inline void omap3_per_restore_context(void)
{
	omap_gpio_restore_context();
}

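/*
 * On OMAP3430 ES3.1 and later the I/O pad wakeups are latched through a
 * wakeup daisy chain.  Enable the chain (and poll for its status bit)
 * before idling PER/CORE so that pad events can wake the system, and
 * disable it again on resume.
 */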
static void omap3_enable_io_chain(void)
{
	int timeout = 0;

	if (omap_rev() >= OMAP3430_REV_ES3_1) {
		omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
				     PM_WKEN);
		/* Do a readback to ensure the write has been done */
		omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN);

		while (!(omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN) &
			 OMAP3430_ST_IO_CHAIN_MASK)) {
			timeout++;
			if (timeout > 1000) {
				printk(KERN_ERR "Wake up daisy chain "
				       "activation failed.\n");
				return;
			}
			omap2_prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK,
					     WKUP_MOD, PM_WKEN);
		}
	}
}

static void omap3_disable_io_chain(void)
{
	if (omap_rev() >= OMAP3430_REV_ES3_1)
		omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
				       PM_WKEN);
}

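/*
 * CORE powerdomain context save/restore.  The padconf save is started in
 * hardware (START_PADCONF_SAVE) and polled for completion before the
 * interrupt controller, GPMC, system control module and DMA contexts are
 * saved; the padconf registers themselves are restored by hardware on
 * wakeup.
 */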
static void omap3_core_save_context(void)
{
	u32 control_padconf_off;

	/* Save the padconf registers */
	control_padconf_off = omap_ctrl_readl(OMAP343X_CONTROL_PADCONF_OFF);
	control_padconf_off |= START_PADCONF_SAVE;
	omap_ctrl_writel(control_padconf_off, OMAP343X_CONTROL_PADCONF_OFF);
	/* wait for the save to complete */
	while (!(omap_ctrl_readl(OMAP343X_CONTROL_GENERAL_PURPOSE_STATUS)
			& PADCONF_SAVE_DONE))
		udelay(1);

	/*
	 * Force write last pad into memory, as this can fail in some
	 * cases according to errata 1.157, 1.185
	 */
	omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
		OMAP343X_CONTROL_MEM_WKUP + 0x2a0);

	/* Save the Interrupt controller context */
	omap_intc_save_context();
	/* Save the GPMC context */
	omap3_gpmc_save_context();
	/* Save the system control module context, padconf already saved above */
	omap3_control_save_context();
	omap_dma_global_context_save();
}

static void omap3_core_restore_context(void)
{
	/* Restore the control module context, padconf restored by h/w */
	omap3_control_restore_context();
	/* Restore the GPMC context */
	omap3_gpmc_restore_context();
	/* Restore the interrupt controller context */
	omap_intc_restore_context();
	omap_dma_global_context_restore();
}

/*
 * FIXME: This function should be called before entering off-mode after
 * OMAP3 secure services have been accessed. Currently it is only called
 * once during boot sequence, but this works as we are not using secure
 * services.
 */
static void omap3_save_secure_ram_context(u32 target_mpu_state)
{
	u32 ret;

	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		/*
		 * MPU next state must be set to POWER_ON temporarily,
		 * otherwise the WFI executed inside the ROM code
		 * will hang the system.
		 */
		pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
		ret = _omap_save_secure_sram((u32 *)
				__pa(omap3_secure_ram_storage));
		pwrdm_set_next_pwrst(mpu_pwrdm, target_mpu_state);
		/* Following is for error tracking, it should not happen */
		if (ret) {
			printk(KERN_ERR "save_secure_sram() returns %08x\n",
				ret);
			while (1)
				;
		}
	}
}

/*
 * PRCM Interrupt Handler Helper Function
 *
 * The purpose of this function is to clear any wake-up events latched
 * in the PRCM PM_WKST_x registers. It is possible that a wake-up event
 * may occur whilst attempting to clear a PM_WKST_x register and thus
 * set another bit in this register. A while loop is used to ensure
 * that any peripheral wake-up events occurring while attempting to
 * clear the PM_WKST_x are detected and cleared.
 */
static int prcm_clear_mod_irqs(s16 module, u8 regs)
{
	u32 wkst, fclk, iclk, clken;
	u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1;
	u16 fclk_off = (regs == 3) ? OMAP3430ES2_CM_FCLKEN3 : CM_FCLKEN1;
	u16 iclk_off = (regs == 3) ? CM_ICLKEN3 : CM_ICLKEN1;
	u16 grpsel_off = (regs == 3) ?
		OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL;
	int c = 0;

	wkst = omap2_prm_read_mod_reg(module, wkst_off);
	wkst &= omap2_prm_read_mod_reg(module, grpsel_off);
	if (wkst) {
		iclk = omap2_cm_read_mod_reg(module, iclk_off);
		fclk = omap2_cm_read_mod_reg(module, fclk_off);
		while (wkst) {
			clken = wkst;
			omap2_cm_set_mod_reg_bits(clken, module, iclk_off);
			/*
			 * For USBHOST, we don't know whether HOST1 or
			 * HOST2 woke us up, so enable both f-clocks
			 */
			if (module == OMAP3430ES2_USBHOST_MOD)
				clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT;
			omap2_cm_set_mod_reg_bits(clken, module, fclk_off);
			omap2_prm_write_mod_reg(wkst, module, wkst_off);
			wkst = omap2_prm_read_mod_reg(module, wkst_off);
			c++;
		}
		omap2_cm_write_mod_reg(iclk, module, iclk_off);
		omap2_cm_write_mod_reg(fclk, module, fclk_off);
	}

	return c;
}

static int _prcm_int_handle_wakeup(void)
{
	int c;

	c = prcm_clear_mod_irqs(WKUP_MOD, 1);
	c += prcm_clear_mod_irqs(CORE_MOD, 1);
	c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1);
	if (omap_rev() > OMAP3430_REV_ES1_0) {
		c += prcm_clear_mod_irqs(CORE_MOD, 3);
		c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1);
	}

	return c;
}

/*
 * PRCM Interrupt Handler
 *
 * The PRM_IRQSTATUS_MPU register indicates if there are any pending
 * interrupts from the PRCM for the MPU. These bits must be cleared in
 * order to clear the PRCM interrupt. The PRCM interrupt handler is
 * implemented to simply clear the PRM_IRQSTATUS_MPU in order to clear
 * the PRCM interrupt. Please note that bit 0 of the PRM_IRQSTATUS_MPU
 * register indicates that a wake-up event is pending for the MPU and
 * this bit can only be cleared if all the wake-up events latched
 * in the various PM_WKST_x registers have been cleared. The interrupt
 * handler is implemented using a do-while loop so that if a wake-up
 * event occurs during the processing of the PRCM interrupt handler
 * (setting a bit in the corresponding PM_WKST_x register and thus
 * preventing us from clearing bit 0 of the PRM_IRQSTATUS_MPU register)
 * it will still be handled.
 */
static irqreturn_t prcm_interrupt_handler(int irq, void *dev_id)
{
	u32 irqenable_mpu, irqstatus_mpu;
	int c = 0;

	irqenable_mpu = omap2_prm_read_mod_reg(OCP_MOD,
					 OMAP3_PRM_IRQENABLE_MPU_OFFSET);
	irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD,
					 OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
	irqstatus_mpu &= irqenable_mpu;

	do {
		if (irqstatus_mpu & (OMAP3430_WKUP_ST_MASK |
				     OMAP3430_IO_ST_MASK)) {
			c = _prcm_int_handle_wakeup();

			/*
			 * Is the MPU PRCM interrupt handler racing with the
			 * IVA2 PRCM interrupt handler ?
			 */
			WARN(c == 0, "prcm: WARNING: PRCM indicated MPU wakeup "
			     "but no wakeup sources are marked\n");
		} else {
			/* XXX we need to expand our PRCM interrupt handler */
			WARN(1, "prcm: WARNING: PRCM interrupt received, but "
			     "no code to handle it (%08x)\n", irqstatus_mpu);
		}

		omap2_prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
					OMAP3_PRM_IRQSTATUS_MPU_OFFSET);

		irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD,
					OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
		irqstatus_mpu &= irqenable_mpu;

	} while (irqstatus_mpu);

	return IRQ_HANDLED;
}

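/*
 * Restore the CP15 control register (SCTLR).  Writing back the saved
 * value re-enables the caches and branch prediction after an off-mode
 * wakeup.
 */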
static void restore_control_register(u32 val)
{
	__asm__ __volatile__ ("mcr p15, 0, %0, c1, c0, 0" : : "r" (val));
}

/* Function to restore the table entry that was modified for enabling MMU */
static void restore_table_entry(void)
{
	void __iomem *scratchpad_address;
	u32 previous_value, control_reg_value;
	u32 *address;

	scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD);

	/* Get address of entry that was modified */
	address = (u32 *)__raw_readl(scratchpad_address +
				     OMAP343X_TABLE_ADDRESS_OFFSET);
	/* Get the previous value which needs to be restored */
	previous_value = __raw_readl(scratchpad_address +
				     OMAP343X_TABLE_VALUE_OFFSET);
	address = __va(address);
	*address = previous_value;
	flush_tlb_all();
	control_reg_value = __raw_readl(scratchpad_address
					+ OMAP343X_CONTROL_REG_VALUE_OFFSET);
	/* This will enable caches and prediction */
	restore_control_register(control_reg_value);
}

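/*
 * omap_sram_idle - program the next power states and run the SRAM
 * suspend/idle code
 *
 * Prepares the MPU, CORE and PER powerdomains for the requested
 * low-power state, saves any context that will be lost, jumps to the
 * SRAM idle routine, and restores context on the way back up.
 */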
void omap_sram_idle(void)
{
	/*
	 * save_state selects what the SRAM idle code must save and restore:
	 * save_state = 0 => nothing to save/restore
	 * save_state = 1 => only L1 and logic lost
	 * save_state = 2 => only L2 lost
	 * save_state = 3 => L1, L2 and logic lost
	 */
	int save_state = 0;
	int mpu_next_state = PWRDM_POWER_ON;
	int per_next_state = PWRDM_POWER_ON;
	int core_next_state = PWRDM_POWER_ON;
	int per_going_off;
	int core_prev_state, per_prev_state;
	u32 sdrc_pwr = 0;

	if (!_omap_sram_idle)
		return;

	pwrdm_clear_all_prev_pwrst(mpu_pwrdm);
	pwrdm_clear_all_prev_pwrst(neon_pwrdm);
	pwrdm_clear_all_prev_pwrst(core_pwrdm);
	pwrdm_clear_all_prev_pwrst(per_pwrdm);

	mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
	switch (mpu_next_state) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_RET:
		/* No need to save context */
		save_state = 0;
		break;
	case PWRDM_POWER_OFF:
		save_state = 3;
		break;
	default:
		/* Invalid state */
		printk(KERN_ERR "Invalid mpu state in sram_idle\n");
		return;
	}
	pwrdm_pre_transition();

	/* NEON control */
	if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
		pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state);

	/* Enable IO-PAD and IO-CHAIN wakeups */
	per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
	core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
	if (omap3_has_io_wakeup() &&
	    (per_next_state < PWRDM_POWER_ON ||
	     core_next_state < PWRDM_POWER_ON)) {
		omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
		omap3_enable_io_chain();
	}

	/* Block console output in case it is on one of the OMAP UARTs */
	if (!is_suspending())
		if (per_next_state < PWRDM_POWER_ON ||
		    core_next_state < PWRDM_POWER_ON)
			if (try_acquire_console_sem())
				goto console_still_active;

	/* PER */
	if (per_next_state < PWRDM_POWER_ON) {
		per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
		omap_uart_prepare_idle(2);
		omap_uart_prepare_idle(3);
		omap2_gpio_prepare_for_idle(per_going_off);
		if (per_next_state == PWRDM_POWER_OFF)
			omap3_per_save_context();
	}

	/* CORE */
	if (core_next_state < PWRDM_POWER_ON) {
		omap_uart_prepare_idle(0);
		omap_uart_prepare_idle(1);
		if (core_next_state == PWRDM_POWER_OFF) {
			omap3_core_save_context();
			omap3_cm_save_context();
		}
	}

	omap3_intc_prepare_idle();

	/*
	 * On EMU/HS devices the ROM code restores an SDRC value from the
	 * scratchpad which has automatic self refresh on timeout of
	 * AUTO_CNT = 1 enabled. This takes care of erratum ID i443.
	 * Hence store/restore the SDRC_POWER register here.
	 */
	if (omap_rev() >= OMAP3430_REV_ES3_0 &&
	    omap_type() != OMAP2_DEVICE_TYPE_GP &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_pwr = sdrc_read_reg(SDRC_POWER);

	/*
	 * omap3_arm_context is the location where ARM registers
	 * get saved. The restore path then reads from this
	 * location and restores them back.
	 */
	_omap_sram_idle(omap3_arm_context, save_state);
	cpu_init();

	/* Restore normal SDRC POWER settings */
	if (omap_rev() >= OMAP3430_REV_ES3_0 &&
	    omap_type() != OMAP2_DEVICE_TYPE_GP &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_write_reg(sdrc_pwr, SDRC_POWER);

	/* Restore table entry modified during MMU restoration */
	if (pwrdm_read_prev_pwrst(mpu_pwrdm) == PWRDM_POWER_OFF)
		restore_table_entry();

	/* CORE */
	if (core_next_state < PWRDM_POWER_ON) {
		core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
		if (core_prev_state == PWRDM_POWER_OFF) {
			omap3_core_restore_context();
			omap3_cm_restore_context();
			omap3_sram_restore_context();
			omap2_sms_restore_context();
		}
		omap_uart_resume_idle(0);
		omap_uart_resume_idle(1);
		if (core_next_state == PWRDM_POWER_OFF)
			omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
					       OMAP3430_GR_MOD,
					       OMAP3_PRM_VOLTCTRL_OFFSET);
	}
	omap3_intc_resume_idle();

	/* PER */
	if (per_next_state < PWRDM_POWER_ON) {
		per_prev_state = pwrdm_read_prev_pwrst(per_pwrdm);
		omap2_gpio_resume_after_idle();
		if (per_prev_state == PWRDM_POWER_OFF)
			omap3_per_restore_context();
		omap_uart_resume_idle(2);
		omap_uart_resume_idle(3);
	}

	if (!is_suspending())
		release_console_sem();

console_still_active:
	/* Disable IO-PAD and IO-CHAIN wakeup */
	if (omap3_has_io_wakeup() &&
	    (per_next_state < PWRDM_POWER_ON ||
	     core_next_state < PWRDM_POWER_ON)) {
		omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
					     PM_WKEN);
		omap3_disable_io_chain();
	}

	pwrdm_post_transition();

	omap2_clkdm_allow_idle(mpu_pwrdm->pwrdm_clkdms[0]);
}

int omap3_can_sleep(void)
{
	if (!sleep_while_idle)
		return 0;
	if (!omap_uart_can_sleep())
		return 0;
	return 1;
}

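/*
 * omap3_pm_idle - default idle loop hooked into pm_idle
 *
 * Enters the SRAM idle path only when nothing blocks sleep: interrupts
 * and FIQs are masked, no OMAP interrupt is pending and no reschedule
 * is needed.
 */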
static void omap3_pm_idle(void)
{
	local_irq_disable();
	local_fiq_disable();

	if (!omap3_can_sleep())
		goto out;

	if (omap_irq_pending() || need_resched())
		goto out;

	omap_sram_idle();

out:
	local_fiq_enable();
	local_irq_enable();
}

#ifdef CONFIG_SUSPEND
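/*
 * omap3_pm_suspend - system suspend entry point
 *
 * Programs each registered powerdomain to its suspend target state,
 * runs the SRAM idle path, then restores the previous next-power-state
 * settings and reports any domain that failed to reach its target.
 */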
static int omap3_pm_suspend(void)
{
	struct power_state *pwrst;
	int state, ret = 0;

	if (wakeup_timer_seconds || wakeup_timer_milliseconds)
		omap2_pm_wakeup_on_timer(wakeup_timer_seconds,
					 wakeup_timer_milliseconds);

	/* Read current next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node)
		pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
	/* Set ones wanted by suspend */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state))
			goto restore;
		if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm))
			goto restore;
	}

	omap_uart_prepare_suspend();
	omap3_intc_suspend();

	omap_sram_idle();

restore:
	/* Restore next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
		if (state > pwrst->next_state) {
			printk(KERN_INFO "Powerdomain (%s) didn't enter "
			       "target state %d\n",
			       pwrst->pwrdm->name, pwrst->next_state);
			ret = -1;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
	}
	if (ret)
		printk(KERN_ERR "Could not enter target state in pm_suspend\n");
	else
		printk(KERN_INFO "Successfully put all powerdomains "
		       "to target state\n");

	return ret;
}

static int omap3_pm_enter(suspend_state_t unused)
{
	int ret = 0;

	switch (suspend_state) {
	case PM_SUSPEND_STANDBY:
	case PM_SUSPEND_MEM:
		ret = omap3_pm_suspend();
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/* Hooks to enable / disable UART interrupts during suspend */
static int omap3_pm_begin(suspend_state_t state)
{
	disable_hlt();
	suspend_state = state;
	omap_uart_enable_irqs(0);
	return 0;
}

static void omap3_pm_end(void)
{
	suspend_state = PM_SUSPEND_ON;
	omap_uart_enable_irqs(1);
	enable_hlt();
	return;
}

static struct platform_suspend_ops omap_pm_ops = {
	.begin		= omap3_pm_begin,
	.end		= omap3_pm_end,
	.enter		= omap3_pm_enter,
	.valid		= suspend_valid_only_mem,
};
#endif /* CONFIG_SUSPEND */


/**
 * omap3_iva_idle(): ensure IVA is in idle so it can be put into
 *                   retention
 *
 * In cases where IVA2 is activated by bootcode, it may prevent
 * full-chip retention or off-mode because it is not idle.  This
 * function forces the IVA2 into idle state so it can go
 * into retention/off and thus allow full-chip retention/off.
 *
 **/
static void __init omap3_iva_idle(void)
{
	/* ensure IVA2 clock is disabled */
	omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);

	/* if no clock activity, nothing else to do */
	if (!(omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSTST) &
	      OMAP3430_CLKACTIVITY_IVA2_MASK))
		return;

	/* Reset IVA2 */
	omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
			  OMAP3430_RST2_IVA2_MASK |
			  OMAP3430_RST3_IVA2_MASK,
			  OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

	/* Enable IVA2 clock */
	omap2_cm_write_mod_reg(OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK,
			 OMAP3430_IVA2_MOD, CM_FCLKEN);

	/* Set IVA2 boot mode to 'idle' */
	omap_ctrl_writel(OMAP3_IVA2_BOOTMOD_IDLE,
			 OMAP343X_CONTROL_IVA2_BOOTMOD);

	/* Un-reset IVA2 */
	omap2_prm_write_mod_reg(0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

	/* Disable IVA2 clock */
	omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);

	/* Reset IVA2 */
	omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
			  OMAP3430_RST2_IVA2_MASK |
			  OMAP3430_RST3_IVA2_MASK,
			  OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
}

static void __init omap3_d2d_idle(void)
{
	u16 mask, padconf;

	/*
	 * In a standalone OMAP3430 with no stacked modem, the D2D Idle Ack
	 * and D2D MStandby signals must be pulled high.  Set
	 * CONTROL_PADCONF_SAD2D_IDLEACK and CONTROL_PADCONF_SAD2D_MSTDBY
	 * to have a pull-up.
	 */
	mask = (1 << 4) | (1 << 3); /* pull-up, enabled */
	padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_MSTANDBY);
	padconf |= mask;
	omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_MSTANDBY);

	padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_IDLEACK);
	padconf |= mask;
	omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_IDLEACK);

	/* reset modem */
	omap2_prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK |
			  OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST_MASK,
			  CORE_MOD, OMAP2_RM_RSTCTRL);
	omap2_prm_write_mod_reg(0, CORE_MOD, OMAP2_RM_RSTCTRL);
}

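/*
 * prcm_setup_regs - program static PRCM defaults at init time
 *
 * Clears all wakeup dependencies, enables interface-clock and DPLL
 * autoidle, selects the wakeup sources that may interrupt the MPU and
 * then forces the IVA2 and the D2D interface into an idle state.
 */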
static void __init prcm_setup_regs(void)
{
	u32 omap3630_auto_uart4_mask = cpu_is_omap3630() ?
					OMAP3630_AUTO_UART4_MASK : 0;
	u32 omap3630_en_uart4_mask = cpu_is_omap3630() ?
					OMAP3630_EN_UART4_MASK : 0;
	u32 omap3630_grpsel_uart4_mask = cpu_is_omap3630() ?
					OMAP3630_GRPSEL_UART4_MASK : 0;


	/* XXX Reset all wkdeps. This should be done when initializing
	 * powerdomains */
	omap2_prm_write_mod_reg(0, OMAP3430_IVA2_MOD, PM_WKDEP);
	omap2_prm_write_mod_reg(0, MPU_MOD, PM_WKDEP);
	omap2_prm_write_mod_reg(0, OMAP3430_DSS_MOD, PM_WKDEP);
	omap2_prm_write_mod_reg(0, OMAP3430_NEON_MOD, PM_WKDEP);
	omap2_prm_write_mod_reg(0, OMAP3430_CAM_MOD, PM_WKDEP);
	omap2_prm_write_mod_reg(0, OMAP3430_PER_MOD, PM_WKDEP);
	if (omap_rev() > OMAP3430_REV_ES1_0) {
		omap2_prm_write_mod_reg(0, OMAP3430ES2_SGX_MOD, PM_WKDEP);
		omap2_prm_write_mod_reg(0, OMAP3430ES2_USBHOST_MOD, PM_WKDEP);
	} else
		omap2_prm_write_mod_reg(0, GFX_MOD, PM_WKDEP);

	/*
	 * Enable interface clock autoidle for all modules.
	 * Note that in the long run this should be done by clockfw
	 */
	omap2_cm_write_mod_reg(
		OMAP3430_AUTO_MODEM_MASK |
		OMAP3430ES2_AUTO_MMC3_MASK |
		OMAP3430ES2_AUTO_ICR_MASK |
		OMAP3430_AUTO_AES2_MASK |
		OMAP3430_AUTO_SHA12_MASK |
		OMAP3430_AUTO_DES2_MASK |
		OMAP3430_AUTO_MMC2_MASK |
		OMAP3430_AUTO_MMC1_MASK |
		OMAP3430_AUTO_MSPRO_MASK |
		OMAP3430_AUTO_HDQ_MASK |
		OMAP3430_AUTO_MCSPI4_MASK |
		OMAP3430_AUTO_MCSPI3_MASK |
		OMAP3430_AUTO_MCSPI2_MASK |
		OMAP3430_AUTO_MCSPI1_MASK |
		OMAP3430_AUTO_I2C3_MASK |
		OMAP3430_AUTO_I2C2_MASK |
		OMAP3430_AUTO_I2C1_MASK |
		OMAP3430_AUTO_UART2_MASK |
		OMAP3430_AUTO_UART1_MASK |
		OMAP3430_AUTO_GPT11_MASK |
		OMAP3430_AUTO_GPT10_MASK |
		OMAP3430_AUTO_MCBSP5_MASK |
		OMAP3430_AUTO_MCBSP1_MASK |
		OMAP3430ES1_AUTO_FAC_MASK | /* This is es1 only */
		OMAP3430_AUTO_MAILBOXES_MASK |
		OMAP3430_AUTO_OMAPCTRL_MASK |
		OMAP3430ES1_AUTO_FSHOSTUSB_MASK |
		OMAP3430_AUTO_HSOTGUSB_MASK |
		OMAP3430_AUTO_SAD2D_MASK |
		OMAP3430_AUTO_SSI_MASK,
		CORE_MOD, CM_AUTOIDLE1);

	omap2_cm_write_mod_reg(
		OMAP3430_AUTO_PKA_MASK |
		OMAP3430_AUTO_AES1_MASK |
		OMAP3430_AUTO_RNG_MASK |
		OMAP3430_AUTO_SHA11_MASK |
		OMAP3430_AUTO_DES1_MASK,
		CORE_MOD, CM_AUTOIDLE2);

	if (omap_rev() > OMAP3430_REV_ES1_0) {
		omap2_cm_write_mod_reg(
			OMAP3430_AUTO_MAD2D_MASK |
			OMAP3430ES2_AUTO_USBTLL_MASK,
			CORE_MOD, CM_AUTOIDLE3);
	}

	omap2_cm_write_mod_reg(
		OMAP3430_AUTO_WDT2_MASK |
		OMAP3430_AUTO_WDT1_MASK |
		OMAP3430_AUTO_GPIO1_MASK |
		OMAP3430_AUTO_32KSYNC_MASK |
		OMAP3430_AUTO_GPT12_MASK |
		OMAP3430_AUTO_GPT1_MASK,
		WKUP_MOD, CM_AUTOIDLE);

	omap2_cm_write_mod_reg(
		OMAP3430_AUTO_DSS_MASK,
		OMAP3430_DSS_MOD,
		CM_AUTOIDLE);

	omap2_cm_write_mod_reg(
		OMAP3430_AUTO_CAM_MASK,
		OMAP3430_CAM_MOD,
		CM_AUTOIDLE);

	omap2_cm_write_mod_reg(
		omap3630_auto_uart4_mask |
		OMAP3430_AUTO_GPIO6_MASK |
		OMAP3430_AUTO_GPIO5_MASK |
		OMAP3430_AUTO_GPIO4_MASK |
		OMAP3430_AUTO_GPIO3_MASK |
		OMAP3430_AUTO_GPIO2_MASK |
		OMAP3430_AUTO_WDT3_MASK |
		OMAP3430_AUTO_UART3_MASK |
		OMAP3430_AUTO_GPT9_MASK |
		OMAP3430_AUTO_GPT8_MASK |
		OMAP3430_AUTO_GPT7_MASK |
		OMAP3430_AUTO_GPT6_MASK |
		OMAP3430_AUTO_GPT5_MASK |
		OMAP3430_AUTO_GPT4_MASK |
		OMAP3430_AUTO_GPT3_MASK |
		OMAP3430_AUTO_GPT2_MASK |
		OMAP3430_AUTO_MCBSP4_MASK |
		OMAP3430_AUTO_MCBSP3_MASK |
		OMAP3430_AUTO_MCBSP2_MASK,
		OMAP3430_PER_MOD,
		CM_AUTOIDLE);

	if (omap_rev() > OMAP3430_REV_ES1_0) {
		omap2_cm_write_mod_reg(
			OMAP3430ES2_AUTO_USBHOST_MASK,
			OMAP3430ES2_USBHOST_MOD,
			CM_AUTOIDLE);
	}

	omap_ctrl_writel(OMAP3430_AUTOIDLE_MASK, OMAP2_CONTROL_SYSCONFIG);

	/*
	 * Set all plls to autoidle. This is needed until autoidle is
	 * enabled by clockfw
	 */
	omap2_cm_write_mod_reg(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
			 OMAP3430_IVA2_MOD, CM_AUTOIDLE2);
	omap2_cm_write_mod_reg(1 << OMAP3430_AUTO_MPU_DPLL_SHIFT,
			 MPU_MOD,
			 CM_AUTOIDLE2);
	omap2_cm_write_mod_reg((1 << OMAP3430_AUTO_PERIPH_DPLL_SHIFT) |
			 (1 << OMAP3430_AUTO_CORE_DPLL_SHIFT),
			 PLL_MOD,
			 CM_AUTOIDLE);
	omap2_cm_write_mod_reg(1 << OMAP3430ES2_AUTO_PERIPH2_DPLL_SHIFT,
			 PLL_MOD,
			 CM_AUTOIDLE2);

	/*
	 * Enable control of the external oscillator through
	 * sys_clkreq. In the long run the clock framework should
	 * take care of this.
	 */
	omap2_prm_rmw_mod_reg_bits(OMAP_AUTOEXTCLKMODE_MASK,
			     1 << OMAP_AUTOEXTCLKMODE_SHIFT,
			     OMAP3430_GR_MOD,
			     OMAP3_PRM_CLKSRC_CTRL_OFFSET);

	/* Setup wakeup sources */
	omap2_prm_write_mod_reg(OMAP3430_EN_IO_MASK | OMAP3430_EN_GPIO1_MASK |
			  OMAP3430_EN_GPT1_MASK | OMAP3430_EN_GPT12_MASK,
			  WKUP_MOD, PM_WKEN);
	/* No need to write EN_IO, that is always enabled */
	omap2_prm_write_mod_reg(OMAP3430_GRPSEL_GPIO1_MASK |
			  OMAP3430_GRPSEL_GPT1_MASK |
			  OMAP3430_GRPSEL_GPT12_MASK,
			  WKUP_MOD, OMAP3430_PM_MPUGRPSEL);
	/* For some reason IO doesn't generate wakeup events even if
	 * it is selected to the MPU wakeup group */
	omap2_prm_write_mod_reg(OMAP3430_IO_EN_MASK | OMAP3430_WKUP_EN_MASK,
			  OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);

	/* Enable PM_WKEN to support DSS LPR */
	omap2_prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
				OMAP3430_DSS_MOD, PM_WKEN);

	/* Enable wakeups in PER */
	omap2_prm_write_mod_reg(omap3630_en_uart4_mask |
			  OMAP3430_EN_GPIO2_MASK | OMAP3430_EN_GPIO3_MASK |
			  OMAP3430_EN_GPIO4_MASK | OMAP3430_EN_GPIO5_MASK |
			  OMAP3430_EN_GPIO6_MASK | OMAP3430_EN_UART3_MASK |
			  OMAP3430_EN_MCBSP2_MASK | OMAP3430_EN_MCBSP3_MASK |
			  OMAP3430_EN_MCBSP4_MASK,
			  OMAP3430_PER_MOD, PM_WKEN);
	/* and allow them to wake up MPU */
	omap2_prm_write_mod_reg(omap3630_grpsel_uart4_mask |
			  OMAP3430_GRPSEL_GPIO2_MASK |
			  OMAP3430_GRPSEL_GPIO3_MASK |
			  OMAP3430_GRPSEL_GPIO4_MASK |
			  OMAP3430_GRPSEL_GPIO5_MASK |
			  OMAP3430_GRPSEL_GPIO6_MASK |
			  OMAP3430_GRPSEL_UART3_MASK |
			  OMAP3430_GRPSEL_MCBSP2_MASK |
			  OMAP3430_GRPSEL_MCBSP3_MASK |
			  OMAP3430_GRPSEL_MCBSP4_MASK,
			  OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);

	/* Don't attach IVA interrupts */
	omap2_prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
	omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
	omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
	omap2_prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL);

	/* Clear any pending 'reset' flags */
	omap2_prm_write_mod_reg(0xffffffff, MPU_MOD, OMAP2_RM_RSTST);
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP2_RM_RSTST);
	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, OMAP2_RM_RSTST);
	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, OMAP2_RM_RSTST);
	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, OMAP2_RM_RSTST);
	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, OMAP2_RM_RSTST);
	omap2_prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, OMAP2_RM_RSTST);

	/* Clear any pending PRCM interrupts */
	omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);

	omap3_iva_idle();
	omap3_d2d_idle();
}

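/*
 * omap3_pm_off_mode_enable - switch the target low-power state of all
 * powerdomains between OFF (enable != 0) and RET, honouring erratum
 * i583 which keeps the CORE domain out of OFF on affected 3630 parts.
 */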
void omap3_pm_off_mode_enable(int enable)
{
	struct power_state *pwrst;
	u32 state;

	if (enable)
		state = PWRDM_POWER_OFF;
	else
		state = PWRDM_POWER_RET;

#ifdef CONFIG_CPU_IDLE
	/*
	 * Erratum i583: applies to ES revisions < ES1.2 on the 3630.
	 * OFF mode cannot be enabled in a stable form on the affected
	 * revisions, so restrict them to RET instead.
	 */
	if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
		omap3_cpuidle_update_states(state, PWRDM_POWER_RET);
	else
		omap3_cpuidle_update_states(state, state);
#endif

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) &&
				pwrst->pwrdm == core_pwrdm &&
				state == PWRDM_POWER_OFF) {
			pwrst->next_state = PWRDM_POWER_RET;
			WARN_ONCE(1,
				"%s: Core OFF disabled due to errata i583\n",
				__func__);
		} else {
			pwrst->next_state = state;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
	}
}

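/*
 * omap3_pm_get_suspend_state - return the suspend target state recorded
 * for @pwrdm, or -EINVAL if the powerdomain is not tracked here.
 */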
int omap3_pm_get_suspend_state(struct powerdomain *pwrdm)
{
	struct power_state *pwrst;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (pwrst->pwrdm == pwrdm)
			return pwrst->next_state;
	}
	return -EINVAL;
}

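/*
 * omap3_pm_set_suspend_state - set the suspend target state for @pwrdm;
 * returns 0 on success or -EINVAL if the powerdomain is not tracked here.
 */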
int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state)
{
	struct power_state *pwrst;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (pwrst->pwrdm == pwrdm) {
			pwrst->next_state = state;
			return 0;
		}
	}
	return -EINVAL;
}

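/*
 * pwrdms_setup - per-powerdomain init callback
 *
 * Registers each powerdomain that supports power states in pwrst_list
 * with a default next state of RET, enables hardware save-and-restore
 * where available and programs the initial state.
 */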
static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
{
	struct power_state *pwrst;

	if (!pwrdm->pwrsts)
		return 0;

	pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
	if (!pwrst)
		return -ENOMEM;
	pwrst->pwrdm = pwrdm;
	pwrst->next_state = PWRDM_POWER_RET;
	list_add(&pwrst->node, &pwrst_list);

	if (pwrdm_has_hdwr_sar(pwrdm))
		pwrdm_enable_hdwr_sar(pwrdm);

	return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
}

/*
 * Enable hardware-supervised idle for all clockdomains that support it.
 * Initiate a sleep transition for the other clockdomains if they are
 * not in use.
 */
static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
{
	if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
		omap2_clkdm_allow_idle(clkdm);
	else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
		 atomic_read(&clkdm->usecount) == 0)
		omap2_clkdm_sleep(clkdm);
	return 0;
}

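/*
 * omap_push_sram_idle - copy the CPU suspend code (and, on HS/EMU
 * devices, the secure SRAM save code) into SRAM so it can run while
 * SDRAM is unavailable during the low-power transition.
 */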
void omap_push_sram_idle(void)
{
	_omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend,
					omap34xx_cpu_suspend_sz);
	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
		_omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
				save_secure_ram_context_sz);
}

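/* Record which PM errata apply to this SoC revision (see pm.h). */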
static void __init pm_errata_configure(void)
{
	if (cpu_is_omap3630()) {
		pm34xx_errata |= PM_RTA_ERRATUM_i608;
		/* Enable the l2 cache toggling in sleep logic */
		enable_omap3630_toggle_l2_on_restore();
		if (omap_rev() < OMAP3630_REV_ES1_2)
			pm34xx_errata |= PM_SDRC_WAKEUP_ERRATUM_i583;
	}
}

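/*
 * omap3_pm_init - late_initcall that brings up OMAP3 power management
 *
 * Programs the PRCM defaults, registers the PRCM interrupt handler,
 * initializes the power and clock domains, installs the idle and
 * suspend hooks, and on HS/EMU devices saves the secure RAM context.
 */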
static int __init omap3_pm_init(void)
{
	struct power_state *pwrst, *tmp;
	struct clockdomain *neon_clkdm, *per_clkdm, *mpu_clkdm, *core_clkdm;
	int ret;

	if (!cpu_is_omap34xx())
		return -ENODEV;

	pm_errata_configure();

	printk(KERN_ERR "Power Management for TI OMAP3.\n");

	/* XXX prcm_setup_regs needs to be before enabling hw
	 * supervised mode for powerdomains */
	prcm_setup_regs();

	ret = request_irq(INT_34XX_PRCM_MPU_IRQ,
			  (irq_handler_t)prcm_interrupt_handler,
			  IRQF_DISABLED, "prcm", NULL);
	if (ret) {
		printk(KERN_ERR "request_irq failed to register for 0x%x\n",
		       INT_34XX_PRCM_MPU_IRQ);
		goto err1;
	}

	ret = pwrdm_for_each(pwrdms_setup, NULL);
	if (ret) {
		printk(KERN_ERR "Failed to setup powerdomains\n");
		goto err2;
	}

	(void) clkdm_for_each(clkdms_setup, NULL);

	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
	if (mpu_pwrdm == NULL) {
		printk(KERN_ERR "Failed to get mpu_pwrdm\n");
		goto err2;
	}

	neon_pwrdm = pwrdm_lookup("neon_pwrdm");
	per_pwrdm = pwrdm_lookup("per_pwrdm");
	core_pwrdm = pwrdm_lookup("core_pwrdm");
	cam_pwrdm = pwrdm_lookup("cam_pwrdm");

	neon_clkdm = clkdm_lookup("neon_clkdm");
	mpu_clkdm = clkdm_lookup("mpu_clkdm");
	per_clkdm = clkdm_lookup("per_clkdm");
	core_clkdm = clkdm_lookup("core_clkdm");

	omap_push_sram_idle();
#ifdef CONFIG_SUSPEND
	suspend_set_ops(&omap_pm_ops);
#endif /* CONFIG_SUSPEND */

	pm_idle = omap3_pm_idle;
	omap3_idle_init();

	/*
	 * RTA is disabled during initialization as per erratum i608.
	 * It is safer to disable RTA in the bootloader, but we would like
	 * to be doubly sure here and prevent any mishaps.
	 */
	if (IS_PM34XX_ERRATUM(PM_RTA_ERRATUM_i608))
		omap3630_ctrl_disable_rta();

	clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		omap3_secure_ram_storage =
			kmalloc(0x803F, GFP_KERNEL);
		if (!omap3_secure_ram_storage)
			printk(KERN_ERR "Memory allocation failed when "
					"allocating for secure sram context\n");

		local_irq_disable();
		local_fiq_disable();

		omap_dma_global_context_save();
		omap3_save_secure_ram_context(PWRDM_POWER_ON);
		omap_dma_global_context_restore();

		local_irq_enable();
		local_fiq_enable();
	}

	omap3_save_scratchpad_contents();
err1:
	return ret;
err2:
	free_irq(INT_34XX_PRCM_MPU_IRQ, NULL);
	list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) {
		list_del(&pwrst->node);
		kfree(pwrst);
	}
	return ret;
}

late_initcall(omap3_pm_init);