xref: /openbmc/linux/arch/arm/mach-omap2/sleep33xx.S (revision 41d9d44d)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Low level suspend code for AM33XX SoCs
 *
 * Copyright (C) 2012-2018 Texas Instruments Incorporated - http://www.ti.com/
 *	Dave Gerlach, Vaibhav Bedia
 */
8
9#include <generated/ti-emif-asm-offsets.h>
10#include <generated/ti-pm-asm-offsets.h>
11#include <linux/linkage.h>
12#include <linux/ti-emif-sram.h>
13#include <asm/assembler.h>
14#include <asm/memory.h>
15
16#include "iomap.h"
17#include "cm33xx.h"
18
19#define AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED			0x00030000
20#define AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE			0x0003
21#define AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE			0x0002
22
23	.arm
24	.align 3
25
/*
 * am33xx_do_wfi - enter DDR self-refresh and execute WFI.
 *
 * Runs from SRAM (copied via am33xx_pm_sram below). Puts the EMIF/DDR
 * into self-refresh, gates the EMIF and MPU clocks so the WKUP_M3 sees
 * the MPU WFI, then executes WFI with the D-cache disabled (all data
 * accesses strongly ordered, so nothing depends on DDR).
 *
 * In:    nothing
 * Out:   r0 = 1 on the abort path (a late interrupt cancelled the WFI);
 *        on a real suspend the CPU resumes via am33xx_resume_offset.
 * Uses:  r9 = base of the EMIF PM function table (must stay live
 *        across the whole sequence).
 */
ENTRY(am33xx_do_wfi)
	stmfd	sp!, {r4 - r11, lr}	@ save callee-saved regs + return addr

	/*
	 * Flush all data from the L1 and L2 data cache before disabling
	 * SCTLR.C bit.
	 */
	ldr	r1, kernel_flush	@ literal-pool load: position independent
	blx	r1			@ v7_flush_dcache_all

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C would make all the data accesses
	 * strongly ordered and would not hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)	@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Invalidate L1 and L2 data cache (flush again so no dirty line
	 * written while C was being cleared survives).
	 */
	ldr	r1, kernel_flush
	blx	r1

	adr	r9, am33xx_emif_sram_table	@ r9 = EMIF PM jump table

	/* Put DDR into self-refresh so it survives with the EMIF gated */
	ldr	r3, [r9, #EMIF_PM_ENTER_SR_OFFSET]
	blx	r3

	/* Save EMIF context so it can be restored after deep sleep */
	ldr	r3, [r9, #EMIF_PM_SAVE_CONTEXT_OFFSET]
	blx	r3

	/* Disable EMIF (clear MODULEMODE bits -> 0 = DISABLED) */
	ldr	r1, virt_emif_clkctrl
	ldr	r2, [r1]
	bic	r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
	str	r2, [r1]

	/* Poll until the module state reports fully disabled */
	ldr	r1, virt_emif_clkctrl
wait_emif_disable:
	ldr	r2, [r1]
	mov	r3, #AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED
	cmp	r2, r3
	bne	wait_emif_disable

	/*
	 * For the MPU WFI to be registered as an interrupt
	 * to WKUP_M3, MPU_CLKCTRL.MODULEMODE needs to be set
	 * to DISABLED
	 */
	ldr	r1, virt_mpu_clkctrl
	ldr	r2, [r1]
	bic	r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
	str	r2, [r1]

	/*
	 * Execute an ISB instruction to ensure that all of the
	 * CP15 register changes have been committed.
	 */
	isb

	/*
	 * Execute a barrier instruction to ensure that all cache,
	 * TLB and branch predictor maintenance operations issued
	 * have completed.
	 */
	dsb
	dmb

	/*
	 * Execute a WFI instruction and wait until the
	 * STANDBYWFI output is asserted to indicate that the
	 * CPU is in idle and low power state. CPU can speculatively
	 * prefetch the instructions so add NOPs after WFI. Thirteen
	 * NOPs as per Cortex-A8 pipeline.
	 */
	wfi

	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	/* We come here in case of an abort due to a late interrupt */

	/* Set MPU_CLKCTRL.MODULEMODE back to ENABLE */
	ldr	r1, virt_mpu_clkctrl
	mov	r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
	str	r2, [r1]

	/* Re-enable EMIF */
	ldr	r1, virt_emif_clkctrl
	mov	r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
	str	r2, [r1]
wait_emif_enable:
	ldr	r3, [r1]
	cmp	r2, r3
	bne	wait_emif_enable

	/* Take DDR back out of self-refresh (abort path helper) */
	ldr	r1, [r9, #EMIF_PM_ABORT_SR_OFFSET]
	blx	r1

	/*
	 * Set SCTLR.C bit to allow data cache allocation
	 */
	mrc	p15, 0, r0, c1, c0, 0
	orr	r0, r0, #(1 << 2)	@ Enable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

	/* Let the suspend code know about the abort */
	mov	r0, #1
	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
ENDPROC(am33xx_do_wfi)
152
.align
/*
 * Offset of the resume entry point from the start of am33xx_do_wfi.
 * The PM core adds this to the SRAM copy's base to find where the CPU
 * should re-enter after a deep-sleep wakeup (i.e. the instruction
 * immediately following this word: am33xx_resume_from_deep_sleep).
 */
ENTRY(am33xx_resume_offset)
	.word . - am33xx_do_wfi
156
/*
 * am33xx_resume_from_deep_sleep - wakeup path after a real deep sleep.
 *
 * Entered from SRAM with the MMU off, which is why the EMIF CLKCTRL
 * register is accessed through its physical address here (contrast the
 * virt_* literals used on the suspend path above).
 *
 * Re-enables the EMIF, restores its saved context, exits DDR
 * self-refresh, then jumps to the generic cpu_resume with r0 = 0
 * ("no abort").
 */
ENTRY(am33xx_resume_from_deep_sleep)
	/* Re-enable EMIF */
	ldr	r0, phys_emif_clkctrl
	mov	r1, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
	str	r1, [r0]
wait_emif_enable1:
	ldr	r2, [r0]	@ poll until MODULEMODE reads back ENABLE
	cmp	r1, r2
	bne	wait_emif_enable1

	adr	r9, am33xx_emif_sram_table	@ r9 = EMIF PM jump table

	/* Restore the EMIF context saved on the suspend path */
	ldr	r1, [r9, #EMIF_PM_RESTORE_CONTEXT_OFFSET]
	blx	r1

	/* Take DDR out of self-refresh */
	ldr	r1, [r9, #EMIF_PM_EXIT_SR_OFFSET]
	blx	r1

resume_to_ddr:
	/* We are back. Branch to the common CPU resume routine */
	mov	r0, #0		@ 0 = successful suspend (no abort)
	ldr	pc, resume_addr
ENDPROC(am33xx_resume_from_deep_sleep)
180
/*
 * Local variables (literal pool + SRAM data tables)
 */
	.align
resume_addr:
	/* Physical address of cpu_resume: virt -> phys for MMU-off jump */
	.word	cpu_resume - PAGE_OFFSET + 0x80000000
kernel_flush:
	/* Kernel D-cache clean+invalidate helper, called via blx */
	.word   v7_flush_dcache_all
virt_mpu_clkctrl:
	/* Virtual addresses: usable while the MMU is still on */
	.word	AM33XX_CM_MPU_MPU_CLKCTRL
virt_emif_clkctrl:
	.word	AM33XX_CM_PER_EMIF_CLKCTRL
phys_emif_clkctrl:
	/* Physical address: needed on the MMU-off resume path */
	.word	(AM33XX_CM_BASE + AM33XX_CM_PER_MOD + \
		AM33XX_CM_PER_EMIF_CLKCTRL_OFFSET)

.align 3
/* DDR related defines */
am33xx_emif_sram_table:
	/* Filled at runtime with the EMIF PM function pointers */
	.space EMIF_PM_FUNCTIONS_SIZE

/*
 * Descriptor consumed by the PM core when copying this code to SRAM:
 * code start, code size, resume-entry offset, and the two data tables.
 */
ENTRY(am33xx_pm_sram)
	.word am33xx_do_wfi
	.word am33xx_do_wfi_sz
	.word am33xx_resume_offset
	.word am33xx_emif_sram_table
	.word am33xx_pm_ro_sram_data

.align 3
ENTRY(am33xx_pm_ro_sram_data)
	/* Read-only parameter block copied alongside the code */
	.space AMX3_PM_RO_SRAM_DATA_SIZE

ENTRY(am33xx_do_wfi_sz)
	/* Total size of the relocatable suspend code + literals above */
	.word	. - am33xx_do_wfi
215