/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Low level suspend code for AM33XX SoCs
 *
 * Copyright (C) 2012-2018 Texas Instruments Incorporated - http://www.ti.com/
 *	Dave Gerlach, Vaibhav Bedia
 */

#include <generated/ti-pm-asm-offsets.h>
#include <linux/linkage.h>
#include <linux/ti-emif-sram.h>
#include <asm/assembler.h>
#include <asm/memory.h>

#include "iomap.h"
#include "cm33xx.h"

#define AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED			0x00030000
#define AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE			0x0003
#define AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE			0x0002
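
/*
 * Per the AM335x TRM, MODULEMODE occupies bits [1:0] of the CM_*_CLKCTRL
 * registers used below (0x0 = DISABLED, 0x2 = ENABLE) and the read-only
 * IDLEST status field occupies bits [17:16] (0x3 = module fully disabled);
 * the values above encode those fields.
 */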

	.arm
	.align 3

ENTRY(am33xx_do_wfi)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack

	/*
	 * Flush all data from the L1 and L2 data cache before disabling
	 * the SCTLR.C bit.
	 */
	ldr	r1, kernel_flush
	blx	r1
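
	/*
	 * This code runs from a copy in SRAM, so v7_flush_dcache_all is
	 * reached through an absolute address kept in the literal pool
	 * rather than by a PC-relative branch into kernel text.
	 */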

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C makes all data accesses
	 * strongly ordered, so they will no longer hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)	@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Invalidate the L1 and L2 data cache.
	 */
	ldr	r1, kernel_flush
	blx	r1

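	/*
	 * am33xx_emif_sram_table holds the absolute addresses of the
	 * ti-emif-sram PM helpers copied to SRAM alongside this code;
	 * the EMIF_PM_*_OFFSET constants index into that table.
	 */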
	adr	r9, am33xx_emif_sram_table

	ldr	r3, [r9, #EMIF_PM_ENTER_SR_OFFSET]
	blx	r3

	ldr	r3, [r9, #EMIF_PM_SAVE_CONTEXT_OFFSET]
	blx	r3

	/* Disable EMIF */
	ldr	r1, virt_emif_clkctrl
	ldr	r2, [r1]
	bic	r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
	str	r2, [r1]

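	/*
	 * Poll until the whole CLKCTRL register reads back as IDLEST =
	 * disabled; MODULEMODE was cleared above, so the register value
	 * compares equal to the IDLEST field alone.
	 */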
	ldr	r1, virt_emif_clkctrl
wait_emif_disable:
	ldr	r2, [r1]
	mov	r3, #AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED
	cmp	r2, r3
	bne	wait_emif_disable

	/*
	 * For the MPU WFI to be registered as an interrupt
	 * to WKUP_M3, MPU_CLKCTRL.MODULEMODE needs to be set
	 * to DISABLED.
	 */
	ldr	r1, virt_mpu_clkctrl
	ldr	r2, [r1]
	bic	r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
	str	r2, [r1]

	/*
	 * Execute an ISB instruction to ensure that all of the
	 * CP15 register changes have been committed.
	 */
	isb

	/*
	 * Execute barrier instructions to ensure that all cache,
	 * TLB and branch predictor maintenance operations issued
	 * so far have completed.
	 */
	dsb
	dmb

	/*
	 * Execute a WFI instruction and wait until the STANDBYWFI
	 * output is asserted to indicate that the CPU is in the idle
	 * and low power state. The CPU can speculatively prefetch
	 * instructions, so add NOPs after the WFI: thirteen, as per
	 * the Cortex-A8 pipeline.
	 */
	wfi

	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	/* We come here if the WFI was aborted by a late interrupt */

	/* Set MPU_CLKCTRL.MODULEMODE back to ENABLE */
	ldr	r1, virt_mpu_clkctrl
	mov	r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
	str	r2, [r1]

	/* Re-enable EMIF */
	ldr	r1, virt_emif_clkctrl
	mov	r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
	str	r2, [r1]
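	/* Wait until the module reads back as enabled (register == ENABLE) */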
wait_emif_enable:
	ldr	r3, [r1]
	cmp	r2, r3
	bne	wait_emif_enable

	ldr	r1, [r9, #EMIF_PM_ABORT_SR_OFFSET]
	blx	r1

	/*
	 * Set the SCTLR.C bit to allow data cache allocation again.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	orr	r0, r0, #(1 << 2)	@ Enable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

	/* Let the suspend code know about the abort */
	mov	r0, #1
	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
ENDPROC(am33xx_do_wfi)

	.align
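/*
 * Offset of the deep-sleep resume entry point below from the start of
 * am33xx_do_wfi; the PM code adds this to the base address of the SRAM
 * copy to find the physical resume vector.
 */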
ENTRY(am33xx_resume_offset)
	.word . - am33xx_do_wfi

ENTRY(am33xx_resume_from_deep_sleep)
	/* Re-enable EMIF */
	ldr	r0, phys_emif_clkctrl
	mov	r1, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
	str	r1, [r0]
wait_emif_enable1:
	ldr	r2, [r0]
	cmp	r1, r2
	bne	wait_emif_enable1

	adr	r9, am33xx_emif_sram_table

	ldr	r1, [r9, #EMIF_PM_RESTORE_CONTEXT_OFFSET]
	blx	r1

	ldr	r1, [r9, #EMIF_PM_EXIT_SR_OFFSET]
	blx	r1

resume_to_ddr:
	/* We are back. Branch to the common CPU resume routine. */
	mov	r0, #0
	ldr	pc, resume_addr
ENDPROC(am33xx_resume_from_deep_sleep)

/*
 * Local variables
 */
	.align
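/*
 * cpu_resume is a kernel virtual address, but the MMU is still off when
 * we branch to it from deep sleep, so convert it to its physical alias
 * (DDR is mapped at 0x80000000 on AM335x).
 */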
resume_addr:
	.word	cpu_resume - PAGE_OFFSET + 0x80000000
kernel_flush:
	.word	v7_flush_dcache_all
virt_mpu_clkctrl:
	.word	AM33XX_CM_MPU_MPU_CLKCTRL
virt_emif_clkctrl:
	.word	AM33XX_CM_PER_EMIF_CLKCTRL
phys_emif_clkctrl:
	.word	(AM33XX_CM_BASE + AM33XX_CM_PER_MOD + \
		AM33XX_CM_PER_EMIF_CLKCTRL_OFFSET)

.align 3
/* DDR related defines */
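/*
 * Reserved space, filled in at runtime with the table of ti-emif-sram
 * PM function addresses.
 */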
am33xx_emif_sram_table:
	.space EMIF_PM_FUNCTIONS_SIZE

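/*
 * Table of symbols the platform PM code uses to copy am33xx_do_wfi and
 * its read-only data into SRAM and to locate the EMIF function table
 * and resume offset within that copy.
 */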
ENTRY(am33xx_pm_sram)
	.word am33xx_do_wfi
	.word am33xx_do_wfi_sz
	.word am33xx_resume_offset
	.word am33xx_emif_sram_table
	.word am33xx_pm_ro_sram_data

.align 3
ENTRY(am33xx_pm_ro_sram_data)
	.space AMX3_PM_RO_SRAM_DATA_SIZE

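/* Total size of the code and data above that gets copied to SRAM */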
ENTRY(am33xx_do_wfi_sz)
	.word	. - am33xx_do_wfi
214