xref: /openbmc/linux/arch/arm/kernel/sleep.S (revision 2c74a0cefa463a7a483b07ba4d2ea8e4ec7b996c)
#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/glue-cache.h>
#include <asm/glue-proc.h>
#include <asm/system.h>
	.text

/*
 * Save CPU state for a suspend
 *  r1 = v:p offset
 *  r2 = suspend function arg0
 *  r3 = suspend function
 * Note: does not return until system resumes
 */
ENTRY(__cpu_suspend)
	stmfd	sp!, {r4 - r11, lr}
#ifdef MULTI_CPU
	ldr	r10, =processor
	ldr	r5, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
	ldr	ip, [r10, #CPU_DO_RESUME] @ virtual resume function
#else
	ldr	r5, =cpu_suspend_size
	ldr	ip, =cpu_do_resume
#endif
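
/*
 * A rough C view of the MULTI_CPU indirection above.  A sketch only:
 * CPU_SLEEP_SIZE and CPU_DO_RESUME are asm-offsets into struct processor,
 * whose authoritative layout lives in <asm/proc-fns.h> and asm-offsets.c:
 *
 *	struct processor {
 *		...
 *		unsigned int suspend_size;	// bytes cpu_do_suspend stores
 *		void (*do_suspend)(void *);	// save CPU-specific context
 *		void (*do_resume)(void *);	// restore CPU-specific context
 *		...
 *	};
 */
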
	mov	r6, sp			@ current virtual SP
	sub	sp, sp, r5		@ allocate CPU state on stack
	mov	r0, sp			@ save pointer
	add	ip, ip, r1		@ convert resume fn to phys
	stmfd	sp!, {r1, r6, ip}	@ save v:p, virt SP, phys resume fn
	ldr	r5, =sleep_save_sp
	add	r6, sp, r1		@ convert SP to phys
	stmfd	sp!, {r2, r3}		@ save suspend func arg and pointer
#ifdef CONFIG_SMP
	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)	@ read MPIDR
	ALT_UP(mov lr, #0)
	and	lr, lr, #15		@ extract the CPU number
	str	r6, [r5, lr, lsl #2]	@ save phys SP in this CPU's slot
#else
	str	r6, [r5]		@ save phys SP
#endif
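
/*
 * Roughly, in C (a sketch, not the kernel's code: smp_processor_id()
 * stands in for the MPIDR read above, and phys_sp for the r6 value
 * computed a few lines earlier):
 *
 *	sleep_save_sp[smp_processor_id() & 15] = phys_sp;
 */
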
#ifdef MULTI_CPU
	mov	lr, pc
	ldr	pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
#else
	bl	cpu_do_suspend
#endif

	@ flush data cache
#ifdef MULTI_CACHE
	ldr	r10, =cpu_cache
	mov	lr, pc
	ldr	pc, [r10, #CACHE_FLUSH_KERN_ALL]
#else
	bl	__cpuc_flush_kern_all
#endif
	ldmfd	sp!, {r0, pc}		@ call suspend fn
ENDPROC(__cpu_suspend)
	.ltorg
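
/*
 * What __cpu_suspend leaves on the stack, lowest address first.  The
 * struct below is purely illustrative, not a type the kernel defines:
 *
 *	struct suspend_frame {
 *		u32 arg;	// r2: popped into r0 for the suspend fn
 *		u32 fn;		// r3: popped into pc to call it
 *		u32 v2p;	// r1: v:p offset, reloaded by cpu_resume
 *		u32 virt_sp;	// r6: virtual SP used after resume
 *		u32 resume;	// ip: physical address of cpu_do_resume
 *		u8  cpu_state[];	// cpu_suspend_size bytes of context
 *	};
 *
 * sleep_save_sp holds the physical address of the v2p member, so that
 * cpu_resume's "ldmia r0!, {r1, sp, pc}" picks up exactly that triple
 * and, thanks to the writeback, leaves r0 pointing at cpu_state for
 * cpu_do_resume to consume.
 */
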

/*
 * r0 = control register value
 * r1 = v:p offset (preserved by cpu_do_resume)
 * r2 = phys page table base
 * r3 = L1 section flags
 */
ENTRY(cpu_resume_mmu)
	adr	r4, cpu_resume_turn_mmu_on	@ phys addr (MMU still off)
	mov	r4, r4, lsr #20			@ 1MB section index
	orr	r3, r3, r4, lsl #20		@ build 1:1 section entry
	ldr	r5, [r2, r4, lsl #2]	@ save old mapping
	str	r3, [r2, r4, lsl #2]	@ setup 1:1 mapping for mmu code
	sub	r2, r2, r1		@ page table base: phys to virt
	ldr	r3, =cpu_resume_after_mmu	@ virtual return address
	bic	r1, r0, #CR_C		@ ensure D-cache is disabled
	b	cpu_resume_turn_mmu_on
ENDPROC(cpu_resume_mmu)
	.ltorg
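
/*
 * The 1:1 mapping above, approximately in C.  A sketch only: pgd stands
 * for the physical L1 table passed in r2, flags for the section bits in
 * r3, and old for the entry parked in r5:
 *
 *	unsigned long idx = phys_addr_of(cpu_resume_turn_mmu_on) >> 20;
 *	old = pgd[idx];			// one word per 1MB section
 *	pgd[idx] = (idx << 20) | flags;	// identity-map this code's section
 *
 * cpu_resume_after_mmu restores the saved entry once the kernel is
 * running at its virtual addresses again.
 */
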
	.align	5
cpu_resume_turn_mmu_on:
	mcr	p15, 0, r1, c1, c0, 0	@ turn on MMU, I-cache, etc
	mrc	p15, 0, r1, c0, c0, 0	@ read id reg
	mov	r1, r1			@ pipeline flush nop
	mov	r1, r1			@ pipeline flush nop
	mov	pc, r3			@ jump to virtual address
ENDPROC(cpu_resume_turn_mmu_on)
cpu_resume_after_mmu:
	str	r5, [r2, r4, lsl #2]	@ restore old mapping
	mcr	p15, 0, r0, c1, c0, 0	@ turn on D-cache
	bl	cpu_init		@ restore the und/abt/irq banked regs
	ldmfd	sp!, {r4 - r11, pc}
ENDPROC(cpu_resume_after_mmu)

/*
 * Note: Yes, part of the following code is located in the .data section.
 *       This is to allow sleep_save_sp to be accessed with a relative
 *       load while we can't rely on any MMU translation.  We could have
 *       put sleep_save_sp in the .text section as well, but some setups
 *       might insist on .text being truly read-only, and sleep_save_sp
 *       must be written to at suspend time.
 */
	.data
	.align
ENTRY(cpu_resume)
#ifdef CONFIG_SMP
	adr	r0, sleep_save_sp
	ALT_SMP(mrc p15, 0, r1, c0, c0, 5)	@ read MPIDR
	ALT_UP(mov r1, #0)
	and	r1, r1, #15		@ extract the CPU number
	ldr	r0, [r0, r1, lsl #2]	@ stack phys addr
#else
	ldr	r0, sleep_save_sp	@ stack phys addr
#endif
	setmode	PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1  @ set SVC, irqs off
	@ load v:p, stack, resume fn
  ARM(	ldmia	r0!, {r1, sp, pc}	)
THUMB(	ldmia	r0!, {r1, r2, r3}	)
THUMB(	mov	sp, r2			)
THUMB(	bx	r3			)
ENDPROC(cpu_resume)
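
/*
 * Platform code typically arms its wakeup path with the physical address
 * of cpu_resume before suspending.  A minimal sketch, assuming a
 * hypothetical SoC register RESUME_ADDR_REG (not defined in this file):
 *
 *	extern void cpu_resume(void);
 *
 *	static void plat_prepare_suspend(void)
 *	{
 *		// boot ROM/bootstub jumps here, MMU off, on wakeup
 *		writel(virt_to_phys(cpu_resume), RESUME_ADDR_REG);
 *	}
 */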

sleep_save_sp:
	.rept	CONFIG_NR_CPUS
	.long	0				@ preserve stack phys ptr here
	.endr
126