xref: /openbmc/linux/arch/arm/kernel/sleep.S (revision 75f25bd3)
#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/glue-cache.h>
#include <asm/glue-proc.h>
#include <asm/system.h>
	.text

/*
 * Save CPU state for a suspend
 *  r1 = v:p offset
 *  r2 = suspend function arg0
 *  r3 = suspend function
 */
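/*
 * A minimal sketch of a call site, for illustration only ("platform_suspend_fn"
 * is a hypothetical power-down routine, not part of this file):
 *
 *	ldr	r1, =PHYS_OFFSET - PAGE_OFFSET	@ v:p offset
 *	mov	r2, #0				@ arg0 for the suspend fn
 *	ldr	r3, =platform_suspend_fn	@ fn(arg0) that powers down
 *	bl	__cpu_suspend
 */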
ENTRY(__cpu_suspend)
	stmfd	sp!, {r4 - r11, lr}
#ifdef MULTI_CPU
	ldr	r10, =processor
	ldr	r5, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
	ldr	ip, [r10, #CPU_DO_RESUME] @ virtual resume function
#else
	ldr	r5, =cpu_suspend_size
	ldr	ip, =cpu_do_resume
#endif
	mov	r6, sp			@ current virtual SP
	sub	sp, sp, r5		@ allocate CPU state on stack
	mov	r0, sp			@ save pointer to CPU save block
	add	ip, ip, r1		@ convert resume fn to phys
	stmfd	sp!, {r1, r6, ip}	@ save v:p, virt SP, phys resume fn
	ldr	r5, =sleep_save_sp
	add	r6, sp, r1		@ convert SP to phys
	stmfd	sp!, {r2, r3}		@ save suspend func arg and pointer
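	@ Stack layout at this point, from sp upwards (a sketch; the CPU
	@ state block is cpu_suspend_size bytes):
	@   {arg0, fn} {v:p, virt SP, phys resume fn} [CPU state] {r4-r11, lr}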
#ifdef CONFIG_SMP
	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)	@ MPIDR
	ALT_UP(mov lr, #0)
	and	lr, lr, #15		@ this CPU's number
	str	r6, [r5, lr, lsl #2]	@ save phys SP in this CPU's slot
#else
	str	r6, [r5]		@ save phys SP
#endif
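	@ "mov lr, pc" followed by a load into pc is an indirect call:
	@ lr is left pointing at the instruction after the load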
#ifdef MULTI_CPU
	mov	lr, pc
	ldr	pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
#else
	bl	cpu_do_suspend
#endif

	@ flush data cache so the state saved above reaches main memory
#ifdef MULTI_CACHE
	ldr	r10, =cpu_cache
	mov	lr, pc
	ldr	pc, [r10, #CACHE_FLUSH_KERN_ALL]
#else
	bl	__cpuc_flush_kern_all
#endif
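	@ lr is pointed at cpu_suspend_abort so that, if the suspend fn
	@ returns (the suspend failed or was aborted), we unwind the
	@ stack and return to __cpu_suspend's caller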
	adr	lr, BSYM(cpu_suspend_abort)
	ldmfd	sp!, {r0, pc}		@ call suspend fn
ENDPROC(__cpu_suspend)
	.ltorg

cpu_suspend_abort:
	ldmia	sp!, {r1 - r3}		@ pop v:p, virt SP, phys resume fn
	mov	sp, r2			@ back to the caller's virtual SP
	ldmfd	sp!, {r4 - r11, pc}
ENDPROC(cpu_suspend_abort)

/*
 * r0 = control register value
 * r1 = v:p offset (preserved by cpu_do_resume)
 * r2 = phys page table base
 * r3 = L1 section flags
 */
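/*
 * A worked example (addresses are illustrative only): if the mmu-on
 * code below sits at phys 0x80123456, then r4 = 0x801 after the
 * "lsr #20", and the section descriptor written at L1 index 0x801
 * maps the 1MB section at 0x80100000 onto itself, so the instruction
 * stream survives the switch to translated addressing.
 */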
ENTRY(cpu_resume_mmu)
	adr	r4, cpu_resume_turn_mmu_on
	mov	r4, r4, lsr #20		@ 1MB section index of mmu-on code
	orr	r3, r3, r4, lsl #20	@ build 1:1 section descriptor
	ldr	r5, [r2, r4, lsl #2]	@ save old mapping
	str	r3, [r2, r4, lsl #2]	@ setup 1:1 mapping for mmu code
	sub	r2, r2, r1		@ page table base back to virtual
	ldr	r3, =cpu_resume_after_mmu
	bic	r1, r0, #CR_C		@ ensure D-cache is disabled
	b	cpu_resume_turn_mmu_on
ENDPROC(cpu_resume_mmu)
	.ltorg
	.align	5			@ keep the mmu-on code cache-line aligned
cpu_resume_turn_mmu_on:
	mcr	p15, 0, r1, c1, c0, 0	@ turn on MMU, I-cache, etc
	mrc	p15, 0, r1, c0, c0, 0	@ read id reg (serialises the write)
	mov	r1, r1			@ two nops to let the MMU enable
	mov	r1, r1			@ take effect before the branch
	mov	pc, r3			@ jump to virtual address
ENDPROC(cpu_resume_turn_mmu_on)
cpu_resume_after_mmu:
	str	r5, [r2, r4, lsl #2]	@ restore old mapping
	mcr	p15, 0, r0, c1, c0, 0	@ turn on D-cache
	bl	cpu_init		@ restore the und/abt/irq banked regs
	mov	r0, #0			@ return zero on success
	ldmfd	sp!, {r4 - r11, pc}
ENDPROC(cpu_resume_after_mmu)

/*
 * Note: Yes, part of the following code is located in the .data section.
 *       This is to allow sleep_save_sp to be accessed with a relative load
 *       while we cannot rely on any MMU translation.  We could have put
 *       sleep_save_sp in the .text section as well, but some setups might
 *       insist on the .text section being truly read-only.
 */
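/*
 * For illustration: the "ldr r0, sleep_save_sp" below assembles to a
 * pc-relative load (ldr r0, [pc, #offset]), so it reads the correct
 * word whether the CPU is still running from physical addresses or
 * already has the MMU on.
 */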
	.data
	.align
ENTRY(cpu_resume)
#ifdef CONFIG_SMP
	adr	r0, sleep_save_sp
	ALT_SMP(mrc p15, 0, r1, c0, c0, 5)	@ MPIDR
	ALT_UP(mov r1, #0)
	and	r1, r1, #15		@ this CPU's number
	ldr	r0, [r0, r1, lsl #2]	@ stack phys addr
#else
	ldr	r0, sleep_save_sp	@ stack phys addr
#endif
	setmode	PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1  @ set SVC, irqs off
	@ load v:p, stack, resume fn
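	@ (a Thumb-2 ldm cannot load sp together with pc, hence the
	@  three-instruction THUMB variant)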
  ARM(	ldmia	r0!, {r1, sp, pc}	)
THUMB(	ldmia	r0!, {r1, r2, r3}	)
THUMB(	mov	sp, r2			)
THUMB(	bx	r3			)
ENDPROC(cpu_resume)
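
/*
 * One word per possible CPU: __cpu_suspend stores each CPU's physical
 * stack pointer here, and cpu_resume picks it up again on the way back.
 */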
sleep_save_sp:
	.rept	CONFIG_NR_CPUS
	.long	0				@ preserve stack phys ptr here
	.endr