xref: /openbmc/u-boot/arch/arm/cpu/armv7/psci.S (revision b5281323)
/*
 * Copyright (C) 2013,2014 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/psci.h>

	.pushsection ._secure.text, "ax"

	.arch_extension	sec

	.align	5
	.globl _psci_vectors
_psci_vectors:
	b	default_psci_vector	@ reset
	b	default_psci_vector	@ undef
	b	_smc_psci		@ smc
	b	default_psci_vector	@ pabort
	b	default_psci_vector	@ dabort
	b	default_psci_vector	@ hyp
	b	default_psci_vector	@ irq
	b	psci_fiq_enter		@ fiq

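/*
 * Only the SMC and FIQ slots above have real handlers; every other
 * entry returns straight to the caller through default_psci_vector.
 * This table is laid out as a monitor-mode vector table and is
 * expected to be installed by the platform's secure setup code
 * (assumption: that setup lives outside this file).
 *
 * A rough sketch of the register convention used by the SMC path
 * below, per the PSCI calling convention (argument use beyond r0 is
 * up to the individual board handlers):
 *
 *	r0       - PSCI function ID (ARM_PSCI_FN_*)
 *	r1..r3   - call arguments (e.g. target CPU, entry point, context)
 *	r0       - return value (ARM_PSCI_RET_* on error)
 */
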
ENTRY(psci_fiq_enter)
	movs	pc, lr
ENDPROC(psci_fiq_enter)
.weak psci_fiq_enter

ENTRY(default_psci_vector)
	movs	pc, lr
ENDPROC(default_psci_vector)
.weak default_psci_vector

ENTRY(psci_cpu_suspend)
ENTRY(psci_cpu_off)
ENTRY(psci_cpu_on)
ENTRY(psci_migrate)
	mov	r0, #ARM_PSCI_RET_NI	@ Return -1 (Not Implemented)
	mov	pc, lr
ENDPROC(psci_migrate)
ENDPROC(psci_cpu_on)
ENDPROC(psci_cpu_off)
ENDPROC(psci_cpu_suspend)
.weak psci_cpu_suspend
.weak psci_cpu_off
.weak psci_cpu_on
.weak psci_migrate

_psci_table:
	.word	ARM_PSCI_FN_CPU_SUSPEND
	.word	psci_cpu_suspend
	.word	ARM_PSCI_FN_CPU_OFF
	.word	psci_cpu_off
	.word	ARM_PSCI_FN_CPU_ON
	.word	psci_cpu_on
	.word	ARM_PSCI_FN_MIGRATE
	.word	psci_migrate
	.word	0
	.word	0

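/*
 * Dispatch table walked by _smc_psci below: each entry is a pair of
 * words, { PSCI function ID, handler address }, terminated by a zero
 * ID.  The weak handlers above return ARM_PSCI_RET_NI until a board
 * provides real implementations that override them.
 */
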
_smc_psci:
	push	{r4-r7,lr}

	@ Switch to secure (clear SCR.NS)
	mrc	p15, 0, r7, c1, c1, 0
	bic	r4, r7, #1
	mcr	p15, 0, r4, c1, c1, 0
	isb

	adr	r4, _psci_table
1:	ldr	r5, [r4]		@ Load PSCI function ID
	ldr	r6, [r4, #4]		@ Load target PC
	cmp	r5, #0			@ If we reached the end, bail out
	moveq	r0, #ARM_PSCI_RET_INVAL	@ Return -2 (Invalid)
	beq	2f
	cmp	r0, r5			@ If not matching, try next entry
	addne	r4, r4, #8
	bne	1b

	blx	r6			@ Execute PSCI function

	@ Switch back to non-secure (restore saved SCR)
2:	mcr	p15, 0, r7, c1, c1, 0

	pop	{r4-r7, lr}
	movs	pc, lr			@ Return to the kernel

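/*
 * Summary of the SMC path above: clear SCR.NS to enter the secure
 * world, walk _psci_table for a handler matching the function ID in
 * r0, call it (or return ARM_PSCI_RET_INVAL if nothing matches),
 * restore the saved SCR and return to the non-secure caller with
 * movs pc, lr.
 */
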
@ Requires dense and single-cluster CPU ID space
ENTRY(psci_get_cpu_id)
	mrc	p15, 0, r0, c0, c0, 5	/* read MPIDR */
	and	r0, r0, #0xff		/* return CPU ID in cluster */
	bx	lr
ENDPROC(psci_get_cpu_id)
.weak psci_get_cpu_id

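/*
 * The weak psci_get_cpu_id above only extracts MPIDR Aff0, which is
 * enough for a single cluster with densely numbered CPUs.  Platforms
 * with multiple clusters are expected to override it (assumption based
 * on the weak binding; any override lives in board code, not here).
 */
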
/* Imported from Linux kernel */
LENTRY(v7_flush_dcache_all)
	stmfd	sp!, {r4-r5, r7, r9-r11, lr}
	dmb					@ ensure ordering with previous memory accesses
	mrc	p15, 1, r0, c0, c0, 1		@ read clidr
	ands	r3, r0, #0x7000000		@ extract loc from clidr
	mov	r3, r3, lsr #23			@ left align loc bit field
	beq	finished			@ if loc is 0, then no need to clean
	mov	r10, #0				@ start clean at cache level 0
flush_levels:
	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
	and	r1, r1, #7			@ mask of the bits for current cache only
	cmp	r1, #2				@ see what cache we have at this level
	blt	skip				@ skip if no cache, or just i-cache
	mrs	r9, cpsr			@ make cssr&csidr read atomic
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	isb					@ isb to sync the new cssr&csidr
	mrc	p15, 1, r1, c0, c0, 0		@ read the new csidr
	msr	cpsr_c, r9
	and	r2, r1, #7			@ extract the length of the cache lines
	add	r2, r2, #4			@ add 4 (line length offset)
	ldr	r4, =0x3ff
	ands	r4, r4, r1, lsr #3		@ find maximum number on the way size
	clz	r5, r4				@ find bit position of way size increment
	ldr	r7, =0x7fff
	ands	r7, r7, r1, lsr #13		@ extract max number of the index size
loop1:
	mov	r9, r7				@ create working copy of max index
loop2:
	orr	r11, r10, r4, lsl r5		@ factor way and cache number into r11
	orr	r11, r11, r9, lsl r2		@ factor index number into r11
	mcr	p15, 0, r11, c7, c14, 2		@ clean & invalidate by set/way
	subs	r9, r9, #1			@ decrement the index
	bge	loop2
	subs	r4, r4, #1			@ decrement the way
	bge	loop1
skip:
	add	r10, r10, #2			@ increment cache number
	cmp	r3, r10
	bgt	flush_levels
finished:
	mov	r10, #0				@ switch back to cache level 0
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	dsb	st
	isb
	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}
	bx	lr
ENDPROC(v7_flush_dcache_all)

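/*
 * The routine above is the architectural set/way clean+invalidate loop
 * imported from Linux.  A rough C equivalent of its structure, given
 * here purely for illustration (the helper names are made up, not real
 * APIs):
 *
 *	for (level = 0; level < loc; level++) {
 *		if (cache_type(clidr, level) < 2)   // no cache or I-only
 *			continue;
 *		select_cache_level(level);          // write CSSELR
 *		ccsidr = read_ccsidr();
 *		for (way = max_way(ccsidr); way >= 0; way--)
 *			for (set = max_set(ccsidr); set >= 0; set--)
 *				dccisw(level, way, set);  // clean+inval by set/way
 *	}
 */
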
ENTRY(psci_disable_smp)
	mrc	p15, 0, r0, c1, c0, 1		@ ACTLR
	bic	r0, r0, #(1 << 6)		@ Clear SMP bit
	mcr	p15, 0, r0, c1, c0, 1		@ ACTLR
	isb
	dsb
	bx	lr
ENDPROC(psci_disable_smp)
.weak psci_disable_smp

ENTRY(psci_enable_smp)
	mrc	p15, 0, r0, c1, c0, 1		@ ACTLR
	orr	r0, r0, #(1 << 6)		@ Set SMP bit
	mcr	p15, 0, r0, c1, c0, 1		@ ACTLR
	isb
	bx	lr
ENDPROC(psci_enable_smp)
.weak psci_enable_smp

ENTRY(psci_cpu_off_common)
	push	{lr}

	mrc	p15, 0, r0, c1, c0, 0		@ SCTLR
	bic	r0, r0, #(1 << 2)		@ Clear C bit
	mcr	p15, 0, r0, c1, c0, 0		@ SCTLR
	isb
	dsb

	bl	v7_flush_dcache_all

	clrex					@ Clear any stale exclusive monitor state

	bl	psci_disable_smp

	pop	{lr}
	bx	lr
ENDPROC(psci_cpu_off_common)

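/*
 * Note on the ordering above: the D-cache is disabled in SCTLR before
 * the flush so no new lines are allocated while we clean, clrex drops
 * any exclusive monitor state the departing CPU may still hold, and
 * leaving SMP/coherency (psci_disable_smp) comes last.
 */
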
@ expects CPU ID in r0 and returns stack top in r0
ENTRY(psci_get_cpu_stack_top)
	mov	r3, #0x400			@ 1kB of stack per CPU
	mul	r0, r0, r3

	ldr	r3, =psci_text_end		@ end of monitor text
	add	r3, r3, #0x2000			@ Skip two pages
	lsr	r3, r3, #12			@ Align to start of page
	lsl	r3, r3, #12
	sub	r3, r3, #4			@ reserve 1 word for target PC
	sub	r0, r3, r0			@ here's our stack!

	bx	lr
ENDPROC(psci_get_cpu_stack_top)

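/*
 * Resulting layout, with "base" = psci_text_end advanced by 0x2000 and
 * rounded down to a 4 KiB page boundary (illustrative worked example):
 *
 *	CPU 0 stack top / target-PC word:  base - 4
 *	CPU 1 stack top / target-PC word:  base - 4 - 0x400
 *	CPU n stack top / target-PC word:  base - 4 - n * 0x400
 *
 * Each CPU's stack grows downwards from its own top, with one word at
 * the top reserved for the entry point read back in psci_cpu_entry.
 */
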
@ {r0, r1, r2, ip} from _do_nonsec_entry(kernel_entry, 0, machid, r2) in
@ arch/arm/lib/bootm.c:boot_jump_linux() must remain unchanged across
@ this function.
ENTRY(psci_stack_setup)
	mov	r6, lr
	mov	r7, r0
	bl	psci_get_cpu_id		@ CPU ID => r0
	bl	psci_get_cpu_stack_top	@ stack top => r0
	mov	sp, r0
	mov	r0, r7
	bx	r6
ENDPROC(psci_stack_setup)

ENTRY(psci_arch_init)
	mov	pc, lr
ENDPROC(psci_arch_init)
.weak psci_arch_init

ENTRY(psci_cpu_entry)
	bl	psci_enable_smp

	bl	_nonsec_init

	bl	psci_get_cpu_id			@ CPU ID => r0
	bl	psci_get_cpu_stack_top		@ stack top => r0
	ldr	r0, [r0]			@ target PC at stack top
	b	_do_nonsec_entry
ENDPROC(psci_cpu_entry)

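/*
 * Secondary CPU bring-up, as wired above: a board's psci_cpu_on
 * implementation is expected to store the requested entry point in the
 * target-PC word at the new CPU's stack top and then release the CPU
 * into psci_cpu_entry, which re-enables SMP, drops to non-secure via
 * _nonsec_init, reloads that entry point and jumps to it through
 * _do_nonsec_entry.  (The psci_cpu_on side is board code and an
 * assumption here; the generic stub above only returns "not
 * implemented".)
 */
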
	.popsection