/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/common/mcpm_head.S -- kernel entry point for multi-cluster PM
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * Refer to Documentation/arch/arm/cluster-pm-race-avoidance.rst
 * for details of the synchronisation algorithms used here.
 */

#include <linux/linkage.h>
#include <asm/mcpm.h>
#include <asm/assembler.h>

#include "vlock.h"

.arch armv7-a

.if MCPM_SYNC_CLUSTER_CPUS
.error "cpus must be the first member of struct mcpm_sync_struct"
.endif
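@ The per-CPU state bytes are addressed below as
@ mcpm_sync + cluster * MCPM_SYNC_CLUSTER_SIZE + cpu * MCPM_SYNC_CPU_SIZE,
@ i.e. without adding an offset for the cpus[] member itself, hence the
@ requirement that it sit at offset 0 in struct mcpm_sync_struct.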

	.macro	pr_dbg	string
#if defined(CONFIG_DEBUG_LL) && defined(DEBUG)
	b	1901f
1902:	.asciz	"CPU"
1903:	.asciz	" cluster"
1904:	.asciz	": \string"
	.align
1901:	adr	r0, 1902b
	bl	printascii
	mov	r0, r9
	bl	printhex2
	adr	r0, 1903b
	bl	printascii
	mov	r0, r10
	bl	printhex2
	adr	r0, 1904b
	bl	printascii
#endif
	.endm
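
	@ With CONFIG_DEBUG_LL and DEBUG both defined, pr_dbg prints a line of
	@ the form "CPU<xx> cluster<yy>: <string>" through the low-level
	@ printascii/printhex2 helpers, using the cpu/cluster numbers kept in
	@ r9/r10 below; otherwise it expands to nothing.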

	.arm
	.align

ENTRY(mcpm_entry_point)

 ARM_BE8(setend	be)
 THUMB(	badr	r12, 1f		)
 THUMB(	bx	r12		)
 THUMB(	.thumb			)
1:
	mrc	p15, 0, r0, c0, c0, 5		@ MPIDR
	ubfx	r9, r0, #0, #8			@ r9 = cpu
	ubfx	r10, r0, #8, #8			@ r10 = cluster
	mov	r3, #MAX_CPUS_PER_CLUSTER
	mla	r4, r3, r10, r9			@ r4 = canonical CPU index
	cmp	r4, #(MAX_CPUS_PER_CLUSTER * MAX_NR_CLUSTERS)
	blo	2f

	/* We didn't expect this CPU.  Try to cheaply make it quiet. */
1:	wfi
	wfe
	b	1b
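
	@ WFI and WFE may complete for any pending interrupt or event, so
	@ keep looping rather than assuming a single wait is enough.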

2:	pr_dbg	"kernel mcpm_entry_point\n"

	/*
	 * MMU is off so we need to get to various variables in a
	 * position independent way.
	 */
	adr	r5, 3f
	ldmia	r5, {r0, r6, r7, r8, r11}
	add	r0, r5, r0			@ r0 = mcpm_entry_early_pokes
	add	r6, r5, r6			@ r6 = mcpm_entry_vectors
	ldr	r7, [r5, r7]			@ r7 = mcpm_power_up_setup_phys
	add	r8, r5, r8			@ r8 = mcpm_sync
	add	r11, r5, r11			@ r11 = first_man_locks
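
	@ The literal pool at 3: below holds link-time offsets from 3b; adding
	@ the run-time address of 3b (in r5) turns them into usable physical
	@ addresses.  mcpm_power_up_setup_phys is dereferenced rather than
	@ address-taken because it holds the physical address of the platform
	@ routine, stored there by mcpm_sync_init().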

	@ Perform an early poke, if any
	add	r0, r0, r4, lsl #3
	ldmia	r0, {r0, r1}
	teq	r0, #0
	strne	r1, [r0]
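
	@ Each poke slot is two words, {physical address, value}, indexed by
	@ the canonical CPU index computed above; a zero address means there
	@ is nothing to poke.  Slots are filled in from the C side (see
	@ mcpm_set_early_poke() in mcpm_entry.c).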

	mov	r0, #MCPM_SYNC_CLUSTER_SIZE
	mla	r8, r0, r10, r8			@ r8 = sync cluster base

	@ Signal that this CPU is coming UP:
	mov	r0, #CPU_COMING_UP
	mov	r5, #MCPM_SYNC_CPU_SIZE
	mla	r5, r9, r5, r8			@ r5 = sync cpu address
	strb	r0, [r5]

	@ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
	@ state, because there is at least one active CPU (this CPU).

	mov	r0, #VLOCK_SIZE
	mla	r11, r0, r10, r11		@ r11 = cluster first man lock
	mov	r0, r11
	mov	r1, r9				@ cpu
	bl	vlock_trylock			@ implies DMB

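	@ vlock_trylock returns 0 only to the caller that actually took the
	@ lock: that CPU becomes the cluster's "first man" and performs the
	@ cluster-level setup, while the others wait for it below.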
	cmp	r0, #0				@ failed to get the lock?
	bne	mcpm_setup_wait		@ wait for cluster setup if so

	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
	cmp	r0, #CLUSTER_UP			@ cluster already up?
	bne	mcpm_setup			@ if not, set up the cluster

	@ Otherwise, release the first man lock and skip setup:
	mov	r0, r11
	bl	vlock_unlock
	b	mcpm_setup_complete

mcpm_setup:
	@ Control dependency implies strb not observable before previous ldrb.
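	@ (The branch that brought us here depends on the value returned by
	@ that ldrb, and stores are never made visible speculatively, so no
	@ explicit barrier is needed in between.)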

	@ Signal that the cluster is being brought up:
	mov	r0, #INBOUND_COMING_UP
	strb	r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
	dmb

	@ Any CPU trying to take the cluster into CLUSTER_GOING_DOWN from this
	@ point onwards will observe INBOUND_COMING_UP and abort.

	@ Wait for any previously-pending cluster teardown operations to abort
	@ or complete:
mcpm_teardown_wait:
	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
	cmp	r0, #CLUSTER_GOING_DOWN
	bne	first_man_setup
	wfe
	b	mcpm_teardown_wait
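
	@ The outbound side is expected to issue SEV after updating the
	@ cluster state (see the race avoidance document), so the WFE above
	@ will not stall once CLUSTER_GOING_DOWN has been left.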

first_man_setup:
	dmb

	@ If the outbound gave up before teardown started, skip cluster setup:

	cmp	r0, #CLUSTER_UP
	beq	mcpm_setup_leave

	@ power_up_setup is now responsible for setting up the cluster:
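	@ r7 holds the physical address of the platform's power_up_setup
	@ routine, or 0 if there is none.  It is entered with r0 set to the
	@ affinity level (1 = cluster here, 0 = CPU further down) and runs
	@ with the MMU still off, like the rest of this code.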

	cmp	r7, #0
	mov	r0, #1		@ second (cluster) affinity level
	blxne	r7		@ Call power_up_setup if defined
	dmb

	mov	r0, #CLUSTER_UP
	strb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
	dmb

mcpm_setup_leave:
	@ Leave the cluster setup critical section:

	mov	r0, #INBOUND_NOT_COMING_UP
	strb	r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
	dsb	st
	sev
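
	@ The DSB makes the state updates above visible before the SEV wakes
	@ any CPU waiting on them in WFE.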

	mov	r0, r11
	bl	vlock_unlock	@ implies DMB
	b	mcpm_setup_complete

	@ In the contended case, non-first men wait here for cluster setup
	@ to complete:
mcpm_setup_wait:
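	@ Poll the cluster state, sleeping in WFE while it is not yet
	@ CLUSTER_UP; the SEV issued from mcpm_setup_leave after CLUSTER_UP
	@ has been published wakes us up again.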
	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
	cmp	r0, #CLUSTER_UP
	wfene
	bne	mcpm_setup_wait
	dmb

mcpm_setup_complete:
	@ If a platform-specific CPU setup hook is needed, it is
	@ called from here.

	cmp	r7, #0
	mov	r0, #0		@ first (CPU) affinity level
	blxne	r7		@ Call power_up_setup if defined
	dmb

	@ Mark the CPU as up:

	mov	r0, #CPU_UP
	strb	r0, [r5]

	@ Observability order of CPU_UP and opening of the gate does not matter.
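
	@ Spin (sleeping in WFE) until this CPU's slot in mcpm_entry_vectors
	@ becomes non-zero.  The slot is written from the C side with the
	@ physical address to branch to (see mcpm_set_entry_vector() in
	@ mcpm_entry.c); any wake-up event simply causes it to be re-read.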

mcpm_entry_gated:
	ldr	r5, [r6, r4, lsl #2]		@ r5 = CPU entry vector
	cmp	r5, #0
	wfeeq
	beq	mcpm_entry_gated
	dmb

	pr_dbg	"released\n"
	bx	r5

	.align	2

3:	.word	mcpm_entry_early_pokes - .
	.word	mcpm_entry_vectors - 3b
	.word	mcpm_power_up_setup_phys - 3b
	.word	mcpm_sync - 3b
	.word	first_man_locks - 3b

ENDPROC(mcpm_entry_point)

	.bss

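	@ first_man_locks is accessed with the MMU and caches off, so it is
	@ aligned and padded to the cache writeback granule to keep it from
	@ sharing a cache line with data other CPUs may hold in their caches.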
	.align	CACHE_WRITEBACK_ORDER
	.type	first_man_locks, #object
first_man_locks:
	.space	VLOCK_SIZE * MAX_NR_CLUSTERS
	.align	CACHE_WRITEBACK_ORDER

	.type	mcpm_entry_vectors, #object
ENTRY(mcpm_entry_vectors)
	.space	4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER

	.type	mcpm_entry_early_pokes, #object
ENTRY(mcpm_entry_early_pokes)
	.space	8 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER

	.type	mcpm_power_up_setup_phys, #object
ENTRY(mcpm_power_up_setup_phys)
	.space  4		@ set by mcpm_sync_init()
232