/*
 * (C) Copyright 2014-2015 Freescale Semiconductor
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 * Extracted from armv8/start.S
 */

#include <config.h>
#include <linux/linkage.h>
#include <asm/gic.h>
#include <asm/macro.h>
#ifdef CONFIG_MP
#include <asm/arch/mp.h>
#endif
#ifdef CONFIG_FSL_LSCH3
#include <asm/arch-fsl-layerscape/immap_lsch3.h>
#include <asm/arch-fsl-layerscape/soc.h>
#endif
#include <asm/u-boot.h>

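/*
 * lowlevel_init: early SoC setup called from the ARMv8 start code.
 * On LSCH3 parts it tunes the CCN-504 interconnect (DVM domain, RN-I
 * QoS), sets the SMMU page size, initializes the secure GIC state and
 * opens up TZPC/TZASC access; on LS1046A it also programs the L2 RAM
 * latencies. Secondary cores branch into the spin-table boot code
 * below and do not return from this routine.
 */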
ENTRY(lowlevel_init)
	mov	x29, lr			/* Save LR */

#ifdef CONFIG_FSL_LSCH3

	/* Set Wuo bit for RN-I 20 */
#ifdef CONFIG_LS2080A
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)
	ldr	x1, =0x00000010
	bl	ccn504_set_aux
#endif

	/* Add fully-coherent masters to DVM domain */
	ldr	x0, =CCI_MN_BASE
	ldr	x1, =CCI_MN_RNF_NODEID_LIST
	ldr	x2, =CCI_MN_DVM_DOMAIN_CTL_SET
	bl	ccn504_add_masters_to_dvm

	/* Set all RN-I ports to QoS of 15 */
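	/*
	 * The same QoS value is written to slave ports S0/S1/S2 of each
	 * RN-I node used on this SoC (nodes 0, 2, 6, 12, 16 and 20);
	 * ccn504_set_qos is called with the port register base in x0 and
	 * the QoS value in x1.
	 */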
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
#endif

#ifdef SMMU_BASE
	/* Set the SMMU page size in the sACR register */
	ldr	x1, =SMMU_BASE
	ldr	w0, [x1, #0x10]
	orr	w0, w0, #1 << 16  /* set sACR.pagesize to indicate 64K page */
	str	w0, [x1, #0x10]
#endif

	/* Initialize GIC Secure Bank Status */
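	/*
	 * Only the boot (master) core initializes the GIC distributor;
	 * every core then sets up its own per-CPU interface (the
	 * redistributor on GICv3, the CPU interface on GICv2).
	 */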
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#ifdef CONFIG_GICV3
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

	branch_if_master x0, x1, 2f

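	/*
	 * Secondary (non-master) cores fall through to here and enter the
	 * spin-table boot code; they never return to lowlevel_init.
	 */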
#if defined(CONFIG_MP) && defined(CONFIG_ARMV8_MULTIENTRY)
	ldr	x0, =secondary_boot_func
	blr	x0
#endif

2:
#ifdef CONFIG_FSL_TZPC_BP147
	/* Set Non Secure access for all devices protected via TZPC */
	ldr	x1, =TZPCDECPROT_0_SET_BASE /* Decode Protection-0 Set Reg */
	orr	w0, w0, #1 << 3 /* DCFG_RESET is accessible from NS world */
	str	w0, [x1]

	isb
	dsb	sy
#endif

#ifdef CONFIG_FSL_TZASC_400
	/*
	 * LS2080A and its personalities do not support TZASC,
	 * so skip the TZASC-related operations.
	 */
	bl	get_svr
	lsr	w0, w0, #16
	ldr	w1, =SVR_DEV_LS2080A
	cmp	w0, w1
	b.eq	1f

	/* Set TZASC so that:
	 * a. We use only Region0, whose global secure write/read is enabled
	 * b. We use only Region0, whose NSAID write/read is enabled
	 *
	 * NOTE: As per the CCSR map doc, TZASC 3 and TZASC 4 are just
	 *	 placeholders.
	 */
	ldr	x1, =TZASC_GATE_KEEPER(0)
	ldr	w0, [x1]		/* Filter 0 Gate Keeper Register */
	orr	w0, w0, #1 << 0		/* Set open_request for Filter 0 */
	str	w0, [x1]

	ldr	x1, =TZASC_GATE_KEEPER(1)
	ldr	w0, [x1]		/* Filter 1 Gate Keeper Register */
	orr	w0, w0, #1 << 0		/* Set open_request for Filter 1 */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ATTRIBUTES_0(0)
	ldr	w0, [x1]		/* Region-0 Attributes Register */
	orr	w0, w0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	w0, w0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ATTRIBUTES_0(1)
	ldr	w0, [x1]		/* Region-1 Attributes Register */
	orr	w0, w0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	w0, w0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ID_ACCESS_0(0)
	ldr	w0, [x1]		/* Region-0 Access Register */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ID_ACCESS_0(1)
	ldr	w0, [x1]		/* Region-1 Access Register */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]

	isb
	dsb	sy
#endif
1:
#ifdef CONFIG_ARCH_LS1046A
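	/*
	 * S3_1_c11_c0_2 is the implementation-defined L2CTLR_EL1 register
	 * of the Cortex-A72 cores on this SoC.
	 */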
	/* Initialize the L2 RAM latency */
	mrs   x1, S3_1_c11_c0_2
	mov   x0, #0x1C7
	/* Clear L2 Tag RAM latency and L2 Data RAM latency */
	bic   x1, x1, x0
	/* Set L2 Data RAM latency bits [2:0] */
	orr   x1, x1, #0x2
	/* Set L2 Tag RAM latency bits [8:6] */
	orr   x1, x1, #0x80
	msr   S3_1_c11_c0_2, x1
	isb
#endif

	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

#ifdef CONFIG_FSL_LSCH3
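	/* Return the SoC SVR (System Version Register) value in w0 */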
	.globl get_svr
get_svr:
	ldr	x1, =FSL_LSCH3_SVR
	ldr	w0, [x1]
	ret

hnf_pstate_poll:
	/* x0 has the desired status, return 0 for success, 1 for timeout
	 * clobber x1, x2, x3, x4, x6, x7
	 */
	mov	x1, x0
	mov	x7, #0			/* flag for timeout */
	mrs	x3, cntpct_el0		/* read timer */
	add	x3, x3, #1200		/* timeout after 100 microseconds */
	mov	x0, #0x18
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_STATUS */
	mov	w6, #8			/* HN-F node count */
1:
	ldr	x2, [x0]
	cmp	x2, x1			/* check status */
	b.eq	2f
	mrs	x4, cntpct_el0
	cmp	x4, x3
	b.ls	1b
	mov	x7, #1			/* timeout */
	b	3f
2:
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b
3:
	mov	x0, x7
	ret

hnf_set_pstate:
	/* x0 has the desired state, clobber x1, x2, x6 */
	mov	x1, x0
	/* power state to SFONLY */
	mov	w6, #8			/* HN-F node count */
	mov	x0, #0x10
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_REQ */
1:	/* set pstate to sfonly */
	ldr	x2, [x0]
	and	x2, x2, #0xfffffffffffffffc	/* & HNFPSTAT_MASK */
	orr	x2, x2, x1
	str	x2, [x0]
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b

	ret

ENTRY(__asm_flush_l3_dcache)
	/*
	 * Return status in x0
	 *    success 0
	 *    timeout 1 for setting SFONLY, 2 for FAM, 3 for both
	 */
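	/*
	 * The L3 (CCN-504 HN-F) cache is flushed by requesting the SFONLY
	 * and then the FAM P-state on every HN-F node and polling each
	 * transition; timeouts from the two steps are accumulated in x8.
	 */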
	mov	x29, lr
	mov	x8, #0

	dsb	sy
	mov	x0, #0x1		/* HNFPSTAT_SFONLY */
	bl	hnf_set_pstate

	mov	x0, #0x4		/* SFONLY status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	mov	x8, #1			/* timeout */
1:
	dsb	sy
	mov	x0, #0x3		/* HNFPSTAT_FAM */
	bl	hnf_set_pstate

	mov	x0, #0xc		/* FAM status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	add	x8, x8, #0x2
1:
	mov	x0, x8
	mov	lr, x29
	ret
ENDPROC(__asm_flush_l3_dcache)
#endif

#ifdef CONFIG_MP
	/* Keep literals not used by the secondary boot code outside it */
	.ltorg

	/* Using 64 bit alignment since the spin table is accessed as data */
	.align 4
	.global secondary_boot_code
	/* Secondary Boot Code starts here */
secondary_boot_code:
	.global __spin_table
__spin_table:
	.space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE

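	/*
	 * Each spin-table element is 64 bytes. As used below, offset 0
	 * holds the jump address written by the boot core, offset 8 the
	 * boot status, offset 16 the LPID and offset 24 the arch of the
	 * image to enter (compared against IH_ARCH_DEFAULT).
	 */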
	.align 2
ENTRY(secondary_boot_func)
	/*
	 * MPIDR_EL1 Fields:
	 * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
	 * MPIDR[7:2] = AFF0_RES
	 * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
	 * MPIDR[23:16] = AFF2_CLUSTERID
	 * MPIDR[24] = MT
	 * MPIDR[29:25] = RES0
	 * MPIDR[30] = U
	 * MPIDR[31] = ME
	 * MPIDR[39:32] = AFF3
	 *
	 * Linear Processor ID (LPID) calculation from MPIDR_EL1:
	 * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
	 * until AFF2_CLUSTERID and AFF3 have non-zero values)
	 *
	 * LPID = MPIDR[15:8] | MPIDR[1:0]
	 */
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15
	ubfm	x2, x0, #0, #1
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */
	ubfm	x9, x0, #0, #15		/* x9 contains MPIDR[15:0] */
	/*
	 * offset of the spin table element for this core from start of spin
	 * table (each elem is padded to 64 bytes)
	 */
	lsl	x1, x10, #6
	ldr	x0, =__spin_table
	/* physical address of this cpu's spin table element */
	add	x11, x1, x0

	ldr	x0, =__real_cntfrq
	ldr	x0, [x0]
	msr	cntfrq_el0, x0	/* set with real frequency */
	str	x9, [x11, #16]	/* LPID */
	mov	x4, #1
	str	x4, [x11, #8]	/* STATUS */
	dsb	sy
#if defined(CONFIG_GICV3)
	gic_wait_for_interrupt_m x0
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICC_BASE
	gic_wait_for_interrupt_m x0, w1
#endif

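	/*
	 * Spin until the boot core writes a non-zero entry address into
	 * this core's spin-table slot, then honor the requested execution
	 * state and jump to it via EL2 (and optionally EL1).
	 */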
slave_cpu:
	wfe
	ldr	x0, [x11]
	cbz	x0, slave_cpu
#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
	mrs	x1, sctlr_el2
#else
	mrs	x1, sctlr_el1
#endif
	tbz	x1, #25, cpu_is_le
	rev	x0, x0			/* BE to LE conversion */
cpu_is_le:
	ldr	x5, [x11, #24]
	ldr	x6, =IH_ARCH_DEFAULT
	cmp	x6, x5
	b.eq	1f

#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x3, secondary_switch_to_el1
	ldr	x4, =ES_TO_AARCH64
#else
	ldr	x3, [x11]
	ldr	x4, =ES_TO_AARCH32
#endif
	bl	secondary_switch_to_el2

1:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x3, secondary_switch_to_el1
#else
	ldr	x3, [x11]
#endif
	ldr	x4, =ES_TO_AARCH64
	bl	secondary_switch_to_el2

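	/*
	 * secondary_switch_to_el2 expects the entry point in x3 and the
	 * target execution state (ES_TO_AARCH64/ES_TO_AARCH32) in x4;
	 * when run at EL3 the switch macro does not return, otherwise the
	 * routine simply returns.
	 */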
ENTRY(secondary_switch_to_el2)
	switch_el x5, 1f, 0f, 0f
0:	ret
1:	armv8_switch_to_el2_m x3, x4, x5
ENDPROC(secondary_switch_to_el2)

ENTRY(secondary_switch_to_el1)
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15
	ubfm	x2, x0, #0, #1
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */

	lsl	x1, x10, #6
	ldr	x0, =__spin_table
	/* physical address of this cpu's spin table element */
	add	x11, x1, x0

	ldr	x3, [x11]

	ldr	x5, [x11, #24]
	ldr	x6, =IH_ARCH_DEFAULT
	cmp	x6, x5
	b.eq	2f

	ldr	x4, =ES_TO_AARCH32
	bl	switch_to_el1

2:	ldr	x4, =ES_TO_AARCH64

switch_to_el1:
	switch_el x5, 0f, 1f, 0f
0:	ret
1:	armv8_switch_to_el1_m x3, x4, x5
ENDPROC(secondary_switch_to_el1)

	/* Ensure that the literals used by the secondary boot code are
	 * assembled within it (this is required so that we can protect
	 * this area with a single memreserve region).
	 */
	.ltorg

	/* 64 bit alignment for elements accessed as data */
	.align 4
	.global __real_cntfrq
__real_cntfrq:
	.quad COUNTER_FREQUENCY
	.globl __secondary_boot_code_size
	.type __secondary_boot_code_size, %object
	/* Secondary Boot Code ends here */
__secondary_boot_code_size:
	.quad .-secondary_boot_code
#endif