/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2014-2015 Freescale Semiconductor
 *
 * Extracted from armv8/start.S
 */

#include <config.h>
#include <linux/linkage.h>
#include <asm/gic.h>
#include <asm/macro.h>
#include <asm/arch-fsl-layerscape/soc.h>
#ifdef CONFIG_MP
#include <asm/arch/mp.h>
#endif
#ifdef CONFIG_FSL_LSCH3
#include <asm/arch-fsl-layerscape/immap_lsch3.h>
#endif
#include <asm/u-boot.h>

/* Get GIC offset
 * For LS1043A rev1.0, the GIC base address is aligned to 4K.
 * For LS1043A rev1.1, if DCFG_GIC400_ALIGN[GIC_ADDR_BIT] is set,
 * the GIC base address is aligned to 4K, otherwise to 64K.
 * output:
 *	x0: the base address of GICD
 *	x1: the base address of GICC
 */
ENTRY(get_gic_offset)
	ldr	x0, =GICD_BASE
#ifdef CONFIG_GICV2
	ldr	x1, =GICC_BASE
#endif
#ifdef CONFIG_HAS_FEATURE_GIC64K_ALIGN
	ldr	x2, =DCFG_CCSR_SVR
	ldr	w2, [x2]
	rev	w2, w2			/* SVR is big-endian in CCSR */
	lsr	w3, w2, #16
	ldr	w4, =SVR_DEV(SVR_LS1043A)
	cmp	w3, w4
	b.ne	1f
	ands	w2, w2, #0xff
	cmp	w2, #REV1_0
	b.eq	1f
	ldr	x2, =SCFG_GIC400_ALIGN
	ldr	w2, [x2]
	rev	w2, w2			/* SCFG is big-endian in CCSR */
	tbnz	w2, #GIC_ADDR_BIT, 1f
	ldr	x0, =GICD_BASE_64K
#ifdef CONFIG_GICV2
	ldr	x1, =GICC_BASE_64K
#endif
1:
#endif
	ret
ENDPROC(get_gic_offset)

ENTRY(smp_kick_all_cpus)
	/* Kick secondary cpus up by SGI 0 interrupt */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	mov	x29, lr			/* Save LR */
	bl	get_gic_offset
	bl	gic_kick_secondary_cpus
	mov	lr, x29			/* Restore LR */
#endif
	ret
ENDPROC(smp_kick_all_cpus)


ENTRY(lowlevel_init)
	mov	x29, lr			/* Save LR */

	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
1:

#if defined (CONFIG_SYS_FSL_HAS_CCN504)

	/* Set Wuo bit for RN-I 20 */
#ifdef CONFIG_ARCH_LS2080A
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)
	ldr	x1, =0x00000010
	bl	ccn504_set_aux

	/*
	 * Set forced-order mode in RN-I 6 and RN-I 20.
	 * This is required for performance optimization on LS2088A.
	 * The LS2080A family does not support setting forced-order mode,
	 * so skip this operation for the LS2080A family.
	 */
	bl	get_svr
	lsr	w0, w0, #16
	ldr	w1, =SVR_DEV(SVR_LS2080A)
	cmp	w0, w1
	b.eq	1f

	ldr	x0, =CCI_AUX_CONTROL_BASE(6)
	ldr	x1, =0x00000020
	bl	ccn504_set_aux
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)
	ldr	x1, =0x00000020
	bl	ccn504_set_aux
1:
#endif

	/* Add fully-coherent masters to DVM domain */
	ldr	x0, =CCI_MN_BASE
	ldr	x1, =CCI_MN_RNF_NODEID_LIST
	ldr	x2, =CCI_MN_DVM_DOMAIN_CTL_SET
	bl	ccn504_add_masters_to_dvm

	/* Set all RN-I ports to QoS of 15 */
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
#endif /* CONFIG_SYS_FSL_HAS_CCN504 */

#ifdef SMMU_BASE
	/* Set the SMMU page size in the sACR register */
	ldr	x1, =SMMU_BASE
	ldr	w0, [x1, #0x10]
	orr	w0, w0, #1 << 16  /* set sACR.pagesize to indicate 64K page */
	str	w0, [x1, #0x10]
#endif

	/* Initialize GIC Secure Bank Status */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f
	bl	get_gic_offset
	bl	gic_init_secure
1:
#ifdef CONFIG_GICV3
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	bl	get_gic_offset
	bl	gic_init_secure_percpu
#endif
#endif

100:
	branch_if_master x0, x1, 2f

#if defined(CONFIG_MP) && defined(CONFIG_ARMV8_MULTIENTRY)
	ldr	x0, =secondary_boot_func
	blr	x0
#endif
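	/*
	 * With CONFIG_ARMV8_MULTIENTRY, secondary cores branch into
	 * secondary_boot_func above and do not return; only the boot
	 * core continues at 2 below.
	 */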

2:
	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
1:
#ifdef CONFIG_FSL_TZPC_BP147
	/* Set Non Secure access for all devices protected via TZPC */
	ldr	x1, =TZPCDECPROT_0_SET_BASE /* Decode Protection-0 Set Reg */
	orr	w0, w0, #1 << 3 /* DCFG_RESET is accessible from NS world */
	str	w0, [x1]

	isb
	dsb	sy
#endif

#ifdef CONFIG_FSL_TZASC_400
	/*
	 * LS2080A and its personalities do not support TZASC,
	 * so skip the TZASC-related operations.
	 */
	bl	get_svr
	lsr	w0, w0, #16
	ldr	w1, =SVR_DEV(SVR_LS2080A)
	cmp	w0, w1
	b.eq	1f

	/* Set up the TZASC so that:
	 * a. only Region 0 is used, with global secure write/read enabled
	 * b. only Region 0 is used, with NSAID write/read enabled
	 *
	 * NOTE: As per the CCSR map doc, TZASC 3 and TZASC 4 are just
	 *	 placeholders.
	 */

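/*
 * tzasc_prog: program TZASC instance \xreg. Instances are laid out
 * 0x10000 apart starting at TZASC1_BASE.
 */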
.macro tzasc_prog, xreg

	mov	x12, TZASC1_BASE
	mov	x16, #0x10000
	mul	x14, \xreg, x16
	add	x14, x14, x12
	mov	x1, #0x8
	add	x1, x1, x14

	ldr	w0, [x1]		/* Filter 0 Gate Keeper Register */
	orr	w0, w0, #1 << 0		/* Set open_request for Filter 0 */
	str	w0, [x1]

	mov	x1, #0x110
	add	x1, x1, x14

	ldr	w0, [x1]		/* Region-0 Attributes Register */
	orr	w0, w0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	w0, w0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	w0, [x1]

	mov	x1, #0x114
	add	x1, x1, x14

	ldr	w0, [x1]		/* Region-0 Access Register */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]
.endm

#ifdef CONFIG_FSL_TZASC_1
	mov	x13, #0
	tzasc_prog	x13

#endif
#ifdef CONFIG_FSL_TZASC_2
	mov	x13, #1
	tzasc_prog	x13

#endif
	isb
	dsb	sy
#endif
100:
1:
#ifdef CONFIG_ARCH_LS1046A
	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
1:
	/* Initialize the L2 RAM latency */
	mrs	x1, S3_1_c11_c0_2	/* L2CTLR_EL1 */
	mov	x0, #0x1C7
	/* Clear L2 Tag RAM latency and L2 Data RAM latency */
	bic	x1, x1, x0
	/* Set L2 Data RAM latency, bits [2:0] */
	orr	x1, x1, #0x2
	/* Set L2 Tag RAM latency, bits [8:6] */
	orr	x1, x1, #0x80
	msr	S3_1_c11_c0_2, x1
	isb
100:
#endif

#if defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD)
	bl	fsl_ocram_init
#endif

	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

#if defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD)
ENTRY(fsl_ocram_init)
	mov	x28, lr			/* Save LR */
	bl	fsl_clear_ocram
	bl	fsl_ocram_clear_ecc_err
	mov	lr, x28			/* Restore LR */
	ret
ENDPROC(fsl_ocram_init)

ENTRY(fsl_clear_ocram)
/* Clear OCRAM */
	ldr	x0, =CONFIG_SYS_FSL_OCRAM_BASE
	ldr	x1, =(CONFIG_SYS_FSL_OCRAM_BASE + CONFIG_SYS_FSL_OCRAM_SIZE)
	mov	x2, #0
clear_loop:
	str	x2, [x0]
	add	x0, x0, #8
	cmp	x0, x1
	b.lo	clear_loop
	ret
ENDPROC(fsl_clear_ocram)

ENTRY(fsl_ocram_clear_ecc_err)
	/* Clear the OCRAM1/2 ECC status bits */
	mov	w1, #0x60
	ldr	x0, =DCSR_DCFG_SBEESR2
	str	w1, [x0]
	ldr	x0, =DCSR_DCFG_MBEESR2
	str	w1, [x0]
	ret
ENDPROC(fsl_ocram_clear_ecc_err)
#endif

#ifdef CONFIG_FSL_LSCH3
	.globl get_svr
get_svr:
	/* Return the SVR value in w0 */
	ldr	x1, =FSL_LSCH3_SVR
	ldr	w0, [x1]
	ret
#endif

#ifdef CONFIG_SYS_FSL_HAS_CCN504
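	/*
	 * CCN-504 HN-F P-state helpers, used by __asm_flush_l3_dcache
	 * below.
	 */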
hnf_pstate_poll:
	/* x0 has the desired status, return 0 for success, 1 for timeout
	 * clobber x1, x2, x3, x4, x6, x7
	 */
	mov	x1, x0
	mov	x7, #0			/* flag for timeout */
	mrs	x3, cntpct_el0		/* read timer */
	add	x3, x3, #1200		/* timeout after 100 microseconds */
	mov	x0, #0x18
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_STATUS */
	mov	w6, #8			/* HN-F node count */
1:
	ldr	x2, [x0]
	cmp	x2, x1			/* check status */
	b.eq	2f
	mrs	x4, cntpct_el0
	cmp	x4, x3
	b.ls	1b
	mov	x7, #1			/* timeout */
	b	3f
2:
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b
3:
	mov	x0, x7
	ret

hnf_set_pstate:
	/* x0 has the desired state, clobber x1, x2, x6 */
	mov	x1, x0
	/* request the given power state in every HN-F node */
	mov	w6, #8			/* HN-F node count */
	mov	x0, #0x10
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_REQ */
1:	/* set pstate in this node */
	ldr	x2, [x0]
	and	x2, x2, #0xfffffffffffffffc	/* & HNFPSTAT_MASK */
	orr	x2, x2, x1
	str	x2, [x0]
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b

	ret

ENTRY(__asm_flush_l3_dcache)
	/*
	 * Return status in x0
	 *    success 0
	 *    timeout 1 for setting SFONLY, 2 for FAM, 3 for both
	 */
	mov	x29, lr
	mov	x8, #0

	dsb	sy
	mov	x0, #0x1		/* HNFPSTAT_SFONLY */
	bl	hnf_set_pstate

	mov	x0, #0x4		/* SFONLY status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	mov	x8, #1			/* timeout */
1:
	dsb	sy
	mov	x0, #0x3		/* HNFPSTAT_FAM */
	bl	hnf_set_pstate

	mov	x0, #0xc		/* FAM status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	add	x8, x8, #0x2
1:
	mov	x0, x8
	mov	lr, x29
	ret
ENDPROC(__asm_flush_l3_dcache)
#endif /* CONFIG_SYS_FSL_HAS_CCN504 */

#ifdef CONFIG_MP
	/* Keep literals not used by the secondary boot code outside it */
	.ltorg

	/* Using 64 bit alignment since the spin table is accessed as data */
	.align 4
	.global secondary_boot_code
	/* Secondary Boot Code starts here */
secondary_boot_code:
	.global __spin_table
__spin_table:
	.space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE
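	/*
	 * Spin table element layout as used below (byte offsets):
	 * 0: entry (jump) address, 8: STATUS, 16: LPID,
	 * 24: non-zero requests an AArch32 entry.
	 */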

	.align 2
ENTRY(secondary_boot_func)
	/*
	 * MPIDR_EL1 Fields:
	 * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
	 * MPIDR[7:2] = AFF0_RES
	 * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
	 * MPIDR[23:16] = AFF2_CLUSTERID
	 * MPIDR[24] = MT
	 * MPIDR[29:25] = RES0
	 * MPIDR[30] = U
	 * MPIDR[31] = ME
	 * MPIDR[39:32] = AFF3
	 *
	 * Linear Processor ID (LPID) calculation from MPIDR_EL1:
	 * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
	 * until AFF2_CLUSTERID and AFF3 have non-zero values)
	 *
	 * LPID = (MPIDR[15:8] << 2) | MPIDR[1:0]
	 */
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15		/* x1 = AFF1_CLUSTERID */
	ubfm	x2, x0, #0, #1		/* x2 = AFF0_CPUID */
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */
	ubfm	x9, x0, #0, #15		/* x9 contains MPIDR[15:0] */
	/*
	 * offset of the spin table element for this core from start of spin
	 * table (each elem is padded to 64 bytes)
	 */
	lsl	x1, x10, #6
	ldr	x0, =__spin_table
	/* physical address of this CPU's spin table element */
	add	x11, x1, x0

	ldr	x0, =__real_cntfrq
	ldr	x0, [x0]
	msr	cntfrq_el0, x0	/* set with real frequency */
	str	x9, [x11, #16]	/* LPID */
	mov	x4, #1
	str	x4, [x11, #8]	/* STATUS */
	dsb	sy
#if defined(CONFIG_GICV3)
	gic_wait_for_interrupt_m x0
#elif defined(CONFIG_GICV2)
	bl	get_gic_offset
	mov	x0, x1
	gic_wait_for_interrupt_m x0, w1
#endif

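	/* Spin until the spin-table entry holds a non-zero jump address */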
slave_cpu:
	wfe
	ldr	x0, [x11]
	cbz	x0, slave_cpu
#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
	mrs	x1, sctlr_el2
#else
	mrs	x1, sctlr_el1
#endif
	tbz	x1, #25, cpu_is_le
	rev	x0, x0			/* BE to LE conversion */
cpu_is_le:
	ldr	x5, [x11, #24]		/* non-zero requests an AArch32 entry */
	cbz	x5, 1f

#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, secondary_switch_to_el1
	ldr	x5, =ES_TO_AARCH64
#else
	ldr	x4, [x11]
	ldr	x5, =ES_TO_AARCH32
#endif
	bl	secondary_switch_to_el2

1:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, secondary_switch_to_el1
#else
	ldr	x4, [x11]
#endif
	ldr	x5, =ES_TO_AARCH64
	bl	secondary_switch_to_el2

ENDPROC(secondary_boot_func)

ENTRY(secondary_switch_to_el2)
	switch_el x6, 1f, 0f, 0f
0:	ret
1:	armv8_switch_to_el2_m x4, x5, x6
ENDPROC(secondary_switch_to_el2)

ENTRY(secondary_switch_to_el1)
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15		/* x1 = AFF1_CLUSTERID */
	ubfm	x2, x0, #0, #1		/* x2 = AFF0_CPUID */
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */

	lsl	x1, x10, #6
	ldr	x0, =__spin_table
	/* physical address of this CPU's spin table element */
	add	x11, x1, x0

	ldr	x4, [x11]

	ldr	x5, [x11, #24]
	cbz	x5, 2f

	ldr	x5, =ES_TO_AARCH32
	bl	switch_to_el1

2:	ldr	x5, =ES_TO_AARCH64

switch_to_el1:
	switch_el x6, 0f, 1f, 0f
0:	ret
1:	armv8_switch_to_el1_m x4, x5, x6
ENDPROC(secondary_switch_to_el1)

	/* Ensure that the literals used by the secondary boot code are
	 * assembled within it (this is required so that we can protect
	 * this area with a single memreserve region).
	 */
	.ltorg

	/* 64 bit alignment for elements accessed as data */
	.align 4
	.global __real_cntfrq
__real_cntfrq:
	.quad COUNTER_FREQUENCY
	.globl __secondary_boot_code_size
	.type __secondary_boot_code_size, %object
	/* Secondary Boot Code ends here */
__secondary_boot_code_size:
	.quad .-secondary_boot_code
#endif