/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2014-2015 Freescale Semiconductor
 *
 * Extracted from armv8/start.S
 */

#include <config.h>
#include <linux/linkage.h>
#include <asm/gic.h>
#include <asm/macro.h>
#include <asm/arch-fsl-layerscape/soc.h>
#ifdef CONFIG_MP
#include <asm/arch/mp.h>
#endif
#ifdef CONFIG_FSL_LSCH3
#include <asm/arch-fsl-layerscape/immap_lsch3.h>
#endif
#include <asm/u-boot.h>

/*
 * Get the GIC base addresses.
 *
 * On LS1043A rev 1.0 the GIC base address is aligned to 4KB.
 * On LS1043A rev 1.1 the GIC base address is aligned to 4KB if
 * DCFG_GIC400_ALIGN[GIC_ADDR_BIT] is set, otherwise it is aligned
 * to 64KB.
 *
 * Output:
 *	x0: the base address of GICD
 *	x1: the base address of GICC
 */
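/*
 * Detection sequence (as implemented below): read the SVR from
 * DCFG_CCSR_SVR (byte-reversed with rev, which accounts for the
 * big-endian DCFG register block), return early unless the part is
 * an LS1043A newer than rev 1.0, then sample SCFG_GIC400_ALIGN and
 * pick the 64KB-aligned GIC addresses when GIC_ADDR_BIT is clear.
 */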
ENTRY(get_gic_offset)
	ldr	x0, =GICD_BASE
#ifdef CONFIG_GICV2
	ldr	x1, =GICC_BASE
#endif
#ifdef CONFIG_HAS_FEATURE_GIC64K_ALIGN
	ldr	x2, =DCFG_CCSR_SVR
	ldr	w2, [x2]
	rev	w2, w2
	lsr	w3, w2, #16
	ldr	w4, =SVR_DEV(SVR_LS1043A)
	cmp	w3, w4
	b.ne	1f
	ands	w2, w2, #0xff
	cmp	w2, #REV1_0
	b.eq	1f
	ldr	x2, =SCFG_GIC400_ALIGN
	ldr	w2, [x2]
	rev	w2, w2
	tbnz	w2, #GIC_ADDR_BIT, 1f
	ldr	x0, =GICD_BASE_64K
#ifdef CONFIG_GICV2
	ldr	x1, =GICC_BASE_64K
#endif
1:
#endif
	ret
ENDPROC(get_gic_offset)

ENTRY(smp_kick_all_cpus)
	/* Kick secondary cpus up by SGI 0 interrupt */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	mov	x29, lr			/* Save LR */
	bl	get_gic_offset
	bl	gic_kick_secondary_cpus
	mov	lr, x29			/* Restore LR */
#endif
	ret
ENDPROC(smp_kick_all_cpus)


ENTRY(lowlevel_init)
	mov	x29, lr			/* Save LR */

	/* unmask SError and abort */
	msr	daifclr, #4

	/* Set HCR_EL2[AMO] so SError @EL2 is taken */
	mrs	x0, hcr_el2
	orr	x0, x0, #0x20			/* AMO */
	msr	hcr_el2, x0
	isb

	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
1:

#if defined(CONFIG_SYS_FSL_HAS_CCN504)

	/* Set Wuo bit for RN-I 20 */
#ifdef CONFIG_ARCH_LS2080A
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)
	ldr	x1, =0x00000010
	bl	ccn504_set_aux

	/*
	 * Set forced-order mode for RN-I 6 and RN-I 20.
	 * This is required for performance optimization on LS2088A.
	 * The LS2080A family does not support forced-order mode,
	 * so skip this operation on LS2080A parts.
	 */
	bl	get_svr
	lsr	w0, w0, #16
	ldr	w1, =SVR_DEV(SVR_LS2080A)
	cmp	w0, w1
	b.eq	1f

	ldr	x0, =CCI_AUX_CONTROL_BASE(6)
	ldr	x1, =0x00000020
	bl	ccn504_set_aux
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)
	ldr	x1, =0x00000020
	bl	ccn504_set_aux
1:
#endif

	/* Add fully-coherent masters to DVM domain */
	ldr	x0, =CCI_MN_BASE
	ldr	x1, =CCI_MN_RNF_NODEID_LIST
	ldr	x2, =CCI_MN_DVM_DOMAIN_CTL_SET
	bl	ccn504_add_masters_to_dvm

	/* Set all RN-I ports to QoS of 15 */
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
#endif /* CONFIG_SYS_FSL_HAS_CCN504 */

#ifdef SMMU_BASE
	/* Set the SMMU page size in the sACR register */
	ldr	x1, =SMMU_BASE
	ldr	w0, [x1, #0x10]
	orr	w0, w0, #1 << 16	/* set sACR.pagesize to indicate 64K page */
	str	w0, [x1, #0x10]
#endif

	/* Initialize GIC Secure Bank Status */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f
	bl	get_gic_offset
	bl	gic_init_secure
1:
#ifdef CONFIG_GICV3
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	bl	get_gic_offset
	bl	gic_init_secure_percpu
#endif
#endif

100:
	branch_if_master x0, x1, 2f

#if defined(CONFIG_MP) && defined(CONFIG_ARMV8_MULTIENTRY)
	ldr	x0, =secondary_boot_func
	blr	x0
#endif

2:
	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
1:
#ifdef CONFIG_FSL_TZPC_BP147
	/* Set Non Secure access for all devices protected via TZPC */
	ldr	x1, =TZPCDECPROT_0_SET_BASE /* Decode Protection-0 Set Reg */
	orr	w0, w0, #1 << 3 /* DCFG_RESET is accessible from NS world */
	str	w0, [x1]

	isb
	dsb	sy
#endif

#ifdef CONFIG_FSL_TZASC_400
	/*
	 * The LS2080A and its personalities do not support TZASC,
	 * so skip the TZASC-related operations on those parts.
	 */
	bl	get_svr
	lsr	w0, w0, #16
	ldr	w1, =SVR_DEV(SVR_LS2080A)
	cmp	w0, w1
	b.eq	1f

	/*
	 * Set up the TZASC so that:
	 * a. Only Region 0 is used, with global secure write/read enabled.
	 * b. Only Region 0 is used, with NSAID write/read enabled.
	 *
	 * NOTE: As per the CCSR map doc, TZASC 3 and TZASC 4 are just
	 *	 placeholders.
	 */

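/*
 * tzasc_prog: program TZASC instance \xreg (0-based). The per-instance
 * register window assumed here is TZASC1_BASE + \xreg * 0x10000, with the
 * Filter 0 Gate Keeper register at offset 0x008, the Region 0 Attributes
 * register at offset 0x110 and the Region 0 ID Access register at offset
 * 0x114, matching the offsets used in the macro body below.
 */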
.macro tzasc_prog, xreg

	mov	x12, TZASC1_BASE
	mov	x16, #0x10000
	mul	x14, \xreg, x16
	add	x14, x14, x12
	mov	x1, #0x8
	add	x1, x1, x14

	ldr	w0, [x1]		/* Filter 0 Gate Keeper Register */
	orr	w0, w0, #1 << 0		/* Set open_request for Filter 0 */
	str	w0, [x1]

	mov	x1, #0x110
	add	x1, x1, x14

	ldr	w0, [x1]		/* Region-0 Attributes Register */
	orr	w0, w0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	w0, w0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	w0, [x1]

	mov	x1, #0x114
	add	x1, x1, x14

	ldr	w0, [x1]		/* Region-0 Access Register */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]
.endm

#ifdef CONFIG_FSL_TZASC_1
	mov	x13, #0
	tzasc_prog	x13

#endif
#ifdef CONFIG_FSL_TZASC_2
	mov	x13, #1
	tzasc_prog	x13

#endif
	isb
	dsb	sy
#endif
100:
1:
#ifdef CONFIG_ARCH_LS1046A
	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
1:
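	/*
	 * S3_1_c11_c0_2 is the IMPLEMENTATION DEFINED L2CTLR_EL1 register of
	 * the Cortex-A72 cores used on LS1046A; the raw encoding is kept
	 * here, presumably so that older assemblers that do not recognize
	 * the register name can still build this file.
	 */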
	/* Initialize the L2 RAM latency */
	mrs	x1, S3_1_c11_c0_2
	mov	x0, #0x1C7
	/* Clear L2 Tag RAM latency and L2 Data RAM latency */
	bic	x1, x1, x0
	/* Set L2 Data RAM latency, bits [2:0] */
	orr	x1, x1, #0x2
	/* Set L2 Tag RAM latency, bits [8:6] */
	orr	x1, x1, #0x80
	msr	S3_1_c11_c0_2, x1
	isb
100:
#endif

#if !defined(CONFIG_TFABOOT) && \
	(defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD))
	bl	fsl_ocram_init
#endif

	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)


#if defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD)
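/*
 * OCRAM on these LSCH2-class SoCs is ECC-protected: the init path below
 * first zero-fills the whole OCRAM (which also initializes the ECC state
 * for every line) and then clears any single-/multi-bit ECC error status
 * that the fill may have latched in DCSR_DCFG_SBEESR2/MBEESR2.
 */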
ENTRY(fsl_ocram_init)
	mov	x28, lr			/* Save LR */
	bl	fsl_clear_ocram
	bl	fsl_ocram_clear_ecc_err
	mov	lr, x28			/* Restore LR */
	ret
ENDPROC(fsl_ocram_init)

ENTRY(fsl_clear_ocram)
	/* Clear OCRAM */
	ldr	x0, =CONFIG_SYS_FSL_OCRAM_BASE
	ldr	x1, =(CONFIG_SYS_FSL_OCRAM_BASE + CONFIG_SYS_FSL_OCRAM_SIZE)
	mov	x2, #0
clear_loop:
	str	x2, [x0]
	add	x0, x0, #8
	cmp	x0, x1
	b.lo	clear_loop
	ret
ENDPROC(fsl_clear_ocram)

ENTRY(fsl_ocram_clear_ecc_err)
	/* OCRAM1/2 ECC status bits */
	mov	w1, #0x60
	ldr	x0, =DCSR_DCFG_SBEESR2
	str	w1, [x0]
	ldr	x0, =DCSR_DCFG_MBEESR2
	str	w1, [x0]
	ret
ENDPROC(fsl_ocram_clear_ecc_err)
#endif

#ifdef CONFIG_FSL_LSCH3
	.globl get_svr
get_svr:
	ldr	x1, =FSL_LSCH3_SVR
	ldr	w0, [x1]
	ret
#endif

#if defined(CONFIG_SYS_FSL_HAS_CCN504) || defined(CONFIG_SYS_FSL_HAS_CCN508)
hnf_pstate_poll:
	/*
	 * x0 has the desired status; return 0 for success, 1 for timeout.
	 * Clobbers x1, x2, x3, x4, x6, x7.
	 */
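	/*
	 * The constants below assume the CCN HN-F register windows start at
	 * 0x04200000 (HN-F 0) with a 0x10000 stride per node, with
	 * HNF_PSTATE_REQ at offset 0x10 and HNF_PSTATE_STATUS at offset
	 * 0x18; this matches the addresses built with mov/movk here and in
	 * hnf_set_pstate below.
	 */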
	mov	x1, x0
	mov	x7, #0			/* flag for timeout */
	mrs	x3, cntpct_el0		/* read timer */
	add	x3, x3, #1200		/* timeout after 100 microseconds */
	mov	x0, #0x18
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_STATUS */
	mov	w6, #8			/* HN-F node count */
1:
	ldr	x2, [x0]
	cmp	x2, x1			/* check status */
	b.eq	2f
	mrs	x4, cntpct_el0
	cmp	x4, x3
	b.ls	1b
	mov	x7, #1			/* timeout */
	b	3f
2:
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b
3:
	mov	x0, x7
	ret

hnf_set_pstate:
	/* x0 has the desired state, clobber x1, x2, x6 */
	mov	x1, x0
	/* power state to SFONLY */
	mov	w6, #8			/* HN-F node count */
	mov	x0, #0x10
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_REQ */
1:	/* set pstate to sfonly */
	ldr	x2, [x0]
	and	x2, x2, #0xfffffffffffffffc	/* & HNFPSTAT_MASK */
	orr	x2, x2, x1
	str	x2, [x0]
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b

	ret

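/*
 * L3 (CCN HN-F) flush sequence used below: drop all HN-F nodes to the
 * SFONLY (snoop filter only) power state, which causes them to clean out
 * their L3 contents, then restore FAM (full) mode; each transition is
 * polled for completion with hnf_pstate_poll, and the return value encodes
 * which step, if any, timed out.
 */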
ENTRY(__asm_flush_l3_dcache)
	/*
	 * Return status in x0:
	 *    success 0
	 *    timeout 1 for setting SFONLY, 2 for FAM, 3 for both
	 */
	mov	x29, lr
	mov	x8, #0

	dsb	sy
	mov	x0, #0x1		/* HNFPSTAT_SFONLY */
	bl	hnf_set_pstate

	mov	x0, #0x4		/* SFONLY status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	mov	x8, #1			/* timeout */
1:
	dsb	sy
	mov	x0, #0x3		/* HNFPSTAT_FAM */
	bl	hnf_set_pstate

	mov	x0, #0xc		/* FAM status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	add	x8, x8, #0x2
1:
	mov	x0, x8
	mov	lr, x29
	ret
ENDPROC(__asm_flush_l3_dcache)
#endif /* CONFIG_SYS_FSL_HAS_CCN504 || CONFIG_SYS_FSL_HAS_CCN508 */

#ifdef CONFIG_MP
	/* Keep literals not used by the secondary boot code outside it */
	.ltorg

	/* Using 64 bit alignment since the spin table is accessed as data */
	.align 4
	.global secondary_boot_code
	/* Secondary Boot Code starts here */
secondary_boot_code:
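	/*
	 * Each spin table element is SPIN_TABLE_ELEM_SIZE (64) bytes and, as
	 * used by the code below, holds:
	 *   0x00: jump address written by the master to release the core
	 *   0x08: status (set to 1 once the core has parked here)
	 *   0x10: LPID / MPIDR[15:0] of the core owning the element
	 *   0x18: requested execution state (non-zero selects AArch32)
	 */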
	.global __spin_table
__spin_table:
	.space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE

	.align 2
ENTRY(secondary_boot_func)
	/*
	 * MPIDR_EL1 Fields:
	 * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
	 * MPIDR[7:2] = AFF0_RES
	 * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
	 * MPIDR[23:16] = AFF2_CLUSTERID
	 * MPIDR[24] = MT
	 * MPIDR[29:25] = RES0
	 * MPIDR[30] = U
	 * MPIDR[31] = ME
	 * MPIDR[39:32] = AFF3
	 *
	 * Linear Processor ID (LPID) calculation from MPIDR_EL1:
	 * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
	 * until AFF2_CLUSTERID and AFF3 have non-zero values)
	 *
	 * LPID = (MPIDR[15:8] << 2) | MPIDR[1:0]
	 */
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15
	ubfm	x2, x0, #0, #1
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */
	ubfm	x9, x0, #0, #15		/* x9 contains MPIDR[15:0] */
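	/*
	 * Worked example (hypothetical values): a core with AFF1 = 1 and
	 * AFF0 = 1 (cluster 1, core 1) yields x1 = 1, x2 = 1, so
	 * LPID = (1 << 2) | 1 = 5 and its spin table element sits at
	 * __spin_table + 5 * 64.
	 */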
	/*
	 * offset of the spin table element for this core from start of spin
	 * table (each elem is padded to 64 bytes)
	 */
	lsl	x1, x10, #6
	ldr	x0, =__spin_table
	/* physical address of this CPU's spin table element */
	add	x11, x1, x0

	ldr	x0, =__real_cntfrq
	ldr	x0, [x0]
	msr	cntfrq_el0, x0	/* set with real frequency */
	str	x9, [x11, #16]	/* LPID */
	mov	x4, #1
	str	x4, [x11, #8]	/* STATUS */
	dsb	sy
#if defined(CONFIG_GICV3)
	gic_wait_for_interrupt_m x0
#elif defined(CONFIG_GICV2)
	bl	get_gic_offset
	mov	x0, x1			/* pass the GICC base in x0 */
	gic_wait_for_interrupt_m x0, w1
#endif


slave_cpu:
	wfe
	ldr	x0, [x11]
	cbz	x0, slave_cpu
#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
	mrs	x1, sctlr_el2
#else
	mrs	x1, sctlr_el1
#endif
	tbz	x1, #25, cpu_is_le
	rev	x0, x0			/* BE to LE conversion */
cpu_is_le:
	ldr	x5, [x11, #24]
	cbz	x5, 1f

#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, secondary_switch_to_el1
	ldr	x5, =ES_TO_AARCH64
#else
	ldr	x4, [x11]
	ldr	x5, =ES_TO_AARCH32
#endif
	bl	secondary_switch_to_el2

1:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, secondary_switch_to_el1
#else
	ldr	x4, [x11]
#endif
	ldr	x5, =ES_TO_AARCH64
	bl	secondary_switch_to_el2

ENDPROC(secondary_boot_func)

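/*
 * secondary_switch_to_el2/el1 are thin wrappers around the
 * armv8_switch_to_el2_m/armv8_switch_to_el1_m macros (defined in
 * asm/macro.h): x4 carries the entry point for the lower EL, x5 the
 * ES_TO_AARCH32/ES_TO_AARCH64 flag, and x6 is used as a scratch register
 * by the macros.
 */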
ENTRY(secondary_switch_to_el2)
	switch_el x6, 1f, 0f, 0f
0:	ret
1:	armv8_switch_to_el2_m x4, x5, x6
ENDPROC(secondary_switch_to_el2)

ENTRY(secondary_switch_to_el1)
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15
	ubfm	x2, x0, #0, #1
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */

	lsl	x1, x10, #6
	ldr	x0, =__spin_table
	/* physical address of this CPU's spin table element */
	add	x11, x1, x0

	ldr	x4, [x11]

	ldr	x5, [x11, #24]
	cbz	x5, 2f

	ldr	x5, =ES_TO_AARCH32
	bl	switch_to_el1

2:	ldr	x5, =ES_TO_AARCH64

switch_to_el1:
	switch_el x6, 0f, 1f, 0f
0:	ret
1:	armv8_switch_to_el1_m x4, x5, x6
ENDPROC(secondary_switch_to_el1)

	/*
	 * Ensure that the literals used by the secondary boot code are
	 * assembled within it (this is required so that we can protect
	 * this area with a single memreserve region).
	 */
	.ltorg

	/* 64 bit alignment for elements accessed as data */
	.align 4
	.global __real_cntfrq
__real_cntfrq:
	.quad COUNTER_FREQUENCY
	.globl __secondary_boot_code_size
	.type __secondary_boot_code_size, %object
	/* Secondary Boot Code ends here */
__secondary_boot_code_size:
	.quad .-secondary_boot_code
#endif
