/*
 * (C) Copyright 2014-2015 Freescale Semiconductor
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 * Extracted from armv8/start.S
 */

#include <config.h>
#include <linux/linkage.h>
#include <asm/gic.h>
#include <asm/macro.h>
#include <asm/arch-fsl-layerscape/soc.h>
#ifdef CONFIG_MP
#include <asm/arch/mp.h>
#endif
#ifdef CONFIG_FSL_LSCH3
#include <asm/arch-fsl-layerscape/immap_lsch3.h>
#endif
#include <asm/u-boot.h>

/*
 * Get GIC offset
 * For LS1043A rev 1.0, the GIC base address is aligned to 4KB.
 * For LS1043A rev 1.1, the GIC base address is aligned to 4KB if
 * DCFG_GIC400_ALIGN[GIC_ADDR_BIT] is set, or to 64KB otherwise.
 * output:
 *	x0: the base address of GICD
 *	x1: the base address of GICC
 */
ENTRY(get_gic_offset)
	ldr     x0, =GICD_BASE
#ifdef CONFIG_GICV2
	ldr     x1, =GICC_BASE
#endif
#ifdef CONFIG_HAS_FEATURE_GIC64K_ALIGN
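	/*
	 * Read the SVR (a big-endian CCSR register, hence the byte-reverse),
	 * ignore the E bit and the revision byte, and check whether this is
	 * an LS1043A. If it is not, or if it is rev 1.0, keep the 4KB-aligned
	 * base; otherwise consult SCFG_GIC400_ALIGN[GIC_ADDR_BIT] and fall
	 * back to the 64KB-aligned base when the bit is clear.
	 */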
	ldr	x2, =DCFG_CCSR_SVR
	ldr	w2, [x2]
	rev	w2, w2
	mov	w3, w2
	ands	w3, w3, #SVR_WO_E << 8
	mov	w4, #SVR_LS1043A << 8
	cmp	w3, w4
	b.ne	1f
	ands	w2, w2, #0xff
	cmp	w2, #REV1_0
	b.eq	1f
	ldr	x2, =SCFG_GIC400_ALIGN
	ldr	w2, [x2]
	rev	w2, w2
	tbnz	w2, #GIC_ADDR_BIT, 1f
	ldr     x0, =GICD_BASE_64K
#ifdef CONFIG_GICV2
	ldr     x1, =GICC_BASE_64K
#endif
1:
#endif
	ret
ENDPROC(get_gic_offset)

ENTRY(smp_kick_all_cpus)
	/* Kick secondary cpus up by SGI 0 interrupt */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	mov	x29, lr			/* Save LR */
	bl	get_gic_offset
	bl	gic_kick_secondary_cpus
	mov	lr, x29			/* Restore LR */
#endif
	ret
ENDPROC(smp_kick_all_cpus)


ENTRY(lowlevel_init)
	mov	x29, lr			/* Save LR */

#ifdef CONFIG_FSL_LSCH3

	/* Set Wuo bit for RN-I 20 */
#ifdef CONFIG_ARCH_LS2080A
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)
	ldr	x1, =0x00000010
	bl	ccn504_set_aux

	/*
	 * Set forced-order mode in RNI-6 and RNI-20.
	 * This is required for performance optimization on LS2088A.
	 * The LS2080A family does not support forced-order mode,
	 * so skip this operation on LS2080A.
	 */
	bl	get_svr
	lsr	w0, w0, #16
	ldr	w1, =SVR_DEV_LS2080A
	cmp	w0, w1
	b.eq	1f

	ldr	x0, =CCI_AUX_CONTROL_BASE(6)
	ldr	x1, =0x00000020
	bl	ccn504_set_aux
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)
	ldr	x1, =0x00000020
	bl	ccn504_set_aux
1:
#endif

	/* Add fully-coherent masters to DVM domain */
	ldr	x0, =CCI_MN_BASE
	ldr	x1, =CCI_MN_RNF_NODEID_LIST
	ldr	x2, =CCI_MN_DVM_DOMAIN_CTL_SET
	bl	ccn504_add_masters_to_dvm

	/* Set all RN-I ports to QoS of 15 */
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
#endif

#ifdef SMMU_BASE
	/* Set the SMMU page size in the sACR register */
	ldr	x1, =SMMU_BASE
	ldr	w0, [x1, #0x10]
	orr	w0, w0, #1 << 16  /* set sACR.pagesize to indicate 64K page */
	str	w0, [x1, #0x10]
#endif

	/* Initialize GIC Secure Bank Status */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f
	bl	get_gic_offset
	bl	gic_init_secure
1:
#ifdef CONFIG_GICV3
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	bl	get_gic_offset
	bl	gic_init_secure_percpu
#endif
#endif

	branch_if_master x0, x1, 2f

#if defined(CONFIG_MP) && defined(CONFIG_ARMV8_MULTIENTRY)
	ldr	x0, =secondary_boot_func
	blr	x0
#endif

2:
#ifdef CONFIG_FSL_TZPC_BP147
	/* Set Non Secure access for all devices protected via TZPC */
	ldr	x1, =TZPCDECPROT_0_SET_BASE /* Decode Protection-0 Set Reg */
	orr	w0, w0, #1 << 3 /* DCFG_RESET is accessible from NS world */
	str	w0, [x1]

	isb
	dsb	sy
#endif

#ifdef CONFIG_FSL_TZASC_400
	/*
	 * LS2080A and its personalities do not support TZASC,
	 * so skip the TZASC-related operations.
	 */
	bl	get_svr
	lsr	w0, w0, #16
	ldr	w1, =SVR_DEV_LS2080A
	cmp	w0, w1
	b.eq	1f

	/* Set TZASC so that:
	 * a. We use only Region0 whose global secure write/read is EN
	 * b. We use only Region0 whose NSAID write/read is EN
	 *
	 * NOTE: As per the CCSR map doc, TZASC 3 and TZASC 4 are just
	 *	 placeholders.
	 */
#ifdef CONFIG_FSL_TZASC_1
	ldr	x1, =TZASC_GATE_KEEPER(0)
	ldr	w0, [x1]		/* Filter 0 Gate Keeper Register */
	orr	w0, w0, #1 << 0		/* Set open_request for Filter 0 */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ATTRIBUTES_0(0)
	ldr	w0, [x1]		/* Region-0 Attributes Register */
	orr	w0, w0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	w0, w0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ID_ACCESS_0(0)
	ldr	w0, [x1]		/* Region-0 Access Register */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]
#endif
#ifdef CONFIG_FSL_TZASC_2
	ldr	x1, =TZASC_GATE_KEEPER(1)
	ldr	w0, [x1]		/* Filter 0 Gate Keeper Register */
	orr	w0, w0, #1 << 0		/* Set open_request for Filter 0 */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ATTRIBUTES_0(1)
	ldr	w0, [x1]		/* Region-0 Attributes Register */
	orr	w0, w0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	w0, w0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ID_ACCESS_0(1)
	ldr	w0, [x1]		/* Region-0 Access Register */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]
#endif
	isb
	dsb	sy
#endif
1:
#ifdef CONFIG_ARCH_LS1046A
	/* Initialize the L2 RAM latency */
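	/*
	 * S3_1_c11_c0_2 is assumed here to be the Cortex-A72
	 * implementation-defined L2CTLR_EL1 (L2 Control) register;
	 * the encoding is not named in this file.
	 */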
	mrs	x1, S3_1_c11_c0_2
	mov	x0, #0x1C7
	/* Clear L2 Tag RAM latency and L2 Data RAM latency */
	bic	x1, x1, x0
	/* Set L2 data RAM latency bits [2:0] */
	orr	x1, x1, #0x2
	/* Set L2 tag RAM latency bits [8:6] */
	orr	x1, x1, #0x80
	msr	S3_1_c11_c0_2, x1
	isb
#endif

#if defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD)
	bl	fsl_ocram_init
#endif

	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

#if defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD)
ENTRY(fsl_ocram_init)
	mov	x28, lr			/* Save LR */
	bl	fsl_clear_ocram
	bl	fsl_ocram_clear_ecc_err
	mov	lr, x28			/* Restore LR */
	ret
ENDPROC(fsl_ocram_init)

ENTRY(fsl_clear_ocram)
/* Clear OCRAM */
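	/*
	 * Zero the whole OCRAM eight bytes at a time; writing every location
	 * also initializes the OCRAM ECC check bits (assumed purpose, given
	 * that the latched ECC error status is cleared right afterwards).
	 */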
	ldr	x0, =CONFIG_SYS_FSL_OCRAM_BASE
	ldr	x1, =(CONFIG_SYS_FSL_OCRAM_BASE + CONFIG_SYS_FSL_OCRAM_SIZE)
	mov	x2, #0
clear_loop:
	str	x2, [x0]
	add	x0, x0, #8
	cmp	x0, x1
	b.lo	clear_loop
	ret
ENDPROC(fsl_clear_ocram)

ENTRY(fsl_ocram_clear_ecc_err)
	/* OCRAM1/2 ECC status bit */
	mov	w1, #0x60
	ldr	x0, =DCSR_DCFG_SBEESR2
	str	w1, [x0]
	ldr	x0, =DCSR_DCFG_MBEESR2
	str	w1, [x0]
	ret
ENDPROC(fsl_ocram_clear_ecc_err)
#endif

#ifdef CONFIG_FSL_LSCH3
	.globl get_svr
get_svr:
	ldr	x1, =FSL_LSCH3_SVR
	ldr	w0, [x1]
	ret

hnf_pstate_poll:
	/*
	 * x0 has the desired status; returns 0 for success, 1 for timeout.
	 * Clobbers x1, x2, x3, x4, x6, x7.
	 */
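	/*
	 * The poll walks the eight HN-F nodes, whose PSTATE status registers
	 * sit 64KB apart starting at HNF0_PSTATE_STATUS (0x0420_0018); a
	 * single deadline of 1200 counter ticks covers the whole walk.
	 */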
	mov	x1, x0
	mov	x7, #0			/* flag for timeout */
	mrs	x3, cntpct_el0		/* read timer */
	add	x3, x3, #1200		/* timeout after 100 microseconds */
	mov	x0, #0x18
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_STATUS */
	mov	w6, #8			/* HN-F node count */
1:
	ldr	x2, [x0]
	cmp	x2, x1			/* check status */
	b.eq	2f
	mrs	x4, cntpct_el0
	cmp	x4, x3
	b.ls	1b
	mov	x7, #1			/* timeout */
	b	3f
2:
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b
3:
	mov	x0, x7
	ret

hnf_set_pstate:
	/* x0 has the desired state; clobbers x1, x2, x6 */
	mov	x1, x0
	/* request the p-state in x1 on every HN-F node */
	mov	w6, #8			/* HN-F node count */
	mov	x0, #0x10
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_REQ */
1:	/* set the requested p-state */
	ldr	x2, [x0]
	and	x2, x2, #0xfffffffffffffffc	/* & HNFPSTAT_MASK */
	orr	x2, x2, x1
	str	x2, [x0]
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b

	ret

ENTRY(__asm_flush_l3_dcache)
	/*
	 * Return status in x0
	 *    success 0
	 *    timeout 1 for setting SFONLY, 2 for FAM, 3 for both
	 */
	mov	x29, lr
	mov	x8, #0
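	/*
	 * Request the SFONLY p-state and wait for it, then request FAM and
	 * wait again, accumulating the timeout flags in x8.
	 */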

	dsb	sy
	mov	x0, #0x1		/* HNFPSTAT_SFONLY */
	bl	hnf_set_pstate

	mov	x0, #0x4		/* SFONLY status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	mov	x8, #1			/* timeout */
1:
	dsb	sy
	mov	x0, #0x3		/* HNFPSTAT_FAM */
	bl	hnf_set_pstate

	mov	x0, #0xc		/* FAM status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	add	x8, x8, #0x2
1:
	mov	x0, x8
	mov	lr, x29
	ret
ENDPROC(__asm_flush_l3_dcache)
#endif

#ifdef CONFIG_MP
	/* Keep literals not used by the secondary boot code outside it */
	.ltorg

	/* Using 64 bit alignment since the spin table is accessed as data */
	.align 4
	.global secondary_boot_code
	/* Secondary Boot Code starts here */
secondary_boot_code:
	.global __spin_table
__spin_table:
	.space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE

	.align 2
ENTRY(secondary_boot_func)
	/*
	 * MPIDR_EL1 Fields:
	 * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
	 * MPIDR[7:2] = AFF0_RES
	 * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
	 * MPIDR[23:16] = AFF2_CLUSTERID
	 * MPIDR[24] = MT
	 * MPIDR[29:25] = RES0
	 * MPIDR[30] = U
	 * MPIDR[31] = ME
	 * MPIDR[39:32] = AFF3
	 *
	 * Linear Processor ID (LPID) calculation from MPIDR_EL1:
	 * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
	 * until AFF2_CLUSTERID and AFF3 have non-zero values)
	 *
	 * LPID = MPIDR[15:8] | MPIDR[1:0]
	 */
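	/*
	 * Worked example (assuming the 2-bit core ID layout described above):
	 * cluster 1, core 1 has MPIDR[15:0] = 0x0101, so
	 * LPID = (1 << 2) | 1 = 5 and its spin-table element starts at
	 * offset 5 * 64 = 0x140 from __spin_table.
	 */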
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15
	ubfm	x2, x0, #0, #1
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */
	ubfm	x9, x0, #0, #15		/* x9 contains MPIDR[15:0] */
	/*
	 * offset of the spin table element for this core from start of spin
	 * table (each elem is padded to 64 bytes)
	 */
	lsl	x1, x10, #6
	ldr	x0, =__spin_table
	/* physical address of this CPU's spin table element */
	add	x11, x1, x0

	ldr	x0, =__real_cntfrq
	ldr	x0, [x0]
	msr	cntfrq_el0, x0	/* set with real frequency */
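	/*
	 * Spin-table element layout as used below (one 64-byte element per
	 * core): [0] entry address (written by the boot CPU to release this
	 * core), [8] status, [16] MPIDR[15:0], [24] requested arch (IH_ARCH_*).
	 */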
	str	x9, [x11, #16]	/* LPID */
	mov	x4, #1
	str	x4, [x11, #8]	/* STATUS */
	dsb	sy
#if defined(CONFIG_GICV3)
	gic_wait_for_interrupt_m x0
#elif defined(CONFIG_GICV2)
	bl	get_gic_offset
	mov	x0, x1
	gic_wait_for_interrupt_m x0, w1
#endif

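	/*
	 * Park here until the boot CPU publishes a non-zero entry address
	 * in this core's spin-table slot and wakes us with an event.
	 */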
slave_cpu:
	wfe
	ldr	x0, [x11]
	cbz	x0, slave_cpu
#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
	mrs	x1, sctlr_el2
#else
	mrs	x1, sctlr_el1
#endif
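	/* SCTLR_ELx.EE (bit 25) set means data accesses are big-endian */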
	tbz	x1, #25, cpu_is_le
	rev	x0, x0			/* BE to LE conversion */
cpu_is_le:
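	/*
	 * Compare the arch requested in the spin-table entry ([x11, #24])
	 * with IH_ARCH_DEFAULT: if they differ, the payload is started via
	 * the AArch32 path; otherwise via the AArch64 path at 1:.
	 */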
	ldr	x5, [x11, #24]
	ldr	x6, =IH_ARCH_DEFAULT
	cmp	x6, x5
	b.eq	1f

#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, secondary_switch_to_el1
	ldr	x5, =ES_TO_AARCH64
#else
	ldr	x4, [x11]
	ldr	x5, =ES_TO_AARCH32
#endif
	bl	secondary_switch_to_el2

1:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, secondary_switch_to_el1
#else
	ldr	x4, [x11]
#endif
	ldr	x5, =ES_TO_AARCH64
	bl	secondary_switch_to_el2

ENDPROC(secondary_boot_func)

ENTRY(secondary_switch_to_el2)
	switch_el x6, 1f, 0f, 0f
0:	ret
1:	armv8_switch_to_el2_m x4, x5, x6
ENDPROC(secondary_switch_to_el2)

ENTRY(secondary_switch_to_el1)
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15
	ubfm	x2, x0, #0, #1
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */

	lsl	x1, x10, #6
	ldr	x0, =__spin_table
	/* physical address of this CPU's spin table element */
	add	x11, x1, x0

	ldr	x4, [x11]

	ldr	x5, [x11, #24]
	ldr	x6, =IH_ARCH_DEFAULT
	cmp	x6, x5
	b.eq	2f

	ldr	x5, =ES_TO_AARCH32
	bl	switch_to_el1

2:	ldr	x5, =ES_TO_AARCH64

switch_to_el1:
	switch_el x6, 0f, 1f, 0f
0:	ret
1:	armv8_switch_to_el1_m x4, x5, x6
ENDPROC(secondary_switch_to_el1)

	/*
	 * Ensure that the literals used by the secondary boot code are
	 * assembled within it (this is required so that we can protect
	 * this area with a single memreserve region).
	 */
	.ltorg

	/* 64 bit alignment for elements accessed as data */
	.align 4
	.global __real_cntfrq
__real_cntfrq:
	.quad COUNTER_FREQUENCY
	.globl __secondary_boot_code_size
	.type __secondary_boot_code_size, %object
	/* Secondary Boot Code ends here */
__secondary_boot_code_size:
	.quad .-secondary_boot_code
#endif