1/*
2 * (C) Copyright 2014-2015 Freescale Semiconductor
3 *
4 * SPDX-License-Identifier:	GPL-2.0+
5 *
6 * Extracted from armv8/start.S
7 */
8
9#include <config.h>
10#include <linux/linkage.h>
11#include <asm/gic.h>
12#include <asm/macro.h>
13#ifdef CONFIG_MP
14#include <asm/arch/mp.h>
15#endif
16#ifdef CONFIG_FSL_LSCH3
17#include <asm/arch-fsl-layerscape/immap_lsch3.h>
18#include <asm/arch-fsl-layerscape/soc.h>
19#endif
20#include <asm/u-boot.h>
21
/*
 * void lowlevel_init(void)
 *
 * Early (pre-relocation) SoC initialization:
 *  - LSCH3: CCN-504 interconnect setup (aux control, DVM domain, RN-I QoS)
 *  - SMMU page-size selection (when SMMU_BASE is defined)
 *  - GIC secure-bank initialization (GICv2 / GICv3)
 *  - hand-off of slave cores to the spin-table secondary boot path
 *  - TZPC / TZASC security configuration
 *  - LSCH2 (non-SPL): OCRAM scrub and ECC status clear
 *
 * In:  nothing   Out: nothing
 * x29 holds the saved return address across all helper calls
 * (no stack is assumed usable this early).
 */
ENTRY(lowlevel_init)
	mov	x29, lr			/* Save LR */

#ifdef CONFIG_FSL_LSCH3

	/* Set Wuo bit for RN-I 20 */
#ifdef CONFIG_LS2080A
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)
	ldr	x1, =0x00000010		/* Wuo bit value OR'ed in by helper */
	bl	ccn504_set_aux

	/*
	 * Set forced-order mode in RNI-6, RNI-20
	 * This is required for performance optimization on LS2088A
	 * LS2080A family does not support setting forced-order mode,
	 * so skip this operation for LS2080A family
	 */
	bl	get_svr
	lsr	w0, w0, #16		/* keep the SVR device-ID field */
	ldr	w1, =SVR_DEV_LS2080A
	cmp	w0, w1
	b.eq	1f			/* LS2080A: skip forced-order setup */

	ldr	x0, =CCI_AUX_CONTROL_BASE(6)
	ldr	x1, =0x00000020		/* forced-order mode bit */
	bl	ccn504_set_aux
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)
	ldr	x1, =0x00000020
	bl	ccn504_set_aux
1:
#endif

	/* Add fully-coherent masters to DVM domain */
	ldr	x0, =CCI_MN_BASE
	ldr	x1, =CCI_MN_RNF_NODEID_LIST
	ldr	x2, =CCI_MN_DVM_DOMAIN_CTL_SET
	bl	ccn504_add_masters_to_dvm

	/*
	 * Set all RN-I ports to QoS of 15: the same 0x00FF000C value is
	 * written to the SA0/SA1/SA2 QoS control registers of every RN-I
	 * node (nodes 0, 2, 6, 12, 16 and 20).
	 */
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
#endif

#ifdef SMMU_BASE
	/* Set the SMMU page size in the sACR register */
	ldr	x1, =SMMU_BASE
	ldr	w0, [x1, #0x10]		/* sACR is at offset 0x10 */
	orr	w0, w0, #1 << 16  /* set sACR.pagesize to indicate 64K page */
	str	w0, [x1, #0x10]
#endif

	/* Initialize GIC Secure Bank Status */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f		/* only the master core inits GICD */
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#ifdef CONFIG_GICV3
	ldr	x0, =GICR_BASE		/* per-core redistributor init */
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE		/* per-core CPU-interface init */
	bl	gic_init_secure_percpu
#endif
#endif

	branch_if_master x0, x1, 2f	/* master continues below */

#if defined(CONFIG_MP) && defined(CONFIG_ARMV8_MULTIENTRY)
	/* Slave cores park in the spin table; this call does not return. */
	ldr	x0, =secondary_boot_func
	blr	x0
#endif

2:
#ifdef CONFIG_FSL_TZPC_BP147
	/* Set Non Secure access for all devices protected via TZPC */
	ldr	x1, =TZPCDECPROT_0_SET_BASE /* Decode Protection-0 Set Reg */
	/*
	 * NOTE(review): w0 is OR'ed without first loading it from the
	 * register; whatever w0 held on entry may set additional
	 * decode-protection bits. Presumably harmless because this is a
	 * write-to-set register, but confirm the intent.
	 */
	orr	w0, w0, #1 << 3 /* DCFG_RESET is accessible from NS world */
	str	w0, [x1]

	isb
	dsb	sy
#endif

#ifdef CONFIG_FSL_TZASC_400
	/*
	 * LS2080A and its personalities do not support TZASC,
	 * so skip the TZASC related operations below for them.
	 */
	bl	get_svr
	lsr	w0, w0, #16		/* keep the SVR device-ID field */
	ldr	w1, =SVR_DEV_LS2080A
	cmp	w0, w1
	b.eq	1f

	/* Set TZASC so that:
	 * a. We use only Region0 whose global secure write/read is EN
	 * b. We use only Region0 whose NSAID write/read is EN
	 *
	 * NOTE: As per the CCSR map doc, TZASC 3 and TZASC 4 are just
	 * 	 placeholders.
	 */
	ldr	x1, =TZASC_GATE_KEEPER(0)
	ldr	w0, [x1]		/* Filter 0 Gate Keeper Register */
	orr	w0, w0, #1 << 0		/* Set open_request for Filter 0 */
	str	w0, [x1]

	ldr	x1, =TZASC_GATE_KEEPER(1)
	ldr	w0, [x1]		/* Filter 0 Gate Keeper Register */
	orr	w0, w0, #1 << 0		/* Set open_request for Filter 0 */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ATTRIBUTES_0(0)
	ldr	w0, [x1]		/* Region-0 Attributes Register */
	orr	w0, w0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	w0, w0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ATTRIBUTES_0(1)
	ldr	w0, [x1]		/* Region-1 Attributes Register */
	orr	w0, w0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	w0, w0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ID_ACCESS_0(0)
	ldr	w0, [x1]		/* Region-0 Access Register */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ID_ACCESS_0(1)
	ldr	w0, [x1]		/* Region-1 Access Register */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]

	isb
	dsb	sy
#endif
1:
#ifdef CONFIG_ARCH_LS1046A
	/*
	 * Initialize the L2 RAM latency via the implementation-defined
	 * register S3_1_c11_c0_2 (L2CTLR_EL1 on Cortex-A72 — confirm
	 * against the core TRM).
	 */
	mrs   x1, S3_1_c11_c0_2
	mov   x0, #0x1C7
	/* Clear L2 Tag RAM latency and L2 Data RAM latency */
	bic   x1, x1, x0
	/* Set L2 data ram latency bits [2:0] */
	orr   x1, x1, #0x2
	/* set L2 tag ram latency bits [8:6] */
	orr   x1,  x1, #0x80
	msr   S3_1_c11_c0_2, x1
	isb
#endif

#if defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD)
	bl	fsl_ocram_init		/* scrub OCRAM and clear ECC status */
#endif

	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)
239
240#if defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD)
/*
 * void fsl_ocram_init(void)
 *
 * Scrub the on-chip RAM to a known (zero) state and then clear any ECC
 * error status the scrub may have latched.
 *
 * LR is preserved in x28 (x29 is already in use by lowlevel_init, the
 * caller, to hold its own return address).
 */
ENTRY(fsl_ocram_init)
	mov	x28, lr			/* Save LR */
	bl	fsl_clear_ocram
	bl	fsl_ocram_clear_ecc_err
	mov	lr, x28			/* Restore LR */
	ret
ENDPROC(fsl_ocram_init)
248
/*
 * void fsl_clear_ocram(void)
 *
 * Zero-fill the whole OCRAM, 8 bytes per store, from
 * CONFIG_SYS_FSL_OCRAM_BASE up to (exclusive) base + size.
 * Clobbers: x0, x1, x2. Assumes the size is a multiple of 8.
 */
ENTRY(fsl_clear_ocram)
/* Clear OCRAM */
	ldr	x0, =CONFIG_SYS_FSL_OCRAM_BASE		/* x0 = cursor */
	ldr	x1, =(CONFIG_SYS_FSL_OCRAM_BASE + CONFIG_SYS_FSL_OCRAM_SIZE)
	mov	x2, #0
clear_loop:
	str	x2, [x0]
	add	x0, x0, #8
	cmp	x0, x1
	b.lo	clear_loop		/* unsigned compare: loop while x0 < end */
	ret
ENDPROC(fsl_clear_ocram)
261
/*
 * void fsl_ocram_clear_ecc_err(void)
 *
 * Clear the OCRAM1/OCRAM2 ECC error status latched during the scrub
 * done by fsl_clear_ocram, by writing the two status bits (0x60) to
 * the single-bit (SBEESR2) and multi-bit (MBEESR2) error status
 * registers. Presumably these are write-1-to-clear — confirm against
 * the reference manual.
 *
 * Clobbers: x0, w1.
 */
ENTRY(fsl_ocram_clear_ecc_err)
	/* OCRAM1/2 ECC status bit */
	mov	w1, #0x60
	ldr	x0, =DCSR_DCFG_SBEESR2	/* single-bit ECC error status */
	str	w1, [x0]
	ldr	x0, =DCSR_DCFG_MBEESR2	/* multi-bit ECC error status */
	str	w1, [x0]
	ret
/* Fix: this previously read ENDPROC(fsl_ocram_init), closing the wrong
 * symbol and mis-annotating the type/size of fsl_ocram_clear_ecc_err. */
ENDPROC(fsl_ocram_clear_ecc_err)
271#endif
272
273#ifdef CONFIG_FSL_LSCH3
	/*
	 * u32 get_svr(void)
	 *
	 * Out: w0 = System Version Register read from FSL_LSCH3_SVR.
	 * Clobbers: x1.
	 */
	.globl get_svr
get_svr:
	ldr	x1, =FSL_LSCH3_SVR
	ldr	w0, [x1]
	ret
279
hnf_pstate_poll:
	/*
	 * Poll the PSTATE_STATUS register of each of the 8 HN-F nodes
	 * until it equals the requested value or the timeout expires.
	 *
	 * x0 has the desired status, return 0 for success, 1 for timeout
	 * clobber x1, x2, x3, x4, x6, x7
	 */
	mov	x1, x0			/* x1 = status value to wait for */
	mov	x7, #0			/* flag for timeout */
	mrs	x3, cntpct_el0		/* read timer */
	/*
	 * timeout after 100 microseconds
	 * NOTE(review): 1200 ticks equals 100 us only at a 12 MHz system
	 * counter — confirm against COUNTER_FREQUENCY.
	 */
	add	x3, x3, #1200
	mov	x0, #0x18
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_STATUS (0x0420_0018) */
	mov	w6, #8			/* HN-F node count */
1:
	ldr	x2, [x0]
	cmp	x2, x1			/* check status */
	b.eq	2f			/* this node reached the state */
	mrs	x4, cntpct_el0
	cmp	x4, x3
	b.ls	1b			/* deadline not passed: poll again */
	mov	x7, #1			/* timeout */
	b	3f
2:
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b
3:
	mov	x0, x7			/* return 0 = ok, 1 = timed out */
	ret
307
hnf_set_pstate:
	/*
	 * Write the requested power state into the low bits of the
	 * PSTATE_REQ register of each of the 8 HN-F nodes
	 * (read-modify-write, preserving the other bits).
	 *
	 * x0 has the desired state, clobber x1, x2, x6
	 */
	mov	x1, x0			/* x1 = requested pstate */
	/* power state to SFONLY */
	mov	w6, #8			/* HN-F node count */
	mov	x0, #0x10
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_REQ (0x0420_0010) */
1:	/* set pstate to sfonly */
	ldr	x2, [x0]
	and	x2, x2, #0xfffffffffffffffc	/* & HNFPSTAT_MASK */
	orr	x2, x2, x1
	str	x2, [x0]
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b

	ret
325
ENTRY(__asm_flush_l3_dcache)
	/*
	 * Flush the L3 (CCN-504 HN-F) cache by stepping the HN-F nodes
	 * through SFONLY and then FAM power states, polling for each
	 * transition to complete.
	 *
	 * Return status in x0
	 *    success 0
	 *    timeout 1 for setting SFONLY, 2 for FAM, 3 for both
	 * LR is preserved in x29; x8 accumulates the status.
	 */
	mov	x29, lr
	mov	x8, #0

	dsb	sy
	mov	x0, #0x1		/* HNFPSTAT_SFONLY */
	bl	hnf_set_pstate

	mov	x0, #0x4		/* SFONLY status */
	bl	hnf_pstate_poll
	cbz	x0, 1f			/* 0 = transition completed */
	mov	x8, #1			/* timeout */
1:
	dsb	sy
	mov	x0, #0x3		/* HNFPSTAT_FAM */
	bl	hnf_set_pstate

	mov	x0, #0xc		/* FAM status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	add	x8, x8, #0x2		/* record FAM timeout as well */
1:
	mov	x0, x8
	mov	lr, x29
	ret
ENDPROC(__asm_flush_l3_dcache)
357#endif
358
359#ifdef CONFIG_MP
	/* Keep literals not used by the secondary boot code outside it */
	.ltorg

	/* Using 64 bit alignment since the spin table is accessed as data */
	.align 4			/* 2^4 = 16-byte alignment */
	.global secondary_boot_code
	/* Secondary Boot Code starts here */
secondary_boot_code:
	.global __spin_table
__spin_table:
	/*
	 * One SPIN_TABLE_ELEM_SIZE-byte element per CPU. Offsets used by
	 * the code below: +0 entry address, +8 status, +16 LPID,
	 * +24 arch (IH_ARCH_* value).
	 */
	.space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE

	.align 2			/* 4-byte align the code that follows */
ENTRY(secondary_boot_func)
	/*
	 * Entry point for slave (non-boot) cores: compute this core's
	 * spin-table slot, publish its MPIDR and a ready status, then
	 * wait until the master writes an entry address into the slot
	 * and jump to it at the requested exception level / arch.
	 * Does not return.
	 *
	 * MPIDR_EL1 Fields:
	 * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
	 * MPIDR[7:2] = AFF0_RES
	 * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
	 * MPIDR[23:16] = AFF2_CLUSTERID
	 * MPIDR[24] = MT
	 * MPIDR[29:25] = RES0
	 * MPIDR[30] = U
	 * MPIDR[31] = ME
	 * MPIDR[39:32] = AFF3
	 *
	 * Linear Processor ID (LPID) calculation from MPIDR_EL1:
	 * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
	 * until AFF2_CLUSTERID and AFF3 have non-zero values)
	 *
	 * LPID = MPIDR[15:8] | MPIDR[1:0]
	 */
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15		/* x1 = AFF1 (cluster id) */
	ubfm	x2, x0, #0, #1		/* x2 = AFF0 (core id) */
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */
	ubfm    x9, x0, #0, #15         /* x9 contains MPIDR[15:0] */
	/*
	 * offset of the spin table element for this core from start of spin
	 * table (each elem is padded to 64 bytes)
	 */
	lsl	x1, x10, #6
	ldr	x0, =__spin_table
	/* physical address of this cpus spin table element */
	add	x11, x1, x0

	ldr	x0, =__real_cntfrq
	ldr	x0, [x0]
	msr	cntfrq_el0, x0	/* set with real frequency */
	str	x9, [x11, #16]	/* LPID */
	mov	x4, #1
	str	x4, [x11, #8]	/* STATUS: mark this core as ready */
	dsb	sy
#if defined(CONFIG_GICV3)
	gic_wait_for_interrupt_m x0
#elif defined(CONFIG_GICV2)
        ldr     x0, =GICC_BASE
        gic_wait_for_interrupt_m x0, w1
#endif

slave_cpu:
	wfe				/* sleep until the master signals */
	ldr	x0, [x11]		/* entry address written by master */
	cbz	x0, slave_cpu		/* still zero: keep waiting */
#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
	mrs     x1, sctlr_el2
#else
	mrs     x1, sctlr_el1
#endif
	tbz     x1, #25, cpu_is_le	/* SCTLR.EE clear => little-endian */
	rev     x0, x0                  /* BE to LE conversion */
	/*
	 * NOTE(review): the byte-swapped x0 does not appear to be used
	 * afterwards — the jump target is reloaded into x3 from [x11]
	 * below without a rev. Confirm whether this is intentional.
	 */
cpu_is_le:
	ldr	x5, [x11, #24]		/* requested arch (IH_ARCH_*) */
	ldr	x6, =IH_ARCH_DEFAULT
	cmp	x6, x5
	b.eq	1f			/* default arch: AArch64 path */

#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x3, secondary_switch_to_el1
	ldr	x4, =ES_TO_AARCH64
#else
	ldr	x3, [x11]		/* jump target from spin table */
	ldr	x4, =ES_TO_AARCH32
#endif
	bl	secondary_switch_to_el2	/* does not return */

1:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x3, secondary_switch_to_el1
#else
	ldr	x3, [x11]		/* jump target from spin table */
#endif
	ldr	x4, =ES_TO_AARCH64
	bl	secondary_switch_to_el2	/* does not return */

ENDPROC(secondary_boot_func)
456
/*
 * Drop from EL3 to EL2 and jump to x3 with execution state x4
 * (ES_TO_AARCH32/64), via the armv8_switch_to_el2_m macro; if already
 * below EL3, simply return. Clobbers x5.
 */
ENTRY(secondary_switch_to_el2)
	switch_el x5, 1f, 0f, 0f	/* EL3 -> 1f, EL2/EL1 -> 0f */
0:	ret
1:	armv8_switch_to_el2_m x3, x4, x5
ENDPROC(secondary_switch_to_el2)
462
/*
 * Drop from EL2 to EL1 and jump to the entry address stored in this
 * core's spin-table slot, in the execution state (AArch32/AArch64)
 * requested at slot offset +24. Recomputes the slot address from
 * MPIDR_EL1 because x11 is not guaranteed preserved across the EL
 * switch. Does not return.
 */
ENTRY(secondary_switch_to_el1)
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15		/* x1 = AFF1 (cluster id) */
	ubfm	x2, x0, #0, #1		/* x2 = AFF0 (core id) */
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */

	lsl	x1, x10, #6		/* 64-byte spin-table elements */
	ldr	x0, =__spin_table
	/* physical address of this cpus spin table element */
	add	x11, x1, x0

	ldr	x3, [x11]		/* jump target */

	ldr	x5, [x11, #24]		/* requested arch (IH_ARCH_*) */
	ldr	x6, =IH_ARCH_DEFAULT
	cmp	x6, x5
	b.eq	2f			/* default arch: AArch64 */

	ldr	x4, =ES_TO_AARCH32
	bl	switch_to_el1

2:	ldr	x4, =ES_TO_AARCH64	/* falls through into switch_to_el1 */

switch_to_el1:
	switch_el x5, 0f, 1f, 0f	/* EL2 -> 1f, EL3/EL1 -> 0f */
0:	ret
1:	armv8_switch_to_el1_m x3, x4, x5
ENDPROC(secondary_switch_to_el1)
491
	/* Ensure that the literals used by the secondary boot code are
	 * assembled within it (this is required so that we can protect
	 * this area with a single memreserve region
	 */
	.ltorg

	/* 64 bit alignment for elements accessed as data */
	.align 4			/* 2^4 = 16-byte alignment */
	.global __real_cntfrq
	/* System counter frequency, programmed into cntfrq_el0 by slaves */
__real_cntfrq:
	.quad COUNTER_FREQUENCY
	.globl __secondary_boot_code_size
	.type __secondary_boot_code_size, %object
	/* Secondary Boot Code ends here */
__secondary_boot_code_size:
	/* Total size of the relocatable secondary boot region, in bytes */
	.quad .-secondary_boot_code
508#endif
509