/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/image.h>

__HEAD
ENTRY(_start)
	/*
	 * Image header expected by Linux boot-loaders. The image header data
	 * structure is described in asm/image.h.
	 * Do not modify it without modifying the structure and all bootloaders
	 * that expect this header format!!
	 */
	/* jump to start kernel */
	j _start_kernel
	/* reserved */
	.word 0
	.balign 8
#if __riscv_xlen == 64
	/* Image load offset (2MB) from start of RAM */
	.dword 0x200000
#else
	/* Image load offset (4MB) from start of RAM */
	.dword 0x400000
#endif
	/* Effective size of kernel image */
	.dword _end - _start
	/* Kernel flags */
	.dword __HEAD_FLAGS
	/* Version of this header */
	.word RISCV_HEADER_VERSION
	/* Reserved */
	.word 0
	/* Reserved */
	.dword 0
	/* Magic number "RISCV" */
	.ascii RISCV_IMAGE_MAGIC
	.balign 4
	/* Magic number 2 "RSC\x05" */
	.ascii RISCV_IMAGE_MAGIC2
	/* Reserved */
	.word 0

.align 2
#ifdef CONFIG_MMU
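/*
 * relocate: turn on the MMU and continue running at the kernel's virtual
 * addresses.  Expects the physical address of the page directory to switch
 * to in a0; clobbers a0-a2 and stvec, and returns to its caller at the
 * equivalent virtual address.
 */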
relocate:
	/* Relocate return address */
	li a1, PAGE_OFFSET
	la a2, _start
	sub a1, a1, a2
	add ra, ra, a1

	/* Point stvec to virtual address of instruction after satp write */
	la a2, 1f
	add a2, a2, a1
	csrw CSR_TVEC, a2

	/* Compute satp for kernel page tables, but don't load it yet */
	srl a2, a0, PAGE_SHIFT
	li a1, SATP_MODE
	or a2, a2, a1

	/*
	 * Load trampoline page directory, which will cause us to trap to
	 * stvec if VA != PA, or simply fall through if VA == PA.  We need a
	 * full fence here because setup_vm() just wrote these PTEs and we need
	 * to ensure the new translations are in use.
	 */
	la a0, trampoline_pg_dir
	srl a0, a0, PAGE_SHIFT
	or a0, a0, a1
	sfence.vma
	csrw CSR_SATP, a0
.align 2
1:
	/* Set trap vector to spin forever to help debug */
	la a0, .Lsecondary_park
	csrw CSR_TVEC, a0

	/* Reload the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Switch to kernel page tables.  A full fence is necessary in order to
	 * avoid using the trampoline translations, which are only correct for
	 * the first superpage.  Fetching the fence is guaranteed to work
	 * because that first superpage is translated the same way.
	 */
	csrw CSR_SATP, a2
	sfence.vma

	ret
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
	.global secondary_start_sbi
secondary_start_sbi:
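	/*
	 * Entry point for secondary harts brought up through SBI (e.g. the
	 * HSM hart start call): a0 is expected to hold this hart's ID, which
	 * is used below to index the per-hart stack/task pointer arrays.
	 */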
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

	/* Load the global pointer */
	.option push
	.option norelax
		la gp, __global_pointer$
	.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	slli a3, a0, LGREG
	la a4, __cpu_up_stack_pointer
	la a5, __cpu_up_task_pointer
	add a4, a3, a4
	add a5, a3, a5
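	/*
	 * The boot hart publishes this hart's stack and task pointers in
	 * __cpu_up_{stack,task}_pointer[hartid] before asking SBI to start
	 * it, so they can be loaded directly here without spin-waiting.
	 */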
	REG_L sp, (a4)
	REG_L tp, (a5)

	.global secondary_start_common
secondary_start_common:

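	/*
	 * Common tail for secondary harts once sp/tp are set up: switch to
	 * the kernel page tables (when the MMU is enabled) and enter
	 * smp_callin().
	 */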
#ifdef CONFIG_MMU
	/* Enable virtual memory and relocate to virtual address */
	la a0, swapper_pg_dir
	call relocate
#endif
	tail smp_callin
#endif /* CONFIG_SMP */

.Lsecondary_park:
	/* We lack SMP support or have too many harts, so park this hart */
	wfi
	j .Lsecondary_park

END(_start)

	__INIT
ENTRY(_start_kernel)
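	/*
	 * Per the RISC-V Linux boot convention, firmware or the boot loader
	 * enters here with a0 holding the current hart ID and a1 the physical
	 * address of the device tree blob (when there is no firmware, the
	 * M-mode path below rereads mhartid itself).
	 */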
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

#ifdef CONFIG_RISCV_M_MODE
	/* flush the instruction cache */
	fence.i

	/* Reset all registers except ra, a0, a1 */
	call reset_regs

	/*
	 * Set up a PMP to permit access to all of memory.  Some machines may
	 * not implement PMPs, so we set up a quick trap handler to just skip
	 * touching the PMPs on any trap.
	 */
	la a0, pmp_done
	csrw CSR_TVEC, a0

	li a0, -1
	csrw CSR_PMPADDR0, a0
	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
	csrw CSR_PMPCFG0, a0
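	/*
	 * A NAPOT entry whose address field is all ones covers the whole
	 * address space, and R|W|X permits S/U-mode access to all of it.
	 */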
.align 2
pmp_done:

	/*
	 * The hartid in a0 is expected later on, and we have no firmware
	 * to hand it to us.
	 */
	csrr a0, CSR_MHARTID
#endif /* CONFIG_RISCV_M_MODE */

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

#ifdef CONFIG_SMP
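	/* Park any hart whose ID is not below CONFIG_NR_CPUS */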
	li t0, CONFIG_NR_CPUS
	blt a0, t0, .Lgood_cores
	tail .Lsecondary_park
.Lgood_cores:
#endif

	/* Pick one hart to run the main boot sequence */
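	/*
	 * amoadd.w returns the old value of hart_lottery, so only the first
	 * hart to get here reads back zero and becomes the boot hart; the
	 * rest branch to the secondary path.
	 */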
	la a3, hart_lottery
	li a2, 1
	amoadd.w a3, a2, (a3)
	bnez a3, .Lsecondary_start

	/* Clear BSS for flat non-ELF images */
	la a3, __bss_start
	la a4, __bss_stop
	ble a4, a3, clear_bss_done
clear_bss:
	REG_S zero, (a3)
	add a3, a3, RISCV_SZPTR
	blt a3, a4, clear_bss
clear_bss_done:

	/* Save hart ID and DTB physical address */
	mv s0, a0
	mv s1, a1
	la a2, boot_cpu_hartid
	REG_S a0, (a2)

	/* Initialize page tables and relocate to virtual addresses */
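	/*
	 * setup_vm() is C code, so give it the init task's stack to run on
	 * and pass the DTB physical address saved in s1.
	 */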
	la sp, init_thread_union + THREAD_SIZE
	mv a0, s1
	call setup_vm
#ifdef CONFIG_MMU
	la a0, early_pg_dir
	call relocate
#endif /* CONFIG_MMU */

	/* Restore C environment */
	la tp, init_task
	sw zero, TASK_TI_CPU(tp)
	la sp, init_thread_union + THREAD_SIZE

#ifdef CONFIG_KASAN
	call kasan_early_init
#endif
	/* Start the kernel */
	call soc_early_init
	call parse_dtb
	tail start_kernel

.Lsecondary_start:
#ifdef CONFIG_SMP
	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	slli a3, a0, LGREG
	la a1, __cpu_up_stack_pointer
	la a2, __cpu_up_task_pointer
	add a1, a3, a1
	add a2, a3, a2

	/*
	 * This hart didn't win the lottery, so we wait for the winning hart to
	 * get far enough along the boot process that it should continue.
	 */
.Lwait_for_cpu_up:
	/* FIXME: We should WFI to save some energy here. */
	REG_L sp, (a1)
	REG_L tp, (a2)
	beqz sp, .Lwait_for_cpu_up
	beqz tp, .Lwait_for_cpu_up
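	/* Order the sp/tp reads before any later memory accesses */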
	fence

	tail secondary_start_common
#endif

END(_start_kernel)

#ifdef CONFIG_RISCV_M_MODE
ENTRY(reset_regs)
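	/*
	 * Zero every integer register except zero/ra/a0/a1, clear the
	 * scratch CSR, and (if present) wipe the FP register file.
	 */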
	li	sp, 0
	li	gp, 0
	li	tp, 0
	li	t0, 0
	li	t1, 0
	li	t2, 0
	li	s0, 0
	li	s1, 0
	li	a2, 0
	li	a3, 0
	li	a4, 0
	li	a5, 0
	li	a6, 0
	li	a7, 0
	li	s2, 0
	li	s3, 0
	li	s4, 0
	li	s5, 0
	li	s6, 0
	li	s7, 0
	li	s8, 0
	li	s9, 0
	li	s10, 0
	li	s11, 0
	li	t3, 0
	li	t4, 0
	li	t5, 0
	li	t6, 0
	csrw	CSR_SCRATCH, 0

#ifdef CONFIG_FPU
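	/* Only touch the FP state if misa reports the F or D extension */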
	csrr	t0, CSR_MISA
	andi	t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
	beqz	t0, .Lreset_regs_done

	li	t1, SR_FS
	csrs	CSR_STATUS, t1
	fmv.s.x	f0, zero
	fmv.s.x	f1, zero
	fmv.s.x	f2, zero
	fmv.s.x	f3, zero
	fmv.s.x	f4, zero
	fmv.s.x	f5, zero
	fmv.s.x	f6, zero
	fmv.s.x	f7, zero
	fmv.s.x	f8, zero
	fmv.s.x	f9, zero
	fmv.s.x	f10, zero
	fmv.s.x	f11, zero
	fmv.s.x	f12, zero
	fmv.s.x	f13, zero
	fmv.s.x	f14, zero
	fmv.s.x	f15, zero
	fmv.s.x	f16, zero
	fmv.s.x	f17, zero
	fmv.s.x	f18, zero
	fmv.s.x	f19, zero
	fmv.s.x	f20, zero
	fmv.s.x	f21, zero
	fmv.s.x	f22, zero
	fmv.s.x	f23, zero
	fmv.s.x	f24, zero
	fmv.s.x	f25, zero
	fmv.s.x	f26, zero
	fmv.s.x	f27, zero
	fmv.s.x	f28, zero
	fmv.s.x	f29, zero
	fmv.s.x	f30, zero
	fmv.s.x	f31, zero
	csrw	fcsr, 0
	/* note that the caller must clear SR_FS */
#endif /* CONFIG_FPU */
.Lreset_regs_done:
	ret
END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */

__PAGE_ALIGNED_BSS
	/* Empty zero page */
	.balign PAGE_SIZE