xref: /openbmc/linux/arch/riscv/kernel/head.S (revision 0cabf991)
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Copyright (C) 2012 Regents of the University of California
4 */
5
6#include <asm/thread_info.h>
7#include <asm/asm-offsets.h>
8#include <asm/asm.h>
9#include <linux/init.h>
10#include <linux/linkage.h>
11#include <asm/thread_info.h>
12#include <asm/page.h>
13#include <asm/csr.h>
14#include <asm/hwcap.h>
15#include <asm/image.h>
16
17__HEAD
18ENTRY(_start)
19	/*
20	 * Image header expected by Linux boot-loaders. The image header data
21	 * structure is described in asm/image.h.
22	 * Do not modify it without modifying the structure and all bootloaders
23	 * that expect this header format!
24	 */
25	/* jump to start kernel */
26	j _start_kernel
27	/* reserved */
28	.word 0
29	.balign 8
30#if __riscv_xlen == 64
31	/* Image load offset(2MB) from start of RAM */
32	.dword 0x200000
33#else
34	/* Image load offset(4MB) from start of RAM */
35	.dword 0x400000
36#endif
37	/* Effective size of kernel image */
38	.dword _end - _start
39	.dword __HEAD_FLAGS		/* kernel flags, see asm/image.h */
40	.word RISCV_HEADER_VERSION	/* header version, see asm/image.h */
41	.word 0				/* reserved */
42	.dword 0			/* reserved */
43	.ascii RISCV_IMAGE_MAGIC	/* image magic string, see asm/image.h */
44	.balign 4
45	.ascii RISCV_IMAGE_MAGIC2	/* second image magic, see asm/image.h */
46	.word 0				/* reserved */
47
48.align 2
49#ifdef CONFIG_MMU
/*
 * relocate: turn on paging and continue execution at the kernel's
 * linear-mapped virtual addresses.
 *
 * In:       a0 = physical address of the page directory to install in satp
 *           ra = physical return address (patched to its virtual equivalent)
 * Clobbers: a0, a1, a2, and the stvec/satp CSRs.
 */
50relocate:
51	/* Relocate return address */
52	li a1, PAGE_OFFSET
53	la a2, _start
54	sub a1, a1, a2			/* a1 = VA - PA offset of the kernel */
55	add ra, ra, a1
56
57	/* Point stvec to virtual address of instruction after satp write */
58	la a2, 1f
59	add a2, a2, a1
60	csrw CSR_TVEC, a2
61
62	/* Compute satp for kernel page tables, but don't load it yet */
63	srl a2, a0, PAGE_SHIFT		/* a2 = PPN of the kernel page directory */
64	li a1, SATP_MODE
65	or a2, a2, a1
66
67	/*
68	 * Load trampoline page directory, which will cause us to trap to
69	 * stvec if VA != PA, or simply fall through if VA == PA.  We need a
70	 * full fence here because setup_vm() just wrote these PTEs and we need
71	 * to ensure the new translations are in use.
72	 */
73	la a0, trampoline_pg_dir
74	srl a0, a0, PAGE_SHIFT
75	or a0, a0, a1
76	sfence.vma
77	csrw CSR_SATP, a0
78.align 2
791:
80	/* Set trap vector to exception handler */
81	la a0, handle_exception
82	csrw CSR_TVEC, a0
83
84	/*
85	 * Set sup0 scratch register to 0, indicating to exception vector that
86	 * we are presently executing in kernel.
87	 */
88	csrw CSR_SCRATCH, zero
89
90	/* Reload the global pointer (norelax: gp itself is not valid yet) */
91.option push
92.option norelax
93	la gp, __global_pointer$
94.option pop
95
96	/*
97	 * Switch to kernel page tables.  A full fence is necessary in order to
98	 * avoid using the trampoline translations, which are only correct for
99	 * the first superpage.  Fetching the fence is guaranteed to work
100	 * because that first superpage is translated the same way.
101	 */
102	csrw CSR_SATP, a2
103	sfence.vma
104
105	ret
106#endif /* CONFIG_MMU */
107#ifdef CONFIG_SMP
	/*
	 * Entry point for secondary harts started by the SBI firmware.
	 * NOTE(review): a0 is used below as the hartid index into the
	 * __cpu_up_* arrays; presumably a0 = hartid and a1 = opaque private
	 * value per the SBI HSM hart_start convention — confirm against the
	 * SBI specification.
	 */
108	.global secondary_start_sbi
109secondary_start_sbi:
110	/* Mask all interrupts */
111	csrw CSR_IE, zero
112	csrw CSR_IP, zero
113
114	/* Load the global pointer (norelax: gp itself is not valid yet) */
115	.option push
116	.option norelax
117		la gp, __global_pointer$
118	.option pop
119
120	/*
121	 * Disable FPU to detect illegal usage of
122	 * floating point in kernel space
123	 */
124	li t0, SR_FS
125	csrc CSR_STATUS, t0
126
127	/* Set trap vector to spin forever to help debug */
128	la a3, .Lsecondary_park
129	csrw CSR_TVEC, a3
130
	/* Pick up this hart's stack and task pointers, indexed by hartid */
131	slli a3, a0, LGREG
132	la a4, __cpu_up_stack_pointer
133	la a5, __cpu_up_task_pointer
134	add a4, a3, a4
135	add a5, a3, a5
136	REG_L sp, (a4)
137	REG_L tp, (a5)
138
139	.global secondary_start_common
140secondary_start_common:
141
142#ifdef CONFIG_MMU
143	/* Enable virtual memory and relocate to virtual address */
144	la a0, swapper_pg_dir
145	call relocate
146#endif
147	tail smp_callin
148#endif /* CONFIG_SMP */
149
150.Lsecondary_park:
151	/* We lack SMP support or have too many harts, so park this hart */
152	wfi				/* wfi may return spuriously, so loop */
153	j .Lsecondary_park
154
155END(_start)
156
157	__INIT
158ENTRY(_start_kernel)
159	/* Mask all interrupts */
160	csrw CSR_IE, zero
161	csrw CSR_IP, zero
162
163#ifdef CONFIG_RISCV_M_MODE
164	/* flush the instruction cache */
165	fence.i
166
167	/* Reset all registers except ra, a0, a1 */
168	call reset_regs
169
170	/*
171	 * Setup a PMP to permit access to all of memory.  Some machines may
172	 * not implement PMPs, so we set up a quick trap handler to just skip
173	 * touching the PMPs on any trap.
174	 */
175	la a0, pmp_done
176	csrw CSR_TVEC, a0
177
178	li a0, -1			/* all-ones NAPOT address: match everything */
179	csrw CSR_PMPADDR0, a0
180	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
181	csrw CSR_PMPCFG0, a0
182.align 2
183pmp_done:
184
185	/*
186	 * The hartid in a0 is expected later on, and we have no firmware
187	 * to hand it to us.
188	 */
189	csrr a0, CSR_MHARTID
190#endif /* CONFIG_RISCV_M_MODE */
191
192	/* Load the global pointer (norelax: gp itself is not valid yet) */
193.option push
194.option norelax
195	la gp, __global_pointer$
196.option pop
197
198	/*
199	 * Disable FPU to detect illegal usage of
200	 * floating point in kernel space
201	 */
202	li t0, SR_FS
203	csrc CSR_STATUS, t0
204
	/* Harts beyond CONFIG_NR_CPUS have nowhere to run; park them */
205#ifdef CONFIG_SMP
206	li t0, CONFIG_NR_CPUS
207	blt a0, t0, .Lgood_cores
208	tail .Lsecondary_park
209.Lgood_cores:
210#endif
211
	/*
	 * Pick one hart to run the main boot sequence: amoadd.w returns the
	 * previous value, so only the first hart to arrive reads back zero.
	 */
212	/* Pick one hart to run the main boot sequence */
213	la a3, hart_lottery
214	li a2, 1
215	amoadd.w a3, a2, (a3)
216	bnez a3, .Lsecondary_start
217
	/*
	 * Clear BSS for flat non-ELF images, one register-width word at a
	 * time (assumes __bss_start/__bss_stop are RISCV_SZPTR aligned —
	 * TODO confirm against the linker script).
	 */
218	/* Clear BSS for flat non-ELF images */
219	la a3, __bss_start
220	la a4, __bss_stop
221	ble a4, a3, clear_bss_done
222clear_bss:
223	REG_S zero, (a3)
224	add a3, a3, RISCV_SZPTR
225	blt a3, a4, clear_bss
226clear_bss_done:
227
228	/* Save hart ID and DTB physical address */
229	mv s0, a0
230	mv s1, a1
231	la a2, boot_cpu_hartid
232	REG_S a0, (a2)
233
234	/* Initialize page tables and relocate to virtual addresses */
235	la sp, init_thread_union + THREAD_SIZE
236	mv a0, s1			/* a0 = DTB physical address */
237	call setup_vm
238#ifdef CONFIG_MMU
239	la a0, early_pg_dir
240	call relocate
241#endif /* CONFIG_MMU */
242
243	/* Restore C environment */
244	la tp, init_task
245	sw zero, TASK_TI_CPU(tp)	/* the boot hart is CPU 0 */
246	la sp, init_thread_union + THREAD_SIZE
247
248#ifdef CONFIG_KASAN
249	call kasan_early_init
250#endif
251	/* Start the kernel */
252	call soc_early_init
253	call parse_dtb
254	tail start_kernel
255
256.Lsecondary_start:
257#ifdef CONFIG_SMP
258	/* Set trap vector to spin forever to help debug */
259	la a3, .Lsecondary_park
260	csrw CSR_TVEC, a3
261
	/* Address this hart's slot in the __cpu_up_* arrays by hartid */
262	slli a3, a0, LGREG
263	la a1, __cpu_up_stack_pointer
264	la a2, __cpu_up_task_pointer
265	add a1, a3, a1
266	add a2, a3, a2
267
268	/*
269	 * This hart didn't win the lottery, so we wait for the winning hart to
270	 * get far enough along the boot process that it should continue.
271	 */
272.Lwait_for_cpu_up:
273	/* FIXME: We should WFI to save some energy here. */
274	REG_L sp, (a1)
275	REG_L tp, (a2)
276	beqz sp, .Lwait_for_cpu_up
277	beqz tp, .Lwait_for_cpu_up
	/* Order the sp/tp loads before any use of the winner's other writes */
278	fence
279
280	tail secondary_start_common
281#endif
282
283END(_start_kernel)
284
285#ifdef CONFIG_RISCV_M_MODE
/*
 * reset_regs: bring the register file to a known state when there is no
 * firmware to have done so.  Zeroes every general-purpose register except
 * ra, a0 and a1 (return address and boot arguments), clears the scratch
 * CSR, and — when misa reports the F or D extension — zeroes f0-f31 and
 * fcsr as well.
 */
286ENTRY(reset_regs)
287	li	sp, 0
288	li	gp, 0
289	li	tp, 0
290	li	t0, 0
291	li	t1, 0
292	li	t2, 0
293	li	s0, 0
294	li	s1, 0
295	li	a2, 0
296	li	a3, 0
297	li	a4, 0
298	li	a5, 0
299	li	a6, 0
300	li	a7, 0
301	li	s2, 0
302	li	s3, 0
303	li	s4, 0
304	li	s5, 0
305	li	s6, 0
306	li	s7, 0
307	li	s8, 0
308	li	s9, 0
309	li	s10, 0
310	li	s11, 0
311	li	t3, 0
312	li	t4, 0
313	li	t5, 0
314	li	t6, 0
315	csrw	CSR_SCRATCH, 0
316
317#ifdef CONFIG_FPU
	/* Skip the FP reset entirely if neither F nor D is implemented */
318	csrr	t0, CSR_MISA
319	andi	t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
320	beqz	t0, .Lreset_regs_done
321
	/* Enable the FPU so the fmv/fcsr writes below do not trap */
322	li	t1, SR_FS
323	csrs	CSR_STATUS, t1
324	fmv.s.x	f0, zero
325	fmv.s.x	f1, zero
326	fmv.s.x	f2, zero
327	fmv.s.x	f3, zero
328	fmv.s.x	f4, zero
329	fmv.s.x	f5, zero
330	fmv.s.x	f6, zero
331	fmv.s.x	f7, zero
332	fmv.s.x	f8, zero
333	fmv.s.x	f9, zero
334	fmv.s.x	f10, zero
335	fmv.s.x	f11, zero
336	fmv.s.x	f12, zero
337	fmv.s.x	f13, zero
338	fmv.s.x	f14, zero
339	fmv.s.x	f15, zero
340	fmv.s.x	f16, zero
341	fmv.s.x	f17, zero
342	fmv.s.x	f18, zero
343	fmv.s.x	f19, zero
344	fmv.s.x	f20, zero
345	fmv.s.x	f21, zero
346	fmv.s.x	f22, zero
347	fmv.s.x	f23, zero
348	fmv.s.x	f24, zero
349	fmv.s.x	f25, zero
350	fmv.s.x	f26, zero
351	fmv.s.x	f27, zero
352	fmv.s.x	f28, zero
353	fmv.s.x	f29, zero
354	fmv.s.x	f30, zero
355	fmv.s.x	f31, zero
356	csrw	fcsr, 0
357	/* note that the caller must clear SR_FS */
358#endif /* CONFIG_FPU */
359.Lreset_regs_done:
360	ret
361END(reset_regs)
362#endif /* CONFIG_RISCV_M_MODE */
363
364__PAGE_ALIGNED_BSS
	/*
	 * NOTE(review): only a page-size alignment is emitted here; the zero
	 * page storage itself appears to be defined elsewhere — confirm.
	 */
365	/* Empty zero page */
366	.balign PAGE_SIZE
367