/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <asm/asm-offsets.h>
#include <asm/asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/image.h>
#include "efi-header.S"

__HEAD
ENTRY(_start)
	/*
	 * Image header expected by Linux boot-loaders. The image header data
	 * structure is described in asm/image.h.
	 * Do not modify it without modifying the structure and all bootloaders
	 * that expect this header format!!
	 */
#ifdef CONFIG_EFI
	/*
	 * This instruction decodes to "MZ" ASCII required by UEFI.
	 */
	c.li s4,-13
	j _start_kernel
#else
	/* jump to start kernel */
	j _start_kernel
	/* reserved */
	.word 0
#endif
	.balign 8
#if __riscv_xlen == 64
	/* Image load offset (2MB) from start of RAM */
	.dword 0x200000
#else
	/* Image load offset (4MB) from start of RAM */
	.dword 0x400000
#endif
	/* Effective size of kernel image */
	.dword _end - _start
	.dword __HEAD_FLAGS
	.word RISCV_HEADER_VERSION
	.word 0
	.dword 0
	.ascii RISCV_IMAGE_MAGIC
	.balign 4
	.ascii RISCV_IMAGE_MAGIC2
#ifdef CONFIG_EFI
	.word pe_head_start - _start
pe_head_start:

	__EFI_PE_HEADER
#else
	.word 0
#endif

.align 2
#ifdef CONFIG_MMU
relocate:
	/* Relocate return address */
	li a1, PAGE_OFFSET
	la a2, _start
	sub a1, a1, a2
	add ra, ra, a1

	/* Point stvec to virtual address of instruction after satp write */
	la a2, 1f
	add a2, a2, a1
	csrw CSR_TVEC, a2

	/* Compute satp for kernel page tables, but don't load it yet */
	srl a2, a0, PAGE_SHIFT
	li a1, SATP_MODE
	or a2, a2, a1

	/*
	 * Load trampoline page directory, which will cause us to trap to
	 * stvec if VA != PA, or simply fall through if VA == PA. We need a
	 * full fence here because setup_vm() just wrote these PTEs and we need
	 * to ensure the new translations are in use.
	 */
	la a0, trampoline_pg_dir
	srl a0, a0, PAGE_SHIFT
	or a0, a0, a1
	sfence.vma
	csrw CSR_SATP, a0
.align 2
1:
	/* Set trap vector to spin forever to help debug */
	la a0, .Lsecondary_park
	csrw CSR_TVEC, a0

	/* Reload the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Switch to kernel page tables. A full fence is necessary in order to
	 * avoid using the trampoline translations, which are only correct for
	 * the first superpage. Fetching the fence is guaranteed to work
	 * because that first superpage is translated the same way.
	 */
	csrw CSR_SATP, a2
	sfence.vma

	ret
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
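	/*
	 * Entry point for secondary harts brought up through the SBI HSM
	 * hart_start call. Per that calling convention the firmware starts
	 * the hart with translation disabled, the hartid in a0 and an opaque
	 * parameter in a1; a1 is not used here, the stack and task pointers
	 * are instead looked up by hartid below.
	 */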
	.global secondary_start_sbi
secondary_start_sbi:
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

	/* Load the global pointer */
	.option push
	.option norelax
		la gp, __global_pointer$
	.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	slli a3, a0, LGREG
	la a4, __cpu_up_stack_pointer
	la a5, __cpu_up_task_pointer
	add a4, a3, a4
	add a5, a3, a5
	REG_L sp, (a4)
	REG_L tp, (a5)

	.global secondary_start_common
secondary_start_common:

#ifdef CONFIG_MMU
	/* Enable virtual memory and relocate to virtual address */
	la a0, swapper_pg_dir
	call relocate
#endif
	call setup_trap_vector
	tail smp_callin
#endif /* CONFIG_SMP */

.align 2
setup_trap_vector:
	/* Set trap vector to exception handler */
	la a0, handle_exception
	csrw CSR_TVEC, a0

	/*
	 * Set sup0 scratch register to 0, indicating to exception vector that
	 * we are presently executing in the kernel.
	 */
	csrw CSR_SCRATCH, zero
	ret

.Lsecondary_park:
	/* We lack SMP support or have too many harts, so park this hart */
	wfi
	j .Lsecondary_park

END(_start)

	__INIT
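/*
 * Boot entry point reached via the jump in the image header above. In the
 * usual S-mode boot the previous stage (SBI firmware or boot loader) is
 * expected to hand us the hartid in a0 and the physical address of the DTB
 * in a1; with CONFIG_RISCV_M_MODE there is no firmware below us, so the
 * hartid is instead read from CSR_MHARTID below.
 */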
ENTRY(_start_kernel)
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

#ifdef CONFIG_RISCV_M_MODE
	/* flush the instruction cache */
	fence.i

	/* Reset all registers except ra, a0, a1 */
	call reset_regs

	/*
	 * Set up a PMP to permit access to all of memory. Some machines may
	 * not implement PMPs, so we set up a quick trap handler to just skip
	 * touching the PMPs on any trap.
	 */
	la a0, pmp_done
	csrw CSR_TVEC, a0

	li a0, -1
	csrw CSR_PMPADDR0, a0
	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
	csrw CSR_PMPCFG0, a0
.align 2
pmp_done:

	/*
	 * The hartid in a0 is expected later on, and we have no firmware
	 * to hand it to us.
	 */
	csrr a0, CSR_MHARTID
#endif /* CONFIG_RISCV_M_MODE */

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

#ifdef CONFIG_SMP
	li t0, CONFIG_NR_CPUS
	blt a0, t0, .Lgood_cores
	tail .Lsecondary_park
.Lgood_cores:
#endif

	/*
	 * Pick one hart to run the main boot sequence: amoadd.w returns the
	 * old value, so only the first hart to increment hart_lottery sees
	 * zero and continues.
	 */
	la a3, hart_lottery
	li a2, 1
	amoadd.w a3, a2, (a3)
	bnez a3, .Lsecondary_start

	/* Clear BSS for flat non-ELF images */
	la a3, __bss_start
	la a4, __bss_stop
	ble a4, a3, clear_bss_done
clear_bss:
	REG_S zero, (a3)
	add a3, a3, RISCV_SZPTR
	blt a3, a4, clear_bss
clear_bss_done:

	/* Save hart ID and DTB physical address */
	mv s0, a0
	mv s1, a1
	la a2, boot_cpu_hartid
	REG_S a0, (a2)

	/* Initialize page tables and relocate to virtual addresses */
	la sp, init_thread_union + THREAD_SIZE
	mv a0, s1
	call setup_vm
#ifdef CONFIG_MMU
	la a0, early_pg_dir
	call relocate
#endif /* CONFIG_MMU */

	call setup_trap_vector
	/* Restore C environment */
	la tp, init_task
	sw zero, TASK_TI_CPU(tp)
	la sp, init_thread_union + THREAD_SIZE

#ifdef CONFIG_KASAN
	call kasan_early_init
#endif
	/* Start the kernel */
	call soc_early_init
	tail start_kernel

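	/*
	 * Harts that entered through _start_kernel but lost the lottery wait
	 * here until the boot hart hands them a stack and task pointer:
	 * __cpu_up_stack_pointer and __cpu_up_task_pointer are per-hart slots
	 * (indexed by hartid, hence the slli by LGREG below) that are filled
	 * in from C code as each secondary CPU is brought online.
	 */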
.Lsecondary_start:
#ifdef CONFIG_SMP
	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	slli a3, a0, LGREG
	la a1, __cpu_up_stack_pointer
	la a2, __cpu_up_task_pointer
	add a1, a3, a1
	add a2, a3, a2

	/*
	 * This hart didn't win the lottery, so we wait for the winning hart to
	 * get far enough along the boot process that it should continue.
	 */
.Lwait_for_cpu_up:
	/* FIXME: We should WFI to save some energy here. */
	REG_L sp, (a1)
	REG_L tp, (a2)
	beqz sp, .Lwait_for_cpu_up
	beqz tp, .Lwait_for_cpu_up
	fence

	tail secondary_start_common
#endif

END(_start_kernel)

#ifdef CONFIG_RISCV_M_MODE
ENTRY(reset_regs)
	li sp, 0
	li gp, 0
	li tp, 0
	li t0, 0
	li t1, 0
	li t2, 0
	li s0, 0
	li s1, 0
	li a2, 0
	li a3, 0
	li a4, 0
	li a5, 0
	li a6, 0
	li a7, 0
	li s2, 0
	li s3, 0
	li s4, 0
	li s5, 0
	li s6, 0
	li s7, 0
	li s8, 0
	li s9, 0
	li s10, 0
	li s11, 0
	li t3, 0
	li t4, 0
	li t5, 0
	li t6, 0
	csrw CSR_SCRATCH, 0

#ifdef CONFIG_FPU
	csrr t0, CSR_MISA
	andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
	beqz t0, .Lreset_regs_done

	li t1, SR_FS
	csrs CSR_STATUS, t1
	fmv.s.x f0, zero
	fmv.s.x f1, zero
	fmv.s.x f2, zero
	fmv.s.x f3, zero
	fmv.s.x f4, zero
	fmv.s.x f5, zero
	fmv.s.x f6, zero
	fmv.s.x f7, zero
	fmv.s.x f8, zero
	fmv.s.x f9, zero
	fmv.s.x f10, zero
	fmv.s.x f11, zero
	fmv.s.x f12, zero
	fmv.s.x f13, zero
	fmv.s.x f14, zero
	fmv.s.x f15, zero
	fmv.s.x f16, zero
	fmv.s.x f17, zero
	fmv.s.x f18, zero
	fmv.s.x f19, zero
	fmv.s.x f20, zero
	fmv.s.x f21, zero
	fmv.s.x f22, zero
	fmv.s.x f23, zero
	fmv.s.x f24, zero
	fmv.s.x f25, zero
	fmv.s.x f26, zero
	fmv.s.x f27, zero
	fmv.s.x f28, zero
	fmv.s.x f29, zero
	fmv.s.x f30, zero
	fmv.s.x f31, zero
	csrw fcsr, 0
	/* note that the caller must clear SR_FS */
#endif /* CONFIG_FPU */
.Lreset_regs_done:
	ret
END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */

__PAGE_ALIGNED_BSS
	/* Empty zero page */
	.balign PAGE_SIZE