/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <asm/asm-offsets.h>
#include <asm/asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/csr.h>
#include <asm/cpu_ops_sbi.h>
#include <asm/hwcap.h>
#include <asm/image.h>
#include <asm/xip_fixup.h>
#include "efi-header.S"

__HEAD
ENTRY(_start)
	/*
	 * Image header expected by Linux boot-loaders. The image header data
	 * structure is described in asm/image.h.
	 * Do not modify it without modifying the structure and all bootloaders
	 * that expect this header format!!
	 */
#ifdef CONFIG_EFI
	/*
	 * This instruction decodes to "MZ" ASCII required by UEFI.
	 * (c.li s4,-13 encodes to 0x5a4d, i.e. the bytes "MZ" little-endian,
	 * while still being a harmless instruction if executed directly.)
	 */
	c.li s4,-13
	j _start_kernel
#else
	/* jump to start kernel */
	j _start_kernel
	/* reserved */
	.word 0
#endif
	.balign 8
#ifdef CONFIG_RISCV_M_MODE
	/* Image load offset (0MB) from start of RAM for M-mode */
	.dword 0
#else
#if __riscv_xlen == 64
	/* Image load offset(2MB) from start of RAM */
	.dword 0x200000
#else
	/* Image load offset(4MB) from start of RAM */
	.dword 0x400000
#endif
#endif
	/* Effective size of kernel image */
	.dword _end - _start
	.dword __HEAD_FLAGS
	.word RISCV_HEADER_VERSION
	.word 0
	.dword 0
	.ascii RISCV_IMAGE_MAGIC
	.balign 4
	.ascii RISCV_IMAGE_MAGIC2
#ifdef CONFIG_EFI
	.word pe_head_start - _start
pe_head_start:

	__EFI_PE_HEADER
#else
	.word 0
#endif

.align 2
#ifdef CONFIG_MMU
	/*
	 * relocate_enable_mmu: turn on paging and continue at the kernel's
	 * virtual address.
	 *
	 * In:  a0 = physical address of the page directory to switch to
	 *           (callers below pass swapper_pg_dir or early_pg_dir)
	 *      ra = physical return address; rewritten here to its virtual
	 *           equivalent before satp is written
	 * Clobbers: a1, a2, stvec (left pointing at .Lsecondary_park on exit)
	 */
	.global relocate_enable_mmu
relocate_enable_mmu:
	/*
	 * Relocate return address: a1 = (kernel virtual base) - (_start's
	 * physical/link address), i.e. the PA->VA delta applied to ra.
	 */
	la a1, kernel_map
	XIP_FIXUP_OFFSET a1
	REG_L a1, KERNEL_MAP_VIRT_ADDR(a1)
	la a2, _start
	sub a1, a1, a2
	add ra, ra, a1

	/* Point stvec to virtual address of instruction after satp write */
	la a2, 1f
	add a2, a2, a1
	csrw CSR_TVEC, a2

	/*
	 * Compute satp for kernel page tables, but don't load it yet:
	 * PPN of the pgdir in a0, OR'ed with the MODE bits from satp_mode.
	 */
	srl a2, a0, PAGE_SHIFT
	la a1, satp_mode
	REG_L a1, 0(a1)
	or a2, a2, a1

	/*
	 * Load trampoline page directory, which will cause us to trap to
	 * stvec if VA != PA, or simply fall through if VA == PA. We need a
	 * full fence here because setup_vm() just wrote these PTEs and we need
	 * to ensure the new translations are in use.
	 */
	la a0, trampoline_pg_dir
	XIP_FIXUP_OFFSET a0
	srl a0, a0, PAGE_SHIFT
	or a0, a0, a1
	sfence.vma
	csrw CSR_SATP, a0
.align 2
1:
	/* Set trap vector to spin forever to help debug */
	la a0, .Lsecondary_park
	csrw CSR_TVEC, a0

	/* Reload the global pointer (now a virtual address) */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Switch to kernel page tables. A full fence is necessary in order to
	 * avoid using the trampoline translations, which are only correct for
	 * the first superpage. Fetching the fence is guaranteed to work
	 * because that first superpage is translated the same way.
	 */
	csrw CSR_SATP, a2
	sfence.vma

	ret
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
	/*
	 * secondary_start_sbi: entry point for non-boot harts brought up via
	 * the SBI HSM extension.
	 *
	 * In:  a0 = hartid, a1 = pointer to per-hart boot data, from which
	 *      tp (boot task) and sp (boot stack) are loaded at the offsets
	 *      declared in asm/cpu_ops_sbi.h.
	 * Does not return: tail-calls smp_callin.
	 */
	.global secondary_start_sbi
secondary_start_sbi:
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

	/* Load the global pointer */
	.option push
	.option norelax
	la gp, __global_pointer$
	.option pop

	/*
	 * Disable FPU & VECTOR to detect illegal usage of
	 * floating point or vector in kernel space
	 */
	li t0, SR_FS_VS
	csrc CSR_STATUS, t0

	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	/* a0 contains the hartid & a1 contains boot data */
	li a2, SBI_HART_BOOT_TASK_PTR_OFFSET
	XIP_FIXUP_OFFSET a2
	add a2, a2, a1
	REG_L tp, (a2)
	li a3, SBI_HART_BOOT_STACK_PTR_OFFSET
	XIP_FIXUP_OFFSET a3
	add a3, a3, a1
	REG_L sp, (a3)

	/*
	 * Common tail for secondary harts (also reached from the spinwait
	 * path below): enable the MMU, install the real trap vector, and
	 * enter C code. Expects tp and sp to already hold this hart's task
	 * and stack pointers.
	 */
.Lsecondary_start_common:

#ifdef CONFIG_MMU
	/* Enable virtual memory and relocate to virtual address */
	la a0, swapper_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate_enable_mmu
#endif
	call setup_trap_vector
	tail smp_callin
#endif /* CONFIG_SMP */

.align 2
/*
 * setup_trap_vector: install the real exception handler in stvec and
 * mark "currently in kernel" in the scratch CSR.
 * Clobbers: a0.
 */
setup_trap_vector:
	/* Set trap vector to exception handler */
	la a0, handle_exception
	csrw CSR_TVEC, a0

	/*
	 * Set sup0 scratch register to 0, indicating to exception vector that
	 * we are presently executing in kernel.
	 */
	csrw CSR_SCRATCH, zero
	ret

.align 2
.Lsecondary_park:
	/* We lack SMP support or have too many harts, so park this hart */
	wfi
	j .Lsecondary_park

END(_start)

/*
 * _start_kernel: main boot-hart entry. On entry (S-mode/SBI boot):
 * a0 = hartid, a1 = physical address of the devicetree blob.
 * Does not return: tail-calls start_kernel.
 */
ENTRY(_start_kernel)
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

#ifdef CONFIG_RISCV_M_MODE
	/* flush the instruction cache */
	fence.i

	/* Reset all registers except ra, a0, a1 */
	call reset_regs

	/*
	 * Setup a PMP to permit access to all of memory. Some machines may
	 * not implement PMPs, so we set up a quick trap handler to just skip
	 * touching the PMPs on any trap.
	 */
	la a0, pmp_done
	csrw CSR_TVEC, a0

	li a0, -1
	csrw CSR_PMPADDR0, a0
	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
	csrw CSR_PMPCFG0, a0
.align 2
pmp_done:

	/*
	 * The hartid in a0 is expected later on, and we have no firmware
	 * to hand it to us.
	 */
	csrr a0, CSR_MHARTID
#endif /* CONFIG_RISCV_M_MODE */

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Disable FPU & VECTOR to detect illegal usage of
	 * floating point or vector in kernel space
	 */
	li t0, SR_FS_VS
	csrc CSR_STATUS, t0

#ifdef CONFIG_RISCV_BOOT_SPINWAIT
	/* Park harts whose id is beyond what the kernel was built for */
	li t0, CONFIG_NR_CPUS
	blt a0, t0, .Lgood_cores
	tail .Lsecondary_park
.Lgood_cores:

	/* The lottery system is only required for spinwait booting method */
#ifndef CONFIG_XIP_KERNEL
	/* Pick one hart to run the main boot sequence */
	la a3, hart_lottery
	li a2, 1
	amoadd.w a3, a2, (a3)
	/* a3 holds the pre-increment value: 0 only for the first (winning) hart */
	bnez a3, .Lsecondary_start

#else
	/* hart_lottery in flash contains a magic number */
	la a3, hart_lottery
	mv a2, a3
	XIP_FIXUP_OFFSET a2
	XIP_FIXUP_FLASH_OFFSET a3
	lw t1, (a3)
	amoswap.w t0, t1, (a2)
	/* first time here if hart_lottery in RAM is not set */
	beq t0, t1, .Lsecondary_start

#endif /* CONFIG_XIP_KERNEL */
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */

#ifdef CONFIG_XIP_KERNEL
	/* Temporary stack past _end while data is copied from flash to RAM */
	la sp, _end + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
	mv s0, a0
	call __copy_data

	/* Restore a0 copy */
	mv a0, s0
#endif

#ifndef CONFIG_XIP_KERNEL
	/* Clear BSS for flat non-ELF images */
	la a3, __bss_start
	la a4, __bss_stop
	ble a4, a3, clear_bss_done
clear_bss:
	REG_S zero, (a3)
	add a3, a3, RISCV_SZPTR
	blt a3, a4, clear_bss
clear_bss_done:
#endif
	/* Record this hart's id as the boot CPU */
	la a2, boot_cpu_hartid
	XIP_FIXUP_OFFSET a2
	REG_S a0, (a2)

	/* Initialize page tables and relocate to virtual addresses */
	la tp, init_task
	la sp, init_thread_union + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
	addi sp, sp, -PT_SIZE_ON_STACK
#ifdef CONFIG_BUILTIN_DTB
	la a0, __dtb_start
	XIP_FIXUP_OFFSET a0
#else
	/* setup_vm() takes the DTB address the bootloader passed in a1 */
	mv a0, a1
#endif /* CONFIG_BUILTIN_DTB */
	call setup_vm
#ifdef CONFIG_MMU
	la a0, early_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate_enable_mmu
#endif /* CONFIG_MMU */

	call setup_trap_vector
	/* Restore C environment (tp/sp as virtual addresses, no XIP fixup) */
	la tp, init_task
	la sp, init_thread_union + THREAD_SIZE
	addi sp, sp, -PT_SIZE_ON_STACK

#ifdef CONFIG_KASAN
	call kasan_early_init
#endif
	/* Start the kernel */
	call soc_early_init
	tail start_kernel

#ifdef CONFIG_RISCV_BOOT_SPINWAIT
.Lsecondary_start:
	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	/* a1/a2 = &__cpu_spinwait_{stack,task}_pointer[hartid] */
	slli a3, a0, LGREG
	la a1, __cpu_spinwait_stack_pointer
	XIP_FIXUP_OFFSET a1
	la a2, __cpu_spinwait_task_pointer
	XIP_FIXUP_OFFSET a2
	add a1, a3, a1
	add a2, a3, a2

	/*
	 * This hart didn't win the lottery, so we wait for the winning hart to
	 * get far enough along the boot process that it should continue.
	 */
.Lwait_for_cpu_up:
	/* FIXME: We should WFI to save some energy here. */
	REG_L sp, (a1)
	REG_L tp, (a2)
	beqz sp, .Lwait_for_cpu_up
	beqz tp, .Lwait_for_cpu_up
	/* Order the sp/tp loads before any later accesses by this hart */
	fence

	tail .Lsecondary_start_common
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */

END(_start_kernel)

#ifdef CONFIG_RISCV_M_MODE
/*
 * reset_regs: bring the register file to a known state on machines where
 * no firmware ran before us.
 *
 * Zeroes every integer register except ra, a0, a1 (per the call site:
 * return address, hartid and DTB pointer must survive), clears the scratch
 * CSR, and — when the hardware advertises the F/D or V extensions in misa —
 * zeroes the FP or vector state as well. Note the FP/vector blocks set
 * SR_FS/SR_VS to do their work; the caller must clear them again.
 */
ENTRY(reset_regs)
	li sp, 0
	li gp, 0
	li tp, 0
	li t0, 0
	li t1, 0
	li t2, 0
	li s0, 0
	li s1, 0
	li a2, 0
	li a3, 0
	li a4, 0
	li a5, 0
	li a6, 0
	li a7, 0
	li s2, 0
	li s3, 0
	li s4, 0
	li s5, 0
	li s6, 0
	li s7, 0
	li s8, 0
	li s9, 0
	li s10, 0
	li s11, 0
	li t3, 0
	li t4, 0
	li t5, 0
	li t6, 0
	csrw CSR_SCRATCH, 0

#ifdef CONFIG_FPU
	/* Only touch FP state if misa reports the F or D extension */
	csrr t0, CSR_MISA
	andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
	beqz t0, .Lreset_regs_done_fpu

	/* FP accesses trap unless status.FS is enabled first */
	li t1, SR_FS
	csrs CSR_STATUS, t1
	fmv.s.x f0, zero
	fmv.s.x f1, zero
	fmv.s.x f2, zero
	fmv.s.x f3, zero
	fmv.s.x f4, zero
	fmv.s.x f5, zero
	fmv.s.x f6, zero
	fmv.s.x f7, zero
	fmv.s.x f8, zero
	fmv.s.x f9, zero
	fmv.s.x f10, zero
	fmv.s.x f11, zero
	fmv.s.x f12, zero
	fmv.s.x f13, zero
	fmv.s.x f14, zero
	fmv.s.x f15, zero
	fmv.s.x f16, zero
	fmv.s.x f17, zero
	fmv.s.x f18, zero
	fmv.s.x f19, zero
	fmv.s.x f20, zero
	fmv.s.x f21, zero
	fmv.s.x f22, zero
	fmv.s.x f23, zero
	fmv.s.x f24, zero
	fmv.s.x f25, zero
	fmv.s.x f26, zero
	fmv.s.x f27, zero
	fmv.s.x f28, zero
	fmv.s.x f29, zero
	fmv.s.x f30, zero
	fmv.s.x f31, zero
	csrw fcsr, 0
	/* note that the caller must clear SR_FS */
.Lreset_regs_done_fpu:
#endif /* CONFIG_FPU */

#ifdef CONFIG_RISCV_ISA_V
	/* Only touch vector state if misa reports the V extension */
	csrr t0, CSR_MISA
	li t1, COMPAT_HWCAP_ISA_V
	and t0, t0, t1
	beqz t0, .Lreset_regs_done_vector

	/*
	 * Clear vector registers and reset vcsr
	 * VLMAX has a defined value, VLEN is a constant,
	 * and this form of vsetvli is defined to set vl to VLMAX.
	 */
	li t1, SR_VS
	csrs CSR_STATUS, t1
	csrs CSR_VCSR, x0
	vsetvli t1, x0, e8, m8, ta, ma
	vmv.v.i v0, 0
	vmv.v.i v8, 0
	vmv.v.i v16, 0
	vmv.v.i v24, 0
	/* note that the caller must clear SR_VS */
.Lreset_regs_done_vector:
#endif /* CONFIG_RISCV_ISA_V */
	ret
END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */