/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 *
 * RISC-V kernel entry point and early boot code:
 *  - Linux/RISC-V boot image header (parsed by bootloaders and EFI)
 *  - relocate_enable_mmu: turn on paging and jump to the virtual mapping
 *  - secondary_start_sbi: entry for harts started via the SBI HSM extension
 *  - _start_kernel: primary boot path up to start_kernel()
 *  - reset_regs: M-mode-only register sanitization
 */

#include <asm/asm-offsets.h>
#include <asm/asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/csr.h>
#include <asm/cpu_ops_sbi.h>
#include <asm/hwcap.h>
#include <asm/image.h>
#include <asm/xip_fixup.h>
#include "efi-header.S"

__HEAD
ENTRY(_start)
	/*
	 * Image header expected by Linux boot-loaders. The image header data
	 * structure is described in asm/image.h.
	 * Do not modify it without modifying the structure and all bootloaders
	 * that expect this header format!!
	 */
#ifdef CONFIG_EFI
	/*
	 * This instruction decodes to "MZ" ASCII required by UEFI.
	 * (c.li s4,-13 encodes to the bytes 0x4d 0x5a, i.e. the DOS/PE
	 * magic, while still being a harmless instruction when executed.)
	 */
	c.li s4,-13
	j _start_kernel
#else
	/* jump to start kernel */
	j _start_kernel
	/* reserved */
	.word 0
#endif
	.balign 8
#ifdef CONFIG_RISCV_M_MODE
	/* Image load offset (0MB) from start of RAM for M-mode */
	.dword 0
#else
#if __riscv_xlen == 64
	/* Image load offset(2MB) from start of RAM */
	.dword 0x200000
#else
	/* Image load offset(4MB) from start of RAM */
	.dword 0x400000
#endif
#endif
	/* Effective size of kernel image */
	.dword _end - _start
	.dword __HEAD_FLAGS
	.word RISCV_HEADER_VERSION
	.word 0
	.dword 0
	.ascii RISCV_IMAGE_MAGIC
	.balign 4
	.ascii RISCV_IMAGE_MAGIC2
#ifdef CONFIG_EFI
	/* Offset from _start to the PE/COFF header, as UEFI requires */
	.word pe_head_start - _start
pe_head_start:

	__EFI_PE_HEADER
#else
	.word 0
#endif

.align 2
#ifdef CONFIG_MMU
	/*
	 * relocate_enable_mmu: enable paging and continue at the kernel's
	 * virtual addresses.
	 *
	 * In:  a0 = physical address of the page directory to switch to
	 *           (callers below pass early_pg_dir or swapper_pg_dir),
	 *      ra = physical return address (rewritten to its virtual alias).
	 * Clobbers a1, a2, stvec. Returns into the virtual mapping.
	 */
	.global relocate_enable_mmu
relocate_enable_mmu:
	/*
	 * Relocate return address: a1 = (kernel virtual base - _start's
	 * physical/link address), then slide ra by that delta.
	 */
	la a1, kernel_map
	XIP_FIXUP_OFFSET a1
	REG_L a1, KERNEL_MAP_VIRT_ADDR(a1)
	la a2, _start
	sub a1, a1, a2
	add ra, ra, a1

	/* Point stvec to virtual address of instruction after satp write */
	la a2, 1f
	add a2, a2, a1
	csrw CSR_TVEC, a2

	/* Compute satp for kernel page tables, but don't load it yet */
	srl a2, a0, PAGE_SHIFT
	la a1, satp_mode
	XIP_FIXUP_OFFSET a1
	REG_L a1, 0(a1)
	or a2, a2, a1

	/*
	 * Load trampoline page directory, which will cause us to trap to
	 * stvec if VA != PA, or simply fall through if VA == PA. We need a
	 * full fence here because setup_vm() just wrote these PTEs and we need
	 * to ensure the new translations are in use.
	 */
	la a0, trampoline_pg_dir
	XIP_FIXUP_OFFSET a0
	srl a0, a0, PAGE_SHIFT
	or a0, a0, a1
	sfence.vma
	csrw CSR_SATP, a0
.align 2
1:
	/* Now running at virtual addresses (via the trampoline mapping). */
	/* Set trap vector to spin forever to help debug */
	la a0, .Lsecondary_park
	csrw CSR_TVEC, a0

	/*
	 * Reload the global pointer: gp must be re-derived at the virtual
	 * address; relaxation must be off so this doesn't optimize to a
	 * gp-relative reference to itself.
	 */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Switch to kernel page tables. A full fence is necessary in order to
	 * avoid using the trampoline translations, which are only correct for
	 * the first superpage. Fetching the fence is guaranteed to work
	 * because that first superpage is translated the same way.
	 */
	csrw CSR_SATP, a2
	sfence.vma

	ret
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
	/*
	 * secondary_start_sbi: entry point for non-boot harts brought up via
	 * the SBI HSM extension. Per the comment below, a0 = hartid and
	 * a1 = boot data (per-hart stack/task pointers at the
	 * SBI_HART_BOOT_*_OFFSET offsets).
	 */
	.global secondary_start_sbi
secondary_start_sbi:
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

	/* Load the global pointer */
	.option push
	.option norelax
	la gp, __global_pointer$
	.option pop

	/*
	 * Disable FPU & VECTOR to detect illegal usage of
	 * floating point or vector in kernel space
	 */
	li t0, SR_FS_VS
	csrc CSR_STATUS, t0

	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	/* a0 contains the hartid & a1 contains boot data */
	li a2, SBI_HART_BOOT_TASK_PTR_OFFSET
	XIP_FIXUP_OFFSET a2
	add a2, a2, a1
	REG_L tp, (a2)
	li a3, SBI_HART_BOOT_STACK_PTR_OFFSET
	XIP_FIXUP_OFFSET a3
	add a3, a3, a1
	REG_L sp, (a3)

.Lsecondary_start_common:

#ifdef CONFIG_MMU
	/* Enable virtual memory and relocate to virtual address */
	la a0, swapper_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate_enable_mmu
#endif
	call setup_trap_vector
	/* Tail-call into C; smp_callin never returns here. */
	tail smp_callin
#endif /* CONFIG_SMP */

.align 2
setup_trap_vector:
	/* Set trap vector to exception handler */
	la a0, handle_exception
	csrw CSR_TVEC, a0

	/*
	 * Set sup0 scratch register to 0, indicating to exception vector that
	 * we are presently executing in kernel.
	 */
	csrw CSR_SCRATCH, zero
	ret

.align 2
.Lsecondary_park:
	/* We lack SMP support or have too many harts, so park this hart */
	wfi
	j .Lsecondary_park

END(_start)

ENTRY(_start_kernel)
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

#ifdef CONFIG_RISCV_M_MODE
	/* flush the instruction cache */
	fence.i

	/* Reset all registers except ra, a0, a1 */
	call reset_regs

	/*
	 * Setup a PMP to permit access to all of memory. Some machines may
	 * not implement PMPs, so we set up a quick trap handler to just skip
	 * touching the PMPs on any trap.
	 */
	la a0, pmp_done
	csrw CSR_TVEC, a0

	li a0, -1
	csrw CSR_PMPADDR0, a0
	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
	csrw CSR_PMPCFG0, a0
.align 2
pmp_done:

	/*
	 * The hartid in a0 is expected later on, and we have no firmware
	 * to hand it to us.
	 */
	csrr a0, CSR_MHARTID
#endif /* CONFIG_RISCV_M_MODE */

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Disable FPU & VECTOR to detect illegal usage of
	 * floating point or vector in kernel space
	 */
	li t0, SR_FS_VS
	csrc CSR_STATUS, t0

#ifdef CONFIG_RISCV_BOOT_SPINWAIT
	/* Harts with an id >= CONFIG_NR_CPUS are parked permanently. */
	li t0, CONFIG_NR_CPUS
	blt a0, t0, .Lgood_cores
	tail .Lsecondary_park
.Lgood_cores:

	/* The lottery system is only required for spinwait booting method */
#ifndef CONFIG_XIP_KERNEL
	/* Pick one hart to run the main boot sequence */
	la a3, hart_lottery
	li a2, 1
	amoadd.w a3, a2, (a3)
	/* Only the hart that read 0 (the winner) falls through. */
	bnez a3, .Lsecondary_start

#else
	/* hart_lottery in flash contains a magic number */
	la a3, hart_lottery
	mv a2, a3
	XIP_FIXUP_OFFSET a2
	XIP_FIXUP_FLASH_OFFSET a3
	lw t1, (a3)
	amoswap.w t0, t1, (a2)
	/* first time here if hart_lottery in RAM is not set */
	beq t0, t1, .Lsecondary_start

#endif /* CONFIG_XIP_KERNEL */
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */

#ifdef CONFIG_XIP_KERNEL
	/* Temporary stack past _end so __copy_data (C) can run. */
	la sp, _end + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
	mv s0, a0
	call __copy_data

	/* Restore a0 copy */
	mv a0, s0
#endif

#ifndef CONFIG_XIP_KERNEL
	/* Clear BSS for flat non-ELF images */
	la a3, __bss_start
	la a4, __bss_stop
	ble a4, a3, clear_bss_done
clear_bss:
	REG_S zero, (a3)
	add a3, a3, RISCV_SZPTR
	blt a3, a4, clear_bss
clear_bss_done:
#endif
	/* Save hart ID of the boot hart for later C code. */
	la a2, boot_cpu_hartid
	XIP_FIXUP_OFFSET a2
	REG_S a0, (a2)

	/* Initialize page tables and relocate to virtual addresses */
	la tp, init_task
	la sp, init_thread_union + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
	addi sp, sp, -PT_SIZE_ON_STACK
#ifdef CONFIG_BUILTIN_DTB
	la a0, __dtb_start
	XIP_FIXUP_OFFSET a0
#else
	/* a1 holds the DTB pointer handed over by the bootloader/firmware. */
	mv a0, a1
#endif /* CONFIG_BUILTIN_DTB */
	call setup_vm
#ifdef CONFIG_MMU
	la a0, early_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate_enable_mmu
#endif /* CONFIG_MMU */

	call setup_trap_vector
	/* Restore C environment (tp/sp again, now at virtual addresses) */
	la tp, init_task
	la sp, init_thread_union + THREAD_SIZE
	addi sp, sp, -PT_SIZE_ON_STACK

#ifdef CONFIG_KASAN
	call kasan_early_init
#endif
	/* Start the kernel */
	call soc_early_init
	tail start_kernel

#ifdef CONFIG_RISCV_BOOT_SPINWAIT
.Lsecondary_start:
	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	/* a3 = hartid * sizeof(long): index into the per-hart pointer arrays */
	slli a3, a0, LGREG
	la a1, __cpu_spinwait_stack_pointer
	XIP_FIXUP_OFFSET a1
	la a2, __cpu_spinwait_task_pointer
	XIP_FIXUP_OFFSET a2
	add a1, a3, a1
	add a2, a3, a2

	/*
	 * This hart didn't win the lottery, so we wait for the winning hart to
	 * get far enough along the boot process that it should continue.
	 */
.Lwait_for_cpu_up:
	/* FIXME: We should WFI to save some energy here. */
	REG_L sp, (a1)
	REG_L tp, (a2)
	beqz sp, .Lwait_for_cpu_up
	beqz tp, .Lwait_for_cpu_up
	/* Order the sp/tp loads before any dependent memory accesses. */
	fence

	tail .Lsecondary_start_common
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */

END(_start_kernel)

#ifdef CONFIG_RISCV_M_MODE
/*
 * reset_regs: bring the register file to a known state when there is no
 * firmware that did it for us (M-mode boot). Clears all integer registers
 * except ra, a0, a1 (per the caller's comment above), plus the FP and
 * vector state when the corresponding MISA bits are set.
 */
ENTRY(reset_regs)
	li	sp, 0
	li	gp, 0
	li	tp, 0
	li	t0, 0
	li	t1, 0
	li	t2, 0
	li	s0, 0
	li	s1, 0
	li	a2, 0
	li	a3, 0
	li	a4, 0
	li	a5, 0
	li	a6, 0
	li	a7, 0
	li	s2, 0
	li	s3, 0
	li	s4, 0
	li	s5, 0
	li	s6, 0
	li	s7, 0
	li	s8, 0
	li	s9, 0
	li	s10, 0
	li	s11, 0
	li	t3, 0
	li	t4, 0
	li	t5, 0
	li	t6, 0
	csrw	CSR_SCRATCH, 0

#ifdef CONFIG_FPU
	/* Only touch FP state if MISA advertises the F or D extension. */
	csrr	t0, CSR_MISA
	andi	t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
	beqz	t0, .Lreset_regs_done_fpu

	/* Temporarily enable the FPU so the fmv instructions don't trap. */
	li	t1, SR_FS
	csrs	CSR_STATUS, t1
	fmv.s.x	f0, zero
	fmv.s.x	f1, zero
	fmv.s.x	f2, zero
	fmv.s.x	f3, zero
	fmv.s.x	f4, zero
	fmv.s.x	f5, zero
	fmv.s.x	f6, zero
	fmv.s.x	f7, zero
	fmv.s.x	f8, zero
	fmv.s.x	f9, zero
	fmv.s.x	f10, zero
	fmv.s.x	f11, zero
	fmv.s.x	f12, zero
	fmv.s.x	f13, zero
	fmv.s.x	f14, zero
	fmv.s.x	f15, zero
	fmv.s.x	f16, zero
	fmv.s.x	f17, zero
	fmv.s.x	f18, zero
	fmv.s.x	f19, zero
	fmv.s.x	f20, zero
	fmv.s.x	f21, zero
	fmv.s.x	f22, zero
	fmv.s.x	f23, zero
	fmv.s.x	f24, zero
	fmv.s.x	f25, zero
	fmv.s.x	f26, zero
	fmv.s.x	f27, zero
	fmv.s.x	f28, zero
	fmv.s.x	f29, zero
	fmv.s.x	f30, zero
	fmv.s.x	f31, zero
	csrw	fcsr, 0
	/* note that the caller must clear SR_FS */
.Lreset_regs_done_fpu:
#endif /* CONFIG_FPU */

#ifdef CONFIG_RISCV_ISA_V
	/* Only touch vector state if MISA advertises the V extension. */
	csrr	t0, CSR_MISA
	li	t1, COMPAT_HWCAP_ISA_V
	and	t0, t0, t1
	beqz	t0, .Lreset_regs_done_vector

	/*
	 * Clear vector registers and reset vcsr
	 * VLMAX has a defined value, VLEN is a constant,
	 * and this form of vsetvli is defined to set vl to VLMAX.
	 */
	li	t1, SR_VS
	csrs	CSR_STATUS, t1
	/*
	 * NOTE(review): per the Zicsr spec, CSRRS with rs1=x0 reads the CSR
	 * but performs no write, so this line may not actually reset vcsr
	 * as the comment above intends — confirm against the ISA manual.
	 */
	csrs	CSR_VCSR, x0
	vsetvli t1, x0, e8, m8, ta, ma
	vmv.v.i	v0, 0
	vmv.v.i	v8, 0
	vmv.v.i	v16, 0
	vmv.v.i	v24, 0
	/* note that the caller must clear SR_VS */
.Lreset_regs_done_vector:
#endif /* CONFIG_RISCV_ISA_V */
	ret
END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */