/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <asm/asm-offsets.h>
#include <asm/asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/csr.h>
#include <asm/cpu_ops_sbi.h>
#include <asm/hwcap.h>
#include <asm/image.h>
#include "efi-header.S"

#ifdef CONFIG_XIP_KERNEL
.macro XIP_FIXUP_OFFSET reg
	REG_L t0, _xip_fixup
	add \reg, \reg, t0
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
	la t1, __data_loc
	li t0, XIP_OFFSET_MASK
	and t1, t1, t0
	li t1, XIP_OFFSET
	sub t0, t0, t1
	sub \reg, \reg, t0
.endm
_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
#else
.macro XIP_FIXUP_OFFSET reg
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
.endm
#endif /* CONFIG_XIP_KERNEL */

__HEAD
ENTRY(_start)
	/*
	 * Image header expected by Linux boot-loaders. The image header data
	 * structure is described in asm/image.h.
	 * Do not modify it without modifying the structure and all bootloaders
	 * that expect this header format!
	 */
#ifdef CONFIG_EFI
	/*
	 * This instruction decodes to "MZ" ASCII required by UEFI.
	 */
	c.li s4,-13
	j _start_kernel
#else
	/* jump to start kernel */
	j _start_kernel
	/* reserved */
	.word 0
#endif
	.balign 8
#ifdef CONFIG_RISCV_M_MODE
	/* Image load offset (0MB) from start of RAM for M-mode */
	.dword 0
#else
#if __riscv_xlen == 64
	/* Image load offset (2MB) from start of RAM */
	.dword 0x200000
#else
	/* Image load offset (4MB) from start of RAM */
	.dword 0x400000
#endif
#endif
	/* Effective size of kernel image */
	.dword _end - _start
	.dword __HEAD_FLAGS
	.word RISCV_HEADER_VERSION
	.word 0
	.dword 0
	.ascii RISCV_IMAGE_MAGIC
	.balign 4
	.ascii RISCV_IMAGE_MAGIC2
#ifdef CONFIG_EFI
	.word pe_head_start - _start
pe_head_start:

	__EFI_PE_HEADER
#else
	.word 0
#endif

.align 2
#ifdef CONFIG_MMU
relocate:
	/* Relocate return address */
	la a1, kernel_map
	XIP_FIXUP_OFFSET a1
	REG_L a1, KERNEL_MAP_VIRT_ADDR(a1)
	la a2, _start
	sub a1, a1, a2
	add ra, ra, a1

	/* Point stvec to virtual address of instruction after satp write */
	la a2, 1f
	add a2, a2, a1
	csrw CSR_TVEC, a2

	/* Compute satp for kernel page tables, but don't load it yet */
	srl a2, a0, PAGE_SHIFT
	la a1, satp_mode
	REG_L a1, 0(a1)
	or a2, a2, a1

	/*
	 * Load trampoline page directory, which will cause us to trap to
	 * stvec if VA != PA, or simply fall through if VA == PA. We need a
	 * full fence here because setup_vm() just wrote these PTEs and we need
	 * to ensure the new translations are in use.
	 */
	la a0, trampoline_pg_dir
	XIP_FIXUP_OFFSET a0
	srl a0, a0, PAGE_SHIFT
	or a0, a0, a1
	sfence.vma
	csrw CSR_SATP, a0
.align 2
1:
	/* Set trap vector to spin forever to help debug */
	la a0, .Lsecondary_park
	csrw CSR_TVEC, a0

	/* Reload the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Switch to kernel page tables. A full fence is necessary in order to
	 * avoid using the trampoline translations, which are only correct for
	 * the first superpage. Fetching the fence is guaranteed to work
	 * because that first superpage is translated the same way.
	 */
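	/*
	 * a2 still holds the satp value assembled above: the PFN of the page
	 * directory the caller passed in a0, OR'd with the paging mode loaded
	 * from the satp_mode symbol. Writing it moves this hart off the
	 * trampoline mapping and onto the caller's page tables (early_pg_dir
	 * or swapper_pg_dir in this file).
	 */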
	csrw CSR_SATP, a2
	sfence.vma

	ret
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
	.global secondary_start_sbi
secondary_start_sbi:
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

	/* Load the global pointer */
	.option push
	.option norelax
	la gp, __global_pointer$
	.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	/* a0 contains the hartid & a1 contains boot data */
	li a2, SBI_HART_BOOT_TASK_PTR_OFFSET
	XIP_FIXUP_OFFSET a2
	add a2, a2, a1
	REG_L tp, (a2)
	li a3, SBI_HART_BOOT_STACK_PTR_OFFSET
	XIP_FIXUP_OFFSET a3
	add a3, a3, a1
	REG_L sp, (a3)

.Lsecondary_start_common:

#ifdef CONFIG_MMU
	/* Enable virtual memory and relocate to virtual address */
	la a0, swapper_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate
#endif
	call setup_trap_vector
	tail smp_callin
#endif /* CONFIG_SMP */

.align 2
setup_trap_vector:
	/* Set trap vector to exception handler */
	la a0, handle_exception
	csrw CSR_TVEC, a0

	/*
	 * Set sup0 scratch register to 0, indicating to exception vector that
	 * we are presently executing in kernel.
	 */
	csrw CSR_SCRATCH, zero
	ret

.align 2
.Lsecondary_park:
	/* We lack SMP support or have too many harts, so park this hart */
	wfi
	j .Lsecondary_park

END(_start)

ENTRY(_start_kernel)
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

#ifdef CONFIG_RISCV_M_MODE
	/* flush the instruction cache */
	fence.i

	/* Reset all registers except ra, a0, a1 */
	call reset_regs

	/*
	 * Setup a PMP to permit access to all of memory. Some machines may
	 * not implement PMPs, so we set up a quick trap handler to just skip
	 * touching the PMPs on any trap.
	 */
	la a0, pmp_done
	csrw CSR_TVEC, a0

	li a0, -1
	csrw CSR_PMPADDR0, a0
	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
	csrw CSR_PMPCFG0, a0
.align 2
pmp_done:

	/*
	 * The hartid in a0 is expected later on, and we have no firmware
	 * to hand it to us.
	 */
	csrr a0, CSR_MHARTID
#endif /* CONFIG_RISCV_M_MODE */

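	/*
	 * Note: gp is loaded with linker relaxation disabled (.option norelax
	 * below); otherwise the linker could relax the "la gp" into a
	 * gp-relative sequence, which cannot work before gp itself is valid.
	 */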
	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

#ifdef CONFIG_RISCV_BOOT_SPINWAIT
	li t0, CONFIG_NR_CPUS
	blt a0, t0, .Lgood_cores
	tail .Lsecondary_park
.Lgood_cores:

	/* The lottery system is only required for the spinwait booting method */
#ifndef CONFIG_XIP_KERNEL
	/* Pick one hart to run the main boot sequence */
	la a3, hart_lottery
	li a2, 1
	amoadd.w a3, a2, (a3)
	bnez a3, .Lsecondary_start

#else
	/* hart_lottery in flash contains a magic number */
	la a3, hart_lottery
	mv a2, a3
	XIP_FIXUP_OFFSET a2
	XIP_FIXUP_FLASH_OFFSET a3
	lw t1, (a3)
	amoswap.w t0, t1, (a2)
	/* first time here if hart_lottery in RAM is not set */
	beq t0, t1, .Lsecondary_start

#endif /* CONFIG_XIP_KERNEL */
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */

#ifdef CONFIG_XIP_KERNEL
	la sp, _end + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
	mv s0, a0
	call __copy_data

	/* Restore a0 copy */
	mv a0, s0
#endif

#ifndef CONFIG_XIP_KERNEL
	/* Clear BSS for flat non-ELF images */
	la a3, __bss_start
	la a4, __bss_stop
	ble a4, a3, clear_bss_done
clear_bss:
	REG_S zero, (a3)
	add a3, a3, RISCV_SZPTR
	blt a3, a4, clear_bss
clear_bss_done:
#endif
	/* Save hart ID and DTB physical address */
	mv s0, a0
	mv s1, a1

	la a2, boot_cpu_hartid
	XIP_FIXUP_OFFSET a2
	REG_S a0, (a2)

	/* Initialize page tables and relocate to virtual addresses */
	la sp, init_thread_union + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
#ifdef CONFIG_BUILTIN_DTB
	la a0, __dtb_start
	XIP_FIXUP_OFFSET a0
#else
	mv a0, s1
#endif /* CONFIG_BUILTIN_DTB */
	call setup_vm
#ifdef CONFIG_MMU
	la a0, early_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate
#endif /* CONFIG_MMU */

	call setup_trap_vector
	/* Restore C environment */
	la tp, init_task
	la sp, init_thread_union + THREAD_SIZE

#ifdef CONFIG_KASAN
	call kasan_early_init
#endif
	/* Start the kernel */
	call soc_early_init
	tail start_kernel

#ifdef CONFIG_RISCV_BOOT_SPINWAIT
.Lsecondary_start:
	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	slli a3, a0, LGREG
	la a1, __cpu_spinwait_stack_pointer
	XIP_FIXUP_OFFSET a1
	la a2, __cpu_spinwait_task_pointer
	XIP_FIXUP_OFFSET a2
	add a1, a3, a1
	add a2, a3, a2

	/*
	 * This hart didn't win the lottery, so we wait for the winning hart to
	 * get far enough along the boot process that it should continue.
	 */
.Lwait_for_cpu_up:
	/* FIXME: We should WFI to save some energy here. */
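	/*
	 * The winning hart publishes this hart's stack and task pointers in
	 * the __cpu_spinwait_stack_pointer/__cpu_spinwait_task_pointer arrays
	 * (indexed by hartid via the slli/add above), so keep polling until
	 * both slots are non-zero, then fence before using them.
	 */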
	REG_L sp, (a1)
	REG_L tp, (a2)
	beqz sp, .Lwait_for_cpu_up
	beqz tp, .Lwait_for_cpu_up
	fence

	tail .Lsecondary_start_common
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */

END(_start_kernel)

#ifdef CONFIG_RISCV_M_MODE
ENTRY(reset_regs)
	li sp, 0
	li gp, 0
	li tp, 0
	li t0, 0
	li t1, 0
	li t2, 0
	li s0, 0
	li s1, 0
	li a2, 0
	li a3, 0
	li a4, 0
	li a5, 0
	li a6, 0
	li a7, 0
	li s2, 0
	li s3, 0
	li s4, 0
	li s5, 0
	li s6, 0
	li s7, 0
	li s8, 0
	li s9, 0
	li s10, 0
	li s11, 0
	li t3, 0
	li t4, 0
	li t5, 0
	li t6, 0
	csrw CSR_SCRATCH, 0

#ifdef CONFIG_FPU
	/* Clear the FP register file only if misa advertises F or D */
	csrr t0, CSR_MISA
	andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
	beqz t0, .Lreset_regs_done

	li t1, SR_FS
	csrs CSR_STATUS, t1
	fmv.s.x f0, zero
	fmv.s.x f1, zero
	fmv.s.x f2, zero
	fmv.s.x f3, zero
	fmv.s.x f4, zero
	fmv.s.x f5, zero
	fmv.s.x f6, zero
	fmv.s.x f7, zero
	fmv.s.x f8, zero
	fmv.s.x f9, zero
	fmv.s.x f10, zero
	fmv.s.x f11, zero
	fmv.s.x f12, zero
	fmv.s.x f13, zero
	fmv.s.x f14, zero
	fmv.s.x f15, zero
	fmv.s.x f16, zero
	fmv.s.x f17, zero
	fmv.s.x f18, zero
	fmv.s.x f19, zero
	fmv.s.x f20, zero
	fmv.s.x f21, zero
	fmv.s.x f22, zero
	fmv.s.x f23, zero
	fmv.s.x f24, zero
	fmv.s.x f25, zero
	fmv.s.x f26, zero
	fmv.s.x f27, zero
	fmv.s.x f28, zero
	fmv.s.x f29, zero
	fmv.s.x f30, zero
	fmv.s.x f31, zero
	csrw fcsr, 0
	/* note that the caller must clear SR_FS */
#endif /* CONFIG_FPU */
.Lreset_regs_done:
	ret
END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */