/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 *
 * RISC-V kernel entry: boot image header, MMU-enable trampoline, and
 * early per-hart bring-up for both SBI (S-mode) and M-mode boots.
 */

#include <asm/asm-offsets.h>
#include <asm/asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/csr.h>
#include <asm/cpu_ops_sbi.h>
#include <asm/hwcap.h>
#include <asm/image.h>
#include "efi-header.S"

#ifdef CONFIG_XIP_KERNEL
/* Translate a link-time (flash) address in \reg to its RAM copy. Clobbers t0. */
.macro XIP_FIXUP_OFFSET reg
	REG_L t0, _xip_fixup
	add \reg, \reg, t0
.endm
/*
 * Translate an address in \reg to its location in flash. Clobbers t0, t1.
 *
 * FIX: the previous version overwrote t1 (holding the masked __data_loc)
 * with XIP_OFFSET before using it, so the adjustment degenerated to the
 * constant XIP_OFFSET_MASK - XIP_OFFSET instead of
 * (__data_loc & XIP_OFFSET_MASK) - XIP_OFFSET.
 */
.macro XIP_FIXUP_FLASH_OFFSET reg
	la t1, __data_loc
	li t0, XIP_OFFSET_MASK
	and t1, t1, t0			/* t1 = __data_loc & XIP_OFFSET_MASK */
	li t0, XIP_OFFSET
	sub t0, t1, t0			/* t0 = t1 - XIP_OFFSET */
	sub \reg, \reg, t0
.endm
_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
#else
/* Non-XIP: the fixup macros are no-ops. */
.macro XIP_FIXUP_OFFSET reg
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
.endm
#endif /* CONFIG_XIP_KERNEL */

__HEAD
ENTRY(_start)
	/*
	 * Image header expected by Linux boot-loaders. The image header data
	 * structure is described in asm/image.h.
	 * Do not modify it without modifying the structure and all bootloaders
	 * that expect this header format!!
	 */
#ifdef CONFIG_EFI
	/*
	 * This instruction decodes to "MZ" ASCII required by UEFI.
	 */
	c.li s4,-13
	j _start_kernel
#else
	/* jump to start kernel */
	j _start_kernel
	/* reserved */
	.word 0
#endif
	.balign 8
#ifdef CONFIG_RISCV_M_MODE
	/* Image load offset (0MB) from start of RAM for M-mode */
	.dword 0
#else
#if __riscv_xlen == 64
	/* Image load offset(2MB) from start of RAM */
	.dword 0x200000
#else
	/* Image load offset(4MB) from start of RAM */
	.dword 0x400000
#endif
#endif
	/* Effective size of kernel image */
	.dword _end - _start
	.dword __HEAD_FLAGS
	.word RISCV_HEADER_VERSION
	.word 0
	.dword 0
	.ascii RISCV_IMAGE_MAGIC
	.balign 4
	.ascii RISCV_IMAGE_MAGIC2
#ifdef CONFIG_EFI
	.word pe_head_start - _start
pe_head_start:

	__EFI_PE_HEADER
#else
	.word 0
#endif

.align 2
#ifdef CONFIG_MMU
/*
 * relocate: turn on paging and jump to the kernel's virtual address range.
 * In:  a0 = physical address of the page directory to finally switch to.
 * Returns (via the relocated ra) into the virtual mapping.
 * Clobbers a0-a2 and stvec; assumes setup_vm() already populated the tables.
 */
relocate:
	/* Relocate return address: ra += kernel_map.virt_addr - _start */
	la a1, kernel_map
	XIP_FIXUP_OFFSET a1
	REG_L a1, KERNEL_MAP_VIRT_ADDR(a1)
	la a2, _start
	sub a1, a1, a2
	add ra, ra, a1

	/* Point stvec to virtual address of instruction after satp write */
	la a2, 1f
	add a2, a2, a1
	csrw CSR_TVEC, a2

	/* Compute satp for kernel page tables, but don't load it yet */
	srl a2, a0, PAGE_SHIFT
	la a1, satp_mode
	REG_L a1, 0(a1)
	or a2, a2, a1

	/*
	 * Load trampoline page directory, which will cause us to trap to
	 * stvec if VA != PA, or simply fall through if VA == PA. We need a
	 * full fence here because setup_vm() just wrote these PTEs and we need
	 * to ensure the new translations are in use.
	 */
	la a0, trampoline_pg_dir
	XIP_FIXUP_OFFSET a0
	srl a0, a0, PAGE_SHIFT
	or a0, a0, a1
	sfence.vma
	csrw CSR_SATP, a0
.align 2
1:
	/* Set trap vector to spin forever to help debug */
	la a0, .Lsecondary_park
	csrw CSR_TVEC, a0

	/* Reload the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Switch to kernel page tables.  A full fence is necessary in order to
	 * avoid using the trampoline translations, which are only correct for
	 * the first superpage.  Fetching the fence is guaranteed to work
	 * because that first superpage is translated the same way.
	 */
	csrw CSR_SATP, a2
	sfence.vma

	ret
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
	.global secondary_start_sbi
/*
 * Entry point for secondary harts started through the SBI HSM extension.
 * In: a0 = hartid, a1 = pointer to per-hart boot data (task/stack pointers).
 */
secondary_start_sbi:
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

	/* Load the global pointer */
	.option push
	.option norelax
	la gp, __global_pointer$
	.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	/* a0 contains the hartid & a1 contains boot data */
	li a2, SBI_HART_BOOT_TASK_PTR_OFFSET
	XIP_FIXUP_OFFSET a2
	add a2, a2, a1
	REG_L tp, (a2)
	li a3, SBI_HART_BOOT_STACK_PTR_OFFSET
	XIP_FIXUP_OFFSET a3
	add a3, a3, a1
	REG_L sp, (a3)

.Lsecondary_start_common:

#ifdef CONFIG_MMU
	/* Enable virtual memory and relocate to virtual address */
	la a0, swapper_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate
#endif
	call setup_trap_vector
	tail smp_callin
#endif /* CONFIG_SMP */

.align 2
/* Install the real exception handler in stvec; clobbers a0. */
setup_trap_vector:
	/* Set trap vector to exception handler */
	la a0, handle_exception
	csrw CSR_TVEC, a0

	/*
	 * Set sup0 scratch register to 0, indicating to exception vector that
	 * we are presently executing in kernel.
	 */
	csrw CSR_SCRATCH, zero
	ret

.align 2
.Lsecondary_park:
	/* We lack SMP support or have too many harts, so park this hart */
	wfi
	j .Lsecondary_park

END(_start)

ENTRY(_start_kernel)
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

#ifdef CONFIG_RISCV_M_MODE
	/* flush the instruction cache */
	fence.i

	/* Reset all registers except ra, a0, a1 */
	call reset_regs

	/*
	 * Setup a PMP to permit access to all of memory.  Some machines may
	 * not implement PMPs, so we set up a quick trap handler to just skip
	 * touching the PMPs on any trap.
	 */
	la a0, pmp_done
	csrw CSR_TVEC, a0

	li a0, -1
	csrw CSR_PMPADDR0, a0
	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
	csrw CSR_PMPCFG0, a0
.align 2
pmp_done:

	/*
	 * The hartid in a0 is expected later on, and we have no firmware
	 * to hand it to us.
	 */
	csrr a0, CSR_MHARTID
#endif /* CONFIG_RISCV_M_MODE */

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

#ifdef CONFIG_SMP
	/* Park harts whose id is out of range for this kernel */
	li t0, CONFIG_NR_CPUS
	blt a0, t0, .Lgood_cores
	tail .Lsecondary_park
.Lgood_cores:
#endif

#ifndef CONFIG_XIP_KERNEL
	/* Pick one hart to run the main boot sequence */
	la a3, hart_lottery
	li a2, 1
	amoadd.w a3, a2, (a3)
	bnez a3, .Lsecondary_start

#else
	/* hart_lottery in flash contains a magic number */
	la a3, hart_lottery
	mv a2, a3
	XIP_FIXUP_OFFSET a2
	XIP_FIXUP_FLASH_OFFSET a3
	lw t1, (a3)
	amoswap.w t0, t1, (a2)
	/* first time here if hart_lottery in RAM is not set */
	beq t0, t1, .Lsecondary_start

	la sp, _end + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
	mv s0, a0
	call __copy_data

	/* Restore a0 copy */
	mv a0, s0
#endif

#ifndef CONFIG_XIP_KERNEL
	/* Clear BSS for flat non-ELF images */
	la a3, __bss_start
	la a4, __bss_stop
	ble a4, a3, clear_bss_done
clear_bss:
	REG_S zero, (a3)
	add a3, a3, RISCV_SZPTR
	blt a3, a4, clear_bss
clear_bss_done:
#endif
	/* Save hart ID and DTB physical address */
	mv s0, a0
	mv s1, a1

	la a2, boot_cpu_hartid
	XIP_FIXUP_OFFSET a2
	REG_S a0, (a2)

	/* Initialize page tables and relocate to virtual addresses */
	la sp, init_thread_union + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
#ifdef CONFIG_BUILTIN_DTB
	la a0, __dtb_start
	XIP_FIXUP_OFFSET a0
#else
	mv a0, s1
#endif /* CONFIG_BUILTIN_DTB */
	call setup_vm
#ifdef CONFIG_MMU
	la a0, early_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate
#endif /* CONFIG_MMU */

	call setup_trap_vector
	/* Restore C environment */
	la tp, init_task
	la sp, init_thread_union + THREAD_SIZE

#ifdef CONFIG_KASAN
	call kasan_early_init
#endif
	/* Start the kernel */
	call soc_early_init
	tail start_kernel

.Lsecondary_start:
#ifdef CONFIG_SMP
	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	/* a3 = hartid * sizeof(long): index into the spinwait arrays */
	slli a3, a0, LGREG
	la a1, __cpu_spinwait_stack_pointer
	XIP_FIXUP_OFFSET a1
	la a2, __cpu_spinwait_task_pointer
	XIP_FIXUP_OFFSET a2
	add a1, a3, a1
	add a2, a3, a2

	/*
	 * This hart didn't win the lottery, so we wait for the winning hart to
	 * get far enough along the boot process that it should continue.
	 */
.Lwait_for_cpu_up:
	/* FIXME: We should WFI to save some energy here. */
	REG_L sp, (a1)
	REG_L tp, (a2)
	beqz sp, .Lwait_for_cpu_up
	beqz tp, .Lwait_for_cpu_up
	fence

	tail .Lsecondary_start_common
#endif

END(_start_kernel)

#ifdef CONFIG_RISCV_M_MODE
/*
 * Zero every integer register except ra, a0, a1 (which carry boot state),
 * plus the FP register file when the hart implements F/D, so we start from
 * a known-clean machine state. Note that the caller must clear SR_FS.
 */
ENTRY(reset_regs)
	li	sp, 0
	li	gp, 0
	li	tp, 0
	li	t0, 0
	li	t1, 0
	li	t2, 0
	li	s0, 0
	li	s1, 0
	li	a2, 0
	li	a3, 0
	li	a4, 0
	li	a5, 0
	li	a6, 0
	li	a7, 0
	li	s2, 0
	li	s3, 0
	li	s4, 0
	li	s5, 0
	li	s6, 0
	li	s7, 0
	li	s8, 0
	li	s9, 0
	li	s10, 0
	li	s11, 0
	li	t3, 0
	li	t4, 0
	li	t5, 0
	li	t6, 0
	csrw	CSR_SCRATCH, 0

#ifdef CONFIG_FPU
	csrr	t0, CSR_MISA
	andi	t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
	beqz	t0, .Lreset_regs_done

	li	t1, SR_FS
	csrs	CSR_STATUS, t1
	fmv.s.x	f0, zero
	fmv.s.x	f1, zero
	fmv.s.x	f2, zero
	fmv.s.x	f3, zero
	fmv.s.x	f4, zero
	fmv.s.x	f5, zero
	fmv.s.x	f6, zero
	fmv.s.x	f7, zero
	fmv.s.x	f8, zero
	fmv.s.x	f9, zero
	fmv.s.x	f10, zero
	fmv.s.x	f11, zero
	fmv.s.x	f12, zero
	fmv.s.x	f13, zero
	fmv.s.x	f14, zero
	fmv.s.x	f15, zero
	fmv.s.x	f16, zero
	fmv.s.x	f17, zero
	fmv.s.x	f18, zero
	fmv.s.x	f19, zero
	fmv.s.x	f20, zero
	fmv.s.x	f21, zero
	fmv.s.x	f22, zero
	fmv.s.x	f23, zero
	fmv.s.x	f24, zero
	fmv.s.x	f25, zero
	fmv.s.x	f26, zero
	fmv.s.x	f27, zero
	fmv.s.x	f28, zero
	fmv.s.x	f29, zero
	fmv.s.x	f30, zero
	fmv.s.x	f31, zero
	csrw	fcsr, 0
	/* note that the caller must clear SR_FS */
#endif /* CONFIG_FPU */
.Lreset_regs_done:
	ret
END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */