/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <asm/asm-offsets.h>
#include <asm/asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/image.h>
#include "efi-header.S"

#ifdef CONFIG_XIP_KERNEL
/* Convert a link-time (flash) address in \reg to its RAM runtime address. */
.macro XIP_FIXUP_OFFSET reg
	REG_L t0, _xip_fixup
	add \reg, \reg, t0
.endm
/* Convert an address in \reg to its in-flash location (clobbers t0, t1). */
.macro XIP_FIXUP_FLASH_OFFSET reg
	la t1, __data_loc
	li t0, XIP_OFFSET_MASK
	and t1, t1, t0
	li t1, XIP_OFFSET
	sub t0, t0, t1
	sub \reg, \reg, t0
.endm
/* Constant delta between the RAM copy and the XIP flash image. */
_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
#else
/* Non-XIP kernels: the fixup macros are no-ops. */
.macro XIP_FIXUP_OFFSET reg
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
.endm
#endif /* CONFIG_XIP_KERNEL */

__HEAD
ENTRY(_start)
	/*
	 * Image header expected by Linux boot-loaders. The image header data
	 * structure is described in asm/image.h.
	 * Do not modify it without modifying the structure and all bootloaders
	 * that expect this header format!!
	 */
#ifdef CONFIG_EFI
	/*
	 * This instruction decodes to "MZ" ASCII required by UEFI.
	 */
	c.li s4,-13
	j _start_kernel
#else
	/* jump to start kernel */
	j _start_kernel
	/* reserved */
	.word 0
#endif
	.balign 8
#ifdef CONFIG_RISCV_M_MODE
	/* Image load offset (0MB) from start of RAM for M-mode */
	.dword 0
#else
#if __riscv_xlen == 64
	/* Image load offset(2MB) from start of RAM */
	.dword 0x200000
#else
	/* Image load offset(4MB) from start of RAM */
	.dword 0x400000
#endif
#endif
	/* Effective size of kernel image */
	.dword _end - _start
	.dword __HEAD_FLAGS
	.word RISCV_HEADER_VERSION
	.word 0
	.dword 0
	.ascii RISCV_IMAGE_MAGIC
	.balign 4
	.ascii RISCV_IMAGE_MAGIC2
#ifdef CONFIG_EFI
	/* Offset to the PE/COFF header required for EFI boot. */
	.word pe_head_start - _start
pe_head_start:

	__EFI_PE_HEADER
#else
	.word 0
#endif

.align 2
#ifdef CONFIG_MMU
/*
 * relocate: enable paging and jump to the kernel's virtual address space.
 * In:  a0 = physical address of the page directory to switch to.
 * Returns (via the relocated ra) with satp pointing at that directory.
 * Clobbers a0-a2, t0 (via XIP_FIXUP_OFFSET), stvec.
 */
relocate:
	/* Relocate return address */
	la a1, kernel_map
	XIP_FIXUP_OFFSET a1
	REG_L a1, KERNEL_MAP_VIRT_ADDR(a1)
	la a2, _start
	sub a1, a1, a2
	add ra, ra, a1

	/* Point stvec to virtual address of instruction after satp write */
	la a2, 1f
	add a2, a2, a1
	csrw CSR_TVEC, a2

	/* Compute satp for kernel page tables, but don't load it yet */
	srl a2, a0, PAGE_SHIFT
	li a1, SATP_MODE
	or a2, a2, a1

	/*
	 * Load trampoline page directory, which will cause us to trap to
	 * stvec if VA != PA, or simply fall through if VA == PA.  We need a
	 * full fence here because setup_vm() just wrote these PTEs and we need
	 * to ensure the new translations are in use.
	 */
	la a0, trampoline_pg_dir
	XIP_FIXUP_OFFSET a0
	srl a0, a0, PAGE_SHIFT
	or a0, a0, a1
	sfence.vma
	csrw CSR_SATP, a0
.align 2
1:
	/* Set trap vector to spin forever to help debug */
	la a0, .Lsecondary_park
	csrw CSR_TVEC, a0

	/* Reload the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Switch to kernel page tables.  A full fence is necessary in order to
	 * avoid using the trampoline translations, which are only correct for
	 * the first superpage.  Fetching the fence is guaranteed to work
	 * because that first superpage is translated the same way.
	 */
	csrw CSR_SATP, a2
	sfence.vma

	ret
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
	.global secondary_start_sbi
/*
 * Entry point for secondary harts started via SBI.
 * In: a0 = hartid (used to index the per-hart stack/task pointer arrays).
 */
secondary_start_sbi:
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

	/* Load the global pointer */
	.option push
	.option norelax
	la gp, __global_pointer$
	.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	/* a3 = hartid * sizeof(long): byte offset into the per-hart arrays */
	slli a3, a0, LGREG
	la a4, __cpu_up_stack_pointer
	XIP_FIXUP_OFFSET a4
	la a5, __cpu_up_task_pointer
	XIP_FIXUP_OFFSET a5
	add a4, a3, a4
	add a5, a3, a5
	REG_L sp, (a4)
	REG_L tp, (a5)

	.global secondary_start_common
secondary_start_common:

#ifdef CONFIG_MMU
	/* Enable virtual memory and relocate to virtual address */
	la a0, swapper_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate
#endif
	call setup_trap_vector
	tail smp_callin
#endif /* CONFIG_SMP */

.align 2
/* Install the real exception handler and mark "currently in kernel". */
setup_trap_vector:
	/* Set trap vector to exception handler */
	la a0, handle_exception
	csrw CSR_TVEC, a0

	/*
	 * Set sup0 scratch register to 0, indicating to exception vector that
	 * we are presently executing in kernel.
	 */
	csrw CSR_SCRATCH, zero
	ret

.align 2
.Lsecondary_park:
	/* We lack SMP support or have too many harts, so park this hart */
	wfi
	j .Lsecondary_park

END(_start)

/*
 * Main boot entry.  a0 = hartid, a1 = DTB physical address (saved below as
 * "hart ID and DTB physical address"); both must be preserved until then.
 */
ENTRY(_start_kernel)
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

#ifdef CONFIG_RISCV_M_MODE
	/* flush the instruction cache */
	fence.i

	/* Reset all registers except ra, a0, a1 */
	call reset_regs

	/*
	 * Setup a PMP to permit access to all of memory.  Some machines may
	 * not implement PMPs, so we set up a quick trap handler to just skip
	 * touching the PMPs on any trap.
	 */
	la a0, pmp_done
	csrw CSR_TVEC, a0

	li a0, -1
	csrw CSR_PMPADDR0, a0
	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
	csrw CSR_PMPCFG0, a0
.align 2
pmp_done:

	/*
	 * The hartid in a0 is expected later on, and we have no firmware
	 * to hand it to us.
	 */
	csrr a0, CSR_MHARTID
#endif /* CONFIG_RISCV_M_MODE */

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

#ifdef CONFIG_SMP
	/* Park any hart whose id is outside the configured CPU range. */
	li t0, CONFIG_NR_CPUS
	blt a0, t0, .Lgood_cores
	tail .Lsecondary_park
.Lgood_cores:
#endif

#ifndef CONFIG_XIP_KERNEL
	/* Pick one hart to run the main boot sequence */
	la a3, hart_lottery
	li a2, 1
	amoadd.w a3, a2, (a3)
	bnez a3, .Lsecondary_start

#else
	/* hart_lottery in flash contains a magic number */
	la a3, hart_lottery
	mv a2, a3
	XIP_FIXUP_OFFSET a2
	XIP_FIXUP_FLASH_OFFSET a3
	lw t1, (a3)
	amoswap.w t0, t1, (a2)
	/* first time here if hart_lottery in RAM is not set */
	beq t0, t1, .Lsecondary_start

	/* Temporary stack past _end so __copy_data (a C call) can run. */
	la sp, _end + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
	mv s0, a0
	call __copy_data

	/* Restore a0 (hartid) from the copy saved in s0 before the call */
	mv a0, s0
#endif

#ifndef CONFIG_XIP_KERNEL
	/* Clear BSS for flat non-ELF images */
	la a3, __bss_start
	la a4, __bss_stop
	ble a4, a3, clear_bss_done
clear_bss:
	REG_S zero, (a3)
	add a3, a3, RISCV_SZPTR
	blt a3, a4, clear_bss
clear_bss_done:
#endif
	/* Save hart ID and DTB physical address */
	mv s0, a0
	mv s1, a1

	la a2, boot_cpu_hartid
	XIP_FIXUP_OFFSET a2
	REG_S a0, (a2)

	/* Initialize page tables and relocate to virtual addresses */
	la sp, init_thread_union + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
#ifdef CONFIG_BUILTIN_DTB
	la a0, __dtb_start
	XIP_FIXUP_OFFSET a0
#else
	mv a0, s1
#endif /* CONFIG_BUILTIN_DTB */
	call setup_vm
#ifdef CONFIG_MMU
	la a0, early_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate
#endif /* CONFIG_MMU */

	call setup_trap_vector
	/* Restore C environment */
	la tp, init_task
	la sp, init_thread_union + THREAD_SIZE

#ifdef CONFIG_KASAN
	call kasan_early_init
#endif
	/* Start the kernel */
	call soc_early_init
	tail start_kernel

.Lsecondary_start:
#ifdef CONFIG_SMP
	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	/* a3 = hartid * sizeof(long): byte offset into the per-hart arrays */
	slli a3, a0, LGREG
	la a1, __cpu_up_stack_pointer
	XIP_FIXUP_OFFSET a1
	la a2, __cpu_up_task_pointer
	XIP_FIXUP_OFFSET a2
	add a1, a3, a1
	add a2, a3, a2

	/*
	 * This hart didn't win the lottery, so we wait for the winning hart to
	 * get far enough along the boot process that it should continue.
	 */
.Lwait_for_cpu_up:
	/* FIXME: We should WFI to save some energy here. */
	REG_L sp, (a1)
	REG_L tp, (a2)
	beqz sp, .Lwait_for_cpu_up
	beqz tp, .Lwait_for_cpu_up
	/* Order the sp/tp loads before any subsequent memory accesses. */
	fence

	tail secondary_start_common
#endif

END(_start_kernel)

#ifdef CONFIG_RISCV_M_MODE
/*
 * Zero all integer registers except ra, a0, a1 (and sp et al. listed below),
 * plus the FP register file when the hart implements F/D, so no firmware
 * state leaks into the kernel.  Note: the caller must clear SR_FS afterwards.
 */
ENTRY(reset_regs)
	li sp, 0
	li gp, 0
	li tp, 0
	li t0, 0
	li t1, 0
	li t2, 0
	li s0, 0
	li s1, 0
	li a2, 0
	li a3, 0
	li a4, 0
	li a5, 0
	li a6, 0
	li a7, 0
	li s2, 0
	li s3, 0
	li s4, 0
	li s5, 0
	li s6, 0
	li s7, 0
	li s8, 0
	li s9, 0
	li s10, 0
	li s11, 0
	li t3, 0
	li t4, 0
	li t5, 0
	li t6, 0
	csrw CSR_SCRATCH, 0

#ifdef CONFIG_FPU
	/* Only touch the FP state if misa advertises the F or D extension. */
	csrr t0, CSR_MISA
	andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
	beqz t0, .Lreset_regs_done

	li t1, SR_FS
	csrs CSR_STATUS, t1
	fmv.s.x f0, zero
	fmv.s.x f1, zero
	fmv.s.x f2, zero
	fmv.s.x f3, zero
	fmv.s.x f4, zero
	fmv.s.x f5, zero
	fmv.s.x f6, zero
	fmv.s.x f7, zero
	fmv.s.x f8, zero
	fmv.s.x f9, zero
	fmv.s.x f10, zero
	fmv.s.x f11, zero
	fmv.s.x f12, zero
	fmv.s.x f13, zero
	fmv.s.x f14, zero
	fmv.s.x f15, zero
	fmv.s.x f16, zero
	fmv.s.x f17, zero
	fmv.s.x f18, zero
	fmv.s.x f19, zero
	fmv.s.x f20, zero
	fmv.s.x f21, zero
	fmv.s.x f22, zero
	fmv.s.x f23, zero
	fmv.s.x f24, zero
	fmv.s.x f25, zero
	fmv.s.x f26, zero
	fmv.s.x f27, zero
	fmv.s.x f28, zero
	fmv.s.x f29, zero
	fmv.s.x f30, zero
	fmv.s.x f31, zero
	csrw fcsr, 0
	/* note that the caller must clear SR_FS */
#endif /* CONFIG_FPU */
.Lreset_regs_done:
	ret
END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */

__PAGE_ALIGNED_BSS
	/* Empty zero page */
	.balign PAGE_SIZE