/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/virt.h>

#define __PHYS_OFFSET	(KERNEL_START - TEXT_OFFSET)

#if (TEXT_OFFSET & 0xfff) != 0
#error TEXT_OFFSET must be at least 4KB aligned
#elif (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#elif TEXT_OFFSET > 0x1fffff
#error TEXT_OFFSET must be less than 2MB
#endif

#ifdef CONFIG_ARM64_64K_PAGES
#define BLOCK_SHIFT	PAGE_SHIFT
#define BLOCK_SIZE	PAGE_SIZE
#define TABLE_SHIFT	PMD_SHIFT
#else
#define BLOCK_SHIFT	SECTION_SHIFT
#define BLOCK_SIZE	SECTION_SIZE
#define TABLE_SHIFT	PUD_SHIFT
#endif

#define KERNEL_START	_text
#define KERNEL_END	_end

/*
 * Initial memory map attributes.
 */
#ifndef CONFIG_SMP
#define PTE_FLAGS	PTE_TYPE_PAGE | PTE_AF
#define PMD_FLAGS	PMD_TYPE_SECT | PMD_SECT_AF
#else
#define PTE_FLAGS	PTE_TYPE_PAGE | PTE_AF | PTE_SHARED
#define PMD_FLAGS	PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S
#endif

#ifdef CONFIG_ARM64_64K_PAGES
#define MM_MMUFLAGS	PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS
#else
#define MM_MMUFLAGS	PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address of the FDT blob.
 *
 * This code is mostly position independent, so it must be called at
 * __pa(PAGE_OFFSET + TEXT_OFFSET).
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD

	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
#ifdef CONFIG_EFI
efi_head:
	/*
	 * This add instruction has no meaningful effect except that
	 * its opcode forms the magic "MZ" signature required by UEFI.
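	 * ("add x13, x18, #0x16" assembles to 0x91005a4d; stored
	 * little-endian, its first two bytes are 0x4d 0x5a, i.e. "MZ".
	 * The register operands are otherwise arbitrary.)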
	 */
	add	x13, x18, #0x16
	b	stext
#else
	b	stext				// branch to kernel start, magic
	.long	0				// reserved
#endif
	.quad	_kernel_offset_le		// Image load offset from start of RAM, little-endian
	.quad	_kernel_size_le			// Effective size of kernel image, little-endian
	.quad	_kernel_flags_le		// Informative flags, little-endian
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.byte	0x41				// Magic number, "ARM\x64"
	.byte	0x52
	.byte	0x4d
	.byte	0x64
#ifdef CONFIG_EFI
	.long	pe_header - efi_head		// Offset to the PE header.
#else
	.word	0				// reserved
#endif

#ifdef CONFIG_EFI
	.globl	stext_offset
	.set	stext_offset, stext - efi_head
	.align	3
pe_header:
	.ascii	"PE"
	.short	0
coff_header:
	.short	0xaa64				// AArch64
	.short	2				// nr_sections
	.long	0				// TimeDateStamp
	.long	0				// PointerToSymbolTable
	.long	1				// NumberOfSymbols
	.short	section_table - optional_header	// SizeOfOptionalHeader
	.short	0x206				// Characteristics.
						// IMAGE_FILE_DEBUG_STRIPPED |
						// IMAGE_FILE_EXECUTABLE_IMAGE |
						// IMAGE_FILE_LINE_NUMS_STRIPPED
optional_header:
	.short	0x20b				// PE32+ format
	.byte	0x02				// MajorLinkerVersion
	.byte	0x14				// MinorLinkerVersion
	.long	_end - stext			// SizeOfCode
	.long	0				// SizeOfInitializedData
	.long	0				// SizeOfUninitializedData
	.long	efi_stub_entry - efi_head	// AddressOfEntryPoint
	.long	stext_offset			// BaseOfCode

extra_header_fields:
	.quad	0				// ImageBase
	.long	0x1000				// SectionAlignment
	.long	PECOFF_FILE_ALIGNMENT		// FileAlignment
	.short	0				// MajorOperatingSystemVersion
	.short	0				// MinorOperatingSystemVersion
	.short	0				// MajorImageVersion
	.short	0				// MinorImageVersion
	.short	0				// MajorSubsystemVersion
	.short	0				// MinorSubsystemVersion
	.long	0				// Win32VersionValue

	.long	_end - efi_head			// SizeOfImage

	// Everything before the kernel image is considered part of the header
	.long	stext_offset			// SizeOfHeaders
	.long	0				// CheckSum
	.short	0xa				// Subsystem (EFI application)
	.short	0				// DllCharacteristics
	.quad	0				// SizeOfStackReserve
	.quad	0				// SizeOfStackCommit
	.quad	0				// SizeOfHeapReserve
	.quad	0				// SizeOfHeapCommit
	.long	0				// LoaderFlags
	.long	0x6				// NumberOfRvaAndSizes

	.quad	0				// ExportTable
	.quad	0				// ImportTable
	.quad	0				// ResourceTable
	.quad	0				// ExceptionTable
	.quad	0				// CertificateTable
	.quad	0				// BaseRelocationTable

	// Section table
section_table:

	/*
	 * The EFI application loader requires a relocation section
	 * because EFI applications must be relocatable. This is a
	 * dummy section as far as we are concerned.
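	 * (Some loaders apparently treat the absence of a base
	 * relocation section as meaning that the image cannot be
	 * relocated, so an empty one is emitted here.)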
	 */
	.ascii	".reloc"
	.byte	0
	.byte	0			// end of 0 padding of section name
	.long	0
	.long	0
	.long	0			// SizeOfRawData
	.long	0			// PointerToRawData
	.long	0			// PointerToRelocations
	.long	0			// PointerToLineNumbers
	.short	0			// NumberOfRelocations
	.short	0			// NumberOfLineNumbers
	.long	0x42100040		// Characteristics (section flags)


	.ascii	".text"
	.byte	0
	.byte	0
	.byte	0			// end of 0 padding of section name
	.long	_end - stext		// VirtualSize
	.long	stext_offset		// VirtualAddress
	.long	_edata - stext		// SizeOfRawData
	.long	stext_offset		// PointerToRawData

	.long	0		// PointerToRelocations (0 for executables)
	.long	0		// PointerToLineNumbers (0 for executables)
	.short	0		// NumberOfRelocations  (0 for executables)
	.short	0		// NumberOfLineNumbers  (0 for executables)
	.long	0xe0500020	// Characteristics (section flags)

	/*
	 * EFI will load stext onwards at the 4k section alignment
	 * described in the PE/COFF header. To ensure that instruction
	 * sequences using an adrp and a :lo12: immediate will function
	 * correctly at this alignment, we must ensure that stext is
	 * placed at a 4k boundary in the Image to begin with.
	 */
	.align 12
#endif

ENTRY(stext)
	bl	preserve_boot_args
	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
	adrp	x24, __PHYS_OFFSET
	bl	set_cpu_boot_mode_flag
	bl	__create_page_tables		// x25=TTBR0, x26=TTBR1
	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	ldr	x27, =__mmap_switched		// address to jump to after
						// MMU has been enabled
	adr_l	lr, __enable_mmu		// return (PIC) address
	b	__cpu_setup			// initialise processor
ENDPROC(stext)

/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
preserve_boot_args:
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	dmb	sy				// needed before dc ivac with
						// MMU off

	add	x1, x0, #0x20			// 4 x 8 bytes
	b	__inval_cache_range		// tail call
ENDPROC(preserve_boot_args)

/*
 * Macro to create a table entry to the next page.
 *
 *	tbl:	page table address
 *	virt:	virtual address
 *	shift:	#imm page table shift
 *	ptrs:	#imm pointers per table page
 *
 * Preserves:	virt
 * Corrupts:	tmp1, tmp2
 * Returns:	tbl -> next level table page address
 */
	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
	lsr	\tmp1, \virt, #\shift
	and	\tmp1, \tmp1, #\ptrs - 1	// table index
	add	\tmp2, \tbl, #PAGE_SIZE
	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
	str	\tmp2, [\tbl, \tmp1, lsl #3]
	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
	.endm

/*
 * Macro to populate the PGD (and possibly PUD) for the corresponding
 * block entry in the next level (tbl) for the given virtual address.
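 * (E.g. with 4KB pages and 48-bit VAs, SWAPPER_PGTABLE_LEVELS == 3, so
 * both a PGD and a PUD entry are written; with 4KB pages and 39-bit VAs
 * the PUD level is folded and only the PGD entry is needed.)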
 *
 * Preserves:	tbl, next, virt
 * Corrupts:	tmp1, tmp2
 */
	.macro	create_pgd_entry, tbl, virt, tmp1, tmp2
	create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
#if SWAPPER_PGTABLE_LEVELS == 3
	create_table_entry \tbl, \virt, TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
#endif
	.endm

/*
 * Macro to populate block entries in the page table for the start..end
 * virtual range (inclusive).
 *
 * Preserves:	tbl, flags
 * Corrupts:	phys, start, end, pstate
 */
	.macro	create_block_map, tbl, flags, phys, start, end
	lsr	\phys, \phys, #BLOCK_SHIFT
	lsr	\start, \start, #BLOCK_SHIFT
	and	\start, \start, #PTRS_PER_PTE - 1	// table index
	orr	\phys, \flags, \phys, lsl #BLOCK_SHIFT	// table entry
	lsr	\end, \end, #BLOCK_SHIFT
	and	\end, \end, #PTRS_PER_PTE - 1		// table end index
9999:	str	\phys, [\tbl, \start, lsl #3]		// store the entry
	add	\start, \start, #1			// next entry
	add	\phys, \phys, #BLOCK_SIZE		// next block
	cmp	\start, \end
	b.ls	9999b
	.endm

/*
 * Set up the initial page tables. We only set up the bare minimum
 * required to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
 *     been enabled
 */
__create_page_tables:
	adrp	x25, idmap_pg_dir
	adrp	x26, swapper_pg_dir
	mov	x27, lr

	/*
	 * Invalidate the idmap and swapper page tables to avoid potential
	 * dirty cache lines being evicted.
	 */
	mov	x0, x25
	add	x1, x26, #SWAPPER_DIR_SIZE
	bl	__inval_cache_range

	/*
	 * Clear the idmap and swapper page tables.
	 */
	mov	x0, x25
	add	x6, x26, #SWAPPER_DIR_SIZE
1:	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	cmp	x0, x6
	b.lo	1b

	ldr	x7, =MM_MMUFLAGS

	/*
	 * Create the identity mapping.
	 */
	mov	x0, x25				// idmap_pg_dir
	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)

#ifndef CONFIG_ARM64_VA_BITS_48
#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
#define EXTRA_PTRS	(1 << (48 - EXTRA_SHIFT))

	/*
	 * If VA_BITS < 48, it may be too small to allow for an ID mapping to be
	 * created that covers system RAM if that is located sufficiently high
	 * in the physical address space. So for the ID map, use an extended
	 * virtual range in that case, by configuring an additional translation
	 * level.
	 * First, we have to verify our assumption that the current value of
	 * VA_BITS was chosen such that all translation levels are fully
	 * utilised, and that lowering T0SZ will always result in an additional
	 * translation level to be configured.
	 */
#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif

	/*
	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
	 * this number conveniently equals the number of leading zeroes in
	 * the physical address of __idmap_text_end.
	 */
	adrp	x5, __idmap_text_end
	clz	x5, x5
	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
	b.ge	1f			// .. then skip additional level
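
	/*
	 * The ID map needs more address bits than VA_BITS provides
	 * (e.g. with 4KB pages and VA_BITS == 39, the default T0SZ is
	 * 25, but RAM at or above 1 << 39 has fewer than 25 leading
	 * zeroes). Record the reduced T0SZ in idmap_t0sz so that the
	 * MMU setup code programs a matching TCR_EL1.T0SZ whenever the
	 * ID map is in use; the line is invalidated below because the
	 * store goes straight to memory with the MMU off.
	 */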
	adr_l	x6, idmap_t0sz
	str	x5, [x6]
	dmb	sy
	dc	ivac, x6		// Invalidate potentially stale cache line

	create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6
1:
#endif

	create_pgd_entry x0, x3, x5, x6
	mov	x5, x3				// __pa(__idmap_text_start)
	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)
	create_block_map x0, x7, x3, x5, x6

	/*
	 * Map the kernel image (starting with PHYS_OFFSET).
	 */
	mov	x0, x26				// swapper_pg_dir
	mov	x5, #PAGE_OFFSET
	create_pgd_entry x0, x5, x3, x6
	ldr	x6, =KERNEL_END			// __va(KERNEL_END)
	mov	x3, x24				// phys offset
	create_block_map x0, x7, x3, x5, x6

	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate the idmap and swapper page
	 * tables again to remove any speculatively loaded cache lines.
	 */
	mov	x0, x25
	add	x1, x26, #SWAPPER_DIR_SIZE
	dmb	sy
	bl	__inval_cache_range

	mov	lr, x27
	ret
ENDPROC(__create_page_tables)
	.ltorg

/*
 * The following fragment of code is executed with the MMU enabled.
 */
	.set	initial_sp, init_thread_union + THREAD_START_SP
__mmap_switched:
	adr_l	x6, __bss_start
	adr_l	x7, __bss_stop

1:	cmp	x6, x7
	b.hs	2f
	str	xzr, [x6], #8			// Clear BSS
	b	1b
2:
	adr_l	sp, initial_sp, x4
	str_l	x21, __fdt_pointer, x5		// Save FDT pointer
	str_l	x24, memstart_addr, x6		// Save PHYS_OFFSET
	mov	x29, #0
	b	start_kernel
ENDPROC(__mmap_switched)

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".text","ax"
/*
 * If we're fortunate enough to boot at EL2, ensure that the world is
 * sane before dropping to EL1.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w20 if
 * booted in EL1 or EL2 respectively.
 */
ENTRY(el2_setup)
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.ne	1f
	mrs	x0, sctlr_el2
CPU_BE(	orr	x0, x0, #(1 << 25) )	// Set the EE bit for EL2
CPU_LE(	bic	x0, x0, #(1 << 25) )	// Clear the EE bit for EL2
	msr	sctlr_el2, x0
	b	2f
1:	mrs	x0, sctlr_el1
CPU_BE(	orr	x0, x0, #(3 << 24) )	// Set the EE and E0E bits for EL1
CPU_LE(	bic	x0, x0, #(3 << 24) )	// Clear the EE and E0E bits for EL1
	msr	sctlr_el1, x0
	mov	w20, #BOOT_CPU_MODE_EL1	// This cpu booted in EL1
	isb
	ret

	/* Hyp configuration. */
2:	mov	x0, #(1 << 31)			// 64-bit EL1 (HCR_EL2.RW)
	msr	hcr_el2, x0

	/* Generic timers. */
	mrs	x0, cnthctl_el2
	orr	x0, x0, #3			// Enable EL1 physical timers
	msr	cnthctl_el2, x0
	msr	cntvoff_el2, xzr		// Clear virtual offset

#ifdef CONFIG_ARM_GIC_V3
	/* GICv3 system register access */
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #24, #4
	cmp	x0, #1
	b.ne	3f

	mrs_s	x0, ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	msr_s	ICH_HCR_EL2, xzr		// Reset ICH_HCR_EL2 to defaults

3:
#endif

	/*
	 * Populate ID registers.
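	 * Initialise VPIDR_EL2/VMPIDR_EL2 from the physical
	 * MIDR_EL1/MPIDR_EL1 so that EL1 reads of these ID registers
	 * return the real CPU identification values.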
	 */
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1

	/* sctlr_el1 */
	mov	x0, #0x0800			// Set/clear RES{1,0} bits
CPU_BE(	movk	x0, #0x33d0, lsl #16 )		// Set EE and E0E on BE systems
CPU_LE(	movk	x0, #0x30d0, lsl #16 )		// Clear EE and E0E on LE systems
	msr	sctlr_el1, x0

	/* Coprocessor traps. */
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2

#ifdef CONFIG_COMPAT
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
#endif

	/* Stage-2 translation */
	msr	vttbr_el2, xzr

	/* Hypervisor stub */
	adrp	x0, __hyp_stub_vectors
	add	x0, x0, #:lo12:__hyp_stub_vectors
	msr	vbar_el2, x0

	/* spsr */
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, x0
	msr	elr_el2, lr
	mov	w20, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	eret
ENDPROC(el2_setup)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w20. See arch/arm64/include/asm/virt.h for more info.
 */
ENTRY(set_cpu_boot_mode_flag)
	adr_l	x1, __boot_cpu_mode
	cmp	w20, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w20, [x1]			// Save CPU boot mode
	dmb	sy
	dc	ivac, x1			// Invalidate potentially stale cache line
	ret
ENDPROC(set_cpu_boot_mode_flag)

/*
 * We need to find out the CPU boot mode long after boot, so we need to
 * store it in a writable variable.
 *
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
	.pushsection	.data..cacheline_aligned
	.align	L1_CACHE_SHIFT
ENTRY(__boot_cpu_mode)
	.long	BOOT_CPU_MODE_EL2
	.long	BOOT_CPU_MODE_EL1
	.popsection

#ifdef CONFIG_SMP
	/*
	 * This provides a "holding pen" for platforms to hold all secondary
	 * cores until we're ready for them to initialise.
	 */
ENTRY(secondary_holding_pen)
	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
	bl	set_cpu_boot_mode_flag
	mrs	x0, mpidr_el1
	ldr	x1, =MPIDR_HWID_BITMASK
	and	x0, x0, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x0
	b.eq	secondary_startup
	wfe
	b	pen
ENDPROC(secondary_holding_pen)

	/*
	 * Secondary entry point that jumps straight into the kernel. Only to
	 * be used where CPUs are brought online dynamically by the kernel.
	 */
ENTRY(secondary_entry)
	bl	el2_setup			// Drop to EL1
	bl	set_cpu_boot_mode_flag
	b	secondary_startup
ENDPROC(secondary_entry)

ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 */
	adrp	x25, idmap_pg_dir
	adrp	x26, swapper_pg_dir
	bl	__cpu_setup			// initialise processor

	ldr	x21, =secondary_data
	ldr	x27, =__secondary_switched	// address to jump to after enabling the MMU
	b	__enable_mmu
ENDPROC(secondary_startup)

ENTRY(__secondary_switched)
	ldr	x0, [x21]			// get secondary_data.stack
	mov	sp, x0
	mov	x29, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)
#endif	/* CONFIG_SMP */

/*
 * Enable the MMU.
 *
 * x0  = SCTLR_EL1 value for turning on the MMU.
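 * x25 = TTBR0_EL1 value (idmap_pg_dir)
 * x26 = TTBR1_EL1 value (swapper_pg_dir)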
 * x27 = *virtual* address to jump to upon completion
 *
 * Other registers depend on the function called upon completion.
 */
	.section	".idmap.text", "ax"
__enable_mmu:
	ldr	x5, =vectors
	msr	vbar_el1, x5
	msr	ttbr0_el1, x25			// load TTBR0
	msr	ttbr1_el1, x26			// load TTBR1
	isb
	msr	sctlr_el1, x0
	isb
	br	x27
ENDPROC(__enable_mmu)