/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/virt.h>

/*
 * swapper_pg_dir is the virtual address of the initial page table. We place
 * the page tables 3 * PAGE_SIZE below KERNEL_RAM_VADDR. The idmap_pg_dir has
 * 2 pages and is placed below swapper_pg_dir.
 */
#define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)

#if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000
#error KERNEL_RAM_VADDR must start at 0xXXX80000
#endif

#define SWAPPER_DIR_SIZE	(3 * PAGE_SIZE)
#define IDMAP_DIR_SIZE		(2 * PAGE_SIZE)

	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - SWAPPER_DIR_SIZE

	.globl	idmap_pg_dir
	.equ	idmap_pg_dir, swapper_pg_dir - IDMAP_DIR_SIZE

	.macro	pgtbl, ttb0, ttb1, phys
	add	\ttb1, \phys, #TEXT_OFFSET - SWAPPER_DIR_SIZE
	sub	\ttb0, \ttb1, #IDMAP_DIR_SIZE
	.endm

#ifdef CONFIG_ARM64_64K_PAGES
#define BLOCK_SHIFT	PAGE_SHIFT
#define BLOCK_SIZE	PAGE_SIZE
#else
#define BLOCK_SHIFT	SECTION_SHIFT
#define BLOCK_SIZE	SECTION_SIZE
#endif

#define KERNEL_START	KERNEL_RAM_VADDR
#define KERNEL_END	_end

/*
 * Initial memory map attributes.
 */
#ifndef CONFIG_SMP
#define PTE_FLAGS	PTE_TYPE_PAGE | PTE_AF
#define PMD_FLAGS	PMD_TYPE_SECT | PMD_SECT_AF
#else
#define PTE_FLAGS	PTE_TYPE_PAGE | PTE_AF | PTE_SHARED
#define PMD_FLAGS	PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S
#endif

#ifdef CONFIG_ARM64_64K_PAGES
#define MM_MMUFLAGS	PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS
#else
#define MM_MMUFLAGS	PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * This code is mostly position independent so you call this at
 * __pa(PAGE_OFFSET + TEXT_OFFSET).
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD

	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
#ifdef CONFIG_EFI
efi_head:
	/*
	 * This add instruction has no meaningful effect except that
	 * its opcode forms the magic "MZ" signature required by UEFI.
	 */
	add	x13, x18, #0x16
	b	stext
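	/*
	 * Informative note (not relied on by the code): "add x13, x18, #0x16"
	 * assembles to 0x91005a4d; stored little-endian, its first two bytes
	 * are 0x4d 0x5a, i.e. the ASCII "MZ" DOS signature that UEFI firmware
	 * looks for at offset 0 of a PE/COFF image.
	 */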
#else
	b	stext				// branch to kernel start, magic
	.long	0				// reserved
#endif
	.quad	TEXT_OFFSET			// Image load offset from start of RAM
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.byte	0x41				// Magic number, "ARM\x64"
	.byte	0x52
	.byte	0x4d
	.byte	0x64
#ifdef CONFIG_EFI
	.long	pe_header - efi_head		// Offset to the PE header.
#else
	.word	0				// reserved
#endif

#ifdef CONFIG_EFI
	.align	3
pe_header:
	.ascii	"PE"
	.short	0
coff_header:
	.short	0xaa64				// AArch64
	.short	2				// nr_sections
	.long	0				// TimeDateStamp
	.long	0				// PointerToSymbolTable
	.long	1				// NumberOfSymbols
	.short	section_table - optional_header	// SizeOfOptionalHeader
	.short	0x206				// Characteristics.
						// IMAGE_FILE_DEBUG_STRIPPED |
						// IMAGE_FILE_EXECUTABLE_IMAGE |
						// IMAGE_FILE_LINE_NUMS_STRIPPED
optional_header:
	.short	0x20b				// PE32+ format
	.byte	0x02				// MajorLinkerVersion
	.byte	0x14				// MinorLinkerVersion
	.long	_edata - stext			// SizeOfCode
	.long	0				// SizeOfInitializedData
	.long	0				// SizeOfUninitializedData
	.long	efi_stub_entry - efi_head	// AddressOfEntryPoint
	.long	stext - efi_head		// BaseOfCode

extra_header_fields:
	.quad	0				// ImageBase
	.long	0x20				// SectionAlignment
	.long	0x8				// FileAlignment
	.short	0				// MajorOperatingSystemVersion
	.short	0				// MinorOperatingSystemVersion
	.short	0				// MajorImageVersion
	.short	0				// MinorImageVersion
	.short	0				// MajorSubsystemVersion
	.short	0				// MinorSubsystemVersion
	.long	0				// Win32VersionValue

	.long	_edata - efi_head		// SizeOfImage

	// Everything before the kernel image is considered part of the header
	.long	stext - efi_head		// SizeOfHeaders
	.long	0				// CheckSum
	.short	0xa				// Subsystem (EFI application)
	.short	0				// DllCharacteristics
	.quad	0				// SizeOfStackReserve
	.quad	0				// SizeOfStackCommit
	.quad	0				// SizeOfHeapReserve
	.quad	0				// SizeOfHeapCommit
	.long	0				// LoaderFlags
	.long	0x6				// NumberOfRvaAndSizes

	.quad	0				// ExportTable
	.quad	0				// ImportTable
	.quad	0				// ResourceTable
	.quad	0				// ExceptionTable
	.quad	0				// CertificateTable
	.quad	0				// BaseRelocationTable
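	/*
	 * Informative note: NumberOfRvaAndSizes is 6, so exactly six 8-byte
	 * data-directory entries (a 4-byte RVA plus a 4-byte size each)
	 * follow above; all are zero because this stub exports, imports and
	 * relocates nothing.
	 */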

	// Section table
section_table:

	/*
	 * The EFI application loader requires a relocation section
	 * because EFI applications must be relocatable. This is a
	 * dummy section as far as we are concerned.
	 */
	.ascii	".reloc"
	.byte	0
	.byte	0			// end of 0 padding of section name
	.long	0
	.long	0
	.long	0			// SizeOfRawData
	.long	0			// PointerToRawData
	.long	0			// PointerToRelocations
	.long	0			// PointerToLineNumbers
	.short	0			// NumberOfRelocations
	.short	0			// NumberOfLineNumbers
	.long	0x42100040		// Characteristics (section flags)


	.ascii	".text"
	.byte	0
	.byte	0
	.byte	0			// end of 0 padding of section name
	.long	_edata - stext		// VirtualSize
	.long	stext - efi_head	// VirtualAddress
	.long	_edata - stext		// SizeOfRawData
	.long	stext - efi_head	// PointerToRawData

	.long	0		// PointerToRelocations (0 for executables)
	.long	0		// PointerToLineNumbers (0 for executables)
	.short	0		// NumberOfRelocations  (0 for executables)
	.short	0		// NumberOfLineNumbers  (0 for executables)
	.long	0xe0500020	// Characteristics (section flags)
	.align	5
#endif

ENTRY(stext)
	mov	x21, x0				// x21=FDT
	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
	bl	__calc_phys_offset		// x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
	bl	set_cpu_boot_mode_flag
	mrs	x22, midr_el1			// x22=cpuid
	mov	x0, x22
	bl	lookup_processor_type
	mov	x23, x0				// x23=current cpu_table
	cbz	x23, __error_p			// invalid processor (x23=0)?
	bl	__vet_fdt
	bl	__create_page_tables		// x25=TTBR0, x26=TTBR1
	/*
	 * The following calls CPU specific code in a position independent
	 * manner. See arch/arm64/mm/proc.S for details. x23 = base of
	 * cpu_info structure selected by lookup_processor_type above.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	ldr	x27, __switch_data		// address to jump to after
						// MMU has been enabled
	adr	lr, __enable_mmu		// return (PIC) address
	ldr	x12, [x23, #CPU_INFO_SETUP]
	add	x12, x12, x28			// __virt_to_phys
	br	x12				// initialise processor
ENDPROC(stext)

/*
 * If we're fortunate enough to boot at EL2, ensure that the world is
 * sane before dropping to EL1.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w20 if
 * booted in EL1 or EL2 respectively.
 */
ENTRY(el2_setup)
	mrs	x0, CurrentEL
	cmp	x0, #PSR_MODE_EL2t
	ccmp	x0, #PSR_MODE_EL2h, #0x4, ne
	b.ne	1f
	mrs	x0, sctlr_el2
CPU_BE(	orr	x0, x0, #(1 << 25)	)	// Set the EE bit for EL2
CPU_LE(	bic	x0, x0, #(1 << 25)	)	// Clear the EE bit for EL2
	msr	sctlr_el2, x0
	b	2f
1:	mrs	x0, sctlr_el1
CPU_BE(	orr	x0, x0, #(3 << 24)	)	// Set the EE and E0E bits for EL1
CPU_LE(	bic	x0, x0, #(3 << 24)	)	// Clear the EE and E0E bits for EL1
	msr	sctlr_el1, x0
	mov	w20, #BOOT_CPU_MODE_EL1		// This cpu booted in EL1
	isb
	ret

	/* Hyp configuration. */
2:	mov	x0, #(1 << 31)			// 64-bit EL1
	msr	hcr_el2, x0

	/* Generic timers. */
	mrs	x0, cnthctl_el2
	orr	x0, x0, #3			// Enable EL1 physical timers
	msr	cnthctl_el2, x0
	msr	cntvoff_el2, xzr		// Clear virtual offset

	/* Populate ID registers. */
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1

	/* sctlr_el1 */
	mov	x0, #0x0800			// Set/clear RES{1,0} bits
CPU_BE(	movk	x0, #0x33d0, lsl #16	)	// Set EE and E0E on BE systems
CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// Clear EE and E0E on LE systems
	msr	sctlr_el1, x0

	/* Coprocessor traps. */
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2
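	/*
	 * Informative note: 0x33ff is simply the architectural RES1 bits of
	 * CPTR_EL2 (bits [13:12] and [9:0]); TFP (bit 10) stays clear, so
	 * FP/SIMD accesses from lower exception levels are not trapped.
	 */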

#ifdef CONFIG_COMPAT
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
#endif

	/* Stage-2 translation */
	msr	vttbr_el2, xzr

	/* Hypervisor stub */
	adr	x0, __hyp_stub_vectors
	msr	vbar_el2, x0

	/* spsr */
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, x0
	msr	elr_el2, lr
	mov	w20, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	eret
ENDPROC(el2_setup)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w20. See arch/arm64/include/asm/virt.h for more info.
 */
ENTRY(set_cpu_boot_mode_flag)
	ldr	x1, =__boot_cpu_mode		// Compute __boot_cpu_mode
	add	x1, x1, x28
	cmp	w20, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w20, [x1]			// Save CPU boot mode
	dmb	sy
	dc	ivac, x1			// Invalidate potentially stale cache line
	ret
ENDPROC(set_cpu_boot_mode_flag)

/*
 * We need to find out the CPU boot mode long after boot, so we need to
 * store it in a writable variable.
 *
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
	.pushsection	.data..cacheline_aligned
ENTRY(__boot_cpu_mode)
	.align	L1_CACHE_SHIFT
	.long	BOOT_CPU_MODE_EL2
	.long	0
	.popsection

#ifdef CONFIG_SMP
	.align	3
1:	.quad	.
	.quad	secondary_holding_pen_release

	/*
	 * This provides a "holding pen" for platforms to hold all secondary
	 * cores until we're ready for them to initialise.
	 */
ENTRY(secondary_holding_pen)
	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
	bl	__calc_phys_offset		// x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
	bl	set_cpu_boot_mode_flag
	mrs	x0, mpidr_el1
	ldr	x1, =MPIDR_HWID_BITMASK
	and	x0, x0, x1
	adr	x1, 1b
	ldp	x2, x3, [x1]
	sub	x1, x1, x2
	add	x3, x3, x1
pen:	ldr	x4, [x3]
	cmp	x4, x0
	b.eq	secondary_startup
	wfe
	b	pen
ENDPROC(secondary_holding_pen)

	/*
	 * Secondary entry point that jumps straight into the kernel. Only to
	 * be used where CPUs are brought online dynamically by the kernel.
	 */
ENTRY(secondary_entry)
	bl	el2_setup			// Drop to EL1
	bl	__calc_phys_offset		// x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
	bl	set_cpu_boot_mode_flag
	b	secondary_startup
ENDPROC(secondary_entry)

ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 */
	mrs	x22, midr_el1			// x22=cpuid
	mov	x0, x22
	bl	lookup_processor_type
	mov	x23, x0				// x23=current cpu_table
	cbz	x23, __error_p			// invalid processor (x23=0)?

	pgtbl	x25, x26, x24			// x25=TTBR0, x26=TTBR1
	ldr	x12, [x23, #CPU_INFO_SETUP]
	add	x12, x12, x28			// __virt_to_phys
	blr	x12				// initialise processor

	ldr	x21, =secondary_data
	ldr	x27, =__secondary_switched	// address to jump to after enabling the MMU
	b	__enable_mmu
ENDPROC(secondary_startup)

ENTRY(__secondary_switched)
	ldr	x0, [x21]			// get secondary_data.stack
	mov	sp, x0
	mov	x29, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)
#endif	/* CONFIG_SMP */
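/*
 * Informative sketch (not part of this file): the release side, e.g. the
 * spin-table SMP code, lets a core out of the pen along the lines of:
 *
 *	secondary_holding_pen_release = cpu_hwid;	// MPIDR hwid
 *	<clean the cache line to PoC, dsb>
 *	sev();
 *
 * which satisfies the ldr/cmp/wfe loop in secondary_holding_pen above.
 */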

/*
 * Set up common bits before finally enabling the MMU. Essentially this is
 * just loading the page table pointer and vector base registers.
 *
 * On entry to this code, x0 must contain the SCTLR_EL1 value for turning on
 * the MMU.
 */
__enable_mmu:
	ldr	x5, =vectors
	msr	vbar_el1, x5
	msr	ttbr0_el1, x25			// load TTBR0
	msr	ttbr1_el1, x26			// load TTBR1
	isb
	b	__turn_mmu_on
ENDPROC(__enable_mmu)

/*
 * Enable the MMU. This completely changes the structure of the visible memory
 * space. You will not be able to trace execution through this.
 *
 *  x0  = system control register
 *  x27 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
	.align	6
__turn_mmu_on:
	msr	sctlr_el1, x0
	isb
	br	x27
ENDPROC(__turn_mmu_on)

/*
 * Calculate the start of physical memory.
 */
__calc_phys_offset:
	adr	x0, 1f
	ldp	x1, x2, [x0]
	sub	x28, x0, x1			// x28 = PHYS_OFFSET - PAGE_OFFSET
	add	x24, x2, x28			// x24 = PHYS_OFFSET
	ret
ENDPROC(__calc_phys_offset)

	.align	3
1:	.quad	.
	.quad	PAGE_OFFSET

/*
 * Macro to populate the PGD for the corresponding block entry in the next
 * level (tbl) for the given virtual address.
 *
 * Preserves:	pgd, tbl, virt
 * Corrupts:	tmp1, tmp2
 */
	.macro	create_pgd_entry, pgd, tbl, virt, tmp1, tmp2
	lsr	\tmp1, \virt, #PGDIR_SHIFT
	and	\tmp1, \tmp1, #PTRS_PER_PGD - 1	// PGD index
	orr	\tmp2, \tbl, #3			// PGD entry table type
	str	\tmp2, [\pgd, \tmp1, lsl #3]
	.endm

/*
 * Macro to populate block entries in the page table for the start..end
 * virtual range (inclusive).
 *
 * Preserves:	tbl, flags
 * Corrupts:	phys, start, end, pstate
 */
	.macro	create_block_map, tbl, flags, phys, start, end
	lsr	\phys, \phys, #BLOCK_SHIFT
	lsr	\start, \start, #BLOCK_SHIFT
	and	\start, \start, #PTRS_PER_PTE - 1	// table index
	orr	\phys, \flags, \phys, lsl #BLOCK_SHIFT	// table entry
	lsr	\end, \end, #BLOCK_SHIFT
	and	\end, \end, #PTRS_PER_PTE - 1		// table end index
9999:	str	\phys, [\tbl, \start, lsl #3]		// store the entry
	add	\start, \start, #1			// next entry
	add	\phys, \phys, #BLOCK_SIZE		// next block
	cmp	\start, \end
	b.ls	9999b
	.endm
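/*
 * Worked example (informative; assumes the 4K-page, 39-bit VA configuration,
 * i.e. PGDIR_SHIFT = 30, PTRS_PER_PGD = PTRS_PER_PTE = 512, BLOCK_SHIFT = 21):
 * for virt = PAGE_OFFSET = 0xffffffc000000000, create_pgd_entry computes
 * (virt >> 30) & 511 = 256 and stores a table descriptor (tbl | 3) at
 * pgd + 256 * 8; create_block_map then fills 2MB block entries starting at
 * slot (virt >> 21) & 511 = 0 of that next-level table.
 */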

/*
 * Set up the initial page tables. We only set up the bare minimum which is
 * required to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
 *     been enabled, including the FDT blob (TTBR1)
 *   - pgd entry for fixed mappings (TTBR1)
 */
__create_page_tables:
	pgtbl	x25, x26, x24			// idmap_pg_dir and swapper_pg_dir addresses
	mov	x27, lr

	/*
	 * Invalidate the idmap and swapper page tables to avoid potential
	 * dirty cache lines being evicted.
	 */
	mov	x0, x25
	add	x1, x26, #SWAPPER_DIR_SIZE
	bl	__inval_cache_range

	/*
	 * Clear the idmap and swapper page tables.
	 */
	mov	x0, x25
	add	x6, x26, #SWAPPER_DIR_SIZE
1:	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	cmp	x0, x6
	b.lo	1b

	ldr	x7, =MM_MMUFLAGS

	/*
	 * Create the identity mapping.
	 */
	add	x0, x25, #PAGE_SIZE		// section table address
	ldr	x3, =KERNEL_START
	add	x3, x3, x28			// __pa(KERNEL_START)
	create_pgd_entry x25, x0, x3, x5, x6
	ldr	x6, =KERNEL_END
	mov	x5, x3				// __pa(KERNEL_START)
	add	x6, x6, x28			// __pa(KERNEL_END)
	create_block_map x0, x7, x3, x5, x6

	/*
	 * Map the kernel image (starting with PHYS_OFFSET).
	 */
	add	x0, x26, #PAGE_SIZE		// section table address
	mov	x5, #PAGE_OFFSET
	create_pgd_entry x26, x0, x5, x3, x6
	ldr	x6, =KERNEL_END
	mov	x3, x24				// phys offset
	create_block_map x0, x7, x3, x5, x6

	/*
	 * Map the FDT blob (maximum 2MB; must be within 512MB of
	 * PHYS_OFFSET).
	 */
	mov	x3, x21				// FDT phys address
	and	x3, x3, #~((1 << 21) - 1)	// 2MB aligned
	mov	x6, #PAGE_OFFSET
	sub	x5, x3, x24			// subtract PHYS_OFFSET
	tst	x5, #~((1 << 29) - 1)		// within 512MB?
	csel	x21, xzr, x21, ne		// zero the FDT pointer
	b.ne	1f
	add	x5, x5, x6			// __va(FDT blob)
	add	x6, x5, #1 << 21		// 2MB for the FDT blob
	sub	x6, x6, #1			// inclusive range
	create_block_map x0, x7, x3, x5, x6
1:
	/*
	 * Create the pgd entry for the fixed mappings.
	 */
	ldr	x5, =FIXADDR_TOP		// Fixed mapping virtual address
	add	x0, x26, #2 * PAGE_SIZE		// section table address
	create_pgd_entry x26, x0, x5, x6, x7

	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate the idmap and swapper page
	 * tables again to remove any speculatively loaded cache lines.
	 */
	mov	x0, x25
	add	x1, x26, #SWAPPER_DIR_SIZE
	bl	__inval_cache_range

	mov	lr, x27
	ret
ENDPROC(__create_page_tables)
	.ltorg

	.align	3
	.type	__switch_data, %object
__switch_data:
	.quad	__mmap_switched
	.quad	__bss_start			// x6
	.quad	_end				// x7
	.quad	processor_id			// x4
	.quad	__fdt_pointer			// x5
	.quad	memstart_addr			// x6
	.quad	init_thread_union + THREAD_START_SP // sp

/*
 * The following fragment of code is executed with the MMU enabled, and
 * uses absolute addresses; this is not position independent.
 */
__mmap_switched:
	adr	x3, __switch_data + 8

	ldp	x6, x7, [x3], #16
1:	cmp	x6, x7
	b.hs	2f
	str	xzr, [x6], #8			// Clear BSS
	b	1b
2:
	ldp	x4, x5, [x3], #16
	ldr	x6, [x3], #8
	ldr	x16, [x3]
	mov	sp, x16
	str	x22, [x4]			// Save processor ID
	str	x21, [x5]			// Save FDT pointer
	str	x24, [x6]			// Save PHYS_OFFSET
	mov	x29, #0
	b	start_kernel
ENDPROC(__mmap_switched)
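/*
 * Informative C-level sketch of what __mmap_switched does, using the names
 * from __switch_data above:
 *
 *	memset(__bss_start, 0, _end - __bss_start);	// clear BSS
 *	processor_id  = x22;	// MIDR_EL1 read in stext
 *	__fdt_pointer = x21;	// FDT pointer from the boot loader
 *	memstart_addr = x24;	// PHYS_OFFSET
 *	sp = init_thread_union + THREAD_START_SP;
 *	start_kernel();
 */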

/*
 * Exception handling. Something went wrong and we can't proceed. We ought to
 * tell the user, but since we don't have any guarantee that we're even
 * running on the right architecture, we do virtually nothing.
 */
__error_p:
ENDPROC(__error_p)

__error:
1:	nop
	b	1b
ENDPROC(__error)

/*
 * This function gets the processor ID in w0 and searches the cpu_table[]
 * for a match. It returns a pointer to the struct cpu_info it found. The
 * cpu_table[] must end with an empty (all zeros) structure.
 *
 * This routine can be called via C code and it needs to work with the
 * MMU both disabled and enabled (the offset is calculated automatically).
 */
ENTRY(lookup_processor_type)
	adr	x1, __lookup_processor_type_data
	ldp	x2, x3, [x1]
	sub	x1, x1, x2			// get offset between VA and PA
	add	x3, x3, x1			// convert VA to PA
1:
	ldp	w5, w6, [x3]			// load cpu_id_val and cpu_id_mask
	cbz	w5, 2f				// end of list?
	and	w6, w6, w0
	cmp	w5, w6
	b.eq	3f
	add	x3, x3, #CPU_INFO_SZ
	b	1b
2:
	mov	x3, #0				// unknown processor
3:
	mov	x0, x3
	ret
ENDPROC(lookup_processor_type)

	.align	3
	.type	__lookup_processor_type_data, %object
__lookup_processor_type_data:
	.quad	.
	.quad	cpu_table
	.size	__lookup_processor_type_data, . - __lookup_processor_type_data

/*
 * Determine validity of the x21 FDT pointer.
 * The dtb must be 8-byte aligned and live in the first 512M of memory.
 */
__vet_fdt:
	tst	x21, #0x7
	b.ne	1f
	cmp	x21, x24
	b.lt	1f
	mov	x0, #(1 << 29)
	add	x0, x0, x24
	cmp	x21, x0
	b.ge	1f
	ret
1:
	mov	x21, #0
	ret
ENDPROC(__vet_fdt)
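/*
 * Worked example for __vet_fdt (informative; the addresses are hypothetical):
 * with PHYS_OFFSET = 0x80000000, a DTB at 0x8fe00000 passes all three checks:
 * bits [2:0] are zero, it is not below PHYS_OFFSET, and it is below
 * PHYS_OFFSET + (1 << 29) = 0xa0000000. A DTB at 0xa0000004 would fail both
 * the alignment and the 512M checks, so x21 would be zeroed and the blob
 * left unmapped.
 */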