/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/boot/compressed/head.S
 *
 *  Copyright (C) 1996-2002 Russell King
 *  Copyright (C) 2004 Hyok S. Choi (MPU support)
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/v7m.h>

#include "efi-header.S"

#ifdef __ARMEB__
#define OF_DT_MAGIC 0xd00dfeed
#else
#define OF_DT_MAGIC 0xedfe0dd0
#endif

 AR_CLASS(	.arch	armv7-a	)
 M_CLASS(	.arch	armv7-m	)

/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG

#if defined(CONFIG_DEBUG_ICEDCC)

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
		.macro	loadsp, rb, tmp1, tmp2
		.endm
		.macro	writeb, ch, rb, tmp
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#elif defined(CONFIG_CPU_XSCALE)
		.macro	loadsp, rb, tmp1, tmp2
		.endm
		.macro	writeb, ch, rb, tmp
		mcr	p14, 0, \ch, c8, c0, 0
		.endm
#else
		.macro	loadsp, rb, tmp1, tmp2
		.endm
		.macro	writeb, ch, rb, tmp
		mcr	p14, 0, \ch, c1, c0, 0
		.endm
#endif

#else

#include CONFIG_DEBUG_LL_INCLUDE

		.macro	writeb,	ch, rb, tmp
#ifdef CONFIG_DEBUG_UART_FLOW_CONTROL
		waituartcts \tmp, \rb
#endif
		waituarttxrdy \tmp, \rb
		senduart \ch, \rb
		busyuart \tmp, \rb
		.endm

#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb, tmp1, tmp2
		mov	\rb, #0x80000000	@ physical base address
		add	\rb, \rb, #0x00010000	@ Ser1
		.endm
#else
		.macro	loadsp,	rb, tmp1, tmp2
		addruart \rb, \tmp1, \tmp2
		.endm
#endif
#endif
#endif

		.macro	kputc,val
		mov	r0, \val
		bl	putc
		.endm

		.macro	kphex,val,len
		mov	r0, \val
		mov	r1, #\len
		bl	phex
		.endm

		/*
		 * Debug kernel copy by printing the memory addresses involved
		 */
		.macro dbgkc, begin, end, cbegin, cend
#ifdef DEBUG
		kputc	#'C'
		kputc	#':'
		kputc	#'0'
		kputc	#'x'
		kphex	\begin, 8	/* Start of compressed kernel */
		kputc	#'-'
		kputc	#'0'
		kputc	#'x'
		kphex	\end, 8		/* End of compressed kernel */
		kputc	#'-'
		kputc	#'>'
		kputc	#'0'
		kputc	#'x'
		kphex	\cbegin, 8	/* Start of kernel copy */
		kputc	#'-'
		kputc	#'0'
		kputc	#'x'
		kphex	\cend, 8	/* End of kernel copy */
		kputc	#'\n'
#endif
		.endm

		/*
		 * Debug print of the final appended DTB location
		 */
		.macro dbgadtb, begin, size
#ifdef DEBUG
		kputc	#'D'
		kputc	#'T'
		kputc	#'B'
		kputc	#':'
		kputc	#'0'
		kputc	#'x'
		kphex	\begin, 8	/* Start of appended DTB */
		kputc	#' '
		kputc	#'('
		kputc	#'0'
		kputc	#'x'
		kphex	\size, 8	/* Size of appended DTB */
		kputc	#')'
		kputc	#'\n'
#endif
		.endm

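		/*
		 * For reference, with DEBUG enabled the two macros above emit
		 * lines of the form (addresses are illustrative only):
		 *
		 *   C:0x60004000-0x60403f00->0x60c00000-0x61000000
		 *   DTB:0x60404000 (0x00010000)
		 *
		 * i.e. "compressed start-end -> copy start-end" and
		 * "DTB address (DTB size)".
		 */
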
		.macro	enable_cp15_barriers, reg
		mrc	p15, 0, \reg, c1, c0, 0	@ read SCTLR
		tst	\reg, #(1 << 5)		@ CP15BEN bit set?
		bne	.L_\@
		orr	\reg, \reg, #(1 << 5)	@ CP15 barrier instructions
		mcr	p15, 0, \reg, c1, c0, 0	@ write SCTLR
 ARM(		.inst   0xf57ff06f		@ v7+ isb	)
 THUMB(		isb					)
.L_\@:
		.endm

		/*
		 * The kernel build system appends the size of the
		 * decompressed kernel at the end of the compressed data
		 * in little-endian form.
		 */
		.macro	get_inflated_image_size, res:req, tmp1:req, tmp2:req
		adr	\res, .Linflated_image_size_offset
		ldr	\tmp1, [\res]
		add	\tmp1, \tmp1, \res	@ address of inflated image size

		ldrb	\res, [\tmp1]		@ get_unaligned_le32
		ldrb	\tmp2, [\tmp1, #1]
		orr	\res, \res, \tmp2, lsl #8
		ldrb	\tmp2, [\tmp1, #2]
		ldrb	\tmp1, [\tmp1, #3]
		orr	\res, \res, \tmp2, lsl #16
		orr	\res, \res, \tmp1, lsl #24
		.endm

		.macro	be32tocpu, val, tmp
#ifndef __ARMEB__
		/* convert to little endian */
		rev_l	\val, \tmp
#endif
		.endm

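		/*
		 * Illustrative example of get_inflated_image_size above: if
		 * the four bytes stored after the compressed data are
		 * 00 00 40 00 (lowest address first), the byte-wise loads
		 * assemble them as 0x00 | 0x00 << 8 | 0x40 << 16 | 0x00 << 24,
		 * i.e. an inflated image size of 0x00400000 (4 MiB).
		 */
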
		.section ".start", "ax"
/*
 * sort out different calling conventions
 */
		.align
		/*
		 * Always enter in ARM state for CPUs that support the ARM ISA.
		 * As of today (2014) that's exactly the members of the A and R
		 * classes.
		 */
 AR_CLASS(	.arm	)
start:
		.type	start,#function
		/*
		 * These 7 nops along with the 1 nop immediately below for
		 * !THUMB2 form 8 nops that make the compressed kernel bootable
		 * on legacy ARM systems that were assuming the kernel in a.out
		 * binary format.  The boot loaders on these systems would
		 * jump 32 bytes into the image to skip the a.out header.
		 * With these 8 nops filling exactly 32 bytes, things still
		 * work as expected on these legacy systems.  Thumb2 mode keeps
		 * 7 of the nops as it turns out that some boot loaders
		 * were patching the initial instructions of the kernel, i.e.
		 * had started to exploit this "patch area".
		 */
		__initial_nops
		.rept	5
		__nop
		.endr
#ifndef CONFIG_THUMB2_KERNEL
		__nop
#else
 AR_CLASS(	sub	pc, pc, #3	)	@ A/R: switch to Thumb2 mode
 M_CLASS(	nop.w			)	@ M: already in Thumb2 mode
		.thumb
#endif
		W(b)	1f

		.word	_magic_sig	@ Magic numbers to help the loader
		.word	_magic_start	@ absolute load/run zImage address
		.word	_magic_end	@ zImage end address
		.word	0x04030201	@ endianness flag
		.word	0x45454545	@ another magic number to indicate
		.word	_magic_table	@ additional data table

		__EFI_HEADER
1:
 ARM_BE8(	setend	be		)	@ go BE8 if compiled for BE8
 AR_CLASS(	mrs	r9, cpsr	)
#ifdef CONFIG_ARM_VIRT_EXT
		bl	__hyp_stub_install	@ get into SVC mode, reversibly
#endif
		mov	r7, r1			@ save architecture ID
		mov	r8, r2			@ save atags pointer

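		/*
		 * Per the ARM boot protocol the loader enters with
		 * r1 = architecture (machine) number and r2 = pointer to the
		 * ATAGS list or DTB; they are stashed in r7/r8 above (and the
		 * entry CPSR in r9 on A/R-class) because r0-r3 are used as
		 * scratch registers by the code that follows.
		 */
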
#ifndef CONFIG_CPU_V7M
		/*
		 * Booting from Angel - need to enter SVC mode and disable
		 * FIQs/IRQs (numeric definitions from angel arm.h source).
		 * We only do this if we were in user mode on entry.
		 */
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
		bne	not_angel
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
 ARM(		swi	0x123456	)	@ angel_SWI_ARM
 THUMB(		svc	0xab		)	@ angel_SWI_THUMB
not_angel:
		safe_svcmode_maskall r0
		msr	spsr_cxsf, r9		@ Save the CPU boot mode in
						@ SPSR
#endif
		/*
		 * Note that some cache flushing and other stuff may
		 * be needed here - is there an Angel SWI call for this?
		 */

		/*
		 * some architecture specific code can be inserted
		 * by the linker here, but it should preserve r7, r8, and r9.
		 */

		.text

#ifdef CONFIG_AUTO_ZRELADDR
		/*
		 * Find the start of physical memory.  As we are executing
		 * without the MMU on, we are in the physical address space.
		 * We just need to get rid of any offset by aligning the
		 * address.
		 *
		 * This alignment is a balance between the requirements of
		 * different platforms - we have chosen 128MB to allow
		 * platforms which align the start of their physical memory
		 * to 128MB to use this feature, while allowing the zImage
		 * to be placed within the first 128MB of memory on other
		 * platforms.  Increasing the alignment means we place
		 * stricter alignment requirements on the start of physical
		 * memory, but relaxing it means that we break people who
		 * are already placing their zImage in (eg) the top 64MB
		 * of this range.
		 */
		mov	r0, pc
		and	r0, r0, #0xf8000000
#ifdef CONFIG_USE_OF
		adr	r1, LC1
#ifdef CONFIG_ARM_APPENDED_DTB
		/*
		 * Look for an appended DTB.  If found, we cannot use it to
		 * validate the calculated start of physical memory, as its
		 * memory nodes may need to be augmented by ATAGS stored at
		 * an offset from the same start of physical memory.
		 */
		ldr	r2, [r1, #4]	@ get &_edata
		add	r2, r2, r1	@ relocate it
		ldr	r2, [r2]	@ get DTB signature
		ldr	r3, =OF_DT_MAGIC
		cmp	r2, r3		@ do we have a DTB there?
		beq	1f		@ if yes, skip validation
#endif /* CONFIG_ARM_APPENDED_DTB */

		/*
		 * Make sure we have some stack before calling C code.
		 * No GOT fixup has occurred yet, but none of the code we're
		 * about to call uses any global variables.
		 */
		ldr	sp, [r1]	@ get stack location
		add	sp, sp, r1	@ apply relocation

		/* Validate calculated start against passed DTB */
		mov	r1, r8
		bl	fdt_check_mem_start
1:
#endif /* CONFIG_USE_OF */
		/* Determine final kernel image address. */
		add	r4, r0, #TEXT_OFFSET
#else
		ldr	r4, =zreladdr
#endif

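		/*
		 * Example of the CONFIG_AUTO_ZRELADDR case above (addresses
		 * are illustrative): if the zImage is executing at 0x62280000,
		 * masking the PC with #0xf8000000 gives 0x60000000 as the
		 * assumed start of physical memory, so the kernel is placed
		 * at 0x60000000 + TEXT_OFFSET.
		 */
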
		/*
		 * Set up a page table only if it won't overwrite ourself.
		 * That means r4 < pc || r4 - 16k page directory > &_end.
		 * Given that r4 > &_end is most infrequent, we add a rough
		 * additional 1MB of room for a possible appended DTB.
		 */
		mov	r0, pc
		cmp	r0, r4
		ldrcc	r0, .Lheadroom
		addcc	r0, r0, pc
		cmpcc	r4, r0
		orrcc	r4, r4, #1		@ remember we skipped cache_on
		blcs	cache_on

restart:	adr	r0, LC1
		ldr	sp, [r0]
		ldr	r6, [r0, #4]
		add	sp, sp, r0
		add	r6, r6, r0

		get_inflated_image_size	r9, r10, lr

#ifndef CONFIG_ZBOOT_ROM
		/* malloc space is above the relocated stack (64k max) */
		add	r10, sp, #MALLOC_SIZE
#else
		/*
		 * With ZBOOT_ROM the bss/stack is non-relocatable,
		 * but someone could still run this code from RAM,
		 * in which case our reference is _edata.
		 */
		mov	r10, r6
#endif

		mov	r5, #0			@ init dtb size to 0
#ifdef CONFIG_ARM_APPENDED_DTB
/*
 *   r4  = final kernel address (possibly with LSB set)
 *   r5  = appended dtb size (still unknown)
 *   r6  = _edata
 *   r7  = architecture ID
 *   r8  = atags/device tree pointer
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 *   sp  = stack pointer
 *
 * if there are device trees (dtb) appended to zImage, advance r10 so that the
 * dtb data will get relocated along with the kernel if necessary.
 */

		ldr	lr, [r6, #0]
		ldr	r1, =OF_DT_MAGIC
		cmp	lr, r1
		bne	dtb_check_done		@ not found

#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
		/*
		 * OK... Let's do some funky business here.
		 * If we do have a DTB appended to zImage, and we do have
		 * an ATAG list around, we want the latter to be translated
		 * and folded into the former here.  No GOT fixup has occurred
		 * yet, but none of the code we're about to call uses any
		 * global variable.
		 */

		/* Get the initial DTB size */
		ldr	r5, [r6, #4]
		be32tocpu r5, r1
		dbgadtb	r6, r5
		/* 50% DTB growth should be good enough */
		add	r5, r5, r5, lsr #1
		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7
		/* clamp to 32KB min and 1MB max */
		cmp	r5, #(1 << 15)
		movlo	r5, #(1 << 15)
		cmp	r5, #(1 << 20)
		movhi	r5, #(1 << 20)
		/* temporarily relocate the stack past the DTB work space */
		add	sp, sp, r5

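		/*
		 * Illustrative sizing of the work space computed above: a
		 * 40 KiB appended DTB gets 60 KiB (40 KiB + 50%, already
		 * 8-byte aligned); a very small DTB is still given the
		 * 32 KiB minimum, and anything that would grow beyond 1 MiB
		 * is capped at 1 MiB.
		 */
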
		mov	r0, r8
		mov	r1, r6
		mov	r2, r5
		bl	atags_to_fdt

		/*
		 * If returned value is 1, there is no ATAG at the location
		 * pointed by r8.  Try the typical 0x100 offset from start
		 * of RAM and hope for the best.
		 */
		cmp	r0, #1
		sub	r0, r4, #TEXT_OFFSET
		bic	r0, r0, #1
		add	r0, r0, #0x100
		mov	r1, r6
		mov	r2, r5
		bleq	atags_to_fdt

		sub	sp, sp, r5
#endif

		mov	r8, r6			@ use the appended device tree

		/*
		 * Make sure that the DTB doesn't end up in the final
		 * kernel's .bss area.  To do so, we adjust the decompressed
		 * kernel size to compensate if that .bss size is larger
		 * than the relocated code.
		 */
		ldr	r5, =_kernel_bss_size
		adr	r1, wont_overwrite
		sub	r1, r6, r1
		subs	r1, r5, r1
		addhi	r9, r9, r1

		/* Get the current DTB size */
		ldr	r5, [r6, #4]
		be32tocpu r5, r1

		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7

		/* relocate some pointers past the appended dtb */
		add	r6, r6, r5
		add	r10, r10, r5
		add	sp, sp, r5
dtb_check_done:
#endif

/*
 * Check to see if we will overwrite ourselves.
 *   r4  = final kernel address (possibly with LSB set)
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 - 16k page directory >= r10 -> OK
 *   r4 + image length <= address of wont_overwrite -> OK
 * Note: the possible LSB in r4 is harmless here.
 */
		add	r10, r10, #16384
		cmp	r4, r10
		bhs	wont_overwrite
		add	r10, r4, r9
		adr	r9, wont_overwrite
		cmp	r10, r9
		bls	wont_overwrite

/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   r6  = _edata
 *   r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
		/*
		 * Bump to the next 256-byte boundary with the size of
		 * the relocation code added.  This avoids overwriting
		 * ourself when the offset is small.
		 */
		add	r10, r10, #((reloc_code_end - restart + 256) & ~255)
		bic	r10, r10, #255

		/* Get start of code we want to copy and align it down. */
		adr	r5, restart
		bic	r5, r5, #31

/* Relocate the hyp vector base if necessary */
#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE
		bne	1f

		/*
		 * Compute the address of the hyp vectors after relocation.
		 * Call __hyp_set_vectors with the new address so that we
		 * can HVC again after the copy.
		 */
		adr_l	r0, __hyp_stub_vectors
		sub	r0, r0, r5
		add	r0, r0, r10
		bl	__hyp_set_vectors
1:
#endif

		sub	r9, r6, r5		@ size to copy
		add	r9, r9, #31		@ rounded up to a multiple
		bic	r9, r9, #31		@ ... of 32 bytes
		add	r6, r9, r5
		add	r9, r9, r10

#ifdef DEBUG
		sub	r10, r6, r5
		sub	r10, r9, r10
		/*
		 * We are about to copy the kernel to a new memory area.
		 * The boundaries of the new memory area can be found in
		 * r10 and r9, whilst r5 and r6 contain the boundaries
		 * of the memory we are going to copy.
		 * Calling dbgkc will help with the printing of this
		 * information.
		 */
		dbgkc	r5, r6, r10, r9
#endif

1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
		cmp	r6, r5
		stmdb	r9!, {r0 - r3, r10 - r12, lr}
		bhi	1b

		/* Preserve offset to relocated code. */
		sub	r6, r9, r6

		mov	r0, r9			@ start of relocated zImage
		add	r1, sp, r6		@ end of relocated zImage
		bl	cache_clean_flush

		badr	r0, restart
		add	r0, r0, r6
		mov	pc, r0

wont_overwrite:
		adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r11, r12}
		sub	r0, r0, r1		@ calculate the delta offset

/*
 * If delta is zero, we are running at the address we were linked at.
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = kernel execution address (possibly with LSB set)
 *   r5  = appended dtb size (0 if not present)
 *   r7  = architecture ID
 *   r8  = atags pointer
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 */
		orrs	r1, r0, r5
		beq	not_relocated

		add	r11, r11, r0
		add	r12, r12, r0

#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
		 * we need to fix up pointers into the BSS region.
		 * Note that the stack pointer has already been fixed up.
		 */
		add	r2, r2, r0
		add	r3, r3, r0

		/*
		 * Relocate all entries in the GOT table.
		 * Bump bss entries to _edata + dtb size
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ This fixes up C references
		cmp	r1, r2			@ if entry >= bss_start &&
		cmphs	r3, r1			@       bss_end > entry
		addhi	r1, r1, r5		@    entry += dtb size
		str	r1, [r11], #4		@ next entry
		cmp	r11, r12
		blo	1b

		/* bump our bss pointers too */
		add	r2, r2, r5
		add	r3, r3, r5

#else

		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, r12
		blo	1b
#endif

not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		str	r0, [r2], #4
		str	r0, [r2], #4
		str	r0, [r2], #4
		cmp	r2, r3
		blo	1b

		/*
		 * Did we skip the cache setup earlier?
		 * That is indicated by the LSB in r4.
		 * Do it now if so.
		 */
		tst	r4, #1
		bic	r4, r4, #1
		blne	cache_on

/*
 * The C runtime environment should now be setup sufficiently.
 * Set up some pointers, and start decompressing.
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 */
		mov	r0, r4
		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #MALLOC_SIZE	@ 64k max
		mov	r3, r7
		bl	decompress_kernel

		get_inflated_image_size	r1, r2, r3

		mov	r0, r4			@ start of inflated image
		add	r1, r1, r0		@ end of inflated image
		bl	cache_clean_flush
		bl	cache_off

#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr		@ Get saved CPU boot mode
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE		@ if not booted in HYP mode...
		bne	__enter_kernel		@ boot kernel directly

		adr_l	r0, __hyp_reentry_vectors
		bl	__hyp_set_vectors
		__HVC(0)			@ otherwise bounce to hyp mode

		b	.			@ should never be reached
#else
		b	__enter_kernel
#endif

		.align	2
		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	_got_start		@ r11
		.word	_got_end		@ ip
		.size	LC0, . - LC0

		.type	LC1, #object
LC1:		.word	.L_user_stack_end - LC1	@ sp
		.word	_edata - LC1		@ r6
		.size	LC1, . - LC1

.Lheadroom:
		.word	_end - restart + 16384 + 1024*1024

.Linflated_image_size_offset:
		.long	(input_data_end - 4) - .

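/*
 * LC0, LC1, .Lheadroom and .Linflated_image_size_offset above hold
 * link-time values: LC0 stores absolute link addresses (so the delta
 * computed at wont_overwrite gives the load offset), while LC1 and
 * .Linflated_image_size_offset store offsets relative to their own
 * location and .Lheadroom a link-time size, all of which stay valid
 * wherever the zImage happens to be running.
 */
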
#ifdef CONFIG_ARCH_RPC
		.globl	params
params:		ldr	r0, =0x10000100		@ params_phys for RPC
		mov	pc, lr
		.ltorg
		.align
#endif

/*
 * dcache_line_size - get the minimum D-cache line size from the CTR register
 * on ARMv7.
 */
		.macro	dcache_line_size, reg, tmp
#ifdef CONFIG_CPU_V7M
		movw	\tmp, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_CTR
		movt	\tmp, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_CTR
		ldr	\tmp, [\tmp]
#else
		mrc	p15, 0, \tmp, c0, c0, 1	@ read ctr
#endif
		lsr	\tmp, \tmp, #16
		and	\tmp, \tmp, #0xf	@ cache line size encoding
		mov	\reg, #4		@ bytes per word
		mov	\reg, \reg, lsl \tmp	@ actual cache line size
		.endm

/*
 * Turn on the cache.  We need to setup some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r8 = atags pointer
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn

/*
 * Initialize the highest priority protection region, PR7, to cover the
 * whole 32-bit address space, cacheable and bufferable.
 */
__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c6, c7, 1

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....

		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mov	pc, lr

__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 0	@ access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		/*
		 * ?? ARMv3 MMU does not allow reading the control register,
		 * does this really work on ARMv3 MPU?
		 */
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		/* ?? this overwrites the value constructed above? */
		mov	r0, #0
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		/* ?? invalidate for the second time? */
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define CB_BITS 0x08
#else
#define CB_BITS 0x0c
#endif

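/*
 * __setup_mmu below builds a flat 1:1 first-level page table in the
 * 16 KiB immediately under the kernel execution address (r4, aligned
 * down to 16 KiB).  Each of its 4096 word entries maps 1 MiB as a
 * section descriptor.  As a hedged example for the ARMv4 MMU path
 * (r6 = CB_BITS | 0x12 = 0x1e with writeback CB_BITS) and RAM assumed
 * to start at 0x60000000: the megabyte at 0x60000000 gets the
 * descriptor 0x60000c1e (AP=11, C+B set), while megabytes outside the
 * assumed 256 MiB of RAM get 0xXXX00c12 (uncached).
 */
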
__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
		mov	r0, r3
		mov	r9, r0, lsr #18
		mov	r9, r9, lsl #18		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12		@ XN|U + section mapping
		orr	r1, r1, #3 << 10	@ AP=11
		add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		cmphs	r10, r1			@   && end of RAM > virt
		bic	r1, r1, #0x1c		@ clear XN|U + C + B
		orrlo	r1, r1, #0x10		@ Set XN|U for non-RAM
		orrhs	r1, r1, r6		@ set RAM section settings
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to a 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
		orr	r1, r6, #0x04		@ ensure B is set for this
		orr	r1, r1, #3 << 10
		mov	r2, pc
		mov	r2, r2, lsr #20
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0], #4
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr
ENDPROC(__setup_mmu)

@ Enable unaligned access on v6, to allow better code generation
@ for the decompressor C code:
__armv6_mmu_cache_on:
		mrc	p15, 0, r0, c1, c0, 0	@ read SCTLR
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
		mcr	p15, 0, r0, c1, c0, 0	@ write SCTLR
		b	__armv4_mmu_cache_on

__arm926ejs_mmu_cache_on:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
		mov	r0, #4			@ put dcache in WT mode
		mcr	p15, 7, r0, c15, c0, 0
#endif

__armv4_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mov	pc, r12

__armv7_mmu_cache_on:
		enable_cp15_barriers	r11
		mov	r12, lr
#ifdef CONFIG_MMU
		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
		tst	r11, #0xf		@ VMSA
		movne	r6, #CB_BITS | 0x02	@ !XN
		blne	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		tst	r11, #0xf		@ VMSA
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		bic	r0, r0, #1 << 28	@ clear SCTLR.TRE
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x003c		@ write buffer
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
						@ (needed for ARM1176)
#ifdef CONFIG_MMU
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		mrcne	p15, 0, r6, c2, c0, 2	@ read ttb control reg
		orrne	r0, r0, #1		@ MMU enabled
		movne	r1, #0xfffffffd		@ domain 0 = client
		bic	r6, r6, #1 << 31	@ 32-bit translation system
		bic	r6, r6, #(7 << 0) | (1 << 4)	@ use only ttbr0
		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
		mcrne	p15, 0, r6, c2, c0, 2	@ load ttb control
#endif
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

__fa526_cache_on:
		mov	r12, lr
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x1000		@ I-cache enable
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mov	pc, r12

__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
		orr	r0, r0, #0x000d		@ Write buffer, mmu
#endif
		mov	r1, #-1
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		b	1f
		.align	5			@ cache line aligned
1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
#endif

#define PROC_ENTRY_SIZE (4*5)

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 *  r1  = corrupted
 *  r2  = corrupted
 *  r3  = block offset
 *  r9  = corrupted
 *  r12 = corrupted
 */

call_cache_fn:	adr	r12, proc_types
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r9, c0, c0	@ get processor ID
#elif defined(CONFIG_CPU_V7M)
		/*
		 * On v7-M the processor id is located in the V7M_SCB_CPUID
		 * register, but as cache handling is IMPLEMENTATION DEFINED
		 * on v7-M (if existent at all) we just return early here.
		 * If V7M_SCB_CPUID were used the cpu ID functions (i.e.
		 * __armv7_mmu_cache_{on,off,flush}) would be selected which
		 * use cp15 registers that are not implemented on v7-M.
		 */
		bx	lr
#else
		ldr	r9, =CONFIG_PROCESSOR_ID
#endif
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r9		@ (real ^ match)
		tst	r1, r2			@       & mask
 ARM(		addeq	pc, r12, r3		) @ call cache function
 THUMB(		addeq	r12, r3			)
 THUMB(		moveq	pc, r12			) @ call cache function
		add	r12, r12, #PROC_ENTRY_SIZE
		b	1b

/*
 * Table for cache operations.  This is basically:
 *  - CPU ID match
 *  - CPU ID mask
 *  - 'cache on' method instruction
 *  - 'cache off' method instruction
 *  - 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
		.align	2
		.type	proc_types,#object
proc_types:
		.word	0x41000000		@ old ARM ID
		.word	0xff00f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007000		@ ARM7/710
		.word	0xfff8fe00
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41807200		@ ARM720T (writethrough)
		.word	0xffffff00
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007400		@ ARM74x
		.word	0xff00ff00
		W(b)	__armv3_mpu_cache_on
		W(b)	__armv3_mpu_cache_off
		W(b)	__armv3_mpu_cache_flush

		.word	0x41009400		@ ARM94x
		.word	0xff00ff00
		W(b)	__armv4_mpu_cache_on
		W(b)	__armv4_mpu_cache_off
		W(b)	__armv4_mpu_cache_flush

		.word	0x41069260		@ ARM926EJ-S (v5TEJ)
		.word	0xff0ffff0
		W(b)	__arm926ejs_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x00007000		@ ARM7 IDs
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		@ Everything from here on will be the new ID system.
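
		/*
		 * Worked example of the match rule above (the CPU ID value is
		 * illustrative): for the sa110/sa1100 entry that follows, a
		 * main ID register reading 0x4401a113 gives
		 * (0x4401a113 ^ 0x4401a100) & 0xffffffe0 == 0, so its
		 * cache on/off/flush handlers are the ones called.
		 */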

		.word	0x4401a100		@ sa110 / sa1100
		.word	0xffffffe0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x6901b110		@ sa1110
		.word	0xfffffff0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56056900
		.word	0xffffff00		@ PXA9xx
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56158000		@ PXA168
		.word	0xfffff000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x56050000		@ Feroceon
		.word	0xff0f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
		/* this conflicts with the standard ARMv5TE entry */
		.long	0x41009260		@ Old Feroceon
		.long	0xff00fff0
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv5tej_mmu_cache_flush
#endif

		.word	0x66015261		@ FA526
		.word	0xff01fff1
		W(b)	__fa526_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__fa526_cache_flush

		@ These match on the architecture ID

		.word	0x00020000		@ ARMv4T
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00050000		@ ARMv5TE
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00060000		@ ARMv5TEJ
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x0007b000		@ ARMv6
		.word	0x000ff000
		W(b)	__armv6_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush

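		/*
		 * The entry below matches any CPU whose main ID register uses
		 * the CPUID scheme, i.e. reads 0xf in the architecture field
		 * (bits 19:16); that covers ARMv7 cores and later ARM11 parts
		 * such as ARM1176, which are routed to the
		 * __armv7_mmu_cache_* handlers.
		 */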
		.word	0x000f0000		@ new CPU Id
		.word	0x000f0000
		W(b)	__armv7_mmu_cache_on
		W(b)	__armv7_mmu_cache_off
		W(b)	__armv7_mmu_cache_flush

		.word	0			@ unrecognised type
		.word	0
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.size	proc_types, . - proc_types

		/*
		 * If you get a "non-constant expression in ".if" statement"
		 * error from the assembler on this line, check that you have
		 * not accidentally written a "b" instruction where you should
		 * have written W(b).
		 */
		.if (. - proc_types) % PROC_ENTRY_SIZE != 0
		.error "The size of one or more proc_types entries is wrong."
		.endif

/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On exit,
 *  r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_off:	mov	r3, #12			@ cache_off function
		b	call_cache_fn

__armv4_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
		mov	pc, lr

__armv3_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

1177c76b6b41SHyok S. Choi__armv4_mmu_cache_off:
11788bdca0acSCatalin Marinas#ifdef CONFIG_MMU
11791da177e4SLinus Torvalds		mrc	p15, 0, r0, c1, c0
11801da177e4SLinus Torvalds		bic	r0, r0, #0x000d
11811da177e4SLinus Torvalds		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
11821da177e4SLinus Torvalds		mov	r0, #0
11831da177e4SLinus Torvalds		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
11841da177e4SLinus Torvalds		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
11858bdca0acSCatalin Marinas#endif
11861da177e4SLinus Torvalds		mov	pc, lr
11871da177e4SLinus Torvalds
11887d09e854SCatalin Marinas__armv7_mmu_cache_off:
11897d09e854SCatalin Marinas		mrc	p15, 0, r0, c1, c0
11908bdca0acSCatalin Marinas#ifdef CONFIG_MMU
11912acb9097SVladimir Murzin		bic	r0, r0, #0x0005
11928bdca0acSCatalin Marinas#else
11932acb9097SVladimir Murzin		bic	r0, r0, #0x0004
11948bdca0acSCatalin Marinas#endif
11957d09e854SCatalin Marinas		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
11967d09e854SCatalin Marinas		mov	r0, #0
11978bdca0acSCatalin Marinas#ifdef CONFIG_MMU
11987d09e854SCatalin Marinas		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
11998bdca0acSCatalin Marinas#endif
1200c30c2f99SCatalin Marinas		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
1201c30c2f99SCatalin Marinas		mcr	p15, 0, r0, c7, c10, 4	@ DSB
1202c30c2f99SCatalin Marinas		mcr	p15, 0, r0, c7, c5, 4	@ ISB
1203401b368cSArd Biesheuvel		mov	pc, lr
12047d09e854SCatalin Marinas
12051da177e4SLinus Torvalds/*
12061da177e4SLinus Torvalds * Clean and flush the cache to maintain consistency.
12071da177e4SLinus Torvalds *
1208e114412fSArd Biesheuvel * On entry,
1209e114412fSArd Biesheuvel *  r0 = start address
1210e114412fSArd Biesheuvel *  r1 = end address (exclusive)
12111da177e4SLinus Torvalds * On exit,
121221b2841dSUwe Kleine-König *  r1, r2, r3, r9, r10, r11, r12 corrupted
12131da177e4SLinus Torvalds * This routine must preserve:
12146d7d0ae5SNicolas Pitre *  r4, r6, r7, r8
12151da177e4SLinus Torvalds */
12161da177e4SLinus Torvalds		.align	5
12171da177e4SLinus Torvaldscache_clean_flush:
12181da177e4SLinus Torvalds		mov	r3, #16
1219401b368cSArd Biesheuvel		mov	r11, r1
12201da177e4SLinus Torvalds		b	call_cache_fn
12211da177e4SLinus Torvalds
122210c2df65SHyok S. Choi__armv4_mpu_cache_flush:
1223238962acSWill Deacon		tst	r4, #1
1224238962acSWill Deacon		movne	pc, lr
122510c2df65SHyok S. Choi		mov	r2, #1
122610c2df65SHyok S. Choi		mov	r3, #0
122710c2df65SHyok S. Choi		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
122810c2df65SHyok S. Choi		mov	r1, #7 << 5		@ 8 segments
122910c2df65SHyok S. Choi1:		orr	r3, r1, #63 << 26	@ 64 entries
123010c2df65SHyok S. Choi2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
123110c2df65SHyok S. Choi		subs	r3, r3, #1 << 26
123210c2df65SHyok S. Choi		bcs	2b			@ entries 63 to 0
123310c2df65SHyok S. Choi		subs	r1, r1, #1 << 5
123410c2df65SHyok S. Choi		bcs	1b			@ segments 7 to 0
123510c2df65SHyok S. Choi
123610c2df65SHyok S. Choi		teq	r2, #0
123710c2df65SHyok S. Choi		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
123810c2df65SHyok S. Choi		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
123910c2df65SHyok S. Choi		mov	pc, lr
124010c2df65SHyok S. Choi
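		/*
		 * Note on the "tst r4, #1" checks in the cache flush routines
		 * in this file: bit 0 of r4 appears to be used as a flag
		 * meaning "the MMU and caches are known to be off", in which
		 * case there is nothing dirty to clean and the D-cache
		 * maintenance is skipped (the EFI entry path near the end of
		 * this file sets that bit when it finds SCTLR.M clear).  A
		 * typical call into cache_clean_flush, mirroring the EFI stub
		 * below, looks like this compiled-out sketch:
		 */
#if 0	/* illustrative only, never assembled */
		mov	r0, r8			@ start of region to clean
		add	r1, r8, r2		@ end of region (exclusive)
		bl	cache_clean_flush
#endif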
124128853ac8SPaulius Zaleckas__fa526_cache_flush:
1242238962acSWill Deacon		tst	r4, #1
1243238962acSWill Deacon		movne	pc, lr
124428853ac8SPaulius Zaleckas		mov	r1, #0
124528853ac8SPaulius Zaleckas		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
124628853ac8SPaulius Zaleckas		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
124728853ac8SPaulius Zaleckas		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
124828853ac8SPaulius Zaleckas		mov	pc, lr
124910c2df65SHyok S. Choi
1250c76b6b41SHyok S. Choi__armv6_mmu_cache_flush:
12511da177e4SLinus Torvalds		mov	r1, #0
1252238962acSWill Deacon		tst	r4, #1
1253238962acSWill Deacon		mcreq	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
12541da177e4SLinus Torvalds		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
1255238962acSWill Deacon		mcreq	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
12561da177e4SLinus Torvalds		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
12571da177e4SLinus Torvalds		mov	pc, lr
12581da177e4SLinus Torvalds
12597d09e854SCatalin Marinas__armv7_mmu_cache_flush:
12608239fc77SArd Biesheuvel		enable_cp15_barriers	r10
1261238962acSWill Deacon		tst	r4, #1
1262238962acSWill Deacon		bne	iflush
12637d09e854SCatalin Marinas		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
12647d09e854SCatalin Marinas		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
12657d09e854SCatalin Marinas		mov	r10, #0
1266c30c2f99SCatalin Marinas		beq	hierarchical
12677d09e854SCatalin Marinas		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
12687d09e854SCatalin Marinas		b	iflush
12697d09e854SCatalin Marinashierarchical:
1270401b368cSArd Biesheuvel		dcache_line_size r1, r2		@ r1 := dcache min line size
1271401b368cSArd Biesheuvel		sub	r2, r1, #1		@ r2 := line size mask
1272401b368cSArd Biesheuvel		bic	r0, r0, r2		@ round down start to line size
1273401b368cSArd Biesheuvel		sub	r11, r11, #1		@ end address is exclusive
1274401b368cSArd Biesheuvel		bic	r11, r11, r2		@ round down end to line size
1275401b368cSArd Biesheuvel0:		cmp	r0, r11			@ finished?
1276401b368cSArd Biesheuvel		bgt	iflush
1277401b368cSArd Biesheuvel		mcr	p15, 0, r0, c7, c14, 1	@ Dcache clean/invalidate by VA
1278401b368cSArd Biesheuvel		add	r0, r0, r1
1279401b368cSArd Biesheuvel		b	0b
12807d09e854SCatalin Marinasiflush:
1281c30c2f99SCatalin Marinas		mcr	p15, 0, r10, c7, c10, 4	@ DSB
12827d09e854SCatalin Marinas		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
1283c30c2f99SCatalin Marinas		mcr	p15, 0, r10, c7, c10, 4	@ DSB
1284c30c2f99SCatalin Marinas		mcr	p15, 0, r10, c7, c5, 4	@ ISB
12857d09e854SCatalin Marinas		mov	pc, lr
12867d09e854SCatalin Marinas
128715754bf9SNicolas Pitre__armv5tej_mmu_cache_flush:
1288238962acSWill Deacon		tst	r4, #1
1289238962acSWill Deacon		movne	pc, lr
12909f1984c6SStefan Agner1:		mrc	p15, 0, APSR_nzcv, c7, c14, 3	@ test,clean,invalidate D cache
129115754bf9SNicolas Pitre		bne	1b
129215754bf9SNicolas Pitre		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
129315754bf9SNicolas Pitre		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
129415754bf9SNicolas Pitre		mov	pc, lr
129515754bf9SNicolas Pitre
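		/*
		 * The __armv5tej_mmu_cache_flush loop above relies on the
		 * "test, clean and invalidate D cache" operation (CP15 c7,
		 * c14, 3): each MRC cleans one dirty line and transfers the
		 * result straight into the CPSR flags, so the loop simply
		 * repeats until the flags report that the D-cache is clean.
		 * APSR_nzcv as the destination register is the UAL spelling
		 * of what older code wrote as r15; the compiled-out fragment
		 * below shows that older, equivalent form.
		 */
#if 0	/* illustrative only, never assembled */
1:		mrc	p15, 0, r15, c7, c14, 3	@ test, clean, invalidate D
		bne	1b			@ loop until cache is clean
#endif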
1296c76b6b41SHyok S. Choi__armv4_mmu_cache_flush:
1297238962acSWill Deacon		tst	r4, #1
1298238962acSWill Deacon		movne	pc, lr
12991da177e4SLinus Torvalds		mov	r2, #64*1024		@ default: 32K dcache size (*2)
13001da177e4SLinus Torvalds		mov	r11, #32		@ default: 32 byte line size
13011da177e4SLinus Torvalds		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
130298e12b5aSRussell King		teq	r3, r9			@ cache ID register present?
13031da177e4SLinus Torvalds		beq	no_cache_id
13041da177e4SLinus Torvalds		mov	r1, r3, lsr #18
13051da177e4SLinus Torvalds		and	r1, r1, #7
13061da177e4SLinus Torvalds		mov	r2, #1024
13071da177e4SLinus Torvalds		mov	r2, r2, lsl r1		@ base dcache size *2
13081da177e4SLinus Torvalds		tst	r3, #1 << 14		@ test M bit
13091da177e4SLinus Torvalds		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
13101da177e4SLinus Torvalds		mov	r3, r3, lsr #12
13111da177e4SLinus Torvalds		and	r3, r3, #3
13121da177e4SLinus Torvalds		mov	r11, #8
13131da177e4SLinus Torvalds		mov	r11, r11, lsl r3	@ cache line size in bytes
13141da177e4SLinus Torvaldsno_cache_id:
13150e056f20SCatalin Marinas		mov	r1, pc
13160e056f20SCatalin Marinas		bic	r1, r1, #63		@ align to longest cache line
13171da177e4SLinus Torvalds		add	r2, r1, r2
13180e056f20SCatalin Marinas1:
13190e056f20SCatalin Marinas ARM(		ldr	r3, [r1], r11		) @ s/w flush D cache
13200e056f20SCatalin Marinas THUMB(		ldr	r3, [r1]		) @ s/w flush D cache
13210e056f20SCatalin Marinas THUMB(		add	r1, r1, r11		)
13221da177e4SLinus Torvalds		teq	r1, r2
13231da177e4SLinus Torvalds		bne	1b
13241da177e4SLinus Torvalds
13251da177e4SLinus Torvalds		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
13261da177e4SLinus Torvalds		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
13271da177e4SLinus Torvalds		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
13281da177e4SLinus Torvalds		mov	pc, lr
13291da177e4SLinus Torvalds
1330c76b6b41SHyok S. Choi__armv3_mmu_cache_flush:
133110c2df65SHyok S. Choi__armv3_mpu_cache_flush:
1332238962acSWill Deacon		tst	r4, #1
1333238962acSWill Deacon		movne	pc, lr
13341da177e4SLinus Torvalds		mov	r1, #0
133563fa7187SUwe Kleine-König		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
13361da177e4SLinus Torvalds		mov	pc, lr
13371da177e4SLinus Torvalds
13381da177e4SLinus Torvalds/*
13391da177e4SLinus Torvalds * Various debugging routines for printing hex characters and
13401da177e4SLinus Torvalds * memory, which again must be relocatable.
13411da177e4SLinus Torvalds */
13421da177e4SLinus Torvalds#ifdef DEBUG
134388987ef9SCatalin Marinas		.align	2
13441da177e4SLinus Torvalds		.type	phexbuf,#object
13451da177e4SLinus Torvaldsphexbuf:	.space	12
13461da177e4SLinus Torvalds		.size	phexbuf, . - phexbuf
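		/*
		 * Calling conventions of the DEBUG-only helpers below (each
		 * of them corrupts r0-r3, as noted): phex prints the value
		 * in r0 as r1 hex digits, building the string in phexbuf and
		 * branching on to puts; puts prints the NUL-terminated
		 * string at r0; putc prints the single character in r0;
		 * memdump dumps 64 words (256 bytes) starting at the address
		 * in r0.  A compiled-out usage sketch:
		 */
#if 0	/* illustrative only, never assembled */
		mov	r0, r4			@ e.g. print the kernel entry
		mov	r1, #8			@ address as 8 hex digits
		bl	phex
		mov	r0, #'\n'
		bl	putc
#endif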
13471da177e4SLinus Torvalds
1348be6f9f00SUwe Kleine-König@ phex corrupts {r0, r1, r2, r3}
13491da177e4SLinus Torvaldsphex:		adr	r3, phexbuf
13501da177e4SLinus Torvalds		mov	r2, #0
13511da177e4SLinus Torvalds		strb	r2, [r3, r1]
13521da177e4SLinus Torvalds1:		subs	r1, r1, #1
13531da177e4SLinus Torvalds		movmi	r0, r3
13541da177e4SLinus Torvalds		bmi	puts
13551da177e4SLinus Torvalds		and	r2, r0, #15
13561da177e4SLinus Torvalds		mov	r0, r0, lsr #4
13571da177e4SLinus Torvalds		cmp	r2, #10
13581da177e4SLinus Torvalds		addge	r2, r2, #7
13591da177e4SLinus Torvalds		add	r2, r2, #'0'
13601da177e4SLinus Torvalds		strb	r2, [r3, r1]
13611da177e4SLinus Torvalds		b	1b
13621da177e4SLinus Torvalds
1363be6f9f00SUwe Kleine-König@ puts corrupts {r0, r1, r2, r3}
1364e07e3c33SŁukasz Stelmachputs:		loadsp	r3, r2, r1
13651da177e4SLinus Torvalds1:		ldrb	r2, [r0], #1
13661da177e4SLinus Torvalds		teq	r2, #0
13671da177e4SLinus Torvalds		moveq	pc, lr
13680b0c1dbdSLinus Walleij2:		writeb	r2, r3, r1
13691da177e4SLinus Torvalds		mov	r1, #0x00020000
13701da177e4SLinus Torvalds3:		subs	r1, r1, #1
13711da177e4SLinus Torvalds		bne	3b
13721da177e4SLinus Torvalds		teq	r2, #'\n'
13731da177e4SLinus Torvalds		moveq	r2, #'\r'
13741da177e4SLinus Torvalds		beq	2b
13751da177e4SLinus Torvalds		teq	r0, #0
13761da177e4SLinus Torvalds		bne	1b
13771da177e4SLinus Torvalds		mov	pc, lr
1378be6f9f00SUwe Kleine-König@ putc corrupts {r0, r1, r2, r3}
13791da177e4SLinus Torvaldsputc:
13801da177e4SLinus Torvalds		mov	r2, r0
1381e07e3c33SŁukasz Stelmach		loadsp	r3, r1, r0
13821da177e4SLinus Torvalds		mov	r0, #0
13831da177e4SLinus Torvalds		b	2b
13841da177e4SLinus Torvalds
1385be6f9f00SUwe Kleine-König@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
13861da177e4SLinus Torvaldsmemdump:	mov	r12, r0
13871da177e4SLinus Torvalds		mov	r10, lr
13881da177e4SLinus Torvalds		mov	r11, #0
13891da177e4SLinus Torvalds2:		mov	r0, r11, lsl #2
13901da177e4SLinus Torvalds		add	r0, r0, r12
13911da177e4SLinus Torvalds		mov	r1, #8
13921da177e4SLinus Torvalds		bl	phex
13931da177e4SLinus Torvalds		mov	r0, #':'
13941da177e4SLinus Torvalds		bl	putc
13951da177e4SLinus Torvalds1:		mov	r0, #' '
13961da177e4SLinus Torvalds		bl	putc
13971da177e4SLinus Torvalds		ldr	r0, [r12, r11, lsl #2]
13981da177e4SLinus Torvalds		mov	r1, #8
13991da177e4SLinus Torvalds		bl	phex
14001da177e4SLinus Torvalds		and	r0, r11, #7
14011da177e4SLinus Torvalds		teq	r0, #3
14021da177e4SLinus Torvalds		moveq	r0, #' '
14031da177e4SLinus Torvalds		bleq	putc
14041da177e4SLinus Torvalds		and	r0, r11, #7
14051da177e4SLinus Torvalds		add	r11, r11, #1
14061da177e4SLinus Torvalds		teq	r0, #7
14071da177e4SLinus Torvalds		bne	1b
14081da177e4SLinus Torvalds		mov	r0, #'\n'
14091da177e4SLinus Torvalds		bl	putc
14101da177e4SLinus Torvalds		cmp	r11, #64
14111da177e4SLinus Torvalds		blt	2b
14121da177e4SLinus Torvalds		mov	pc, r10
14131da177e4SLinus Torvalds#endif
14141da177e4SLinus Torvalds
141592c83ff1SCatalin Marinas		.ltorg
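		/*
		 * Register contract when the decompressed kernel is finally
		 * entered through __enter_kernel below (the usual ARM boot
		 * protocol): r0 = 0, r1 = machine type number, r2 = physical
		 * address of the ATAGs list or device tree blob, with the
		 * kernel entry point held in r4 and the MMU and caches off.
		 * On M-class cores the entry is made in Thumb mode, hence
		 * the "add r4, r4, #1" before the bx.  A compiled-out
		 * restatement of the state __enter_kernel establishes:
		 */
#if 0	/* illustrative only, never assembled */
		mov	r0, #0			@ boot protocol: r0 == 0
		mov	r1, r7			@ machine type number
		mov	r2, r8			@ ATAGs / DTB pointer
		bx	r4			@ jump to decompressed kernel
#endif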
1416424e5994SDave Martin
1417424e5994SDave Martin#ifdef CONFIG_ARM_VIRT_EXT
1418424e5994SDave Martin.align 5
1419424e5994SDave Martin__hyp_reentry_vectors:
1420424e5994SDave Martin		W(b)	.			@ reset
1421424e5994SDave Martin		W(b)	.			@ undef
1422db227c19SArd Biesheuvel#ifdef CONFIG_EFI_STUB
1423db227c19SArd Biesheuvel		W(b)	__enter_kernel_from_hyp	@ hvc from HYP
1424db227c19SArd Biesheuvel#else
1425424e5994SDave Martin		W(b)	.			@ svc
1426db227c19SArd Biesheuvel#endif
1427424e5994SDave Martin		W(b)	.			@ pabort
1428424e5994SDave Martin		W(b)	.			@ dabort
1429424e5994SDave Martin		W(b)	__enter_kernel		@ hyp
1430424e5994SDave Martin		W(b)	.			@ irq
1431424e5994SDave Martin		W(b)	.			@ fiq
1432424e5994SDave Martin#endif /* CONFIG_ARM_VIRT_EXT */
1433424e5994SDave Martin
1434424e5994SDave Martin__enter_kernel:
1435424e5994SDave Martin		mov	r0, #0			@ must be 0
1436f2ae9de0SŁukasz Stelmach		mov	r1, r7			@ restore architecture number
1437f2ae9de0SŁukasz Stelmach		mov	r2, r8			@ restore atags pointer
1438424e5994SDave Martin ARM(		mov	pc, r4		)	@ call kernel
1439c20611dfSJoachim Eastwood M_CLASS(	add	r4, r4, #1	)	@ enter in Thumb mode for M class
1440c20611dfSJoachim Eastwood THUMB(		bx	r4		)	@ entry point is always ARM for A/R classes
1441424e5994SDave Martin
1442adcc2591SNicolas Pitrereloc_code_end:
14431da177e4SLinus Torvalds
144481a0bc39SRoy Franz#ifdef CONFIG_EFI_STUB
1445db227c19SArd Biesheuvel__enter_kernel_from_hyp:
1446db227c19SArd Biesheuvel		mrc	p15, 4, r0, c1, c0, 0	@ read HSCTLR
1447db227c19SArd Biesheuvel		bic	r0, r0, #0x5		@ disable MMU and caches
1448db227c19SArd Biesheuvel		mcr	p15, 4, r0, c1, c0, 0	@ write HSCTLR
1449db227c19SArd Biesheuvel		isb
1450db227c19SArd Biesheuvel		b	__enter_kernel
1451db227c19SArd Biesheuvel
14529f922377SArd BiesheuvelENTRY(efi_enter_kernel)
1453d0f9ca9bSArd Biesheuvel		mov	r4, r0			@ preserve image base
1454d0f9ca9bSArd Biesheuvel		mov	r8, r1			@ preserve DT pointer
145581a0bc39SRoy Franz
145667e3f828SArd Biesheuvel		adr_l	r0, call_cache_fn
1457db227c19SArd Biesheuvel		adr	r1, 0f			@ clean the region of code we
1458db227c19SArd Biesheuvel		bl	cache_clean_flush	@ may run with the MMU off
1459db227c19SArd Biesheuvel
1460db227c19SArd Biesheuvel#ifdef CONFIG_ARM_VIRT_EXT
1461db227c19SArd Biesheuvel		@
1462db227c19SArd Biesheuvel		@ The EFI spec does not support booting on ARM in HYP mode,
1463db227c19SArd Biesheuvel		@ since it mandates that the MMU and caches are on, with all
1464db227c19SArd Biesheuvel		@ 32-bit addressable DRAM mapped 1:1 using short descriptors.
1465db227c19SArd Biesheuvel		@
1466db227c19SArd Biesheuvel		@ While the EDK2 reference implementation adheres to this,
1467db227c19SArd Biesheuvel		@ U-Boot might decide to enter the EFI stub in HYP mode
1468db227c19SArd Biesheuvel		@ anyway, with the MMU and caches either on or off.
1469db227c19SArd Biesheuvel		@
1470db227c19SArd Biesheuvel		mrs	r0, cpsr		@ get the current mode
1471db227c19SArd Biesheuvel		msr	spsr_cxsf, r0		@ record boot mode
1472db227c19SArd Biesheuvel		and	r0, r0, #MODE_MASK	@ are we running in HYP mode?
1473db227c19SArd Biesheuvel		cmp	r0, #HYP_MODE
1474db227c19SArd Biesheuvel		bne	.Lefi_svc
1475db227c19SArd Biesheuvel
1476db227c19SArd Biesheuvel		mrc	p15, 4, r1, c1, c0, 0	@ read HSCTLR
1477db227c19SArd Biesheuvel		tst	r1, #0x1		@ MMU enabled at HYP?
1478db227c19SArd Biesheuvel		beq	1f
1479db227c19SArd Biesheuvel
1480db227c19SArd Biesheuvel		@
1481db227c19SArd Biesheuvel		@ When running in HYP mode with the caches on, we're better
1482db227c19SArd Biesheuvel		@ off just carrying on using the cached 1:1 mapping that the
1483db227c19SArd Biesheuvel		@ firmware provided. Set up the HYP vectors so HVC instructions
1484db227c19SArd Biesheuvel		@ issued from HYP mode take us to the correct handler code. We
1485db227c19SArd Biesheuvel		@ will disable the MMU before jumping to the kernel proper.
1486db227c19SArd Biesheuvel		@
1487fbc81ec5SArd Biesheuvel ARM(		bic	r1, r1, #(1 << 30)	) @ clear HSCTLR.TE
1488fbc81ec5SArd Biesheuvel THUMB(		orr	r1, r1, #(1 << 30)	) @ set HSCTLR.TE
1489fbc81ec5SArd Biesheuvel		mcr	p15, 4, r1, c1, c0, 0
1490db227c19SArd Biesheuvel		adr	r0, __hyp_reentry_vectors
1491db227c19SArd Biesheuvel		mcr	p15, 4, r0, c12, c0, 0	@ set HYP vector base (HVBAR)
1492db227c19SArd Biesheuvel		isb
1493db227c19SArd Biesheuvel		b	.Lefi_hyp
1494db227c19SArd Biesheuvel
1495db227c19SArd Biesheuvel		@
1496db227c19SArd Biesheuvel		@ When running in HYP mode with the caches off, we need to drop
1497db227c19SArd Biesheuvel		@ into SVC mode now, and let the decompressor set up its cached
1498db227c19SArd Biesheuvel		@ 1:1 mapping as usual.
1499db227c19SArd Biesheuvel		@
1500db227c19SArd Biesheuvel1:		mov	r9, r4			@ preserve image base
1501db227c19SArd Biesheuvel		bl	__hyp_stub_install	@ install HYP stub vectors
1502db227c19SArd Biesheuvel		safe_svcmode_maskall	r1	@ drop to SVC mode
1503db227c19SArd Biesheuvel		msr	spsr_cxsf, r0		@ record boot mode
1504db227c19SArd Biesheuvel		orr	r4, r9, #1		@ restore image base and set LSB
1505db227c19SArd Biesheuvel		b	.Lefi_hyp
1506db227c19SArd Biesheuvel.Lefi_svc:
1507db227c19SArd Biesheuvel#endif
1508d0f9ca9bSArd Biesheuvel		mrc	p15, 0, r0, c1, c0, 0	@ read SCTLR
1509d0f9ca9bSArd Biesheuvel		tst	r0, #0x1		@ MMU enabled?
1510d0f9ca9bSArd Biesheuvel		orreq	r4, r4, #1		@ set LSB if not
1511d0f9ca9bSArd Biesheuvel
1512db227c19SArd Biesheuvel.Lefi_hyp:
1513d0f9ca9bSArd Biesheuvel		mov	r0, r8			@ DT start
1514d0f9ca9bSArd Biesheuvel		add	r1, r8, r2		@ DT end
1515e951a1f4SArd Biesheuvel		bl	cache_clean_flush
1516e951a1f4SArd Biesheuvel
1517d0f9ca9bSArd Biesheuvel		adr	r0, 0f			@ switch to our stack
1518d0f9ca9bSArd Biesheuvel		ldr	sp, [r0]
1519d0f9ca9bSArd Biesheuvel		add	sp, sp, r0
1520c7225494SArd Biesheuvel
1521d0f9ca9bSArd Biesheuvel		mov	r5, #0			@ appended DTB size
1522d0f9ca9bSArd Biesheuvel		mov	r7, #0xFFFFFFFF		@ machine ID
1523d0f9ca9bSArd Biesheuvel		b	wont_overwrite
15249f922377SArd BiesheuvelENDPROC(efi_enter_kernel)
1525d0f9ca9bSArd Biesheuvel0:		.long	.L_user_stack_end - .
152681a0bc39SRoy Franz#endif
152781a0bc39SRoy Franz
15281da177e4SLinus Torvalds		.align
1529b0c4d4eeSRussell King		.section ".stack", "aw", %nobits
15308d7e4cc2SNicolas Pitre.L_user_stack:	.space	4096
15318d7e4cc2SNicolas Pitre.L_user_stack_end:
1532
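/*
 * Note on the stack switch performed by efi_enter_kernel above: the "0:"
 * literal word stores the link-time offset .L_user_stack_end - ., so at
 * run time "adr r0, 0f; ldr sp, [r0]; add sp, sp, r0" yields the current
 * address of .L_user_stack_end regardless of where the image was loaded,
 * without needing a relocation.  A compiled-out restatement of that
 * position-independent arithmetic:
 */
#if 0	/* illustrative only, never assembled */
		adr	r0, 0f			@ r0 = runtime address of the literal
		ldr	sp, [r0]		@ sp = .L_user_stack_end - <link-time literal addr>
		add	sp, sp, r0		@ sp = runtime .L_user_stack_end
0:		.long	.L_user_stack_end - .
#endif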