/*
 * relocate - common relocation function for ARM U-Boot
 *
 * Copyright (c) 2013 Albert ARIBAUD <albert.u.boot@aribaud.net>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>

/*
 * Default/weak exception vectors relocation routine
 *
 * This routine covers the standard ARM cases: normal (0x00000000),
 * high (0xffff0000) and VBAR. SoCs which do not comply with any of
 * the standard cases must provide their own, strong, version.
 *
 * In:    r9 = gd (global data pointer); gd->relocaddr = relocated base
 * Out:   nothing (returns via lr)
 * Clobb: r0-r8, r10, flags
 */

	.section	.text.relocate_vectors,"ax",%progbits
	.weak		relocate_vectors

ENTRY(relocate_vectors)

#ifdef CONFIG_HAS_VBAR
	/*
	 * If the ARM processor has the security extensions,
	 * use VBAR to relocate the exception vectors.
	 */
	ldr	r0, [r9, #GD_RELOCADDR]	/* r0 = gd->relocaddr */
	mcr	p15, 0, r0, c12, c0, 0	/* Set VBAR */
#else
	/*
	 * Copy the relocated exception vectors to the
	 * correct address.
	 * CP15 c1 (SCTLR) V bit gives us the location of the vectors:
	 * 0x00000000 (V=0) or 0xFFFF0000 (V=1, "high vectors").
	 */
	ldr	r0, [r9, #GD_RELOCADDR]	/* r0 = gd->relocaddr */
	mrc	p15, 0, r2, c1, c0, 0	/* V bit (bit[13]) in CP15 c1 */
	ands	r2, r2, #(1 << 13)
	ldreq	r1, =0x00000000		/* If V=0 */
	ldrne	r1, =0xFFFF0000		/* If V=1 */
	/*
	 * Copy 16 words (64 bytes) in two 8-register bursts:
	 * the 8 vector slots plus their 8 literal-pool entries.
	 */
	ldmia	r0!, {r2-r8,r10}
	stmia	r1!, {r2-r8,r10}
	ldmia	r0!, {r2-r8,r10}
	stmia	r1!, {r2-r8,r10}
#endif
	bx	lr

ENDPROC(relocate_vectors)

/*
 * void relocate_code(addr_moni)
 *
 * This function relocates the monitor code.
 *
 * NOTE:
 * To prevent the code below from containing references with an R_ARM_ABS32
 * relocation record type, we never refer to linker-defined symbols directly.
 * Instead, we declare literals which contain their relative location with
 * respect to relocate_code, and at run time, add relocate_code back to them.
64 */ 65 66ENTRY(relocate_code) 67 ldr r1, =__image_copy_start /* r1 <- SRC &__image_copy_start */ 68 subs r4, r0, r1 /* r4 <- relocation offset */ 69 beq relocate_done /* skip relocation */ 70 ldr r2, =__image_copy_end /* r2 <- SRC &__image_copy_end */ 71 72copy_loop: 73 ldmia r1!, {r10-r11} /* copy from source address [r1] */ 74 stmia r0!, {r10-r11} /* copy to target address [r0] */ 75 cmp r1, r2 /* until source end address [r2] */ 76 blo copy_loop 77 78 /* 79 * fix .rel.dyn relocations 80 */ 81 ldr r2, =__rel_dyn_start /* r2 <- SRC &__rel_dyn_start */ 82 ldr r3, =__rel_dyn_end /* r3 <- SRC &__rel_dyn_end */ 83fixloop: 84 ldmia r2!, {r0-r1} /* (r0,r1) <- (SRC location,fixup) */ 85 and r1, r1, #0xff 86 cmp r1, #23 /* relative fixup? */ 87 bne fixnext 88 89 /* relative fix: increase location by offset */ 90 add r0, r0, r4 91 ldr r1, [r0] 92 add r1, r1, r4 93 str r1, [r0] 94fixnext: 95 cmp r2, r3 96 blo fixloop 97 98relocate_done: 99 100#ifdef __XSCALE__ 101 /* 102 * On xscale, icache must be invalidated and write buffers drained, 103 * even with cache disabled - 4.2.7 of xscale core developer's manual 104 */ 105 mcr p15, 0, r0, c7, c7, 0 /* invalidate icache */ 106 mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ 107#endif 108 109 /* ARMv4- don't know bx lr but the assembler fails to see that */ 110 111#ifdef __ARM_ARCH_4__ 112 mov pc, lr 113#else 114 bx lr 115#endif 116 117ENDPROC(relocate_code) 118