/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * relocate - common relocation function for ARM U-Boot
 *
 * Copyright (c) 2013 Albert ARIBAUD <albert.u.boot@aribaud.net>
 */

#include <asm-offsets.h>
#include <asm/assembler.h>
#include <config.h>
#include <elf.h>
#include <linux/linkage.h>
#ifdef CONFIG_CPU_V7M
#include <asm/armv7m.h>
#endif

/*
 * Default/weak exception vectors relocation routine
 *
 * This routine covers the standard ARM cases: normal (0x00000000),
 * high (0xffff0000) and VBAR. SoCs which do not comply with any of
 * the standard cases must provide their own, strong version.
 */

	.section	.text.relocate_vectors,"ax",%progbits
	.weak		relocate_vectors

ENTRY(relocate_vectors)

#ifdef CONFIG_CPU_V7M
	/*
	 * On ARMv7-M we only have to write the new vector table
	 * address to the VTOR register.
	 */
	ldr	r0, [r9, #GD_RELOCADDR]	/* r0 = gd->relocaddr */
	ldr	r1, =V7M_SCB_BASE
	str	r0, [r1, V7M_SCB_VTOR]
#else
#ifdef CONFIG_HAS_VBAR
	/*
	 * If the ARM processor has the security extensions,
	 * use VBAR to relocate the exception vectors.
	 */
	ldr	r0, [r9, #GD_RELOCADDR]	/* r0 = gd->relocaddr */
	mcr	p15, 0, r0, c12, c0, 0	/* Set VBAR */
#else
	/*
	 * Copy the relocated exception vectors to the correct
	 * address. The CP15 c1 V bit tells us where the vectors
	 * live: 0x00000000 or 0xFFFF0000.
	 */
	ldr	r0, [r9, #GD_RELOCADDR]	/* r0 = gd->relocaddr */
	mrc	p15, 0, r2, c1, c0, 0	/* V bit (bit[13]) in CP15 c1 */
	ands	r2, r2, #(1 << 13)
	ldreq	r1, =0x00000000		/* If V=0 */
	ldrne	r1, =0xFFFF0000		/* If V=1 */
	/* Copy 2 x 8 words (64 bytes) of vectors to the chosen address */
	ldmia	r0!, {r2-r8,r10}
	stmia	r1!, {r2-r8,r10}
	ldmia	r0!, {r2-r8,r10}
	stmia	r1!, {r2-r8,r10}
#endif
#endif
	bx	lr

ENDPROC(relocate_vectors)
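
/*
 * For illustration only: a rough C equivalent of relocate_vectors
 * above. Helper names such as write_vtor(), set_vbar() and cp15_c1()
 * are descriptive placeholders, not the real U-Boot API:
 *
 *	void relocate_vectors(void)
 *	{
 *	#if defined(CONFIG_CPU_V7M)
 *		// ARMv7-M: point the vector table offset register
 *		// at the relocated table
 *		write_vtor(gd->relocaddr);
 *	#elif defined(CONFIG_HAS_VBAR)
 *		// Security extensions: program VBAR
 *		set_vbar(gd->relocaddr);		// MCR p15, 0, ..., c12, c0, 0
 *	#else
 *		// Classic ARM: copy 16 vector words to the address
 *		// selected by the CP15 c1 V bit
 *		u32 *dst = (cp15_c1() & (1 << 13)) ? (u32 *)0xFFFF0000
 *						   : (u32 *)0x00000000;
 *		memcpy(dst, (void *)gd->relocaddr, 16 * sizeof(u32));
 *	#endif
 *	}
 */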

/*
 * void relocate_code(addr_moni)
 *
 * This function relocates the monitor code.
 *
 * NOTE:
 * To prevent the code below from containing references with an
 * R_ARM_ABS32 relocation record type, we never refer to linker-defined
 * symbols directly. Instead, we declare literals which contain their
 * relative location with respect to relocate_code, and at run time,
 * add relocate_code back to them.
 */

ENTRY(relocate_code)
	ldr	r1, =__image_copy_start	/* r1 <- SRC &__image_copy_start */
	subs	r4, r0, r1		/* r4 <- relocation offset */
	beq	relocate_done		/* skip relocation */
	ldr	r2, =__image_copy_end	/* r2 <- SRC &__image_copy_end */

#ifdef CONFIG_ASPEED_SPI_DMA
	sub	r2, r1			/* r2 <- copy size (end - start) */
	add	r2, #0x3
	bic	r2, #0x3		/* round size up to a multiple of 4 */
	mov	r5, lr			/* preserve lr across the call */
	bl	aspeed_spi_fastcpy	/* DMA copy (r0 = dst, r1 = src, r2 = size) */
	mov	lr, r5
#else
copy_loop:
	ldmia	r1!, {r10-r11}		/* copy from source address [r1] */
	stmia	r0!, {r10-r11}		/* copy to target address [r0] */
	cmp	r1, r2			/* until source end address [r2] */
	blo	copy_loop
#endif

	/*
	 * Fix .rel.dyn relocations: for every R_ARM_RELATIVE record,
	 * add the relocation offset to the word the record points at.
	 */
	ldr	r2, =__rel_dyn_start	/* r2 <- SRC &__rel_dyn_start */
	ldr	r3, =__rel_dyn_end	/* r3 <- SRC &__rel_dyn_end */
fixloop:
	ldmia	r2!, {r0-r1}		/* (r0,r1) <- (SRC location,fixup) */
	and	r1, r1, #0xff		/* r1 <- relocation type */
	cmp	r1, #R_ARM_RELATIVE
	bne	fixnext

	/* relative fix: increase location by offset */
	add	r0, r0, r4
	ldr	r1, [r0]
	add	r1, r1, r4
	str	r1, [r0]
fixnext:
	cmp	r2, r3
	blo	fixloop

relocate_done:

#ifdef __XSCALE__
	/*
	 * On XScale, the icache must be invalidated and the write
	 * buffers drained, even with the cache disabled - see section
	 * 4.2.7 of the XScale core developer's manual.
	 */
	mcr	p15, 0, r0, c7, c7, 0	/* invalidate icache */
	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
#endif

	/* ARMv4 and below do not know "bx lr", but the assembler fails to see that */
#ifdef __ARM_ARCH_4__
	mov	pc, lr
#else
	bx	lr
#endif

ENDPROC(relocate_code)
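
/*
 * For illustration only: a rough C sketch of the .rel.dyn fixup loop
 * above, assuming Elf32_Rel records as declared in <elf.h>. The
 * function and parameter names are placeholders, not U-Boot API:
 *
 *	static void fix_rel_dyn(Elf32_Rel *rel, Elf32_Rel *end, ulong off)
 *	{
 *		for (; rel < end; rel++) {
 *			if ((rel->r_info & 0xff) != R_ARM_RELATIVE)
 *				continue;
 *			// Both the patched location and the pointer value
 *			// it holds move by the relocation offset.
 *			*(ulong *)(rel->r_offset + off) += off;
 *		}
 *	}
 */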