/*
 * Copyright (c) 2004-2008 Texas Instruments
 *
 * (C) Copyright 2002
 * Gary Jennejohn, DENX Software Engineering, <garyj@denx.de>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <config.h>

OUTPUT_FORMAT("elf32-littlearm", "elf32-littlearm", "elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
SECTIONS
{
#if defined(CONFIG_ARMV7_SECURE_BASE) && defined(CONFIG_ARMV7_NONSEC)
	/*
	 * If CONFIG_ARMV7_SECURE_BASE is defined, the secure code is not
	 * bundled with u-boot and its code offsets are fixed. The secure
	 * zone only needs to be copied from the load address to
	 * CONFIG_ARMV7_SECURE_BASE, which is the link and run address of
	 * the secure code.
	 *
	 * If CONFIG_ARMV7_SECURE_BASE is undefined, the secure zone is
	 * included in the u-boot address space and some absolute addresses
	 * are used in the secure code. Those absolute addresses need to be
	 * relocated along with the accompanying u-boot code.
	 *
	 * So the DISCARD below applies only when CONFIG_ARMV7_SECURE_BASE
	 * is defined.
	 */
	/DISCARD/ : { *(.rel._secure*) }
#endif
	. = 0x00000000;

	. = ALIGN(4);
	.text :
	{
		*(.__image_copy_start)
		*(.vectors)
		CPUDIR/start.o (.text*)
		*(.text*)
	}

#ifdef CONFIG_ARMV7_NONSEC

/*
 * Expand CONFIG_ARMV7_SECURE_BASE to nothing when it is not set, so the
 * secure sections are placed at the current location counter.
 */
#ifndef CONFIG_ARMV7_SECURE_BASE
#define CONFIG_ARMV7_SECURE_BASE
#endif

	.__secure_start : {
		. = ALIGN(0x1000);
		*(.__secure_start)
	}

	.secure_text CONFIG_ARMV7_SECURE_BASE :
		AT(ADDR(.__secure_start) + SIZEOF(.__secure_start))
	{
		*(._secure.text)
	}

	. = LOADADDR(.__secure_start) +
		SIZEOF(.__secure_start) +
		SIZEOF(.secure_text);

	__secure_end_lma = .;
	.__secure_end : AT(__secure_end_lma) {
		*(.__secure_end)
		LONG(0x1d1071c);	/* Must output something to reset LMA */
	}
#endif

	. = ALIGN(4);
	.rodata : { *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.rodata*))) }

	. = ALIGN(4);
	.data : {
		*(.data*)
	}

	. = ALIGN(4);

	. = .;

	. = ALIGN(4);
	/* Linker-generated list entries, see include/linker_lists.h */
	.u_boot_list : {
		KEEP(*(SORT(.u_boot_list*)));
	}

	. = ALIGN(4);

	/* EFI runtime services code/data and their relocation records */
	.__efi_runtime_start : {
		*(.__efi_runtime_start)
	}

	.efi_runtime : {
		*(efi_runtime_text)
		*(efi_runtime_data)
	}

	.__efi_runtime_stop : {
		*(.__efi_runtime_stop)
	}

	.efi_runtime_rel_start :
	{
		*(.__efi_runtime_rel_start)
	}

	.efi_runtime_rel : {
		*(.relefi_runtime_text)
		*(.relefi_runtime_data)
	}

	.efi_runtime_rel_stop :
	{
		*(.__efi_runtime_rel_stop)
	}

	. = ALIGN(4);

	.image_copy_end :
	{
		*(.__image_copy_end)
	}

	/* Relocation records used by U-Boot to relocate itself to RAM */
	.rel_dyn_start :
	{
		*(.__rel_dyn_start)
	}

	.rel.dyn : {
		*(.rel*)
	}

	.rel_dyn_end :
	{
		*(.__rel_dyn_end)
	}

	.end :
	{
		*(.__end)
	}

	_image_binary_end = .;

	/*
	 * Deprecated: this MMU section is used by pxa at present but
	 * should not be used by new boards/CPUs.
	 */
	. = ALIGN(4096);
	.mmutable : {
		*(.mmutable)
	}

/*
 * The __bss_start and __bss_end symbols are compiler-generated, see
 * arch/arm/lib/bss.c. __bss_base and __bss_limit are for the linker
 * only (overlay ordering).
 */

	.bss_start __rel_dyn_start (OVERLAY) : {
		KEEP(*(.__bss_start));
		__bss_base = .;
	}

	.bss __bss_base (OVERLAY) : {
		*(.bss*)
		. = ALIGN(4);
		__bss_limit = .;
	}

	.bss_end __bss_limit (OVERLAY) : {
		KEEP(*(.__bss_end));
	}

	.dynsym _image_binary_end : { *(.dynsym) }
	.dynbss : { *(.dynbss) }
	.dynstr : { *(.dynstr*) }
	.dynamic : { *(.dynamic*) }
	.plt : { *(.plt*) }
	.interp : { *(.interp*) }
	.gnu.hash : { *(.gnu.hash) }
	.gnu : { *(.gnu*) }
	.ARM.exidx : { *(.ARM.exidx*) }
	.gnu.linkonce.armexidx : { *(.gnu.linkonce.armexidx.*) }
}