/*
 * Copyright (c) 2004-2008 Texas Instruments
 *
 * (C) Copyright 2002
 * Gary Jennejohn, DENX Software Engineering, <garyj@denx.de>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <config.h>

OUTPUT_FORMAT("elf32-littlearm", "elf32-littlearm", "elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
SECTIONS
{
#ifndef CONFIG_CMDLINE
	/DISCARD/ : { *(.u_boot_list_2_cmd_*) }
#endif
#if defined(CONFIG_ARMV7_SECURE_BASE) && defined(CONFIG_ARMV7_NONSEC)
	/*
	 * If CONFIG_ARMV7_SECURE_BASE is defined, the secure code is not
	 * bundled with u-boot and its code offsets are fixed. The secure
	 * zone only needs to be copied from the load address to
	 * CONFIG_ARMV7_SECURE_BASE, which is the link and run address
	 * of the secure code.
	 *
	 * If CONFIG_ARMV7_SECURE_BASE is undefined, the secure zone is
	 * included in the u-boot address space, and some absolute addresses
	 * are used in the secure code. Those absolute addresses must be
	 * relocated along with the accompanying u-boot code.
	 *
	 * So the DISCARD applies only when CONFIG_ARMV7_SECURE_BASE is set.
	 */
	/DISCARD/ : { *(.rel._secure*) }
#endif
	. = 0x00000000;

	. = ALIGN(4);
	.text :
	{
		*(.__image_copy_start)
		*(.vectors)
		CPUDIR/start.o (.text*)
		*(.text*)
	}

#ifdef CONFIG_ARMV7_NONSEC

#ifndef CONFIG_ARMV7_SECURE_BASE
#define CONFIG_ARMV7_SECURE_BASE
#endif

	.__secure_start : {
		. = ALIGN(0x1000);
		*(.__secure_start)
	}

	.secure_text CONFIG_ARMV7_SECURE_BASE :
		AT(ADDR(.__secure_start) + SIZEOF(.__secure_start))
	{
		*(._secure.text)
	}

	. = LOADADDR(.__secure_start) +
		SIZEOF(.__secure_start) +
		SIZEOF(.secure_text);

	__secure_end_lma = .;
	.__secure_end : AT(__secure_end_lma) {
		*(.__secure_end)
		LONG(0x1d1071c); /* Must output something to reset LMA */
	}
#endif

	. = ALIGN(4);
	.rodata : { *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.rodata*))) }

	. = ALIGN(4);
	.data : {
		*(.data*)
	}

	. = ALIGN(4);

	. = .;

	. = ALIGN(4);
	.u_boot_list : {
		KEEP(*(SORT(.u_boot_list*)));
	}

	. = ALIGN(4);

	.__efi_runtime_start : {
		*(.__efi_runtime_start)
	}

	.efi_runtime : {
		*(efi_runtime_text)
		*(efi_runtime_data)
	}

	.__efi_runtime_stop : {
		*(.__efi_runtime_stop)
	}

	.efi_runtime_rel_start :
	{
		*(.__efi_runtime_rel_start)
	}

	.efi_runtime_rel : {
		*(.relefi_runtime_text)
		*(.relefi_runtime_data)
	}

	.efi_runtime_rel_stop :
	{
		*(.__efi_runtime_rel_stop)
	}

	. = ALIGN(4);

	.image_copy_end :
	{
		*(.__image_copy_end)
	}
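	/*
	 * Relocation records for U-Boot's self-relocation, bracketed by
	 * __rel_dyn_start/__rel_dyn_end and processed after the image has
	 * been copied to RAM (see arch/arm/lib/relocate.S).
	 */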
	.rel_dyn_start :
	{
		*(.__rel_dyn_start)
	}

	.rel.dyn : {
		*(.rel*)
	}

	.rel_dyn_end :
	{
		*(.__rel_dyn_end)
	}

	.end :
	{
		*(.__end)
	}

	_image_binary_end = .;

	/*
	 * Deprecated: this MMU section is used by pxa at present but
	 * should not be used by new boards/CPUs.
	 */
	. = ALIGN(4096);
	.mmutable : {
		*(.mmutable)
	}

/*
 * Compiler-generated __bss_start and __bss_end, see arch/arm/lib/bss.c
 * __bss_base and __bss_limit are for linker only (overlay ordering)
 */

	.bss_start __rel_dyn_start (OVERLAY) : {
		KEEP(*(.__bss_start));
		__bss_base = .;
	}

	.bss __bss_base (OVERLAY) : {
		*(.bss*)
		. = ALIGN(4);
		__bss_limit = .;
	}

	.bss_end __bss_limit (OVERLAY) : {
		KEEP(*(.__bss_end));
	}

	.dynsym _image_binary_end : { *(.dynsym) }
	.dynbss : { *(.dynbss) }
	.dynstr : { *(.dynstr*) }
	.dynamic : { *(.dynamic*) }
	.plt : { *(.plt*) }
	.interp : { *(.interp*) }
	.gnu.hash : { *(.gnu.hash) }
	.gnu : { *(.gnu*) }
	.ARM.exidx : { *(.ARM.exidx*) }
	.gnu.linkonce.armexidx : { *(.gnu.linkonce.armexidx.*) }
}