/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (c) 2004-2008 Texas Instruments
 *
 * (C) Copyright 2002
 * Gary Jennejohn, DENX Software Engineering, <garyj@denx.de>
 */

#include <config.h>
#include <asm/psci.h>

OUTPUT_FORMAT("elf32-littlearm", "elf32-littlearm", "elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
SECTIONS
{
#ifndef CONFIG_CMDLINE
	/DISCARD/ : { *(.u_boot_list_2_cmd_*) }
#endif
#if defined(CONFIG_ARMV7_SECURE_BASE) && defined(CONFIG_ARMV7_NONSEC)
	/*
	 * If CONFIG_ARMV7_SECURE_BASE is defined, the secure code is not
	 * bundled with the relocatable U-Boot image and its code offsets
	 * are fixed. The secure zone only needs to be copied from its load
	 * address to CONFIG_ARMV7_SECURE_BASE, which is the link and run
	 * address of the secure code.
	 *
	 * If CONFIG_ARMV7_SECURE_BASE is undefined, the secure zone is
	 * included in the U-Boot address space, and the secure code uses
	 * some absolute addresses. Those absolute addresses need to be
	 * relocated along with the accompanying U-Boot code.
	 *
	 * So the DISCARD applies only when CONFIG_ARMV7_SECURE_BASE is set.
	 */
	/DISCARD/ : { *(.rel._secure*) }
#endif
	. = 0x00000000;

	. = ALIGN(4);
	.text :
	{
		*(.__image_copy_start)
		*(.vectors)
		CPUDIR/start.o (.text*)
	}

	/* This needs to come before *(.text*) */
	.__efi_runtime_start : {
		*(.__efi_runtime_start)
	}

	.efi_runtime : {
		*(.text.efi_runtime*)
		*(.rodata.efi_runtime*)
		*(.data.efi_runtime*)
	}

	.__efi_runtime_stop : {
		*(.__efi_runtime_stop)
	}

	.text_rest :
	{
		*(.text*)
	}

#ifdef CONFIG_ARMV7_NONSEC

	/* Align the secure section only if we're going to use it in situ */
	.__secure_start :
#ifndef CONFIG_ARMV7_SECURE_BASE
		ALIGN(CONSTANT(COMMONPAGESIZE))
#endif
	{
		KEEP(*(.__secure_start))
	}

#ifndef CONFIG_ARMV7_SECURE_BASE
#define CONFIG_ARMV7_SECURE_BASE
#define __ARMV7_PSCI_STACK_IN_RAM
#endif

	.secure_text CONFIG_ARMV7_SECURE_BASE :
		AT(ADDR(.__secure_start) + SIZEOF(.__secure_start))
	{
		*(._secure.text)
	}

	.secure_data : AT(LOADADDR(.secure_text) + SIZEOF(.secure_text))
	{
		*(._secure.data)
	}

#ifdef CONFIG_ARMV7_PSCI
	.secure_stack ALIGN(ADDR(.secure_data) + SIZEOF(.secure_data),
			    CONSTANT(COMMONPAGESIZE)) (NOLOAD) :
#ifdef __ARMV7_PSCI_STACK_IN_RAM
		AT(ADDR(.secure_stack))
#else
		AT(LOADADDR(.secure_data) + SIZEOF(.secure_data))
#endif
	{
		KEEP(*(.__secure_stack_start))

		/* Skip addresses reserved for the stacks */
		. = . + CONFIG_ARMV7_PSCI_NR_CPUS * ARM_PSCI_STACK_SIZE;

		/* Align the end of the stack section to a page boundary */
		. = ALIGN(CONSTANT(COMMONPAGESIZE));

		KEEP(*(.__secure_stack_end))

#ifdef CONFIG_ARMV7_SECURE_MAX_SIZE
		/*
		 * We are not checking (__secure_end - __secure_start) here,
		 * as these are load addresses and do not include the stack
		 * section. Instead, use the end of the stack section and
		 * the start of the text section.
		 */
		ASSERT((. - ADDR(.secure_text)) <= CONFIG_ARMV7_SECURE_MAX_SIZE,
		       "Error: secure section exceeds secure memory size");
#endif
	}

#ifndef __ARMV7_PSCI_STACK_IN_RAM
	/* Reset VMA but don't allocate space if we have secure SRAM */
	. = LOADADDR(.secure_stack);
#endif

#endif

	.__secure_end : AT(ADDR(.__secure_end)) {
		*(.__secure_end)
		LONG(0x1d1071c);	/* Must output something to reset LMA */
	}
#endif
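
	/*
	 * Illustrative note on the LMA/VMA split above: with
	 * CONFIG_ARMV7_SECURE_BASE set, the secure text and data are
	 * stored in the image at LOADADDR(.secure_text) but linked to run
	 * at CONFIG_ARMV7_SECURE_BASE. Before any secure entry, the
	 * start-up code therefore has to perform the equivalent of
	 *
	 *	memcpy((void *)CONFIG_ARMV7_SECURE_BASE,
	 *	       secure_load_addr,	// i.e. LOADADDR(.secure_text)
	 *	       secure_size);		// text + data, not the stack
	 *
	 * This is a sketch only: secure_load_addr and secure_size are
	 * illustrative names, and the actual copy lives in the arch
	 * start-up code, not in this script.
	 */
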
	. = ALIGN(4);
	.rodata : { *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.rodata*))) }

	. = ALIGN(4);
	.data : {
		*(.data*)
	}

	. = ALIGN(4);

	. = .;

	. = ALIGN(4);
	.u_boot_list : {
		KEEP(*(SORT(.u_boot_list*)));
	}

	. = ALIGN(4);

	.efi_runtime_rel_start :
	{
		*(.__efi_runtime_rel_start)
	}

	.efi_runtime_rel : {
		*(.rel*.efi_runtime)
		*(.rel*.efi_runtime.*)
	}

	.efi_runtime_rel_stop :
	{
		*(.__efi_runtime_rel_stop)
	}

	. = ALIGN(4);

	.image_copy_end :
	{
		*(.__image_copy_end)
	}

	.rel_dyn_start :
	{
		*(.__rel_dyn_start)
	}

	.rel.dyn : {
		*(.rel*)
	}

	.rel_dyn_end :
	{
		*(.__rel_dyn_end)
	}

	.end :
	{
		*(.__end)
	}

	_image_binary_end = .;

	/*
	 * Deprecated: this MMU section is used by pxa at present but
	 * should not be used by new boards/CPUs.
	 */
	. = ALIGN(4096);
	.mmutable : {
		*(.mmutable)
	}

/*
 * Compiler-generated __bss_start and __bss_end; see arch/arm/lib/bss.c.
 * __bss_base and __bss_limit are for the linker only (overlay ordering).
 * .bss is overlaid onto .rel.dyn: the relocation records are only needed
 * while relocating, and .bss is cleared afterwards, so the two can share
 * the same memory.
 */

	.bss_start __rel_dyn_start (OVERLAY) : {
		KEEP(*(.__bss_start));
		__bss_base = .;
	}

	.bss __bss_base (OVERLAY) : {
		*(.bss*)
		. = ALIGN(4);
		__bss_limit = .;
	}

	.bss_end __bss_limit (OVERLAY) : {
		KEEP(*(.__bss_end));
	}

	.dynsym _image_binary_end : { *(.dynsym) }
	.dynbss : { *(.dynbss) }
	.dynstr : { *(.dynstr*) }
	.dynamic : { *(.dynamic*) }
	.plt : { *(.plt*) }
	.interp : { *(.interp*) }
	.gnu.hash : { *(.gnu.hash) }
	.gnu : { *(.gnu*) }
	.ARM.exidx : { *(.ARM.exidx*) }
	.gnu.linkonce.armexidx : { *(.gnu.linkonce.armexidx.*) }
}
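
/*
 * Illustrative only: C code consumes the marker symbols defined above
 * roughly as below. The function is a hypothetical sketch (U-Boot's real
 * BSS clearing happens in the relocation path, typically in assembly);
 * only the symbol names come from this script.
 *
 *	extern char __bss_start[], __bss_end[];
 *
 *	static void clear_bss(void)
 *	{
 *		char *p;
 *
 *		for (p = __bss_start; p < __bss_end; p++)
 *			*p = 0;
 *	}
 *
 * Because .bss overlays .rel.dyn, a clear like this must only run after
 * the relocation entries between __rel_dyn_start and __rel_dyn_end have
 * been processed.
 */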