/*
 * Copyright (c) 2004-2008 Texas Instruments
 *
 * (C) Copyright 2002
 * Gary Jennejohn, DENX Software Engineering, <garyj@denx.de>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <config.h>
#include <asm/psci.h>

OUTPUT_FORMAT("elf32-littlearm", "elf32-littlearm", "elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
SECTIONS
{
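	/*
	 * When the command line is disabled, the command entries added to
	 * the linker list (the .u_boot_list_2_cmd_* input sections) are
	 * unused, so drop them entirely.
	 */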
#ifndef CONFIG_CMDLINE
	/DISCARD/ : { *(.u_boot_list_2_cmd_*) }
#endif
#if defined(CONFIG_ARMV7_SECURE_BASE) && defined(CONFIG_ARMV7_NONSEC)
	/*
	 * If CONFIG_ARMV7_SECURE_BASE is defined, the secure code is not
	 * bundled with U-Boot and its code offsets are fixed. The secure
	 * zone only needs to be copied from its load address to
	 * CONFIG_ARMV7_SECURE_BASE, which is the link and run address
	 * of the secure code.
	 *
	 * If CONFIG_ARMV7_SECURE_BASE is undefined, the secure zone is
	 * included in the U-Boot address space, and some absolute addresses
	 * are used in the secure code. These absolute addresses need to be
	 * relocated along with the rest of the U-Boot code.
	 *
	 * So the DISCARD applies only when CONFIG_ARMV7_SECURE_BASE is set.
	 */
	/DISCARD/ : { *(.rel._secure*) }
#endif
	. = 0x00000000;

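	/*
	 * __image_copy_start marks the start of the region U-Boot copies
	 * when it relocates itself. The exception vectors and the start-up
	 * code from CPUDIR/start.o are placed first so the entry point
	 * lands at the top of the image.
	 */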
	. = ALIGN(4);
	.text :
	{
		*(.__image_copy_start)
		*(.vectors)
		CPUDIR/start.o (.text*)
		*(.text*)
	}

#ifdef CONFIG_ARMV7_NONSEC

	/* Align the secure section only if we're going to use it in situ */
	.__secure_start :
#ifndef CONFIG_ARMV7_SECURE_BASE
		ALIGN(CONSTANT(COMMONPAGESIZE))
#endif
	{
		KEEP(*(.__secure_start))
	}

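	/*
	 * If no dedicated secure memory is configured, define the base as
	 * empty so the secure sections below simply follow .__secure_start
	 * inside the U-Boot image, and note that the PSCI stack must then
	 * live in RAM.
	 */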
#ifndef CONFIG_ARMV7_SECURE_BASE
#define CONFIG_ARMV7_SECURE_BASE
#define __ARMV7_PSCI_STACK_IN_RAM
#endif

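	/*
	 * The secure text and data are linked (VMA) at
	 * CONFIG_ARMV7_SECURE_BASE but loaded (LMA) right after
	 * .__secure_start in the image, so they can be copied to the
	 * secure memory at run time.
	 */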
	.secure_text CONFIG_ARMV7_SECURE_BASE :
		AT(ADDR(.__secure_start) + SIZEOF(.__secure_start))
	{
		*(._secure.text)
	}

	.secure_data : AT(LOADADDR(.secure_text) + SIZEOF(.secure_text))
	{
		*(._secure.data)
	}

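	/*
	 * With CONFIG_ARMV7_PSCI, reserve one stack per CPU. The section is
	 * (NOLOAD): only the address range is reserved, nothing is emitted
	 * into the image.
	 */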
#ifdef CONFIG_ARMV7_PSCI
	.secure_stack ALIGN(ADDR(.secure_data) + SIZEOF(.secure_data),
			    CONSTANT(COMMONPAGESIZE)) (NOLOAD) :
#ifdef __ARMV7_PSCI_STACK_IN_RAM
		AT(ADDR(.secure_stack))
#else
		AT(LOADADDR(.secure_data) + SIZEOF(.secure_data))
#endif
	{
		KEEP(*(.__secure_stack_start))

		/* Skip addresses reserved for the stacks */
		. = . + CONFIG_ARMV7_PSCI_NR_CPUS * ARM_PSCI_STACK_SIZE;

		/* Align end of stack section to page boundary */
		. = ALIGN(CONSTANT(COMMONPAGESIZE));

		KEEP(*(.__secure_stack_end))

#ifdef CONFIG_ARMV7_SECURE_MAX_SIZE
		/*
		 * We are not checking (__secure_end - __secure_start) here,
		 * as these are the load addresses, and do not include the
		 * stack section. Instead, use the end of the stack section
		 * and the start of the text section.
		 */
		ASSERT((. - ADDR(.secure_text)) <= CONFIG_ARMV7_SECURE_MAX_SIZE,
		       "Error: secure section exceeds secure memory size");
#endif
	}

#ifndef __ARMV7_PSCI_STACK_IN_RAM
	/* Reset VMA but don't allocate space if we have secure SRAM */
	. = LOADADDR(.secure_stack);
#endif

#endif

	.__secure_end : AT(ADDR(.__secure_end)) {
		*(.__secure_end)
		LONG(0x1d1071c);	/* Must output something to reset LMA */
	}
#endif

	. = ALIGN(4);
	.rodata : { *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.rodata*))) }

	. = ALIGN(4);
	.data : {
		*(.data*)
	}

	. = ALIGN(4);

	. = .;

	. = ALIGN(4);
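	/*
	 * Linker-generated lists (commands, drivers, etc.) declared through
	 * U-Boot's linker-list macros. KEEP() prevents the entries from
	 * being dropped by --gc-sections.
	 */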
	.u_boot_list : {
		KEEP(*(SORT(.u_boot_list*)));
	}

	. = ALIGN(4);

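	/*
	 * Code and data that must stay available to EFI runtime services,
	 * together with their relocation records, bracketed by the
	 * __efi_runtime_start/stop and __efi_runtime_rel_start/stop markers.
	 */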
	.__efi_runtime_start : {
		*(.__efi_runtime_start)
	}

	.efi_runtime : {
		*(efi_runtime_text)
		*(efi_runtime_data)
	}

	.__efi_runtime_stop : {
		*(.__efi_runtime_stop)
	}

	.efi_runtime_rel_start :
	{
		*(.__efi_runtime_rel_start)
	}

	.efi_runtime_rel : {
		*(.relefi_runtime_text)
		*(.relefi_runtime_data)
	}

	.efi_runtime_rel_stop :
	{
		*(.__efi_runtime_rel_stop)
	}

	. = ALIGN(4);

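	/*
	 * __image_copy_end marks the end of the region copied during
	 * relocation. The .rel.dyn entries that follow, bracketed by
	 * __rel_dyn_start/__rel_dyn_end, are walked by the relocation code
	 * to fix up absolute addresses at the new location.
	 */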
	.image_copy_end :
	{
		*(.__image_copy_end)
	}

	.rel_dyn_start :
	{
		*(.__rel_dyn_start)
	}

	.rel.dyn : {
		*(.rel*)
	}

	.rel_dyn_end :
	{
		*(.__rel_dyn_end)
	}

	.end :
	{
		*(.__end)
	}

	_image_binary_end = .;

	/*
	 * Deprecated: this MMU section is used by pxa at present but
	 * should not be used by new boards/CPUs.
	 */
	. = ALIGN(4096);
	.mmutable : {
		*(.mmutable)
	}

/*
 * Compiler-generated __bss_start and __bss_end, see arch/arm/lib/bss.c
 * __bss_base and __bss_limit are for linker only (overlay ordering)
 */

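	/*
	 * BSS is linked on top of .rel.dyn, starting at __rel_dyn_start.
	 * The OVERLAY type keeps these sections from taking up space in the
	 * image; the memory is cleared at run time after relocation.
	 */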
	.bss_start __rel_dyn_start (OVERLAY) : {
		KEEP(*(.__bss_start));
		__bss_base = .;
	}

	.bss __bss_base (OVERLAY) : {
		*(.bss*)
		. = ALIGN(4);
		__bss_limit = .;
	}

	.bss_end __bss_limit (OVERLAY) : {
		KEEP(*(.__bss_end));
	}

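	/*
	 * The dynamic-linking sections below are emitted by the toolchain
	 * but are not used at run time. Placing them after _image_binary_end
	 * keeps them out of the relocated image.
	 */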
	.dynsym _image_binary_end : { *(.dynsym) }
	.dynbss : { *(.dynbss) }
	.dynstr : { *(.dynstr*) }
	.dynamic : { *(.dynamic*) }
	.plt : { *(.plt*) }
	.interp : { *(.interp*) }
	.gnu.hash : { *(.gnu.hash) }
	.gnu : { *(.gnu*) }
	.ARM.exidx : { *(.ARM.exidx*) }
	.gnu.linkonce.armexidx : { *(.gnu.linkonce.armexidx.*) }
}