/*
 * Copyright (c) 2004-2008 Texas Instruments
 *
 * (C) Copyright 2002
 * Gary Jennejohn, DENX Software Engineering, <garyj@denx.de>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <config.h>
#include <asm/psci.h>

OUTPUT_FORMAT("elf32-littlearm", "elf32-littlearm", "elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
SECTIONS
{
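	/*
	 * With the command line disabled, discard the linker-list entries
	 * generated for commands so they take up no space in the image.
	 */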
#ifndef CONFIG_CMDLINE
	/DISCARD/ : { *(.u_boot_list_2_cmd_*) }
#endif
#if defined(CONFIG_ARMV7_SECURE_BASE) && defined(CONFIG_ARMV7_NONSEC)
	/*
	 * If CONFIG_ARMV7_SECURE_BASE is defined, the secure code is not
	 * bundled with u-boot and its code offsets are fixed. The secure
	 * zone only needs to be copied from the load address to
	 * CONFIG_ARMV7_SECURE_BASE, which is the link and run address
	 * of the secure code.
	 *
	 * If CONFIG_ARMV7_SECURE_BASE is undefined, the secure zone is
	 * included in the u-boot address space, and some absolute addresses
	 * are used in the secure code. These absolute addresses need to be
	 * relocated along with the accompanying u-boot code.
	 *
	 * So the DISCARD applies only when CONFIG_ARMV7_SECURE_BASE is set.
	 */
	/DISCARD/ : { *(.rel._secure*) }
#endif
	. = 0x00000000;

	. = ALIGN(4);
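	/*
	 * The image begins with the copy marker and the exception vectors;
	 * CPUDIR/start.o is named explicitly so its code follows
	 * immediately, ahead of all other .text.
	 */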
	.text :
	{
		*(.__image_copy_start)
		*(.vectors)
		CPUDIR/start.o (.text*)
		*(.text*)
	}

#ifdef CONFIG_ARMV7_NONSEC

	/* Align the secure section only if we're going to use it in situ */
	.__secure_start :
#ifndef CONFIG_ARMV7_SECURE_BASE
		ALIGN(CONSTANT(COMMONPAGESIZE))
#endif
	{
		KEEP(*(.__secure_start))
	}

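	/*
	 * No dedicated secure memory region: link the secure section at its
	 * current in-image address and keep the PSCI stacks in normal RAM.
	 */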
#ifndef CONFIG_ARMV7_SECURE_BASE
#define CONFIG_ARMV7_SECURE_BASE
#define __ARMV7_PSCI_STACK_IN_RAM
#endif

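	/*
	 * AT() fixes the load address (LMA): the secure code is stored
	 * right behind .__secure_start in the image, while it is linked
	 * to run (VMA) at CONFIG_ARMV7_SECURE_BASE.
	 */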
	.secure_text CONFIG_ARMV7_SECURE_BASE :
		AT(ADDR(.__secure_start) + SIZEOF(.__secure_start))
	{
		*(._secure.text)
	}

	.secure_data : AT(LOADADDR(.secure_text) + SIZEOF(.secure_text))
	{
		*(._secure.data)
	}

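	/*
	 * The stack section is NOLOAD: it reserves address space but
	 * contributes no bytes to the image.
	 */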
	.secure_stack ALIGN(ADDR(.secure_data) + SIZEOF(.secure_data),
			    CONSTANT(COMMONPAGESIZE)) (NOLOAD) :
#ifdef __ARMV7_PSCI_STACK_IN_RAM
		AT(ADDR(.secure_stack))
#else
		AT(LOADADDR(.secure_data) + SIZEOF(.secure_data))
#endif
	{
		KEEP(*(.__secure_stack_start))
#ifdef CONFIG_ARMV7_PSCI
		/* Reserve addresses for the stacks */
		. = . + CONFIG_ARMV7_PSCI_NR_CPUS * ARM_PSCI_STACK_SIZE;
#endif
		/* Align end of stack section to page boundary */
		. = ALIGN(CONSTANT(COMMONPAGESIZE));

		KEEP(*(.__secure_stack_end))

#ifdef CONFIG_ARMV7_SECURE_MAX_SIZE
		/*
		 * We are not checking (__secure_end - __secure_start) here,
		 * as those are load addresses and do not include the
		 * stack section. Instead, use the end of the stack section
		 * and the start of the text section.
		 */
		ASSERT((. - ADDR(.secure_text)) <= CONFIG_ARMV7_SECURE_MAX_SIZE,
		       "Error: secure section exceeds secure memory size");
#endif
	}

#ifndef __ARMV7_PSCI_STACK_IN_RAM
	/* Reset VMA but don't allocate space if we have secure SRAM */
	. = LOADADDR(.secure_stack);
#endif

	.__secure_end : AT(ADDR(.__secure_end)) {
		*(.__secure_end)
		LONG(0x1d1071c);	/* Must output something to reset LMA */
	}
#endif

	. = ALIGN(4);
	.rodata : { *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.rodata*))) }

	. = ALIGN(4);
	.data : {
		*(.data*)
	}

	. = ALIGN(4);

	. = .;

	. = ALIGN(4);
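	/*
	 * Linker-generated lists (commands, drivers, ...): KEEP prevents
	 * the entries from being garbage-collected, and SORT keeps each
	 * list's entries contiguous and ordered by section name.
	 */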
	.u_boot_list : {
		KEEP(*(SORT(.u_boot_list*)));
	}

	. = ALIGN(4);

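	/*
	 * EFI runtime code and data are bracketed by start/stop marker
	 * sections so they can be located and preserved for runtime
	 * services.
	 */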
	.__efi_runtime_start : {
		*(.__efi_runtime_start)
	}

	.efi_runtime : {
		*(efi_runtime_text)
		*(efi_runtime_data)
	}

	.__efi_runtime_stop : {
		*(.__efi_runtime_stop)
	}

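	/*
	 * Relocation records for the EFI runtime region, bracketed the
	 * same way so they can be processed on their own.
	 */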
	.efi_runtime_rel_start :
	{
		*(.__efi_runtime_rel_start)
	}

	.efi_runtime_rel : {
		*(.relefi_runtime_text)
		*(.relefi_runtime_data)
	}

	.efi_runtime_rel_stop :
	{
		*(.__efi_runtime_rel_stop)
	}

	. = ALIGN(4);

	.image_copy_end :
	{
		*(.__image_copy_end)
	}

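	/*
	 * The dynamic relocation entries sit after __image_copy_end: they
	 * are read while U-Boot relocates itself, but are not copied along
	 * with the image.
	 */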
	.rel_dyn_start :
	{
		*(.__rel_dyn_start)
	}

	.rel.dyn : {
		*(.rel*)
	}

	.rel_dyn_end :
	{
		*(.__rel_dyn_end)
	}

	.end :
	{
		*(.__end)
	}

	_image_binary_end = .;

	/*
	 * Deprecated: this MMU section is used by pxa at present but
	 * should not be used by new boards/CPUs.
	 */
	. = ALIGN(4096);
	.mmutable : {
		*(.mmutable)
	}

/*
 * Compiler-generated __bss_start and __bss_end, see arch/arm/lib/bss.c
 * __bss_base and __bss_limit are for linker only (overlay ordering)
 */

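	/*
	 * The BSS sections overlay .rel.dyn: once relocation has run, the
	 * relocation entries are no longer needed, so BSS can reuse that
	 * address range without growing the image.
	 */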
	.bss_start __rel_dyn_start (OVERLAY) : {
		KEEP(*(.__bss_start));
		__bss_base = .;
	}

	.bss __bss_base (OVERLAY) : {
		*(.bss*)
		. = ALIGN(4);
		__bss_limit = .;
	}

	.bss_end __bss_limit (OVERLAY) : {
		KEEP(*(.__bss_end));
	}

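	/*
	 * The sections below are emitted by the toolchain but are not
	 * needed at run time; they are collected at _image_binary_end so
	 * they stay out of the image that is copied during relocation.
	 */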
	.dynsym _image_binary_end : { *(.dynsym) }
	.dynbss : { *(.dynbss) }
	.dynstr : { *(.dynstr*) }
	.dynamic : { *(.dynamic*) }
	.plt : { *(.plt*) }
	.interp : { *(.interp*) }
	.gnu.hash : { *(.gnu.hash) }
	.gnu : { *(.gnu*) }
	.ARM.exidx : { *(.ARM.exidx*) }
	.gnu.linkonce.armexidx : { *(.gnu.linkonce.armexidx.*) }
}