/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (c) 2004-2008 Texas Instruments
 *
 * (C) Copyright 2002
 * Gary Jennejohn, DENX Software Engineering, <garyj@denx.de>
 */

#include <config.h>
#include <asm/psci.h>

OUTPUT_FORMAT("elf32-littlearm", "elf32-littlearm", "elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
SECTIONS
{
#ifndef CONFIG_CMDLINE
	/DISCARD/ : { *(.u_boot_list_2_cmd_*) }
#endif
#if defined(CONFIG_ARMV7_SECURE_BASE) && defined(CONFIG_ARMV7_NONSEC)
	/*
	 * If CONFIG_ARMV7_SECURE_BASE is defined, the secure code is not
	 * bundled with the rest of U-Boot and its code offsets are fixed.
	 * The secure zone only needs to be copied from the load address
	 * to CONFIG_ARMV7_SECURE_BASE, which is both its link and run
	 * address, so its relocation records can be dropped.
	 *
	 * If CONFIG_ARMV7_SECURE_BASE is undefined, the secure zone is
	 * part of the U-Boot address space and uses some absolute
	 * addresses, which must be relocated along with the rest of the
	 * U-Boot code.
	 *
	 * So the DISCARD applies only when CONFIG_ARMV7_SECURE_BASE is
	 * defined.
	 */
	/DISCARD/ : { *(.rel._secure*) }
#endif
	. = 0x00000000;

	. = ALIGN(4);
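	/*
	 * The input section order below is deliberate: the
	 * .__image_copy_start marker must be the first thing in the
	 * image so relocation copies from offset 0, followed by the
	 * exception vectors and the start.o entry code.
	 */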
	.text :
	{
		*(.__image_copy_start)
		*(.vectors)
		CPUDIR/start.o (.text*)
		*(.text*)
	}

#ifdef CONFIG_ARMV7_NONSEC

	/* Align the secure section only if we're going to use it in situ */
	.__secure_start :
#ifndef CONFIG_ARMV7_SECURE_BASE
		ALIGN(CONSTANT(COMMONPAGESIZE))
#endif
	{
		KEEP(*(.__secure_start))
	}

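	/*
	 * Without a dedicated secure memory region, fall back to linking
	 * the secure section in place (an empty CONFIG_ARMV7_SECURE_BASE)
	 * and keeping the PSCI stacks in RAM.
	 */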
#ifndef CONFIG_ARMV7_SECURE_BASE
#define CONFIG_ARMV7_SECURE_BASE
#define __ARMV7_PSCI_STACK_IN_RAM
#endif

	.secure_text CONFIG_ARMV7_SECURE_BASE :
		AT(ADDR(.__secure_start) + SIZEOF(.__secure_start))
	{
		*(._secure.text)
	}

	.secure_data : AT(LOADADDR(.secure_text) + SIZEOF(.secure_text))
	{
		*(._secure.data)
	}

#ifdef CONFIG_ARMV7_PSCI
	.secure_stack ALIGN(ADDR(.secure_data) + SIZEOF(.secure_data),
			    CONSTANT(COMMONPAGESIZE)) (NOLOAD) :
#ifdef __ARMV7_PSCI_STACK_IN_RAM
		AT(ADDR(.secure_stack))
#else
		AT(LOADADDR(.secure_data) + SIZEOF(.secure_data))
#endif
	{
		KEEP(*(.__secure_stack_start))

		/* Skip addresses for the per-CPU stacks */
		. = . + CONFIG_ARMV7_PSCI_NR_CPUS * ARM_PSCI_STACK_SIZE;
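		/*
		 * Worked example (the per-CPU size is illustrative; see
		 * ARM_PSCI_STACK_SIZE in asm/psci.h for the real value):
		 * with CONFIG_ARMV7_PSCI_NR_CPUS = 4 and 1 KiB per CPU,
		 * this advances the location counter by 4 KiB.
		 */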

		/* Align end of stack section to page boundary */
		. = ALIGN(CONSTANT(COMMONPAGESIZE));

		KEEP(*(.__secure_stack_end))

#ifdef CONFIG_ARMV7_SECURE_MAX_SIZE
		/*
		 * We are not checking (__secure_end - __secure_start) here,
		 * as these are the load addresses, and do not include the
		 * stack section. Instead, use the end of the stack section
		 * and the start of the text section.
		 */
		ASSERT((. - ADDR(.secure_text)) <= CONFIG_ARMV7_SECURE_MAX_SIZE,
		       "Error: secure section exceeds secure memory size");
#endif
	}

#ifndef __ARMV7_PSCI_STACK_IN_RAM
	/* Reset VMA but don't allocate space if we have secure SRAM */
	. = LOADADDR(.secure_stack);
#endif

#endif

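	/*
	 * The secure sections above pushed the load address (LMA) away
	 * from the link address (VMA). Emitting at least one word in a
	 * section whose AT() equals its own address pulls the LMA back
	 * in step with the VMA for everything that follows.
	 */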
	.__secure_end : AT(ADDR(.__secure_end)) {
		*(.__secure_end)
		LONG(0x1d1071c);	/* Must output something to reset LMA */
	}
#endif

	. = ALIGN(4);
	.rodata : { *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.rodata*))) }

	. = ALIGN(4);
	.data : {
		*(.data*)
	}

	. = ALIGN(4);

	. = .;

	. = ALIGN(4);
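	/*
	 * Entries declared through the linker-list macros (commands,
	 * drivers, etc.) land in .u_boot_list_2_* input sections; SORT
	 * keeps them in name order so the list accessors can locate the
	 * bounds of each sub-list.
	 */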
	.u_boot_list : {
		KEEP(*(SORT(.u_boot_list*)));
	}

	. = ALIGN(4);

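	/*
	 * Code and data that must survive ExitBootServices() sit between
	 * the __efi_runtime_start/stop markers; the matching
	 * .efi_runtime_rel* sections collect the relocation records
	 * applied when the OS calls SetVirtualAddressMap().
	 */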
	.__efi_runtime_start : {
		*(.__efi_runtime_start)
	}

	.efi_runtime : {
		*(efi_runtime_text)
		*(efi_runtime_data)
	}

	.__efi_runtime_stop : {
		*(.__efi_runtime_stop)
	}

	.efi_runtime_rel_start :
	{
		*(.__efi_runtime_rel_start)
	}

	.efi_runtime_rel : {
		*(.relefi_runtime_text)
		*(.relefi_runtime_data)
	}

	.efi_runtime_rel_stop :
	{
		*(.__efi_runtime_rel_stop)
	}

	. = ALIGN(4);

	.image_copy_end :
	{
		*(.__image_copy_end)
	}

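	/*
	 * The __rel_dyn_start/end markers bracket the ELF relocation
	 * records that U-Boot's self-relocation code walks when the
	 * image is copied to its final address in RAM.
	 */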
	.rel_dyn_start :
	{
		*(.__rel_dyn_start)
	}

	.rel.dyn : {
		*(.rel*)
	}

	.rel_dyn_end :
	{
		*(.__rel_dyn_end)
	}

	.end :
	{
		*(.__end)
	}

	_image_binary_end = .;

	/*
	 * Deprecated: this MMU section is used by pxa at present but
	 * should not be used by new boards/CPUs.
	 */
	. = ALIGN(4096);
	.mmutable : {
		*(.mmutable)
	}

/*
 * Compiler-generated __bss_start and __bss_end, see arch/arm/lib/bss.c
 * __bss_base and __bss_limit are for linker only (overlay ordering)
 */

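	/*
	 * The (OVERLAY) sections below place BSS on top of the .rel.dyn
	 * records: once relocation has run, those records are dead, so
	 * the same memory can be reused and the binary stays smaller.
	 */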
	.bss_start __rel_dyn_start (OVERLAY) : {
		KEEP(*(.__bss_start));
		__bss_base = .;
	}

	.bss __bss_base (OVERLAY) : {
		*(.bss*)
		. = ALIGN(4);
		__bss_limit = .;
	}

	.bss_end __bss_limit (OVERLAY) : {
		KEEP(*(.__bss_end));
	}

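	/*
	 * Toolchain-emitted dynamic-linking and unwind sections are
	 * collected after _image_binary_end so they take up no space in
	 * the relocated image.
	 */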
	.dynsym _image_binary_end : { *(.dynsym) }
	.dynbss : { *(.dynbss) }
	.dynstr : { *(.dynstr*) }
	.dynamic : { *(.dynamic*) }
	.plt : { *(.plt*) }
	.interp : { *(.interp*) }
	.gnu.hash : { *(.gnu.hash) }
	.gnu : { *(.gnu*) }
	.ARM.exidx : { *(.ARM.exidx*) }
	.gnu.linkonce.armexidx : { *(.gnu.linkonce.armexidx.*) }
}