xref: /openbmc/u-boot/arch/arm/cpu/u-boot.lds (revision c6e18144)
1/*
2 * Copyright (c) 2004-2008 Texas Instruments
3 *
4 * (C) Copyright 2002
5 * Gary Jennejohn, DENX Software Engineering, <garyj@denx.de>
6 *
7 * SPDX-License-Identifier:	GPL-2.0+
8 */
9
10#include <config.h>
11
/* Produce little-endian 32-bit ARM ELF for default/big/little targets,
 * with execution starting at _start. */
OUTPUT_FORMAT("elf32-littlearm", "elf32-littlearm", "elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
SECTIONS
{
	/*
	 * Discard the relocation entries for secure text.
	 * The secure code is bundled with u-boot image, so there will
	 * be relocations entries for the secure code, since we use
	 * "-mword-relocations" to compile and "-pie" to link into the
	 * final image. We do not need the relocation entries for secure
	 * code, because secure code will not be relocated, it only needs
	 * to be copied from loading address to CONFIG_ARMV7_SECURE_BASE,
	 * which is the linking and running address for secure code.
	 * If keep the relocation entries in .rel.dyn section,
	 * "relocation offset + linking address" may locates into an
	 * address that is reserved by SoC, then will trigger data abort.
	 *
	 * The reason that move .rel._secure at the beginning, is to
	 * avoid hole in the final image.
	 */
	/DISCARD/ : { *(.rel._secure*) }

	/* Link at address 0; the image is linked -pie (see comment above)
	 * and fixed up at runtime via the .rel.dyn entries kept below. */
	. = 0x00000000;

	. = ALIGN(4);
	/*
	 * Code: the copy-start marker and exception vectors come first,
	 * then the CPU startup code (CPUDIR/start.o) ahead of all other
	 * .text input sections, so it sits at the front of the image.
	 */
	.text :
	{
		*(.__image_copy_start)
		*(.vectors)
		CPUDIR/start.o (.text*)
		*(.text*)
	}

#ifdef CONFIG_ARMV7_NONSEC

/* If no dedicated secure base is configured, define it empty so the
 * .secure_text VMA expression below degenerates to the current
 * location counter (secure code is linked in place). */
#ifndef CONFIG_ARMV7_SECURE_BASE
#define CONFIG_ARMV7_SECURE_BASE
#endif

	/* Page-aligned marker recording where the secure payload is
	 * stored (loaded) within the U-Boot image. */
	.__secure_start : {
		. = ALIGN(0x1000);
		*(.__secure_start)
	}

	/*
	 * Secure code runs at CONFIG_ARMV7_SECURE_BASE (its VMA) but is
	 * stored in the image right after .__secure_start (its LMA, set
	 * by AT()); it is copied to the run address before use, per the
	 * /DISCARD/ comment above.
	 */
	.secure_text CONFIG_ARMV7_SECURE_BASE :
		AT(ADDR(.__secure_start) + SIZEOF(.__secure_start))
	{
		*(._secure.text)
	}

	/* Moving the VMA to CONFIG_ARMV7_SECURE_BASE disturbed the
	 * location counter; restore it to the end of the secure
	 * payload's load region so the normal image continues there. */
	. = LOADADDR(.__secure_start) +
		SIZEOF(.__secure_start) +
		SIZEOF(.secure_text);

	__secure_end_lma = .;
	.__secure_end : AT(__secure_end_lma) {
		*(.__secure_end)
		LONG(0x1d1071c);	/* Must output something to reset LMA */
	}
#endif

	. = ALIGN(4);
	/* Read-only data, sorted by alignment then name for a compact,
	 * deterministic layout. */
	.rodata : { *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.rodata*))) }

	. = ALIGN(4);
	.data : {
		*(.data*)
	}

	. = ALIGN(4);

	. = .;

	. = ALIGN(4);
	/* Linker-generated list entries (u_boot_list); KEEP() stops
	 * --gc-sections from discarding entries that are never referenced
	 * directly by code. */
	.u_boot_list : {
		KEEP(*(SORT(.u_boot_list*)));
	}

	. = ALIGN(4);

	/* End marker of the region copied when U-Boot relocates itself
	 * (pairs with .__image_copy_start in .text). */
	.image_copy_end :
	{
		*(.__image_copy_end)
	}

	/*
	 * Runtime relocation entries, bracketed by start/end marker
	 * sections so the relocation code can walk them.
	 */
	.rel_dyn_start :
	{
		*(.__rel_dyn_start)
	}

	.rel.dyn : {
		*(.rel*)
	}

	.rel_dyn_end :
	{
		*(.__rel_dyn_end)
	}

	.end :
	{
		*(.__end)
	}

	_image_binary_end = .;

	/*
	 * Deprecated: this MMU section is used by pxa at present but
	 * should not be used by new boards/CPUs.
	 */
	. = ALIGN(4096);
	.mmutable : {
		*(.mmutable)
	}

/*
 * Compiler-generated __bss_start and __bss_end, see arch/arm/lib/bss.c
 * __bss_base and __bss_limit are for linker only (overlay ordering)
 */

	/* BSS is placed at __rel_dyn_start, overlaying the relocation
	 * entries — presumably safe because the entries are consumed
	 * during relocation before BSS is cleared/used, so the same
	 * memory is reused and the image stays smaller. */
	.bss_start __rel_dyn_start (OVERLAY) : {
		KEEP(*(.__bss_start));
		__bss_base = .;
	}

	.bss __bss_base (OVERLAY) : {
		*(.bss*)
		 . = ALIGN(4);
		 __bss_limit = .;
	}

	.bss_end __bss_limit (OVERLAY) : {
		KEEP(*(.__bss_end));
	}

	/* Dynamic-linking metadata emitted by -pie; anchored past
	 * _image_binary_end so it is outside the copied image region. */
	.dynsym _image_binary_end : { *(.dynsym) }
	.dynbss : { *(.dynbss) }
	.dynstr : { *(.dynstr*) }
	.dynamic : { *(.dynamic*) }
	.plt : { *(.plt*) }
	.interp : { *(.interp*) }
	.gnu.hash : { *(.gnu.hash) }
	.gnu : { *(.gnu*) }
	.ARM.exidx : { *(.ARM.exidx*) }
	.gnu.linkonce.armexidx : { *(.gnu.linkonce.armexidx.*) }
}
158