/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#ifdef CONFIG_XIP_KERNEL
#include "vmlinux-xip.lds.S"
#else

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/pgtable.h>
16
17#define PROC_INFO							\
18	. = ALIGN(4);							\
19	VMLINUX_SYMBOL(__proc_info_begin) = .;				\
20	*(.proc.info.init)						\
21	VMLINUX_SYMBOL(__proc_info_end) = .;
22
23#define HYPERVISOR_TEXT							\
24	VMLINUX_SYMBOL(__hyp_text_start) = .;				\
25	*(.hyp.text)							\
26	VMLINUX_SYMBOL(__hyp_text_end) = .;
27
28#define IDMAP_TEXT							\
29	ALIGN_FUNCTION();						\
30	VMLINUX_SYMBOL(__idmap_text_start) = .;				\
31	*(.idmap.text)							\
32	VMLINUX_SYMBOL(__idmap_text_end) = .;				\
33	. = ALIGN(PAGE_SIZE);						\
34	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;			\
35	*(.hyp.idmap.text)						\
36	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
37
38#ifdef CONFIG_HOTPLUG_CPU
39#define ARM_CPU_DISCARD(x)
40#define ARM_CPU_KEEP(x)		x
41#else
42#define ARM_CPU_DISCARD(x)	x
43#define ARM_CPU_KEEP(x)
44#endif
45
46#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
47	defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL)
48#define ARM_EXIT_KEEP(x)	x
49#define ARM_EXIT_DISCARD(x)
50#else
51#define ARM_EXIT_KEEP(x)
52#define ARM_EXIT_DISCARD(x)	x
53#endif
54
55OUTPUT_ARCH(arm)
56ENTRY(stext)
57
58#ifndef __ARMEB__
59jiffies = jiffies_64;
60#else
61jiffies = jiffies_64 + 4;
62#endif
63
64SECTIONS
65{
66	/*
67	 * XXX: The linker does not define how output sections are
68	 * assigned to input sections when there are multiple statements
69	 * matching the same input section name.  There is no documented
70	 * order of matching.
71	 *
72	 * unwind exit sections must be discarded before the rest of the
73	 * unwind sections get included.
74	 */
75	/DISCARD/ : {
76		*(.ARM.exidx.exit.text)
77		*(.ARM.extab.exit.text)
78		ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
79		ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
80		ARM_EXIT_DISCARD(EXIT_TEXT)
81		ARM_EXIT_DISCARD(EXIT_DATA)
82		EXIT_CALL
83#ifndef CONFIG_MMU
84		*(.text.fixup)
85		*(__ex_table)
86#endif
87#ifndef CONFIG_SMP_ON_UP
88		*(.alt.smp.init)
89#endif
90		*(.discard)
91		*(.discard.*)
92	}
93
94	. = PAGE_OFFSET + TEXT_OFFSET;
95	.head.text : {
96		_text = .;
97		HEAD_TEXT
98	}
99
100#ifdef CONFIG_STRICT_KERNEL_RWX
101	. = ALIGN(1<<SECTION_SHIFT);
102#endif
103
104	.text : {			/* Real text segment		*/
105		_stext = .;		/* Text and read-only data	*/
106			IDMAP_TEXT
107			__exception_text_start = .;
108			*(.exception.text)
109			__exception_text_end = .;
110			IRQENTRY_TEXT
111			SOFTIRQENTRY_TEXT
112			TEXT_TEXT
113			SCHED_TEXT
114			CPUIDLE_TEXT
115			LOCK_TEXT
116			HYPERVISOR_TEXT
117			KPROBES_TEXT
118			*(.gnu.warning)
119			*(.glue_7)
120			*(.glue_7t)
121		. = ALIGN(4);
122		*(.got)			/* Global offset table		*/
123			ARM_CPU_KEEP(PROC_INFO)
124	}
125
126#ifdef CONFIG_DEBUG_ALIGN_RODATA
127	. = ALIGN(1<<SECTION_SHIFT);
128#endif
129	_etext = .;			/* End of text section */
130
131	RO_DATA(PAGE_SIZE)
132
133	. = ALIGN(4);
134	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
135		__start___ex_table = .;
136#ifdef CONFIG_MMU
137		*(__ex_table)
138#endif
139		__stop___ex_table = .;
140	}
141
142#ifdef CONFIG_ARM_UNWIND
143	/*
144	 * Stack unwinding tables
145	 */
146	. = ALIGN(8);
147	.ARM.unwind_idx : {
148		__start_unwind_idx = .;
149		*(.ARM.exidx*)
150		__stop_unwind_idx = .;
151	}
152	.ARM.unwind_tab : {
153		__start_unwind_tab = .;
154		*(.ARM.extab*)
155		__stop_unwind_tab = .;
156	}
157#endif
158
159	NOTES
160
161#ifdef CONFIG_STRICT_KERNEL_RWX
162	. = ALIGN(1<<SECTION_SHIFT);
163#else
164	. = ALIGN(PAGE_SIZE);
165#endif
166	__init_begin = .;
167
168	/*
169	 * The vectors and stubs are relocatable code, and the
170	 * only thing that matters is their relative offsets
171	 */
172	__vectors_start = .;
173	.vectors 0xffff0000 : AT(__vectors_start) {
174		*(.vectors)
175	}
176	. = __vectors_start + SIZEOF(.vectors);
177	__vectors_end = .;
178
179	__stubs_start = .;
180	.stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
181		*(.stubs)
182	}
183	. = __stubs_start + SIZEOF(.stubs);
184	__stubs_end = .;
185
186	PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
187
188	INIT_TEXT_SECTION(8)
189	.exit.text : {
190		ARM_EXIT_KEEP(EXIT_TEXT)
191	}
192	.init.proc.info : {
193		ARM_CPU_DISCARD(PROC_INFO)
194	}
195	.init.arch.info : {
196		__arch_info_begin = .;
197		*(.arch.info.init)
198		__arch_info_end = .;
199	}
200	.init.tagtable : {
201		__tagtable_begin = .;
202		*(.taglist.init)
203		__tagtable_end = .;
204	}
205#ifdef CONFIG_SMP_ON_UP
206	.init.smpalt : {
207		__smpalt_begin = .;
208		*(.alt.smp.init)
209		__smpalt_end = .;
210	}
211#endif
212	.init.pv_table : {
213		__pv_table_begin = .;
214		*(.pv_table)
215		__pv_table_end = .;
216	}
217	.init.data : {
218		INIT_DATA
219		INIT_SETUP(16)
220		INIT_CALLS
221		CON_INITCALL
222		SECURITY_INITCALL
223		INIT_RAM_FS
224	}
225	.exit.data : {
226		ARM_EXIT_KEEP(EXIT_DATA)
227	}
228
229#ifdef CONFIG_SMP
230	PERCPU_SECTION(L1_CACHE_BYTES)
231#endif
232
233#ifdef CONFIG_STRICT_KERNEL_RWX
234	. = ALIGN(1<<SECTION_SHIFT);
235#else
236	. = ALIGN(THREAD_SIZE);
237#endif
238	__init_end = .;
239	__data_loc = .;
240
241	.data : AT(__data_loc) {
242		_data = .;		/* address in memory */
243		_sdata = .;
244
245		/*
246		 * first, the init task union, aligned
247		 * to an 8192 byte boundary.
248		 */
249		INIT_TASK_DATA(THREAD_SIZE)
250
251		NOSAVE_DATA
252		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
253		READ_MOSTLY_DATA(L1_CACHE_BYTES)
254
255		/*
256		 * and the usual data section
257		 */
258		DATA_DATA
259		CONSTRUCTORS
260
261		_edata = .;
262	}
263	_edata_loc = __data_loc + SIZEOF(.data);
264
265	BUG_TABLE
266
267#ifdef CONFIG_HAVE_TCM
268        /*
269	 * We align everything to a page boundary so we can
270	 * free it after init has commenced and TCM contents have
271	 * been copied to its destination.
272	 */
273	.tcm_start : {
274		. = ALIGN(PAGE_SIZE);
275		__tcm_start = .;
276		__itcm_start = .;
277	}
278
279	/*
280	 * Link these to the ITCM RAM
281	 * Put VMA to the TCM address and LMA to the common RAM
282	 * and we'll upload the contents from RAM to TCM and free
283	 * the used RAM after that.
284	 */
285	.text_itcm ITCM_OFFSET : AT(__itcm_start)
286	{
287		__sitcm_text = .;
288		*(.tcm.text)
289		*(.tcm.rodata)
290		. = ALIGN(4);
291		__eitcm_text = .;
292	}
293
294	/*
295	 * Reset the dot pointer, this is needed to create the
296	 * relative __dtcm_start below (to be used as extern in code).
297	 */
298	. = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);
299
300	.dtcm_start : {
301		__dtcm_start = .;
302	}
303
304	/* TODO: add remainder of ITCM as well, that can be used for data! */
305	.data_dtcm DTCM_OFFSET : AT(__dtcm_start)
306	{
307		. = ALIGN(4);
308		__sdtcm_data = .;
309		*(.tcm.data)
310		. = ALIGN(4);
311		__edtcm_data = .;
312	}
313
314	/* Reset the dot pointer or the linker gets confused */
315	. = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);
316
317	/* End marker for freeing TCM copy in linked object */
318	.tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
319		. = ALIGN(PAGE_SIZE);
320		__tcm_end = .;
321	}
322#endif
323
324	BSS_SECTION(0, 0, 0)
325	_end = .;
326
327	STABS_DEBUG
328}
329
330#ifdef CONFIG_STRICT_KERNEL_RWX
331/*
332 * Without CONFIG_DEBUG_ALIGN_RODATA, __start_rodata_section_aligned will
333 * be the first section-aligned location after __start_rodata. Otherwise,
334 * it will be equal to __start_rodata.
335 */
336__start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
337#endif
338
339/*
340 * These must never be empty
341 * If you have to comment these two assert statements out, your
342 * binutils is too old (for other reasons as well)
343 */
344ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
345ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
346
347/*
348 * The HYP init code can't be more than a page long,
349 * and should not cross a page boundary.
350 * The above comment applies as well.
351 */
352ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
353	"HYP init code too big or misaligned")
354
355#endif /* CONFIG_XIP_KERNEL */
356