/* SPDX-License-Identifier: GPL-2.0 */
/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

/*
 * No __ro_after_init data in the .rodata section: in an XIP kernel the
 * .rodata section lives in ROM and is permanently read-only, so that
 * data is placed in the writable .data area further down instead.
 */
#define RO_AFTER_INIT_DATA

#include <linux/sizes.h>

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>

#define PROC_INFO							\
	. = ALIGN(4);							\
	VMLINUX_SYMBOL(__proc_info_begin) = .;				\
	*(.proc.info.init)						\
	VMLINUX_SYMBOL(__proc_info_end) = .;

#define IDMAP_TEXT							\
	ALIGN_FUNCTION();						\
	VMLINUX_SYMBOL(__idmap_text_start) = .;				\
	*(.idmap.text)							\
	VMLINUX_SYMBOL(__idmap_text_end) = .;				\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;			\
	*(.hyp.idmap.text)						\
	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;

#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x)		x
#else
#define ARM_CPU_DISCARD(x)	x
#define ARM_CPU_KEEP(x)
#endif

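/*
 * Note: when SMP_ON_UP fixups or GENERIC_BUG are enabled, other sections
 * may hold references into exit text/data (e.g. .alt.smp.init records or
 * bug table entries), so exit code cannot simply be discarded at link
 * time; it is kept in the image and freed at run time instead.
 */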
#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
	defined(CONFIG_GENERIC_BUG)
#define ARM_EXIT_KEEP(x)	x
#define ARM_EXIT_DISCARD(x)
#else
#define ARM_EXIT_KEEP(x)
#define ARM_EXIT_DISCARD(x)	x
#endif

OUTPUT_ARCH(arm)
ENTRY(stext)

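/*
 * jiffies aliases the least-significant 32 bits of jiffies_64, which sit
 * at offset 0 on little-endian and offset 4 on big-endian builds.
 */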
#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif

SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name.  There is no documented
	 * order of matching.
	 *
	 * unwind exit sections must be discarded before the rest of the
	 * unwind sections get included.
	 */
	/DISCARD/ : {
		*(.ARM.exidx.exit.text)
		*(.ARM.extab.exit.text)
		ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
		ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
		ARM_EXIT_DISCARD(EXIT_TEXT)
		ARM_EXIT_DISCARD(EXIT_DATA)
		EXIT_CALL
#ifndef CONFIG_MMU
		*(.text.fixup)
		*(__ex_table)
#endif
		*(.alt.smp.init)
		*(.discard)
		*(.discard.*)
	}

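	/*
	 * The kernel executes in place: the text and read-only data below
	 * are linked at the XIP ROM mapping and are never copied to RAM.
	 */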
	. = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
	_xiprom = .;			/* XIP ROM area to be mapped */

	.head.text : {
		_text = .;
		HEAD_TEXT
	}

	.text : {			/* Real text segment		*/
		_stext = .;		/* Text and read-only data	*/
			IDMAP_TEXT
			__exception_text_start = .;
			*(.exception.text)
			__exception_text_end = .;
			IRQENTRY_TEXT
			TEXT_TEXT
			SCHED_TEXT
			CPUIDLE_TEXT
			LOCK_TEXT
			KPROBES_TEXT
			*(.gnu.warning)
			*(.glue_7)
			*(.glue_7t)
		. = ALIGN(4);
		*(.got)			/* Global offset table		*/
			ARM_CPU_KEEP(PROC_INFO)
	}

	RO_DATA(PAGE_SIZE)

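	/*
	 * The exception table is only needed when an MMU is present; in
	 * !CONFIG_MMU builds its entries were discarded above.
	 */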
	. = ALIGN(4);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
#ifdef CONFIG_MMU
		*(__ex_table)
#endif
		__stop___ex_table = .;
	}

#ifdef CONFIG_ARM_UNWIND
	/*
	 * Stack unwinding tables
	 */
	. = ALIGN(8);
	.ARM.unwind_idx : {
		__start_unwind_idx = .;
		*(.ARM.exidx*)
		__stop_unwind_idx = .;
	}
	.ARM.unwind_tab : {
		__start_unwind_tab = .;
		*(.ARM.extab*)
		__stop_unwind_tab = .;
	}
#endif

	NOTES

	_etext = .;			/* End of text and rodata section */

	/*
	 * The vectors and stubs are relocatable code, and the
	 * only thing that matters is their relative offsets
	 */
	__vectors_start = .;
	.vectors 0xffff0000 : AT(__vectors_start) {
		*(.vectors)
	}
	. = __vectors_start + SIZEOF(.vectors);
	__vectors_end = .;

	__stubs_start = .;
	.stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
		*(.stubs)
	}
	. = __stubs_start + SIZEOF(.stubs);
	__stubs_end = .;

	PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));

	INIT_TEXT_SECTION(8)
	.exit.text : {
		ARM_EXIT_KEEP(EXIT_TEXT)
	}
	.init.proc.info : {
		ARM_CPU_DISCARD(PROC_INFO)
	}
	.init.arch.info : {
		__arch_info_begin = .;
		*(.arch.info.init)
		__arch_info_end = .;
	}
	.init.tagtable : {
		__tagtable_begin = .;
		*(.taglist.init)
		__tagtable_end = .;
	}
	.init.rodata : {
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		SECURITY_INITCALL
		INIT_RAM_FS
	}

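/*
 * With the MPU enabled, _exiprom is pushed up to a 128 KiB boundary so
 * that the end of the XIP image can be covered by PMSAv7 subregion
 * disables (each subregion is 1/8 of the region size); see the asserts
 * at the end of this file.
 */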
#ifdef CONFIG_ARM_MPU
	. = ALIGN(SZ_128K);
#endif
	_exiprom = .;			/* End of XIP ROM area */

/*
 * From this point on, everything is writable and will be copied to RAM at boot
 */
	__data_loc = ALIGN(4);		/* location in file */
	. = PAGE_OFFSET + TEXT_OFFSET;	/* location in memory */
#undef LOAD_OFFSET
#define LOAD_OFFSET (PAGE_OFFSET + TEXT_OFFSET - __data_loc)
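/*
 * With LOAD_OFFSET redefined this way, AT(ADDR(sect) - LOAD_OFFSET) keeps
 * each writable section's load address (LMA) contiguous with the ROM
 * image starting at __data_loc, while its run-time address (VMA) is in
 * RAM; early boot code then copies the data segment from its ROM
 * location to _sdata.
 */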

	. = ALIGN(THREAD_SIZE);
	_sdata = .;
	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
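	/*
	 * __ro_after_init data was kept out of the ROM .rodata section by
	 * the empty RO_AFTER_INIT_DATA definition at the top of this file;
	 * collect it here in the writable RAM image instead.
	 */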
	.data.ro_after_init : AT(ADDR(.data.ro_after_init) - LOAD_OFFSET) {
		*(.data..ro_after_init)
	}
	_edata = .;

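	/*
	 * Init data below is copied to RAM along with .data; the whole
	 * [__init_begin, __init_end) range is freed once booting has
	 * finished.
	 */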
	. = ALIGN(PAGE_SIZE);
	__init_begin = .;
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
		INIT_DATA
	}
	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		ARM_EXIT_KEEP(EXIT_DATA)
	}
#ifdef CONFIG_SMP
	PERCPU_SECTION(L1_CACHE_BYTES)
#endif

	/*
	 * End of copied data. We need a dummy section to get its LMA.
	 * It is also placed before the final ALIGN() so that the trailing
	 * padding is neither stored in the binary file nor copied to RAM.
	 */
	.data.endmark : AT(ADDR(.data.endmark) - LOAD_OFFSET) { }
	_edata_loc = LOADADDR(.data.endmark);

	. = ALIGN(PAGE_SIZE);
	__init_end = .;

#ifdef CONFIG_HAVE_TCM
	/*
	 * We align everything to a page boundary so we can
	 * free it after init has commenced and the TCM contents have
	 * been copied to their destination.
	 */
	.tcm_start : {
		. = ALIGN(PAGE_SIZE);
		__tcm_start = .;
		__itcm_start = .;
	}

	/*
	 * Link these to the ITCM RAM.
	 * Put the VMA at the TCM address and the LMA in common RAM,
	 * then we upload the contents from RAM to TCM and free
	 * the used RAM afterwards.
	 */
	.text_itcm ITCM_OFFSET : AT(__itcm_start)
	{
		__sitcm_text = .;
		*(.tcm.text)
		*(.tcm.rodata)
		. = ALIGN(4);
		__eitcm_text = .;
	}

	/*
	 * Reset the dot pointer; this is needed to create the
	 * relative __dtcm_start below (to be used as an extern in code).
	 */
	. = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);

	.dtcm_start : {
		__dtcm_start = .;
	}

	/* TODO: add remainder of ITCM as well, that can be used for data! */
	.data_dtcm DTCM_OFFSET : AT(__dtcm_start)
	{
		. = ALIGN(4);
		__sdtcm_data = .;
		*(.tcm.data)
		. = ALIGN(4);
		__edtcm_data = .;
	}

	/* Reset the dot pointer or the linker gets confused */
	. = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);

	/* End marker for freeing TCM copy in linked object */
	.tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)) {
		. = ALIGN(PAGE_SIZE);
		__tcm_end = .;
	}
#endif

	BSS_SECTION(0, 0, 8)
	_end = .;

	STABS_DEBUG
}

/*
 * These must never be empty.
 * If you have to comment out these two assert statements, your
 * binutils is too old (for other reasons as well).
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")

/*
 * The HYP init code can't be more than a page long,
 * and should not cross a page boundary.
 * The comment above about binutils applies here as well.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
	"HYP init code too big or misaligned")

#ifdef CONFIG_XIP_DEFLATED_DATA
/*
 * The .bss is used as a stack area for __inflate_kernel_data() whose stack
 * frame is 9568 bytes. Make sure it has extra room left: requiring 12288
 * bytes (three 4 KiB pages) leaves roughly 2.7 KiB of headroom.
 */
ASSERT((_end - __bss_start) >= 12288, ".bss too small for CONFIG_XIP_DEFLATED_DATA")
#endif

#ifdef CONFIG_ARM_MPU
/*
 * Due to the PMSAv7 restrictions on region base address and size, we have
 * to enforce minimal alignment. With a weaker alignment of _xiprom, the
 * XIP address space would likely span multiple MPU regions, and we could
 * end up reprogramming the MPU region we are currently executing from
 * with settings that no longer cover the reprogramming code itself; as
 * soon as the new settings took effect we would be executing straight
 * from the background region, which is XN.
 * Alignment to 1M should suit most users.
 * _exiprom is aligned to 1/8 of 1M so that the tail can be covered by a
 * subregion disable.
 */
ASSERT(!(_xiprom & (SZ_1M - 1)), "XIP start address may cause MPU programming issues")
ASSERT(!(_exiprom & (SZ_128K - 1)), "XIP end address may cause MPU programming issues")
#endif
