xref: /openbmc/linux/arch/arm64/kernel/vmlinux.lds.S (revision dfc53baa)
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * ld script to make ARM Linux kernel
4 * taken from the i386 version by Russell King
5 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
6 */
7
8#define RO_EXCEPTION_TABLE_ALIGN	8
9
10#include <asm-generic/vmlinux.lds.h>
11#include <asm/cache.h>
12#include <asm/kernel-pgtable.h>
13#include <asm/memory.h>
14#include <asm/page.h>
15
16#include "image.h"
17
/* AArch64 image; execution begins at _text (defined in .head.text below). */
OUTPUT_ARCH(aarch64)
ENTRY(_text)

/* arm64 keeps a single 64-bit jiffies counter; alias the generic symbol. */
jiffies = jiffies_64;
23
/*
 * Collect the KVM (EL2) exception-table entries from all objects into one
 * 8-byte-aligned run, bracketed by start/stop marker symbols so the fixup
 * lookup code can find its bounds.
 */
#define HYPERVISOR_EXTABLE					\
	. = ALIGN(SZ_8);					\
	__start___kvm_ex_table = .;				\
	*(__kvm_ex_table)					\
	__stop___kvm_ex_table = .;
29
/*
 * Lay out the HYP (EL2) text: the idmap'd init code first, then the rest of
 * the .hyp.text, with the KVM exception table appended inside the same span.
 * Each region is bracketed by marker symbols used to map it at EL2.
 */
#define HYPERVISOR_TEXT					\
	/*						\
	 * Align to 4 KB so that			\
	 * a) the HYP vector table is at its minimum	\
	 *    alignment of 2048 bytes			\
	 * b) the HYP init code will not cross a page	\
	 *    boundary if its size does not exceed	\
	 *    4 KB (see related ASSERT() below)		\
	 */						\
	. = ALIGN(SZ_4K);				\
	__hyp_idmap_text_start = .;			\
	*(.hyp.idmap.text)				\
	__hyp_idmap_text_end = .;			\
	__hyp_text_start = .;				\
	*(.hyp.text)					\
	HYPERVISOR_EXTABLE				\
	__hyp_text_end = .;
47
/*
 * Identity-mapped text, 4 KB aligned and bracketed by marker symbols.
 * An ASSERT() at the end of this file checks it fits in a single 4 KB page
 * without crossing a page boundary.
 */
#define IDMAP_TEXT					\
	. = ALIGN(SZ_4K);				\
	__idmap_text_start = .;				\
	*(.idmap.text)					\
	__idmap_text_end = .;
53
/*
 * Hibernation-exit text (only when CONFIG_HIBERNATION=y), 4 KB aligned and
 * bracketed by marker symbols; an ASSERT() below checks it fits in one 4 KB
 * page. Expands to nothing when hibernation is disabled.
 */
#ifdef CONFIG_HIBERNATION
#define HIBERNATE_TEXT					\
	. = ALIGN(SZ_4K);				\
	__hibernate_exit_text_start = .;		\
	*(.hibernate_exit.text)				\
	__hibernate_exit_text_end = .;
#else
#define HIBERNATE_TEXT
#endif
63
/*
 * Entry trampoline text for CONFIG_UNMAP_KERNEL_AT_EL0 (KPTI). Page-aligned
 * at both ends; an ASSERT() below requires the span to be exactly PAGE_SIZE.
 * Expands to nothing when the option is disabled.
 */
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define TRAMP_TEXT					\
	. = ALIGN(PAGE_SIZE);				\
	__entry_tramp_text_start = .;			\
	*(.entry.tramp.text)				\
	. = ALIGN(PAGE_SIZE);				\
	__entry_tramp_text_end = .;
#else
#define TRAMP_TEXT
#endif
74
/*
 * The size of the PE/COFF section that covers the kernel image, which
 * runs from _stext to _edata, must be a round multiple of the PE/COFF
 * FileAlignment, which we set to its minimum value of 0x200. '_stext'
 * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned
 * boundary should be sufficient.
 */
PECOFF_FILE_ALIGNMENT = 0x200;

#ifdef CONFIG_EFI
/*
 * Emit at least one byte so the padding section is actually kept, then
 * round the location counter up to the PE/COFF file alignment.
 * Expands to nothing when CONFIG_EFI is disabled.
 */
#define PECOFF_EDATA_PADDING	\
	.pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); }
#else
#define PECOFF_EDATA_PADDING
#endif
90
SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name.  There is no documented
	 * order of matching.
	 */
	/DISCARD/ : {
		EXIT_CALL
		*(.discard)
		*(.discard.*)
		*(.interp .dynamic)
		*(.dynsym .dynstr .hash .gnu.hash)
		*(.eh_frame)
	}

	/* Link the image at the kernel virtual base plus the boot text offset. */
	. = KIMAGE_VADDR + TEXT_OFFSET;

	.head.text : {
		_text = .;
		HEAD_TEXT
	}
	.text : {			/* Real text segment		*/
		_stext = .;		/* Text and read-only data	*/
			IRQENTRY_TEXT
			SOFTIRQENTRY_TEXT
			ENTRY_TEXT
			TEXT_TEXT
			SCHED_TEXT
			CPUIDLE_TEXT
			LOCK_TEXT
			KPROBES_TEXT
			HYPERVISOR_TEXT
			IDMAP_TEXT
			HIBERNATE_TEXT
			TRAMP_TEXT
			*(.fixup)
			*(.gnu.warning)
		. = ALIGN(16);
		*(.got)			/* Global offset table		*/
	}

	. = ALIGN(SEGMENT_ALIGN);
	_etext = .;			/* End of text section */

	/* everything from this point to __init_begin will be marked RO NX */
	RO_DATA(PAGE_SIZE)

	/*
	 * Statically reserved page-table areas: carved out by advancing the
	 * location counter rather than by emitting an output section, so the
	 * image file carries no bytes for them.
	 */
	idmap_pg_dir = .;
	. += IDMAP_DIR_SIZE;
	idmap_pg_end = .;

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	/* One page for the KPTI trampoline page table. */
	tramp_pg_dir = .;
	. += PAGE_SIZE;
#endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/* Reserved TTBR0 area for software PAN emulation. */
	reserved_ttbr0 = .;
	. += RESERVED_TTBR0_SIZE;
#endif
	swapper_pg_dir = .;
	. += PAGE_SIZE;
	swapper_pg_end = .;

	. = ALIGN(SEGMENT_ALIGN);
	__init_begin = .;
	__inittext_begin = .;

	INIT_TEXT_SECTION(8)

	/*
	 * .exit.text is kept in the image (only EXIT_CALL/EXIT_DATA are
	 * discarded above) and bracketed by symbols marking its extent.
	 */
	__exittext_begin = .;
	.exit.text : {
		EXIT_TEXT
	}
	__exittext_end = .;

	. = ALIGN(4);
	/* Alternative-instruction patch table, bracketed by marker symbols. */
	.altinstructions : {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	. = ALIGN(SEGMENT_ALIGN);
	__inittext_end = .;
	__initdata_begin = .;

	.init.data : {
		INIT_DATA
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		INIT_RAM_FS
		*(.init.rodata.* .init.bss)	/* from the EFI stub */
	}
	.exit.data : {
		EXIT_DATA
	}

	PERCPU_SECTION(L1_CACHE_BYTES)

	/*
	 * Dynamic relocations; their image offset and size are exported via
	 * __rela_offset/__rela_size below.
	 */
	.rela.dyn : ALIGN(8) {
		*(.rela .rela*)
	}

	__rela_offset	= ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
	__rela_size	= SIZEOF(.rela.dyn);

#ifdef CONFIG_RELR
	/* Compressed RELR relocations, exported the same way as .rela.dyn. */
	.relr.dyn : ALIGN(8) {
		*(.relr.dyn)
	}

	__relr_offset	= ABSOLUTE(ADDR(.relr.dyn) - KIMAGE_VADDR);
	__relr_size	= SIZEOF(.relr.dyn);
#endif

	. = ALIGN(SEGMENT_ALIGN);
	__initdata_end = .;
	__init_end = .;

	_data = .;
	_sdata = .;
	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)

	/*
	 * Data written with the MMU off but read with the MMU on requires
	 * cache lines to be invalidated, discarding up to a Cache Writeback
	 * Granule (CWG) of data from the cache. Keep the section that
	 * requires this type of maintenance to be in its own Cache Writeback
	 * Granule (CWG) area so the cache maintenance operations don't
	 * interfere with adjacent data.
	 */
	.mmuoff.data.write : ALIGN(SZ_2K) {
		__mmuoff_data_start = .;
		*(.mmuoff.data.write)
	}
	. = ALIGN(SZ_2K);
	.mmuoff.data.read : {
		*(.mmuoff.data.read)
		__mmuoff_data_end = .;
	}

	PECOFF_EDATA_PADDING
	__pecoff_data_rawsize = ABSOLUTE(. - __initdata_begin);
	_edata = .;

	BSS_SECTION(0, 0, 0)

	/* Initial page-table reservation, page-aligned, beyond _edata. */
	. = ALIGN(PAGE_SIZE);
	init_pg_dir = .;
	. += INIT_DIR_SIZE;
	init_pg_end = .;

	. = ALIGN(SEGMENT_ALIGN);
	/* PE/COFF virtual size includes BSS and page tables, unlike rawsize. */
	__pecoff_data_size = ABSOLUTE(. - __initdata_begin);
	_end = .;

	STABS_DEBUG

	HEAD_SYMBOLS
}
255
256#include "image-vars.h"
257
/*
 * The HYP init code and ID map text can't be longer than a page each,
 * and should not cross a page boundary.
 *
 * "start & ~(SZ_4K - 1)" rounds the start symbol down to its 4 KB page
 * base, so each check is "end lies within the page containing start".
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
	"HYP init code too big or misaligned")
ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
	"ID map text too big or misaligned")
#ifdef CONFIG_HIBERNATION
ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
	<= SZ_4K, "Hibernate exit text too big or misaligned")
#endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/* TRAMP_TEXT is page-aligned at both ends, so the span must be exact. */
ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
	"Entry trampoline text too big")
#endif
/*
 * If padding is applied before .head.text, virt<->phys conversions will fail.
 */
ASSERT(_text == (KIMAGE_VADDR + TEXT_OFFSET), "HEAD is misaligned")
278