xref: /openbmc/linux/arch/x86/kernel/vmlinux.lds.S (revision df2634f43f5106947f3735a0b61a6527a4b278cd)
/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols unless you know the symbol value should
 * remain constant even if the kernel image is relocated at run time.
 * Absolute symbols are not relocated. If a symbol's value should change
 * when the kernel is relocated, make the symbol section relative and put
 * it inside the section definition.
 */
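/*
 * Illustrative sketch of the distinction above (hypothetical ".example"
 * section, not part of this script): a symbol assigned inside an output
 * section description is section relative and moves with the image, while
 * a symbol assigned a plain constant at top level is absolute and is
 * never adjusted:
 *
 *	.example : AT(ADDR(.example) - LOAD_OFFSET) {
 *		__example_start = .;		relocated with the section
 *		*(.example)
 *	}
 *	__example_marker = 0x1000;		absolute, never relocated
 */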

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif
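/*
 * Either way, "jiffies" and "jiffies_64" become two names for the same
 * counter: on little-endian 32-bit, jiffies reads the low word of
 * jiffies_64, while on 64-bit the alias runs the other way because
 * jiffies itself is defined further down, in the vsyscall .jiffies
 * section.
 */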

#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
/*
 * On 64-bit, align RODATA to 2MB so that even with CONFIG_DEBUG_RODATA
 * we retain large page mappings across the boundaries between the kernel
 * text, rodata and data sections.
 *
 * However, the kernel identity mappings need different RWX permissions
 * for the pages that map the text proper and for the (freed) padding pages
 * at the end of the text section, so the identity mappings are broken up
 * into smaller pages. On 64-bit, kernel text and the kernel identity
 * mappings are separate, so we can enable the protection checks that come
 * with CONFIG_DEBUG_RODATA and still retain 2MB large page mappings for
 * kernel text.
 */
#define X64_ALIGN_DEBUG_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X64_ALIGN_DEBUG_RODATA_END				\
		. = ALIGN(HPAGE_SIZE);				\
		__end_rodata_hpage_align = .;

#else

#define X64_ALIGN_DEBUG_RODATA_BEGIN
#define X64_ALIGN_DEBUG_RODATA_END

#endif

PHDRS {
	text PT_LOAD FLAGS(5);          /* R_E */
	data PT_LOAD FLAGS(6);          /* RW_ */
#ifdef CONFIG_X86_64
	user PT_LOAD FLAGS(5);          /* R_E */
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);        /* RW_ */
#endif
	init PT_LOAD FLAGS(7);          /* RWE */
#endif
	note PT_NOTE FLAGS(0);          /* ___ */
}
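/*
 * The FLAGS() values are the ELF p_flags bits: PF_X = 1, PF_W = 2,
 * PF_R = 4.  Hence 5 = R_E, 6 = RW_ and 7 = RWE, matching the comments
 * above; the loader applies these permissions per segment.
 */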

SECTIONS
{
#ifdef CONFIG_X86_32
        . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
        phys_startup_32 = startup_32 - LOAD_OFFSET;
#else
        . = __START_KERNEL;
        phys_startup_64 = startup_64 - LOAD_OFFSET;
#endif

	/* Text and read-only data */
	.text :  AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		/* bootstrapping code */
		HEAD_TEXT
#ifdef CONFIG_X86_32
		. = ALIGN(PAGE_SIZE);
		*(.text..page_aligned)
#endif
		. = ALIGN(8);
		_stext = .;
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)
		/* End of text section */
		_etext = .;
	} :text = 0x9090
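	/*
	 * The "= 0x9090" fill above pads any gaps within the text output
	 * section with 0x90 bytes (the x86 NOP opcode) rather than zeroes,
	 * so stray padding that is ever executed is harmless.
	 */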

	NOTES :text :note

	EXCEPTION_TABLE(16) :text = 0x9090

#if defined(CONFIG_DEBUG_RODATA)
	/* .text should occupy a whole number of pages */
	. = ALIGN(PAGE_SIZE);
#endif
	X64_ALIGN_DEBUG_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X64_ALIGN_DEBUG_RODATA_END

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

#ifdef CONFIG_X86_64

#define VSYSCALL_ADDR (-10*1024*1024)

#define VLOAD_OFFSET (VSYSCALL_ADDR - __vsyscall_0 + LOAD_OFFSET)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
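/*
 * Worked example of the macros above (sketch): the vsyscall sections are
 * linked at the fixed address VSYSCALL_ADDR (-10MB) visible to user space,
 * but their bytes live in the kernel image immediately after __vsyscall_0.
 * For an output section x:
 *
 *	VLOAD(x) = ADDR(x) - VSYSCALL_ADDR + __vsyscall_0 - LOAD_OFFSET
 *		   the load (physical) address used in the AT() clauses
 *	VVIRT(x) = ADDR(x) - VSYSCALL_ADDR + __vsyscall_0
 *		   the same bytes seen through the normal kernel mapping,
 *		   used for the vsyscall_gtod_data/vgetcpu_mode/jiffies
 *		   aliases below
 */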

	. = ALIGN(4096);
	__vsyscall_0 = .;

	. = VSYSCALL_ADDR;
	.vsyscall_0 : AT(VLOAD(.vsyscall_0)) {
		*(.vsyscall_0)
	} :user

	. = ALIGN(L1_CACHE_BYTES);
	.vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
		*(.vsyscall_fn)
	}

	. = ALIGN(L1_CACHE_BYTES);
	.vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
		*(.vsyscall_gtod_data)
	}

	vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
	.vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
		*(.vsyscall_clock)
	}
	vsyscall_clock = VVIRT(.vsyscall_clock);


	.vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
		*(.vsyscall_1)
	}
	.vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
		*(.vsyscall_2)
	}

	.vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
		*(.vgetcpu_mode)
	}
	vgetcpu_mode = VVIRT(.vgetcpu_mode);

	. = ALIGN(L1_CACHE_BYTES);
	.jiffies : AT(VLOAD(.jiffies)) {
		*(.jiffies)
	}
	jiffies = VVIRT(.jiffies);

	.vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
		*(.vsyscall_3)
	}

	. = __vsyscall_0 + PAGE_SIZE;

#undef VSYSCALL_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT

#endif /* CONFIG_X86_64 */

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(0, :percpu)
#endif
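	/*
	 * Zero-basing means a per-cpu symbol's value can be used directly
	 * as a %gs-relative offset, with each CPU's %gs base pointing at
	 * its own copy.  Minimal sketch of the resulting addressing
	 * (hypothetical variable "x", not part of this script):
	 *
	 *	movq %gs:x, %rax	# reads this CPU's copy of x
	 */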

	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

	/*
	 * Start address and size of operations which can be patched at
	 * runtime with virtualization-friendly instructions or bare-metal
	 * native ones. Think page table operations.
	 * Details in paravirt_types.h.
	 */
	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}
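	/*
	 * Each record placed in .parainstructions is roughly a
	 * struct paravirt_patch_site (see paravirt_types.h); approximate
	 * field layout, for orientation only:
	 *
	 *	struct paravirt_patch_site {
	 *		u8 *instr;	// patchable instruction
	 *		u8 instrtype;	// which pv operation it performs
	 *		u8 len;		// length of the original instruction
	 *		u16 clobbers;	// registers the patch may clobber
	 *	};
	 */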

	/*
	 * struct alt_instr entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}
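	/*
	 * Sketch of what each .altinstructions record carries (struct
	 * alt_instr in alternative.h; field layout approximate, for
	 * orientation only):
	 *
	 *	struct alt_instr {
	 *		u8 *instr;		// original instruction
	 *		u8 *replacement;	// bytes in .altinstr_replacement
	 *		u8  cpuid;		// X86_FEATURE_* bit that selects it
	 *		u8  instrlen;		// length of the original
	 *		u8  replacementlen;	// length of the replacement
	 *	};
	 */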

	/*
	 * And here are the replacement instructions. The linker stores them
	 * as plain binary blobs; .altinstructions carries enough information
	 * (their address and length) to patch the kernel safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}

	/*
	 * struct iommu_table_entry entries are injected into this section.
	 * It is an array of IOMMU entries which is sorted at run time
	 * according to its dependency order. Once rootfs_initcall has
	 * completed, this section can be safely removed.
	 */
	.iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) {
		__iommu_table = .;
		*(.iommu_table)
		__iommu_table_end = .;
	}
	. = ALIGN(8);
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions and .eh_frame
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU(THREAD_SIZE)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}
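	/*
	 * .smp_locks is an array of pointers to LOCK prefixes in the text;
	 * the alternatives code uses it to patch those prefixes out when
	 * only one CPU is online (and back in on CPU hotplug), which is why
	 * the section may be freed once it is no longer needed.
	 */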

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		*(.bss)
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}
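	/*
	 * C code claims space here with the RESERVE_BRK() helper from
	 * <asm/setup.h>, which reserves room in .brk_reservation, and later
	 * allocates it with extend_brk(); the 64k above is extra slop on
	 * top of those reservations.  Hypothetical usage sketch:
	 *
	 *	RESERVE_BRK(early_buf, 2 * PAGE_SIZE);
	 *	...
	 *	void *p = extend_brk(2 * PAGE_SIZE, PAGE_SIZE);
	 */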

	_end = .;

        STABS_DEBUG
        DWARF_DEBUG

	/* Sections to be discarded */
	DISCARDS
	/DISCARD/ : { *(.eh_frame) }
}


#ifdef CONFIG_X86_32
/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
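/*
 * These aliases give early boot code (e.g. head_64.S) a link-time address
 * for the boot CPU's copy of the per-cpu objects: the per-cpu sections are
 * linked at offset 0 and the initial copy is loaded at __per_cpu_load, so
 * init_per_cpu__gdt_page = gdt_page + __per_cpu_load points straight at
 * that copy.
 */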

/*
 * Build-time check on the image size:
 */
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_SMP
. = ASSERT((irq_stack_union == 0),
           "irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
           "kexec control code size is too big");
#endif