xref: /openbmc/linux/arch/ia64/kernel/vmlinux.lds.S (revision 3e30a927)
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#include <asm/cache.h>
4#include <asm/ptrace.h>
5#include <asm/pgtable.h>
6#include <asm/thread_info.h>
7
8#define EMITS_PT_NOTE
9#define RO_EXCEPTION_TABLE_ALIGN	16
10
11#include <asm-generic/vmlinux.lds.h>
12
/* Little-endian 64-bit IA-64 ELF image. */
13OUTPUT_FORMAT("elf64-ia64-little")
14OUTPUT_ARCH(ia64)
/* The kernel is entered at its physical start address (defined below). */
15ENTRY(phys_start)
/* jiffies is simply an alias for the 64-bit jiffies_64 counter. */
16jiffies = jiffies_64;
17
/*
 * Program headers: separate loadable segments for text, per-cpu data
 * and normal data, an ELF note segment, and the IA-64 unwind-table
 * segment.  These names are referenced with ":name" in SECTIONS below.
 */
18PHDRS {
19	text   PT_LOAD;
20	percpu PT_LOAD;
21	data   PT_LOAD;
22	note   PT_NOTE;
23	unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
24}
25
26SECTIONS {
27	/*
28	 * unwind exit sections must be discarded before
29	 * the rest of the sections get included.
30	 */
31	/DISCARD/ : {
32		*(.IA_64.unwind.exit.text)
33		*(.IA_64.unwind_info.exit.text)
34		*(.comment)
35		*(.note)
36	}
37
38	v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
	/*
	 * Physical entry address: LOAD_OFFSET is the virtual-to-physical
	 * displacement of the loaded kernel image (same subtraction the
	 * AT(...) expressions below use for each section's load address).
	 */
39	phys_start = _start - LOAD_OFFSET;
40
	/*
	 * Empty output section whose only purpose is to attach the :text
	 * program header; subsequent sections inherit it until another
	 * header is named.
	 */
41	code : {
42	} :text
43	. = KERNEL_START;
44
45	_text = .;
46	_stext = .;
47
	/*
	 * Main kernel text.  The interrupt vector table text (.text..ivt)
	 * comes first, bracketed by __start_ivt_text/__end_ivt_text.
	 */
48	.text : AT(ADDR(.text) - LOAD_OFFSET) {
49		__start_ivt_text = .;
50		*(.text..ivt)
51		__end_ivt_text = .;
52		TEXT_TEXT
53		SCHED_TEXT
54		CPUIDLE_TEXT
55		LOCK_TEXT
56		KPROBES_TEXT
57		*(.gnu.linkonce.t*)
58	}
59
	/* Secondary text, kept separate from the primary .text section. */
60	.text2 : AT(ADDR(.text2) - LOAD_OFFSET)	{
61		*(.text2)
62	}
63
64#ifdef CONFIG_SMP
	/* Out-of-line lock text (SMP builds only). */
65	.text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) {
66		*(.text..lock)
67	}
68#endif
69	_etext = .;
70
71	/*
72	 * Read-only data
73	 */
74
75	/* MCA table */
76	. = ALIGN(16);
77	__mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) {
78		__start___mca_table = .;
79		*(__mca_table)
80		__stop___mca_table = .;
81	}
82
	/*
	 * Patch list bracketed by __start/__end symbols.
	 * NOTE(review): presumably a list of code locations fixed up at
	 * boot (physical stack register) -- confirm against
	 * arch/ia64/kernel/patch.c.
	 */
83	.data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) {
84		__start___phys_stack_reg_patchlist = .;
85		*(.data..patch.phys_stack_reg)
86		__end___phys_stack_reg_patchlist = .;
87	}
88
89	/*
90	 * Global data
91	 */
92	_data = .;
93
94	/* Unwind info & table: */
95	. = ALIGN(8);
96	.IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) {
97		*(.IA_64.unwind_info*)
98	}
	/*
	 * The unwind table is placed in both the :text segment and the
	 * dedicated :unwind (PT_IA_64_UNWIND) program header declared in
	 * PHDRS, bracketed by __start_unwind/__end_unwind.
	 */
99	.IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) {
100		__start_unwind = .;
101		*(.IA_64.unwind*)
102		__end_unwind = .;
103	} :text :unwind
	/* Empty section: detach from :unwind, back to :text only. */
104	code_continues2 : {
105	} :text
106
107	RO_DATA(4096)
108
	/* .opd holds IA-64 function descriptors, bracketed for the kernel. */
109	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
110		__start_opd = .;
111		*(.opd)
112		__end_opd = .;
113	}
114
115	/*
116	 * Initialization code and data:
117	 */
118	. = ALIGN(PAGE_SIZE);
119	__init_begin = .;
120
121	INIT_TEXT_SECTION(PAGE_SIZE)
122	INIT_DATA_SECTION(16)
123
	/* More boot-time patch lists, same __start/__end bracketing pattern
	 * as .data..patch.phys_stack_reg above. */
124	.data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) {
125		__start___vtop_patchlist = .;
126		*(.data..patch.vtop)
127		__end___vtop_patchlist = .;
128	}
129
130	.data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) {
131		__start___rse_patchlist = .;
132		*(.data..patch.rse)
133		__end___rse_patchlist = .;
134	}
135
136	.data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) {
137		__start___mckinley_e9_bundles = .;
138		*(.data..patch.mckinley_e9)
139		__end___mckinley_e9_bundles = .;
140	}
141
142#ifdef	CONFIG_SMP
	/* Reserve one per-cpu page for the boot CPU inside the init area. */
143	. = ALIGN(PERCPU_PAGE_SIZE);
144	__cpu0_per_cpu = .;
145	. = . + PERCPU_PAGE_SIZE;   /* cpu0 per-cpu space */
146#endif
147
148	. = ALIGN(PAGE_SIZE);
149	__init_end = .;
150
	/* Page-aligned data, including the user-visible gate page
	 * (.data..gate), bracketed by __start/__stop_gate_section. */
151	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
152		PAGE_ALIGNED_DATA(PAGE_SIZE)
153		. = ALIGN(PAGE_SIZE);
154		__start_gate_section = .;
155		*(.data..gate)
156		__stop_gate_section = .;
157	}
158	/*
159	 * make sure the gate page doesn't expose
160	 * kernel data
161	 */
162	. = ALIGN(PAGE_SIZE);
163
164	/* Per-cpu data: */
165	. = ALIGN(PERCPU_PAGE_SIZE);
166	PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
167	__phys_per_cpu_start = __per_cpu_load;
168	/*
169	 * ensure percpu data fits
170	 * into percpu page size
171	 */
172	. = __phys_per_cpu_start + PERCPU_PAGE_SIZE;
173
	/* Empty section: switch output to the :data program header. */
174	data : {
175	} :data
176	.data : AT(ADDR(.data) - LOAD_OFFSET) {
177		_sdata  =  .;
178		INIT_TASK_DATA(PAGE_SIZE)
179		CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
180		READ_MOSTLY_DATA(SMP_CACHE_BYTES)
181		DATA_DATA
182		*(.data1)
183		*(.gnu.linkonce.d*)
184		CONSTRUCTORS
185	}
186
187	BUG_TABLE
188
189	. = ALIGN(16);	/* gp must be 16-byte aligned for exc. table */
190	.got : AT(ADDR(.got) - LOAD_OFFSET) {
191		*(.got.plt)
192		*(.got)
193	}
	/*
	 * Bias gp 2MB into the GOT so gp-relative addressing can reach in
	 * both directions.  NOTE(review): presumably chosen to cover the
	 * signed 22-bit addl immediate range per the IA-64 runtime
	 * conventions -- confirm before changing.
	 */
194	__gp = ADDR(.got) + 0x200000;
195
196	/*
197	 * We want the small data sections together,
198	 * so single-instruction offsets can access
199	 * them all, and initialized data all before
200	 * uninitialized, so we can shorten the
201	 * on-disk segment size.
202	 */
203	.sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
204		*(.sdata)
205		*(.sdata1)
206		*(.srdata)
207	}
208	_edata  =  .;
209
210	BSS_SECTION(0, 0, 0)
211
212	_end = .;
213
	/* Empty section: any remaining allocated output goes to :text. */
214	code : {
215	} :text
216
217	STABS_DEBUG
218	DWARF_DEBUG
219
220	/* Default discards */
221	DISCARDS
222}
223