xref: /openbmc/linux/arch/ia64/kernel/vmlinux.lds.S (revision 65417d9f)
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#include <asm/cache.h>
4#include <asm/ptrace.h>
5#include <asm/pgtable.h>
6
7#include <asm-generic/vmlinux.lds.h>
8
/*
 * Output is little-endian 64-bit IA-64 ELF; execution starts at
 * phys_start (defined inside SECTIONS below as _start - LOAD_OFFSET).
 */
9OUTPUT_FORMAT("elf64-ia64-little")
10OUTPUT_ARCH(ia64)
11ENTRY(phys_start)
/* Alias the generic 'jiffies' symbol onto the 64-bit counter. */
12jiffies = jiffies_64;
13
/*
 * ELF program headers (segments).  Sections are assigned to these below
 * with the ":name" syntax (e.g. ":code", ":percpu").
 */
14PHDRS {
15	code   PT_LOAD;
16	percpu PT_LOAD;
17	data   PT_LOAD;
18	note   PT_NOTE;
19	unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
20}
21
22SECTIONS {
23	/*
24	 * unwind exit sections must be discarded before
25	 * the rest of the sections get included.
26	 */
27	/DISCARD/ : {
28		*(.IA_64.unwind.exit.text)
29		*(.IA_64.unwind_info.exit.text)
30		*(.comment)
31		*(.note)
32	}
33
34	v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
	/* Physical address of the entry point (named in ENTRY() above). */
35	phys_start = _start - LOAD_OFFSET;
36
	/* Empty section: binds the following output to the :code segment. */
37	code : {
38	} :code
39	. = KERNEL_START;
40
41	_text = .;
42	_stext = .;
43
	/*
	 * Kernel text.  The interrupt vector table (.text..ivt) is placed
	 * first and bracketed by __start_ivt_text/__end_ivt_text.
	 * AT(... - LOAD_OFFSET) gives every section a physical load address
	 * distinct from its virtual address; the same pattern repeats below.
	 */
44	.text : AT(ADDR(.text) - LOAD_OFFSET) {
45		__start_ivt_text = .;
46		*(.text..ivt)
47		__end_ivt_text = .;
48		TEXT_TEXT
49		SCHED_TEXT
50		CPUIDLE_TEXT
51		LOCK_TEXT
52		KPROBES_TEXT
53		*(.gnu.linkonce.t*)
54	}
55
56	.text2 : AT(ADDR(.text2) - LOAD_OFFSET)	{
57		*(.text2)
58	}
59
60#ifdef CONFIG_SMP
	/* Out-of-line lock text, emitted only on SMP builds. */
61	.text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) {
62		*(.text..lock)
63	}
64#endif
65	_etext = .;
66
67	/*
68	 * Read-only data
69	 */
70	NOTES :code :note       /* put .notes in text and mark in PT_NOTE  */
71	code_continues : {
72	} : code               /* switch back to regular program...  */
73
74	EXCEPTION_TABLE(16)
75
76	/* MCA table */
77	. = ALIGN(16);
	/* Machine Check Abort table, bracketed for iteration at runtime. */
78	__mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) {
79		__start___mca_table = .;
80		*(__mca_table)
81		__stop___mca_table = .;
82	}
83
	/* Patch list: sites rewritten at boot with the physical stack
	   register value (symbols bracket the list for the patch code). */
84	.data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) {
85		__start___phys_stack_reg_patchlist = .;
86		*(.data..patch.phys_stack_reg)
87		__end___phys_stack_reg_patchlist = .;
88	}
89
90	/*
91	 * Global data
92	 */
93	_data = .;
94
95	/* Unwind info & table: */
96	. = ALIGN(8);
97	.IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) {
98		*(.IA_64.unwind_info*)
99	}
	/* Unwind table lives in :code but is also marked with the
	   PT_IA_64_UNWIND segment declared in PHDRS. */
100	.IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) {
101		__start_unwind = .;
102		*(.IA_64.unwind*)
103		__end_unwind = .;
104	} :code :unwind
105	code_continues2 : {
106	} : code
107
108	RODATA
109
	/* Official procedure descriptors (function pointers on IA-64). */
110	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
111		*(.opd)
112	}
113
114	/*
115	 * Initialization code and data:
116	 */
117	. = ALIGN(PAGE_SIZE);
118	__init_begin = .;
119
120	INIT_TEXT_SECTION(PAGE_SIZE)
121	INIT_DATA_SECTION(16)
122
	/* Patch list: virtual-to-physical address translation sites. */
123	.data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) {
124		__start___vtop_patchlist = .;
125		*(.data..patch.vtop)
126		__end___vtop_patchlist = .;
127	}
128
	/* Patch list: register stack engine (RSE) related sites. */
129	.data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) {
130		__start___rse_patchlist = .;
131		*(.data..patch.rse)
132		__end___rse_patchlist = .;
133	}
134
	/* Patch list: McKinley erratum 9 workaround bundles. */
135	.data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) {
136		__start___mckinley_e9_bundles = .;
137		*(.data..patch.mckinley_e9)
138		__end___mckinley_e9_bundles = .;
139	}
140
141#if defined(CONFIG_IA64_GENERIC)
142	/* Machine Vector */
143	. = ALIGN(16);
	/* Table of machine-vector descriptors for generic kernels that
	   select the platform at boot. */
144	.machvec : AT(ADDR(.machvec) - LOAD_OFFSET) {
145		machvec_start = .;
146		*(.machvec)
147		machvec_end = .;
148	}
149#endif
150
151#ifdef	CONFIG_SMP
152	. = ALIGN(PERCPU_PAGE_SIZE);
153	__cpu0_per_cpu = .;
154	. = . + PERCPU_PAGE_SIZE;   /* cpu0 per-cpu space */
155#endif
156
157	. = ALIGN(PAGE_SIZE);
158	__init_end = .;
159
	/* Page-aligned data, including the user-visible gate page
	   (.data..gate) bracketed by __start/__stop_gate_section. */
160	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
161		PAGE_ALIGNED_DATA(PAGE_SIZE)
162		. = ALIGN(PAGE_SIZE);
163		__start_gate_section = .;
164		*(.data..gate)
165		__stop_gate_section = .;
166	}
167	/*
168	 * make sure the gate page doesn't expose
169	 * kernel data
170	 */
171	. = ALIGN(PAGE_SIZE);
172
173	/* Per-cpu data: */
174	. = ALIGN(PERCPU_PAGE_SIZE);
175	PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
176	__phys_per_cpu_start = __per_cpu_load;
177	/*
178	 * ensure percpu data fits
179	 * into percpu page size
180	 */
181	. = __phys_per_cpu_start + PERCPU_PAGE_SIZE;
182
	/* Switch output to the :data segment for everything below. */
183	data : {
184	} :data
185	.data : AT(ADDR(.data) - LOAD_OFFSET) {
186		_sdata  =  .;
187		INIT_TASK_DATA(PAGE_SIZE)
188		CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
189		READ_MOSTLY_DATA(SMP_CACHE_BYTES)
190		DATA_DATA
191		*(.data1)
192		*(.gnu.linkonce.d*)
193		CONSTRUCTORS
194	}
195
196	BUG_TABLE
197
198	. = ALIGN(16);	/* gp must be 16-byte aligned for exc. table */
199	.got : AT(ADDR(.got) - LOAD_OFFSET) {
200		*(.got.plt)
201		*(.got)
202	}
	/* NOTE(review): the 0x200000 bias presumably centers gp in the
	   4MB gp-relative addressing window — confirm against the IA-64
	   software conventions before changing. */
203	__gp = ADDR(.got) + 0x200000;
204
205	/*
206	 * We want the small data sections together,
207	 * so single-instruction offsets can access
208	 * them all, and initialized data all before
209	 * uninitialized, so we can shorten the
210	 * on-disk segment size.
211	 */
212	.sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
213		*(.sdata)
214		*(.sdata1)
215		*(.srdata)
216	}
217	_edata  =  .;
218
219	BSS_SECTION(0, 0, 0)
220
221	_end = .;
222
	/* Re-select :code so trailing (non-loaded) sections don't land in
	   the data segment. */
223	code : {
224	} :code
225
226	STABS_DEBUG
227	DWARF_DEBUG
228
229	/* Default discards */
230	DISCARDS
231}
232