/* /openbmc/linux/include/asm-generic/vmlinux.lds.h (revision f15cbe6f1a4b4d9df59142fc8e4abb973302cf44) */
#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif
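
/*
 * Illustrative sketch (assumption, not part of the original header): the
 * indirection exists so an architecture can map the symbols defined here
 * onto whatever name mangling its toolchain uses.  A hypothetical arch
 * whose compiler prefixes C symbols with an underscore could override the
 * macro before including this file:
 *
 *	#define VMLINUX_SYMBOL(_sym_) _##_sym_
 *	#include <asm-generic/vmlinux.lds.h>
 */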

/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime).
 */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec)    *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif

#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
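
/*
 * Illustrative note (sketch, not part of the original header): the ##
 * pasting above means that, for example, DEV_KEEP(init.data) expands to
 * the input-section pattern
 *
 *	*(.devinit.data)
 *
 * while DEV_DISCARD(init.data) expands to that same pattern only when
 * CONFIG_HOTPLUG is disabled.  An architecture linker script can thus list
 * the KEEP expansions in the sections it retains and the DISCARD
 * expansions (via INIT_DATA, EXIT_DATA, etc. below) inside its /DISCARD/
 * output section.
 */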


/* .data section */
#define DATA_DATA							\
	*(.data)							\
	*(.data.init.refok)						\
	*(.ref.data)							\
	DEV_KEEP(init.data)						\
	DEV_KEEP(exit.data)						\
	CPU_KEEP(init.data)						\
	CPU_KEEP(exit.data)						\
	MEM_KEEP(init.data)						\
	MEM_KEEP(exit.data)						\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___markers) = .;				\
	*(__markers)							\
	VMLINUX_SYMBOL(__stop___markers) = .;
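
/*
 * Illustrative sketch (assumption about a typical user, not part of this
 * header): an architecture's vmlinux.lds.S normally pulls DATA_DATA into
 * its own .data output section, roughly like
 *
 *	.data : AT(ADDR(.data) - LOAD_OFFSET) {
 *		DATA_DATA
 *		CONSTRUCTORS
 *	}
 *
 * The __start___markers/__stop___markers pair emitted above then gives C
 * code (e.g. the marker core) the bounds of the built-in marker table.
 */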

#define RO_DATA(align)							\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rodata) = .;			\
		*(.rodata) *(.rodata.*)					\
		*(__vermagic)		/* Kernel version magic */	\
		*(__markers_strings)	/* Markers: strings */		\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	BUG_TABLE							\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
		*(.pci_fixup_early)					\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
		*(.pci_fixup_header)					\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
		*(.pci_fixup_final)					\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
		*(.pci_fixup_enable)					\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
		*(.pci_fixup_resume)					\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
		*(.pci_fixup_resume_early)				\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
		*(.pci_fixup_suspend)					\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
	}								\
									\
	/* Built-in firmware blobs */					\
	.builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_builtin_fw) = .;			\
		*(.builtin_fw)						\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;			\
	}								\
									\
	/* RapidIO route ops */						\
	.rio_route        : AT(ADDR(.rio_route) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_rio_route_ops) = .;		\
		*(.rio_route_ops)					\
		VMLINUX_SYMBOL(__end_rio_route_ops) = .;		\
	}								\
									\
	TRACEDATA							\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
		*(__ksymtab)						\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
		*(__ksymtab_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
		*(__ksymtab_unused)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
		*(__ksymtab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
		*(__ksymtab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
		*(__kcrctab)						\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
		*(__kcrctab_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
		*(__kcrctab_unused)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
		*(__kcrctab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
		*(__kcrctab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: strings */				\
        __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
		DEV_KEEP(init.rodata)					\
		DEV_KEEP(exit.rodata)					\
		CPU_KEEP(init.rodata)					\
		CPU_KEEP(exit.rodata)					\
		MEM_KEEP(init.rodata)					\
		MEM_KEEP(exit.rodata)					\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___param) = .;			\
		*(__param)						\
		VMLINUX_SYMBOL(__stop___param) = .;			\
		. = ALIGN((align));					\
		VMLINUX_SYMBOL(__end_rodata) = .;			\
	}								\
	. = ALIGN((align));

/* RODATA is provided for backward compatibility.
 * All archs are supposed to use RO_DATA(). */
#define RODATA RO_DATA(4096)
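
/*
 * Illustrative sketch (assumption about a typical user, not part of this
 * header): an arch script usually invokes RO_DATA() with its page size so
 * the read-only data can later be write-protected at page granularity,
 * e.g. RO_DATA(PAGE_SIZE) where PAGE_SIZE is visible to the script, or a
 * literal such as
 *
 *	RO_DATA(4096)
 *
 * which emits .rodata, the PCI fixup tables, the exported-symbol tables
 * and the module-parameter table between __start_rodata and __end_rodata.
 */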

#define SECURITY_INIT							\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init) 				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
	}
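
/*
 * Illustrative sketch (hedged, not a quote of the kernel sources): the
 * bracketing symbols defined by SECURITY_INIT let early init code walk
 * the table of security_initcall() entries roughly like
 *
 *	extern initcall_t __security_initcall_start[], __security_initcall_end[];
 *	initcall_t *call;
 *
 *	for (call = __security_initcall_start;
 *	     call < __security_initcall_end; call++)
 *		(*call)();
 */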

/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text.hot)						\
		*(.text)						\
		*(.ref.text)						\
		*(.text.init.refok)					\
		*(.exit.text.refok)					\
	DEV_KEEP(init.text)						\
	DEV_KEEP(exit.text)						\
	CPU_KEEP(init.text)						\
	CPU_KEEP(exit.text)						\
	MEM_KEEP(init.text)						\
	MEM_KEEP(exit.text)						\
		*(.text.unlikely)
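
/*
 * Illustrative sketch (assumption about a typical user, not part of this
 * header): TEXT_TEXT is meant to be combined with the other text helpers
 * inside an architecture's .text output section, e.g.
 *
 *	.text : AT(ADDR(.text) - LOAD_OFFSET) {
 *		TEXT_TEXT
 *		SCHED_TEXT
 *		LOCK_TEXT
 *		KPROBES_TEXT
 *	}
 */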


/* sched.text is aligned to function alignment to ensure we have the same
 * address even on the second ld pass when generating System.map */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__sched_text_start) = .;			\
		*(.sched.text)						\
		VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even on the second ld pass when generating System.map */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__lock_text_start) = .;			\
		*(.spinlock.text)					\
		VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
		*(.kprobes.text)					\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;
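
/*
 * Illustrative sketch (hedged, not a quote of the kernel sources): the
 * __kprobes_text_start/__kprobes_text_end pair lets the kprobes core
 * refuse to place a probe inside a function marked __kprobes, roughly:
 *
 *	extern char __kprobes_text_start[], __kprobes_text_end[];
 *
 *	if (addr >= (unsigned long)__kprobes_text_start &&
 *	    addr < (unsigned long)__kprobes_text_end)
 *		return -EINVAL;
 */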

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

/* init and exit section handling */
#define INIT_DATA							\
	*(.init.data)							\
	DEV_DISCARD(init.data)						\
	DEV_DISCARD(init.rodata)					\
	CPU_DISCARD(init.data)						\
	CPU_DISCARD(init.rodata)					\
	MEM_DISCARD(init.data)						\
	MEM_DISCARD(init.rodata)

#define INIT_TEXT							\
	*(.init.text)							\
	DEV_DISCARD(init.text)						\
	CPU_DISCARD(init.text)						\
	MEM_DISCARD(init.text)

#define EXIT_DATA							\
	*(.exit.data)							\
	DEV_DISCARD(exit.data)						\
	DEV_DISCARD(exit.rodata)					\
	CPU_DISCARD(exit.data)						\
	CPU_DISCARD(exit.rodata)					\
	MEM_DISCARD(exit.data)						\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT							\
	*(.exit.text)							\
	DEV_DISCARD(exit.text)						\
	CPU_DISCARD(exit.text)						\
	MEM_DISCARD(exit.text)
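
/*
 * Illustrative sketch (assumption about a typical user, not part of this
 * header): an arch script places the init text between markers that are
 * freed after boot, and either keeps the exit sections or lists them
 * under /DISCARD/, roughly like
 *
 *	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
 *		_sinittext = .;
 *		INIT_TEXT
 *		_einittext = .;
 *	}
 *	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
 *		EXIT_TEXT
 *	}
 */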

		/* DWARF debug sections.
		Symbols in the DWARF debugging sections are relative to
		the beginning of the section so we begin them at 0.  */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }		\

		/* Stabs debugging sections.  */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		__start___bug_table = .;				\
		*(__bug_table)						\
		__stop___bug_table = .;					\
	}
#else
#define BUG_TABLE
#endif
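
/*
 * Illustrative sketch (hedged, not a quote of the kernel sources): with
 * CONFIG_GENERIC_BUG, the BUG() trap handler looks up the faulting
 * address in the table bracketed above, roughly
 *
 *	extern const struct bug_entry __start___bug_table[],
 *				      __stop___bug_table[];
 *	const struct bug_entry *bug;
 *
 *	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
 *		if (bug->bug_addr == bugaddr)
 *			return bug;
 */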

#ifdef CONFIG_PM_TRACE
#define TRACEDATA							\
	. = ALIGN(4);							\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
		__tracedata_start = .;					\
		*(.tracedata)						\
		__tracedata_end = .;					\
	}
#else
#define TRACEDATA
#endif

#define NOTES								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start_notes) = .;			\
		*(.note.*)						\
		VMLINUX_SYMBOL(__stop_notes) = .;			\
	}

#define INITCALLS							\
	*(.initcallearly.init)						\
	__early_initcall_end = .;					\
	*(.initcall0.init)						\
	*(.initcall0s.init)						\
	*(.initcall1.init)						\
	*(.initcall1s.init)						\
	*(.initcall2.init)						\
	*(.initcall2s.init)						\
	*(.initcall3.init)						\
	*(.initcall3s.init)						\
	*(.initcall4.init)						\
	*(.initcall4s.init)						\
	*(.initcall5.init)						\
	*(.initcall5s.init)						\
	*(.initcallrootfs.init)						\
	*(.initcall6.init)						\
	*(.initcall6s.init)						\
	*(.initcall7.init)						\
	*(.initcall7s.init)
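
/*
 * Illustrative sketch (assumption about a typical user, not part of this
 * header): INITCALLS is wrapped by the arch script in a .initcall.init
 * output section whose bounds init/main.c walks in order, with the
 * entries before __early_initcall_end run before SMP bring-up:
 *
 *	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
 *		__initcall_start = .;
 *		INITCALLS
 *		__initcall_end = .;
 *	}
 */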

#define PERCPU(align)							\
	. = ALIGN(align);						\
	__per_cpu_start = .;						\
	.data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) {		\
		*(.data.percpu)						\
		*(.data.percpu.shared_aligned)				\
	}								\
	__per_cpu_end = .;
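
/*
 * Illustrative sketch (assumption about a typical user, not part of this
 * header): architectures invoke PERCPU() with the alignment they need for
 * the static per-CPU area, commonly the page size, e.g.
 *
 *	PERCPU(PAGE_SIZE)
 *
 * The __per_cpu_start/__per_cpu_end bounds are then used at boot to size
 * the template area and replicate it once per possible CPU.
 */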