/* xref: /openbmc/linux/include/asm-generic/vmlinux.lds.h (revision b8bb76713ec50df2f11efee386e16f93d51e1076) */
#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif

/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/* The actual configuration determines if the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime).
 */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec)    *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif

#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
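
/*
 * Illustrative expansion (added note, not from the original header):
 * with CONFIG_HOTPLUG=y a linker-script use such as
 *
 *	DEV_KEEP(init.data)
 *
 * pastes to the input-section rule
 *
 *	*(.devinit.data)
 *
 * while DEV_DISCARD(init.data) expands to nothing, so __devinit data is
 * kept in the image.  Without CONFIG_HOTPLUG the two macros swap roles
 * and the same .devinit.data input sections can be discarded instead.
 * The CPU_* and MEM_* variants behave the same way for the .cpuinit,
 * .cpuexit, .meminit and .memexit section families.
 */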

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	VMLINUX_SYMBOL(__start_mcount_loc) = .; \
			*(__mcount_loc)				\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch)			      \
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .;   \
				*(_ftrace_branch)			      \
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

/* .data section */
#define DATA_DATA							\
	*(.data)							\
	*(.data.init.refok)						\
	*(.ref.data)							\
	DEV_KEEP(init.data)						\
	DEV_KEEP(exit.data)						\
	CPU_KEEP(init.data)						\
	CPU_KEEP(exit.data)						\
	MEM_KEEP(init.data)						\
	MEM_KEEP(exit.data)						\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___markers) = .;				\
	*(__markers)							\
	VMLINUX_SYMBOL(__stop___markers) = .;				\
	. = ALIGN(32);							\
	VMLINUX_SYMBOL(__start___tracepoints) = .;			\
	*(__tracepoints)						\
	VMLINUX_SYMBOL(__stop___tracepoints) = .;			\
	/* implement dynamic printk debug */				\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___verbose) = .;				\
	*(__verbose)							\
	VMLINUX_SYMBOL(__stop___verbose) = .;				\
	LIKELY_PROFILE()						\
	BRANCH_PROFILE()
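
/*
 * Sketch of typical use from an architecture's vmlinux.lds.S (added
 * illustration, not copied from a specific arch):
 *
 *	.data : AT(ADDR(.data) - LOAD_OFFSET) {
 *		DATA_DATA
 *		CONSTRUCTORS
 *	}
 */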

#define RO_DATA(align)							\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rodata) = .;			\
		*(.rodata) *(.rodata.*)					\
		*(__vermagic)		/* Kernel version magic */	\
		*(__markers_strings)	/* Markers: strings */		\
		*(__tracepoints_strings)/* Tracepoints: strings */	\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	BUG_TABLE							\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
		*(.pci_fixup_early)					\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
		*(.pci_fixup_header)					\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
		*(.pci_fixup_final)					\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
		*(.pci_fixup_enable)					\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
		*(.pci_fixup_resume)					\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
		*(.pci_fixup_resume_early)				\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
		*(.pci_fixup_suspend)					\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
	}								\
									\
	/* Built-in firmware blobs */					\
	.builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_builtin_fw) = .;			\
		*(.builtin_fw)						\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;			\
	}								\
									\
	/* RapidIO route ops */						\
	.rio_route        : AT(ADDR(.rio_route) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_rio_route_ops) = .;		\
		*(.rio_route_ops)					\
		VMLINUX_SYMBOL(__end_rio_route_ops) = .;		\
	}								\
									\
	TRACEDATA							\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
		*(__ksymtab)						\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
		*(__ksymtab_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
		*(__ksymtab_unused)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
		*(__ksymtab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
		*(__ksymtab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
		*(__kcrctab)						\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
		*(__kcrctab_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
		*(__kcrctab_unused)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
		*(__kcrctab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
		*(__kcrctab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
		MCOUNT_REC()						\
		DEV_KEEP(init.rodata)					\
		DEV_KEEP(exit.rodata)					\
		CPU_KEEP(init.rodata)					\
		CPU_KEEP(exit.rodata)					\
		MEM_KEEP(init.rodata)					\
		MEM_KEEP(exit.rodata)					\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___param) = .;			\
		*(__param)						\
		VMLINUX_SYMBOL(__stop___param) = .;			\
		. = ALIGN((align));					\
		VMLINUX_SYMBOL(__end_rodata) = .;			\
	}								\
	. = ALIGN((align));

/* RODATA is provided for backward compatibility.
 * All archs are supposed to use RO_DATA(). */
#define RODATA RO_DATA(4096)
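
/*
 * Typical invocation from an arch linker script (added sketch; the
 * PAGE_SIZE argument is illustrative): page-align the read-only data so
 * it can later be write-protected, e.g.
 *
 *	RO_DATA(PAGE_SIZE)
 *
 * Scripts still using the legacy RODATA spelling get the same layout
 * with a fixed 4096 byte alignment.
 */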

#define SECURITY_INIT							\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
	}

/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map. */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text.hot)						\
		*(.text)						\
		*(.ref.text)						\
		*(.text.init.refok)					\
		*(.exit.text.refok)					\
	DEV_KEEP(init.text)						\
	DEV_KEEP(exit.text)						\
	CPU_KEEP(init.text)						\
	CPU_KEEP(exit.text)						\
	MEM_KEEP(init.text)						\
	MEM_KEEP(exit.text)						\
		*(.text.unlikely)
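
/*
 * Sketch of how an arch script commonly lays out its .text output
 * section with the helpers below (added illustration; the ordering is
 * typical, not mandated):
 *
 *	.text : AT(ADDR(.text) - LOAD_OFFSET) {
 *		TEXT_TEXT
 *		SCHED_TEXT
 *		LOCK_TEXT
 *		KPROBES_TEXT
 *		IRQENTRY_TEXT
 *		*(.fixup)
 *		*(.gnu.warning)
 *	}
 */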


/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map. */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__sched_text_start) = .;			\
		*(.sched.text)						\
		VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map. */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__lock_text_start) = .;			\
		*(.spinlock.text)					\
		VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
		*(.kprobes.text)					\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__irqentry_text_start) = .;		\
		*(.irqentry.text)					\
		VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

/* init and exit section handling */
#define INIT_DATA							\
	*(.init.data)							\
	DEV_DISCARD(init.data)						\
	DEV_DISCARD(init.rodata)					\
	CPU_DISCARD(init.data)						\
	CPU_DISCARD(init.rodata)					\
	MEM_DISCARD(init.data)						\
	MEM_DISCARD(init.rodata)

#define INIT_TEXT							\
	*(.init.text)							\
	DEV_DISCARD(init.text)						\
	CPU_DISCARD(init.text)						\
	MEM_DISCARD(init.text)

#define EXIT_DATA							\
	*(.exit.data)							\
	DEV_DISCARD(exit.data)						\
	DEV_DISCARD(exit.rodata)					\
	CPU_DISCARD(exit.data)						\
	CPU_DISCARD(exit.rodata)					\
	MEM_DISCARD(exit.data)						\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT							\
	*(.exit.text)							\
	DEV_DISCARD(exit.text)						\
	CPU_DISCARD(exit.text)						\
	MEM_DISCARD(exit.text)

		/* DWARF debug sections.
		Symbols in the DWARF debugging sections are relative to
		the beginning of the section so we begin them at 0.  */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }		\

		/* Stabs debugging sections.  */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___bug_table) = .;		\
		*(__bug_table)						\
		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA							\
	. = ALIGN(4);							\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__tracedata_start) = .;			\
		*(.tracedata)						\
		VMLINUX_SYMBOL(__tracedata_end) = .;			\
	}
#else
#define TRACEDATA
#endif

#define NOTES								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start_notes) = .;			\
		*(.note.*)						\
		VMLINUX_SYMBOL(__stop_notes) = .;			\
	}

#define INITCALLS							\
	*(.initcallearly.init)						\
	VMLINUX_SYMBOL(__early_initcall_end) = .;			\
	*(.initcall0.init)						\
	*(.initcall0s.init)						\
	*(.initcall1.init)						\
	*(.initcall1s.init)						\
	*(.initcall2.init)						\
	*(.initcall2s.init)						\
	*(.initcall3.init)						\
	*(.initcall3s.init)						\
	*(.initcall4.init)						\
	*(.initcall4s.init)						\
	*(.initcall5.init)						\
	*(.initcall5s.init)						\
	*(.initcallrootfs.init)						\
	*(.initcall6.init)						\
	*(.initcall6s.init)						\
	*(.initcall7.init)						\
	*(.initcall7s.init)
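
/*
 * INITCALLS is normally wrapped by the arch script so the boot code can
 * walk the table between __initcall_start and __initcall_end (added
 * sketch; section placement varies by arch):
 *
 *	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
 *		VMLINUX_SYMBOL(__initcall_start) = .;
 *		INITCALLS
 *		VMLINUX_SYMBOL(__initcall_end) = .;
 *	}
 */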

/**
 * PERCPU_VADDR - define output section for percpu area
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to the output section for the percpu area.  If
 * @vaddr is not blank, it specifies an explicit base address and all
 * percpu symbols will be offset from the given address.  If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank.  Be warned that
 * output PHDR is sticky.  If @phdr is specified, the next output
 * section in the linker script will go there too.  @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU().
 */
#define PERCPU_VADDR(vaddr, phdr)					\
	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
	.data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)		\
				- LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
		*(.data.percpu.first)					\
		*(.data.percpu.page_aligned)				\
		*(.data.percpu)						\
		*(.data.percpu.shared_aligned)				\
		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
	} phdr								\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
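
/*
 * Example use (added sketch, not tied to a particular arch): place the
 * percpu area at virtual address 0 and carry it in its own ":percpu"
 * program header, assuming such a PHDR was declared earlier in the
 * script:
 *
 *	PERCPU_VADDR(0, :percpu)
 *
 * Leaving @vaddr blank keeps the section at the current location
 * counter, offset only by LOAD_OFFSET.
 */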

/**
 * PERCPU - define output section for percpu area, simple version
 * @align: required alignment
 *
 * Aligns to @align and outputs the output section for the percpu area.
 * This macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
 * __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
 * that __per_cpu_load is defined as a relative symbol against
 * .data.percpu which is required for relocatable x86_32
 * configuration.
 */
#define PERCPU(align)							\
	. = ALIGN(align);						\
	.data.percpu	: AT(ADDR(.data.percpu) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
		*(.data.percpu.first)					\
		*(.data.percpu.page_aligned)				\
		*(.data.percpu)						\
		*(.data.percpu.shared_aligned)				\
		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
	}
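
/*
 * Simple-case usage (added sketch): most architectures need only
 *
 *	PERCPU(PAGE_SIZE)
 *
 * which emits .data.percpu at the current, page-aligned address with
 * __per_cpu_load and __per_cpu_start coinciding.
 */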