xref: /openbmc/linux/include/asm-generic/vmlinux.lds.h (revision 96de0e252cedffad61b3cb5e05662c591898e69a)
/*
 * LOAD_OFFSET is subtracted from each section's virtual address in the
 * AT() load-address expressions below (load address = VMA - LOAD_OFFSET).
 * Architectures may define it before including this header; the default
 * of 0 means the load address equals the virtual address.
 */
1 #ifndef LOAD_OFFSET
2 #define LOAD_OFFSET 0
3 #endif
4 
/*
 * VMLINUX_SYMBOL() wraps every linker-defined symbol created in this
 * file.  The default is the identity mapping; an architecture whose
 * toolchain decorates C symbols (e.g. with a leading underscore) can
 * override this macro before including this header.
 */
5 #ifndef VMLINUX_SYMBOL
6 #define VMLINUX_SYMBOL(_sym_) _sym_
7 #endif
8 
9 /* Align . to an 8-byte boundary, the maximum function alignment. */
10 #define ALIGN_FUNCTION()  . = ALIGN(8)
11 
12 /* .data section contents: plain .data, .data.init.refok, and the
 * kernel markers table, 8-byte aligned and bracketed by the
 * __start___markers/__stop___markers symbols. */
13 #define DATA_DATA							\
14 	*(.data)							\
15 	*(.data.init.refok)						\
16 	. = ALIGN(8);							\
17 	VMLINUX_SYMBOL(__start___markers) = .;				\
18 	*(__markers)							\
19 	VMLINUX_SYMBOL(__stop___markers) = .;
20 
/*
 * RO_DATA(align): lay out the read-only data range, bracketed by the
 * __start_rodata/__end_rodata symbols.  Besides .rodata/.rodata1 it
 * collects the kernel's read-only tables: PCI quirk fixups, RapidIO
 * route ops, the symbol export tables (__ksymtab* / __kcrctab* and
 * their strings, each with __start/__stop markers), and the built-in
 * module parameters (__param).
 * @align: alignment applied both before and after the whole range.
 */
21 #define RO_DATA(align)							\
22 	. = ALIGN((align));						\
23 	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
24 		VMLINUX_SYMBOL(__start_rodata) = .;			\
25 		*(.rodata) *(.rodata.*)					\
26 		*(__vermagic)		/* Kernel version magic */	\
27 		*(__markers_strings)	/* Markers: strings */		\
28 	}								\
29 									\
30 	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
31 		*(.rodata1)						\
32 	}								\
33 									\
34 	/* PCI quirks */						\
35 	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
36 		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
37 		*(.pci_fixup_early)					\
38 		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
39 		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
40 		*(.pci_fixup_header)					\
41 		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
42 		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
43 		*(.pci_fixup_final)					\
44 		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
45 		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
46 		*(.pci_fixup_enable)					\
47 		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
48 		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
49 		*(.pci_fixup_resume)					\
50 		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
51 	}								\
52 									\
53 	/* RapidIO route ops */						\
54 	.rio_route        : AT(ADDR(.rio_route) - LOAD_OFFSET) {	\
55 		VMLINUX_SYMBOL(__start_rio_route_ops) = .;		\
56 		*(.rio_route_ops)					\
57 		VMLINUX_SYMBOL(__end_rio_route_ops) = .;		\
58 	}								\
59 									\
60 	/* Kernel symbol table: Normal symbols */			\
61 	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
62 		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
63 		*(__ksymtab)						\
64 		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
65 	}								\
66 									\
67 	/* Kernel symbol table: GPL-only symbols */			\
68 	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
69 		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
70 		*(__ksymtab_gpl)					\
71 		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
72 	}								\
73 									\
74 	/* Kernel symbol table: Normal unused symbols */		\
75 	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
76 		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
77 		*(__ksymtab_unused)					\
78 		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
79 	}								\
80 									\
81 	/* Kernel symbol table: GPL-only unused symbols */		\
82 	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
83 		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
84 		*(__ksymtab_unused_gpl)					\
85 		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
86 	}								\
87 									\
88 	/* Kernel symbol table: GPL-future-only symbols */		\
89 	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
90 		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
91 		*(__ksymtab_gpl_future)					\
92 		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
93 	}								\
94 									\
95 	/* Kernel symbol table: Normal symbols */			\
96 	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
97 		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
98 		*(__kcrctab)						\
99 		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
100 	}								\
101 									\
102 	/* Kernel symbol table: GPL-only symbols */			\
103 	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
104 		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
105 		*(__kcrctab_gpl)					\
106 		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
107 	}								\
108 									\
109 	/* Kernel symbol table: Normal unused symbols */		\
110 	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
111 		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
112 		*(__kcrctab_unused)					\
113 		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
114 	}								\
115 									\
116 	/* Kernel symbol table: GPL-only unused symbols */		\
117 	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
118 		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
119 		*(__kcrctab_unused_gpl)					\
120 		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
121 	}								\
122 									\
123 	/* Kernel symbol table: GPL-future-only symbols */		\
124 	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
125 		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
126 		*(__kcrctab_gpl_future)					\
127 		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
128 	}								\
129 									\
130 	/* Kernel symbol table: strings */				\
131         __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
132 		*(__ksymtab_strings)					\
133 	}								\
134 									\
135 	/* Built-in module parameters. */				\
136 	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
137 		VMLINUX_SYMBOL(__start___param) = .;			\
138 		*(__param)						\
139 		VMLINUX_SYMBOL(__stop___param) = .;			\
140 		VMLINUX_SYMBOL(__end_rodata) = .;			\
141 	}								\
142 									\
143 	. = ALIGN((align));
144 
145 /* RODATA is provided for backward compatibility; it is RO_DATA()
146  * with a fixed 4096-byte alignment.  All archs are supposed to use
 * RO_DATA() with an explicit alignment instead. */
147 #define RODATA RO_DATA(4096)
148 
/*
 * SECURITY_INIT: collect the security-module initcall entries from
 * .security_initcall.init, bracketed by the
 * __security_initcall_start/__security_initcall_end symbols.
 */
149 #define SECURITY_INIT							\
150 	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
151 		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
152 		*(.security_initcall.init) 				\
153 		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
154 	}
155 
156 /* .text section.  Mapped to function alignment to avoid address
157  * changes during the second ld pass when generating System.map. */
158 #define TEXT_TEXT							\
159 		ALIGN_FUNCTION();					\
160 		*(.text)						\
161 		*(.text.init.refok)					\
162 		*(.exit.text.refok)
163 
164 /* .sched.text is aligned to function alignment to ensure we have the
165  * same address even at the second ld pass when generating System.map. */
166 #define SCHED_TEXT							\
167 		ALIGN_FUNCTION();					\
168 		VMLINUX_SYMBOL(__sched_text_start) = .;			\
169 		*(.sched.text)						\
170 		VMLINUX_SYMBOL(__sched_text_end) = .;
171 
172 /* .spinlock.text is aligned to function alignment to ensure we have
173  * the same address even at the second ld pass when generating System.map. */
174 #define LOCK_TEXT							\
175 		ALIGN_FUNCTION();					\
176 		VMLINUX_SYMBOL(__lock_text_start) = .;			\
177 		*(.spinlock.text)					\
178 		VMLINUX_SYMBOL(__lock_text_end) = .;
179 
/* .kprobes.text, bracketed by __kprobes_text_start/__kprobes_text_end.
 * NOTE(review): presumably code that kprobes must not instrument, with
 * the markers letting the kprobes core range-check probe addresses —
 * confirm against the users of these symbols in kernel/kprobes.c. */
180 #define KPROBES_TEXT							\
181 		ALIGN_FUNCTION();					\
182 		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
183 		*(.kprobes.text)					\
184 		VMLINUX_SYMBOL(__kprobes_text_end) = .;
185 
186 		/* DWARF debug sections.
187 		Symbols in the DWARF debugging sections are relative to
188 		the beginning of the section, so we begin them at 0.
		Covers DWARF 1 through DWARF 2 plus the GNU and SGI/MIPS
		extensions; .debug_info also collects the linkonce debug
		info sections (.gnu.linkonce.wi.*).  */
189 #define DWARF_DEBUG							\
190 		/* DWARF 1 */						\
191 		.debug          0 : { *(.debug) }			\
192 		.line           0 : { *(.line) }			\
193 		/* GNU DWARF 1 extensions */				\
194 		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
195 		.debug_sfnames  0 : { *(.debug_sfnames) }		\
196 		/* DWARF 1.1 and DWARF 2 */				\
197 		.debug_aranges  0 : { *(.debug_aranges) }		\
198 		.debug_pubnames 0 : { *(.debug_pubnames) }		\
199 		/* DWARF 2 */						\
200 		.debug_info     0 : { *(.debug_info			\
201 				.gnu.linkonce.wi.*) }			\
202 		.debug_abbrev   0 : { *(.debug_abbrev) }		\
203 		.debug_line     0 : { *(.debug_line) }			\
204 		.debug_frame    0 : { *(.debug_frame) }			\
205 		.debug_str      0 : { *(.debug_str) }			\
206 		.debug_loc      0 : { *(.debug_loc) }			\
207 		.debug_macinfo  0 : { *(.debug_macinfo) }		\
208 		/* SGI/MIPS DWARF 2 extensions */			\
209 		.debug_weaknames 0 : { *(.debug_weaknames) }		\
210 		.debug_funcnames 0 : { *(.debug_funcnames) }		\
211 		.debug_typenames 0 : { *(.debug_typenames) }		\
212 		.debug_varnames  0 : { *(.debug_varnames) }		\
213 
214 		/* Stabs debugging sections; .comment is kept here too.  */
215 #define STABS_DEBUG							\
216 		.stab 0 : { *(.stab) }					\
217 		.stabstr 0 : { *(.stabstr) }				\
218 		.stab.excl 0 : { *(.stab.excl) }			\
219 		.stab.exclstr 0 : { *(.stab.exclstr) }			\
220 		.stab.index 0 : { *(.stab.index) }			\
221 		.stab.indexstr 0 : { *(.stab.indexstr) }		\
222 		.comment 0 : { *(.comment) }
223 
/*
 * BUG_TABLE: table of bug-entry records collected from __bug_table
 * input sections, 8-byte aligned and bracketed by the
 * __start___bug_table/__stop___bug_table symbols.
 *
 * Fix: wrap the bracketing symbols in VMLINUX_SYMBOL() like every
 * other symbol definition in this file, so architectures whose
 * toolchains prefix C symbols still resolve them correctly.
 */
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___bug_table) = .;		\
		*(__bug_table)						\
		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
	}
231 
/* ELF note sections (.note.*), bracketed by the
 * __start_notes/__stop_notes symbols. */
232 #define NOTES								\
233 	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
234 		VMLINUX_SYMBOL(__start_notes) = .;			\
235 		*(.note.*)						\
236 		VMLINUX_SYMBOL(__stop_notes) = .;			\
237 	}
238 
/*
 * INITCALLS: boot-time initcall sections in level order 0-7.  Each
 * .initcallNs.init section immediately follows its .initcallN.init
 * counterpart, and .initcallrootfs.init is placed between levels 5
 * and 6; the input-section order here fixes the order of the entries
 * in the linked table.
 */
239 #define INITCALLS							\
240   	*(.initcall0.init)						\
241   	*(.initcall0s.init)						\
242   	*(.initcall1.init)						\
243   	*(.initcall1s.init)						\
244   	*(.initcall2.init)						\
245   	*(.initcall2s.init)						\
246   	*(.initcall3.init)						\
247   	*(.initcall3s.init)						\
248   	*(.initcall4.init)						\
249   	*(.initcall4s.init)						\
250   	*(.initcall5.init)						\
251   	*(.initcall5s.init)						\
252 	*(.initcallrootfs.init)						\
253   	*(.initcall6.init)						\
254   	*(.initcall6s.init)						\
255   	*(.initcall7.init)						\
256   	*(.initcall7s.init)
257 
/*
 * PERCPU(align): the per-CPU data section (.data.percpu plus its
 * cacheline-shared-aligned variant), bracketed by the
 * __per_cpu_start/__per_cpu_end symbols.
 * @align: alignment applied before the section.
 *
 * Fix: wrap the bracketing symbols in VMLINUX_SYMBOL() like every
 * other symbol definition in this file, so architectures whose
 * toolchains prefix C symbols still resolve them correctly.
 */
#define PERCPU(align)							\
	. = ALIGN(align);						\
	VMLINUX_SYMBOL(__per_cpu_start) = .;				\
	.data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) {		\
		*(.data.percpu)						\
		*(.data.percpu.shared_aligned)				\
	}								\
	VMLINUX_SYMBOL(__per_cpu_end) = .;
266