#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif

/* Align . to an 8 byte boundary; this equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime). See the example below the MEM_KEEP()/
 * MEM_DISCARD() definitions.
 */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec)    *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif

#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
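
/*
 * Illustrative sketch (not taken from an in-tree user): data annotated
 * with __devinitdata is emitted into .devinit.data, so with CONFIG_HOTPLUG
 * set DEV_KEEP(init.data) expands to *(.devinit.data) and keeps it in the
 * image, while without CONFIG_HOTPLUG DEV_DISCARD(init.data) pulls the
 * same input section into INIT_DATA instead, so it is freed together with
 * the rest of the init data.
 */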


/* .data section */
#define DATA_DATA							\
	*(.data)							\
	*(.data.init.refok)						\
	*(.ref.data)							\
	DEV_KEEP(init.data)						\
	DEV_KEEP(exit.data)						\
	CPU_KEEP(init.data)						\
	CPU_KEEP(exit.data)						\
	MEM_KEEP(init.data)						\
	MEM_KEEP(exit.data)						\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___markers) = .;				\
	*(__markers)							\
	VMLINUX_SYMBOL(__stop___markers) = .;
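
/*
 * Usage sketch (assumed, minimal): an architecture's vmlinux.lds.S is
 * expected to expand DATA_DATA inside its own .data output section, e.g.
 *
 *	.data : AT(ADDR(.data) - LOAD_OFFSET) {
 *		DATA_DATA
 *		CONSTRUCTORS
 *	}
 *
 * CONSTRUCTORS is the standard ld keyword and is only shown for context.
 */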

#define RO_DATA(align)							\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rodata) = .;			\
		*(.rodata) *(.rodata.*)					\
		*(__vermagic)		/* Kernel version magic */	\
		*(__markers_strings)	/* Markers: strings */		\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
		*(.pci_fixup_early)					\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
		*(.pci_fixup_header)					\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
		*(.pci_fixup_final)					\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
		*(.pci_fixup_enable)					\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
		*(.pci_fixup_resume)					\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
	}								\
									\
	/* RapidIO route ops */						\
	.rio_route        : AT(ADDR(.rio_route) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_rio_route_ops) = .;		\
		*(.rio_route_ops)					\
		VMLINUX_SYMBOL(__end_rio_route_ops) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
		*(__ksymtab)						\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
		*(__ksymtab_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
		*(__ksymtab_unused)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
		*(__ksymtab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
		*(__ksymtab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
		*(__kcrctab)						\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
		*(__kcrctab_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
		*(__kcrctab_unused)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
		*(__kcrctab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
		*(__kcrctab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
		DEV_KEEP(init.rodata)					\
		DEV_KEEP(exit.rodata)					\
		CPU_KEEP(init.rodata)					\
		CPU_KEEP(exit.rodata)					\
		MEM_KEEP(init.rodata)					\
		MEM_KEEP(exit.rodata)					\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___param) = .;			\
		*(__param)						\
		VMLINUX_SYMBOL(__stop___param) = .;			\
		. = ALIGN((align));					\
		VMLINUX_SYMBOL(__end_rodata) = .;			\
	}								\
	. = ALIGN((align));

/* RODATA is provided for backward compatibility only.
 * All archs are supposed to use RO_DATA(). */
#define RODATA RO_DATA(4096)
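
/*
 * Usage sketch (assumed): architectures are expected to invoke RO_DATA()
 * themselves, typically with their page size so the read-only data can be
 * mapped read-only at page granularity, e.g.
 *
 *	RO_DATA(4096)
 *
 * The RODATA fallback above simply hard-codes that 4096 byte alignment.
 */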

#define SECURITY_INIT							\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
	}

/* .text section. Aligned to function alignment to avoid address changes
 * during the second ld pass when generating System.map. */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text)						\
		*(.ref.text)						\
		*(.text.init.refok)					\
		*(.exit.text.refok)					\
	DEV_KEEP(init.text)						\
	DEV_KEEP(exit.text)						\
	CPU_KEEP(init.text)						\
	CPU_KEEP(exit.text)						\
	MEM_KEEP(init.text)						\
	MEM_KEEP(exit.text)


/* sched.text is aligned to function alignment to ensure we get the same
 * address even at the second ld pass when generating System.map. */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__sched_text_start) = .;			\
		*(.sched.text)						\
		VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we get the same
 * address even at the second ld pass when generating System.map. */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__lock_text_start) = .;			\
		*(.spinlock.text)					\
		VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
		*(.kprobes.text)					\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;
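
/*
 * Usage sketch (assumed): the text helpers above are meant to be expanded
 * together inside the architecture's .text output section, e.g.
 *
 *	.text : AT(ADDR(.text) - LOAD_OFFSET) {
 *		TEXT_TEXT
 *		SCHED_TEXT
 *		LOCK_TEXT
 *		KPROBES_TEXT
 *		*(.fixup)
 *		*(.gnu.warning)
 *	}
 *
 * The .fixup/.gnu.warning entries are architecture specific and are only
 * shown for context.
 */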

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

/* init and exit section handling */
#define INIT_DATA							\
	*(.init.data)							\
	DEV_DISCARD(init.data)						\
	DEV_DISCARD(init.rodata)					\
	CPU_DISCARD(init.data)						\
	CPU_DISCARD(init.rodata)					\
	MEM_DISCARD(init.data)						\
	MEM_DISCARD(init.rodata)

#define INIT_TEXT							\
	*(.init.text)							\
	DEV_DISCARD(init.text)						\
	CPU_DISCARD(init.text)						\
	MEM_DISCARD(init.text)

#define EXIT_DATA							\
	*(.exit.data)							\
	DEV_DISCARD(exit.data)						\
	DEV_DISCARD(exit.rodata)					\
	CPU_DISCARD(exit.data)						\
	CPU_DISCARD(exit.rodata)					\
	MEM_DISCARD(exit.data)						\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT							\
	*(.exit.text)							\
	DEV_DISCARD(exit.text)						\
	CPU_DISCARD(exit.text)						\
	MEM_DISCARD(exit.text)
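
/*
 * Usage sketch (assumed): the init/exit helpers are expanded by the
 * architecture inside its init/exit output sections, which are typically
 * freed or discarded after boot, e.g.
 *
 *	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
 *		INIT_TEXT
 *	}
 *	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
 *		INIT_DATA
 *	}
 */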

		/* DWARF debug sections.
		 * Symbols in the DWARF debugging sections are relative to
		 * the beginning of the section so we begin them at 0. */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }

		/* Stabs debugging sections.  */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }
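
/*
 * Usage sketch (assumed): the debug helpers are normally placed at the
 * very end of the architecture linker script; the sections are emitted at
 * address 0 and are not part of the loadable image, e.g.
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 */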

#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		__start___bug_table = .;				\
		*(__bug_table)						\
		__stop___bug_table = .;					\
	}

#define NOTES								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start_notes) = .;			\
		*(.note.*)						\
		VMLINUX_SYMBOL(__stop_notes) = .;			\
	}

#define INITCALLS							\
	*(.initcall0.init)						\
	*(.initcall0s.init)						\
	*(.initcall1.init)						\
	*(.initcall1s.init)						\
	*(.initcall2.init)						\
	*(.initcall2s.init)						\
	*(.initcall3.init)						\
	*(.initcall3s.init)						\
	*(.initcall4.init)						\
	*(.initcall4s.init)						\
	*(.initcall5.init)						\
	*(.initcall5s.init)						\
	*(.initcallrootfs.init)						\
	*(.initcall6.init)						\
	*(.initcall6s.init)						\
	*(.initcall7.init)						\
	*(.initcall7s.init)
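
/*
 * Usage sketch (assumed): INITCALLS only lists the per-level input
 * sections in call order; the architecture script provides the enclosing
 * output section and the __initcall_start/__initcall_end markers that the
 * boot-time initcall loop in init/main.c walks, e.g.
 *
 *	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
 *		__initcall_start = .;
 *		INITCALLS
 *		__initcall_end = .;
 *	}
 */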

#define PERCPU(align)							\
	. = ALIGN(align);						\
	__per_cpu_start = .;						\
	.data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) {		\
		*(.data.percpu)						\
		*(.data.percpu.shared_aligned)				\
	}								\
	__per_cpu_end = .;
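
/*
 * Usage sketch (assumed): PERCPU() is invoked directly from the
 * architecture linker script with the desired alignment, e.g.
 *
 *	PERCPU(4096)
 *
 * which brackets the static per-cpu data with __per_cpu_start and
 * __per_cpu_end so the per-cpu setup code can size and replicate the
 * area for each cpu.
 */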