/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample; architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	.text : { TEXT_TEXT }
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 * [__init_begin, __init_end] is the init section that may be freed after init
 *	(__init_begin and __init_end should be page aligned, so that the
 *	 whole .init memory can be freed)
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 *               [__nosave_begin, __nosave_end] for the nosave data
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif
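
/*
 * An architecture's linker script can override LOAD_OFFSET before
 * including this header to describe its virtual-to-load-address shift.
 * A sketch (x86-32 style; the exact symbol is arch specific):
 *
 *	#define LOAD_OFFSET __PAGE_OFFSET
 *	#include <asm-generic/vmlinux.lds.h>
 */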

#include <linux/export.h>

/* Align . to an 8 byte boundary; this equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/*
 * Align to a 32 byte boundary equal to the
 * alignment gcc 4.5 uses for a struct
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime)
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
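
/*
 * For example, MEM_KEEP(init.text) pastes to *(.meminit.text), the input
 * section that __meminit code is placed in, so memory-hotplug kernels
 * keep it as regular text while other kernels may discard it.
 * (Illustrative expansion; see __meminit in include/linux/init.h.)
 */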

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	. = ALIGN(8);				\
			VMLINUX_SYMBOL(__start_mcount_loc) = .; \
			*(__mcount_loc)				\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif
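
/*
 * The __start_/__stop_ symbol pairs emitted here are referenced from C
 * code as array bounds; a sketch of the ftrace side:
 *
 *	extern unsigned long __start_mcount_loc[];
 *	extern unsigned long __stop_mcount_loc[];
 *	ftrace_process_locs(NULL, __start_mcount_loc, __stop_mcount_loc);
 */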

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch)			      \
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .;   \
				*(_ftrace_branch)			      \
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST()	. = ALIGN(8);				      \
				VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
				*(_kprobe_blacklist)			      \
				VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
#else
#define KPROBE_BLACKLIST()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	. = ALIGN(8);					\
			VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
			*(_ftrace_events)				\
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;	\
			VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .;	\
			*(_ftrace_enum_map)				\
			VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .;      \
			 *(__trace_printk_fmt) /* trace_printk() format pointers */ \
			 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .;	\
			 *(__tracepoint_str) /* tracepoint string pointers */ \
			 VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8);					\
			 VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
			 *(__syscalls_metadata)				\
			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif

#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() STRUCT_ALIGN();			\
			 VMLINUX_SYMBOL(__earlycon_table) = .;	\
			 *(__earlycon_table)			\
			 VMLINUX_SYMBOL(__earlycon_table_end) = .;
#else
#define EARLYCON_TABLE()
#endif

#define ___OF_TABLE(cfg, name)	_OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name)	___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name)	__OF_TABLE(config_enabled(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name)						\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__##name##_of_table) = .;			\
	*(__##name##_of_table)						\
	*(__##name##_of_table_end)

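/*
 * Illustrative expansion: with CONFIG_COMMON_CLK=y, config_enabled()
 * evaluates to 1 and CLK_OF_TABLES() below expands to
 *
 *	. = ALIGN(8);
 *	VMLINUX_SYMBOL(__clk_of_table) = .;
 *	*(__clk_of_table)
 *	*(__clk_of_table_end)
 *
 * while with the option disabled it expands to nothing.
 */
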
#define CLKSRC_OF_TABLES()	OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES()		OF_TABLE(CONFIG_COMMON_CLK, clk)
#define IOMMU_OF_TABLES()	OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)

#ifdef CONFIG_ACPI
#define ACPI_PROBE_TABLE(name)						\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__##name##_acpi_probe_table) = .;		\
	*(__##name##_acpi_probe_table)					\
	VMLINUX_SYMBOL(__##name##_acpi_probe_table_end) = .;
#else
#define ACPI_PROBE_TABLE(name)
#endif

#define KERNEL_DTB()							\
	STRUCT_ALIGN();							\
	VMLINUX_SYMBOL(__dtb_start) = .;				\
	*(.dtb.init.rodata)						\
	VMLINUX_SYMBOL(__dtb_end) = .;

/* .data section */
#define DATA_DATA							\
	*(.data)							\
	*(.ref.data)							\
	*(.data..shared_aligned) /* percpu related */			\
	MEM_KEEP(init.data)						\
	MEM_KEEP(exit.data)						\
	*(.data.unlikely)						\
	STRUCT_ALIGN();							\
	*(__tracepoints)						\
	/* jump labels (static keys) and dynamic printk descriptors */	\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___jump_table) = .;			\
	*(__jump_table)							\
	VMLINUX_SYMBOL(__stop___jump_table) = .;			\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___verbose) = .;				\
	*(__verbose)							\
	VMLINUX_SYMBOL(__stop___verbose) = .;				\
	LIKELY_PROFILE()						\
	BRANCH_PROFILE()						\
	TRACE_PRINTKS()							\
	TRACEPOINT_STR()
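
/*
 * A sketch of how such table bounds are consumed: the jump label core
 * treats them as an array of entries, roughly
 *
 *	extern struct jump_entry __start___jump_table[];
 *	extern struct jump_entry __stop___jump_table[];
 *	for (iter = __start___jump_table; iter < __stop___jump_table; iter++)
 *		...
 */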

/*
 * Data section helpers
 */
#define NOSAVE_DATA							\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_begin) = .;				\
	*(.data..nosave)						\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align)					\
	. = ALIGN(page_align);						\
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align)						\
	. = ALIGN(align);						\
	*(.data..read_mostly)						\
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align)					\
	. = ALIGN(align);						\
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align)						\
	. = ALIGN(align);						\
	*(.data..init_task)

/*
 * Read-only data
 */
#define RO_DATA_SECTION(align)						\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rodata) = .;			\
		*(.rodata) *(.rodata.*)					\
		*(.data..ro_after_init)	/* Read only after init */	\
		*(__vermagic)		/* Kernel version magic */	\
		. = ALIGN(8);						\
		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;		\
		*(__tracepoints_ptrs)	/* Tracepoints: pointer array */\
		VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .;		\
		*(__tracepoints_strings)/* Tracepoints: strings */	\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	BUG_TABLE							\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
		*(.pci_fixup_early)					\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
		*(.pci_fixup_header)					\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
		*(.pci_fixup_final)					\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
		*(.pci_fixup_enable)					\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
		*(.pci_fixup_resume)					\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
		*(.pci_fixup_resume_early)				\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
		*(.pci_fixup_suspend)					\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .;	\
		*(.pci_fixup_suspend_late)				\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .;	\
	}								\
									\
	/* Built-in firmware blobs */					\
	.builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_builtin_fw) = .;			\
		*(.builtin_fw)						\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;			\
	}								\
									\
	TRACEDATA							\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
		*(SORT(___ksymtab+*))					\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
		*(SORT(___ksymtab_gpl+*))				\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
		*(SORT(___ksymtab_unused+*))				\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
		*(SORT(___ksymtab_unused_gpl+*))			\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
		*(SORT(___ksymtab_gpl_future+*))			\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
		*(SORT(___kcrctab+*))					\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
		*(SORT(___kcrctab_gpl+*))				\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
		*(SORT(___kcrctab_unused+*))				\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
		*(SORT(___kcrctab_unused_gpl+*))			\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
		*(SORT(___kcrctab_gpl_future+*))			\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
		MEM_KEEP(init.rodata)					\
		MEM_KEEP(exit.rodata)					\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___param) = .;			\
		*(__param)						\
		VMLINUX_SYMBOL(__stop___param) = .;			\
	}								\
									\
	/* Built-in module versions. */					\
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___modver) = .;			\
		*(__modver)						\
		VMLINUX_SYMBOL(__stop___modver) = .;			\
		. = ALIGN((align));					\
		VMLINUX_SYMBOL(__end_rodata) = .;			\
	}								\
	. = ALIGN((align));

/* RODATA & RO_DATA provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA          RO_DATA_SECTION(4096)
#define RO_DATA(align)  RO_DATA_SECTION(align)

#define SECURITY_INIT							\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
	}

/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text.hot .text .text.fixup .text.unlikely)		\
		*(.ref.text)						\
	MEM_KEEP(init.text)						\
	MEM_KEEP(exit.text)

/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__sched_text_start) = .;			\
		*(.sched.text)						\
		VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__lock_text_start) = .;			\
		*(.spinlock.text)					\
		VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
		*(.kprobes.text)					\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;

#define ENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__entry_text_start) = .;			\
		*(.entry.text)						\
		VMLINUX_SYMBOL(__entry_text_end) = .;

#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
#define IRQENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__irqentry_text_start) = .;		\
		*(.irqentry.text)					\
		VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif

#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
#define SOFTIRQENTRY_TEXT						\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__softirqentry_text_start) = .;		\
		*(.softirqentry.text)					\
		VMLINUX_SYMBOL(__softirqentry_text_end) = .;
#else
#define SOFTIRQENTRY_TEXT
#endif

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION						\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {		\
		HEAD_TEXT						\
	}

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)						\
	. = ALIGN(align);						\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ex_table) = .;			\
		*(__ex_table)						\
		VMLINUX_SYMBOL(__stop___ex_table) = .;			\
	}
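
/*
 * A sketch of the consumer side: the fault fixup code treats the pair
 * as array bounds (see kernel/extable.c),
 *
 *	extern struct exception_table_entry __start___ex_table[];
 *	extern struct exception_table_entry __stop___ex_table[];
 *	e = search_extable(__start___ex_table, __stop___ex_table - 1, addr);
 */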

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)					\
	. = ALIGN(align);						\
	.data..init_task :  AT(ADDR(.data..init_task) - LOAD_OFFSET) {	\
		INIT_TASK_DATA(align)					\
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);			   \
			VMLINUX_SYMBOL(__ctors_start) = .; \
			*(.ctors)			   \
			*(SORT(.init_array.*))		   \
			*(.init_array)			   \
			VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif
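
/*
 * Roughly how init/main.c invokes these constructors (a sketch):
 *
 *	extern ctor_fn_t __ctors_start[], __ctors_end[];
 *	for (ctor_fn_t *fn = __ctors_start; fn < __ctors_end; fn++)
 *		(*fn)();
 */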

/* init and exit section handling */
#define INIT_DATA							\
	*(.init.data)							\
	MEM_DISCARD(init.data)						\
	KERNEL_CTORS()							\
	MCOUNT_REC()							\
	*(.init.rodata)							\
	FTRACE_EVENTS()							\
	TRACE_SYSCALLS()						\
	KPROBE_BLACKLIST()						\
	MEM_DISCARD(init.rodata)					\
	CLK_OF_TABLES()							\
	RESERVEDMEM_OF_TABLES()						\
	CLKSRC_OF_TABLES()						\
	IOMMU_OF_TABLES()						\
	CPU_METHOD_OF_TABLES()						\
	CPUIDLE_METHOD_OF_TABLES()					\
	KERNEL_DTB()							\
	IRQCHIP_OF_MATCH_TABLE()					\
	ACPI_PROBE_TABLE(irqchip)					\
	ACPI_PROBE_TABLE(clksrc)					\
	EARLYCON_TABLE()

#define INIT_TEXT							\
	*(.init.text)							\
	MEM_DISCARD(init.text)

#define EXIT_DATA							\
	*(.exit.data)							\
	MEM_DISCARD(exit.data)						\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT							\
	*(.exit.text)							\
	MEM_DISCARD(exit.text)

#define EXIT_CALL							\
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)						\
	. = ALIGN(sbss_align);						\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {				\
		*(.sbss)						\
		*(.scommon)						\
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align)							\
	. = ALIGN(bss_align);						\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {				\
		BSS_FIRST_SECTIONS					\
		*(.bss..page_aligned)					\
		*(.dynbss)						\
		*(.bss)							\
		*(COMMON)						\
	}

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }

/* Stabs debugging sections.  */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___bug_table) = .;		\
		*(__bug_table)						\
		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA							\
	. = ALIGN(4);							\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__tracedata_start) = .;			\
		*(.tracedata)						\
		VMLINUX_SYMBOL(__tracedata_end) = .;			\
	}
#else
#define TRACEDATA
#endif

#define NOTES								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start_notes) = .;			\
		*(.note.*)						\
		VMLINUX_SYMBOL(__stop_notes) = .;			\
	}
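
/*
 * The notes span is exported to userspace through sysfs; a sketch of
 * the consumer in kernel/ksysfs.c:
 *
 *	extern const void __start_notes __weak;
 *	extern const void __stop_notes __weak;
 *	notes_attr.size = &__stop_notes - &__start_notes;
 */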

#define INIT_SETUP(initsetup_align)					\
		. = ALIGN(initsetup_align);				\
		VMLINUX_SYMBOL(__setup_start) = .;			\
		*(.init.setup)						\
		VMLINUX_SYMBOL(__setup_end) = .;

#define INIT_CALLS_LEVEL(level)						\
		VMLINUX_SYMBOL(__initcall##level##_start) = .;		\
		*(.initcall##level##.init)				\
		*(.initcall##level##s.init)

#define INIT_CALLS							\
		VMLINUX_SYMBOL(__initcall_start) = .;			\
		*(.initcallearly.init)					\
		INIT_CALLS_LEVEL(0)					\
		INIT_CALLS_LEVEL(1)					\
		INIT_CALLS_LEVEL(2)					\
		INIT_CALLS_LEVEL(3)					\
		INIT_CALLS_LEVEL(4)					\
		INIT_CALLS_LEVEL(5)					\
		INIT_CALLS_LEVEL(rootfs)				\
		INIT_CALLS_LEVEL(6)					\
		INIT_CALLS_LEVEL(7)					\
		VMLINUX_SYMBOL(__initcall_end) = .;
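
/*
 * A sketch of the consumer: init/main.c walks each level's span as an
 * array of initcall_t, roughly
 *
 *	extern initcall_t __initcall6_start[], __initcall7_start[];
 *	for (initcall_t *fn = __initcall6_start; fn < __initcall7_start; fn++)
 *		do_one_initcall(*fn);
 */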

#define CON_INITCALL							\
		VMLINUX_SYMBOL(__con_initcall_start) = .;		\
		*(.con_initcall.init)					\
		VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL						\
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS							\
	. = ALIGN(4);							\
	VMLINUX_SYMBOL(__initramfs_start) = .;				\
	*(.init.ramfs)							\
	. = ALIGN(8);							\
	*(.init.ramfs.info)
#else
#define INIT_RAM_FS
#endif

/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc.  DISCARDS must be the last of the output
 * section definitions so that such archs can put those sections in
 * earlier definitions.
 */
#define DISCARDS							\
	/DISCARD/ : {							\
	EXIT_TEXT							\
	EXIT_DATA							\
	EXIT_CALL							\
	*(.discard)							\
	*(.discard.*)							\
	}

/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline)						\
	VMLINUX_SYMBOL(__per_cpu_start) = .;				\
	*(.data..percpu..first)						\
	. = ALIGN(PAGE_SIZE);						\
	*(.data..percpu..page_aligned)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu..read_mostly)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu)						\
	*(.data..percpu..shared_aligned)				\
	VMLINUX_SYMBOL(__per_cpu_end) = .;

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies explicit base address and all
 * percpu symbols will be offset from the given address.  If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank.  Be warned that
 * output PHDR is sticky.  If @phdr is specified, the next output
 * section in the linker script will go there too.  @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr)				\
	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)		\
				- LOAD_OFFSET) {			\
		PERCPU_INPUT(cacheline)					\
	} phdr								\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
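
/*
 * Example usage (a sketch modeled on SMP x86_64, which bases the percpu
 * area at virtual address 0 and gives it a dedicated PHDR):
 *
 *	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 */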

/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Align to PAGE_SIZE and output the output section for the percpu area.
 * This macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
 * __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for the relocatable x86_32
 * configuration.
 */
#define PERCPU_SECTION(cacheline)					\
	. = ALIGN(PAGE_SIZE);						\
	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
		PERCPU_INPUT(cacheline)					\
	}


/*
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 */

/*
 * Writable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignments match.
 * A cacheline is typically, if not always, smaller than a PAGE_SIZE, so
 * the sections that have this restriction (or a similar one)
 * are located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page aligned data is not used */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask)		\
	. = ALIGN(PAGE_SIZE);						\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {				\
		INIT_TASK_DATA(inittask)				\
		NOSAVE_DATA						\
		PAGE_ALIGNED_DATA(pagealigned)				\
		CACHELINE_ALIGNED_DATA(cacheline)			\
		READ_MOSTLY_DATA(cacheline)				\
		DATA_DATA						\
		CONSTRUCTORS						\
	}
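
/*
 * Typical arch usage (a sketch; the parameters vary per architecture):
 *
 *	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 */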

#define INIT_TEXT_SECTION(inittext_align)				\
	. = ALIGN(inittext_align);					\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(_sinittext) = .;				\
		INIT_TEXT						\
		VMLINUX_SYMBOL(_einittext) = .;				\
	}

#define INIT_DATA_SECTION(initsetup_align)				\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {		\
		INIT_DATA						\
		INIT_SETUP(initsetup_align)				\
		INIT_CALLS						\
		CON_INITCALL						\
		SECURITY_INITCALL					\
		INIT_RAM_FS						\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)			\
	. = ALIGN(sbss_align);						\
	VMLINUX_SYMBOL(__bss_start) = .;				\
	SBSS(sbss_align)						\
	BSS(bss_align)							\
	. = ALIGN(stop_align);						\
	VMLINUX_SYMBOL(__bss_stop) = .;