#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif

/* Align . to an 8-byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime).
 */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec)    *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif

#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
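
/*
 * For example, with CONFIG_HOTPLUG=y the token pasting gives:
 *
 *	DEV_KEEP(init.data)	->	*(.devinit.data)
 *	DEV_DISCARD(init.data)	->	(nothing)
 *
 * so .devinit.data input sections are kept in .data (via DATA_DATA
 * below) for the lifetime of the kernel.  Without CONFIG_HOTPLUG the
 * roles swap and the same sections are emitted by INIT_DATA instead,
 * where they can be freed after boot.  The CPU_* and MEM_* variants
 * behave the same way for CONFIG_HOTPLUG_CPU and CONFIG_MEMORY_HOTPLUG.
 */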

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	VMLINUX_SYMBOL(__start_mcount_loc) = .; \
			*(__mcount_loc)				\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif
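
/*
 * The two symbols bound an array of mcount call-site addresses
 * collected at link time.  A minimal consumer sketch (not the
 * authoritative ftrace code; record_site() is a hypothetical hook):
 *
 *	extern unsigned long __start_mcount_loc[];
 *	extern unsigned long __stop_mcount_loc[];
 *	unsigned long *p;
 *
 *	for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
 *		record_site(*p);
 */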

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch)			      \
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .;   \
				*(_ftrace_branch)			      \
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_EVENT_TRACER
#define FTRACE_EVENTS()	VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
			*(_ftrace_events)				\
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .;      \
			 *(__trace_printk_fmt) /* trace_printk fmt pointers */ \
			 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#else
#define TRACE_PRINTKS()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
			 *(__syscalls_metadata)				\
			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif

/* .data section */
#define DATA_DATA							\
	*(.data)							\
	*(.data.init.refok)						\
	*(.ref.data)							\
	DEV_KEEP(init.data)						\
	DEV_KEEP(exit.data)						\
	CPU_KEEP(init.data)						\
	CPU_KEEP(exit.data)						\
	MEM_KEEP(init.data)						\
	MEM_KEEP(exit.data)						\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___markers) = .;				\
	*(__markers)							\
	VMLINUX_SYMBOL(__stop___markers) = .;				\
	. = ALIGN(32);							\
	VMLINUX_SYMBOL(__start___tracepoints) = .;			\
	*(__tracepoints)						\
	VMLINUX_SYMBOL(__stop___tracepoints) = .;			\
	/* implement dynamic printk debug */				\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___verbose) = .;				\
	*(__verbose)							\
	VMLINUX_SYMBOL(__stop___verbose) = .;				\
	LIKELY_PROFILE()						\
	BRANCH_PROFILE()						\
	TRACE_PRINTKS()							\
	FTRACE_EVENTS()							\
	TRACE_SYSCALLS()
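
/*
 * Sketch of typical use in an architecture's vmlinux.lds.S (section
 * attributes and surrounding statements vary per arch):
 *
 *	.data : AT(ADDR(.data) - LOAD_OFFSET) {
 *		DATA_DATA
 *		CONSTRUCTORS
 *	}
 */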

#define RO_DATA(align)							\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rodata) = .;			\
		*(.rodata) *(.rodata.*)					\
		*(__vermagic)		/* Kernel version magic */	\
		*(__markers_strings)	/* Markers: strings */		\
		*(__tracepoints_strings)/* Tracepoints: strings */	\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	BUG_TABLE							\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
		*(.pci_fixup_early)					\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
		*(.pci_fixup_header)					\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
		*(.pci_fixup_final)					\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
		*(.pci_fixup_enable)					\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
		*(.pci_fixup_resume)					\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
		*(.pci_fixup_resume_early)				\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
		*(.pci_fixup_suspend)					\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
	}								\
									\
	/* Built-in firmware blobs */					\
	.builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_builtin_fw) = .;			\
		*(.builtin_fw)						\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;			\
	}								\
									\
	/* RapidIO route ops */						\
	.rio_route        : AT(ADDR(.rio_route) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_rio_route_ops) = .;		\
		*(.rio_route_ops)					\
		VMLINUX_SYMBOL(__end_rio_route_ops) = .;		\
	}								\
									\
	TRACEDATA							\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
		*(__ksymtab)						\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
		*(__ksymtab_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
		*(__ksymtab_unused)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
		*(__ksymtab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
		*(__ksymtab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
		*(__kcrctab)						\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
		*(__kcrctab_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
		*(__kcrctab_unused)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
		*(__kcrctab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
		*(__kcrctab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
		MCOUNT_REC()						\
		DEV_KEEP(init.rodata)					\
		DEV_KEEP(exit.rodata)					\
		CPU_KEEP(init.rodata)					\
		CPU_KEEP(exit.rodata)					\
		MEM_KEEP(init.rodata)					\
		MEM_KEEP(exit.rodata)					\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___param) = .;			\
		*(__param)						\
		VMLINUX_SYMBOL(__stop___param) = .;			\
		. = ALIGN((align));					\
		VMLINUX_SYMBOL(__end_rodata) = .;			\
	}								\
	. = ALIGN((align));

/* RODATA is provided for backward compatibility.
 * All architectures are supposed to use RO_DATA(). */
#define RODATA RO_DATA(4096)
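
/*
 * Typical invocation from an arch script (a sketch; the alignment is
 * normally the architecture's page size):
 *
 *	RO_DATA(PAGE_SIZE)
 *
 * This emits .rodata and its companion tables and brackets them with
 * __start_rodata/__end_rodata.
 */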

#define SECURITY_INIT							\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
	}

/* .text section. Align to function alignment to avoid address changes
 * during the second ld pass when generating System.map. */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text.hot)						\
		*(.text)						\
		*(.ref.text)						\
		*(.text.init.refok)					\
		*(.exit.text.refok)					\
	DEV_KEEP(init.text)						\
	DEV_KEEP(exit.text)						\
	CPU_KEEP(init.text)						\
	CPU_KEEP(exit.text)						\
	MEM_KEEP(init.text)						\
	MEM_KEEP(exit.text)						\
		*(.text.unlikely)
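
/*
 * Sketch of how the text macros compose in an arch script (PHDRs and
 * extra input sections vary per arch):
 *
 *	.text : AT(ADDR(.text) - LOAD_OFFSET) {
 *		TEXT_TEXT
 *		SCHED_TEXT
 *		LOCK_TEXT
 *		KPROBES_TEXT
 *		IRQENTRY_TEXT
 *	}
 */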


/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map. */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__sched_text_start) = .;			\
		*(.sched.text)						\
		VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map. */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__lock_text_start) = .;			\
		*(.spinlock.text)					\
		VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
		*(.kprobes.text)					\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__irqentry_text_start) = .;		\
		*(.irqentry.text)					\
		VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)
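
/*
 * Sketch: an arch script typically places HEAD_TEXT first so the boot
 * entry point sits at the start of the image, e.g.
 *
 *	.text : AT(ADDR(.text) - LOAD_OFFSET) {
 *		HEAD_TEXT
 *		TEXT_TEXT
 *	}
 */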

/* init and exit section handling */
#define INIT_DATA							\
	*(.init.data)							\
	DEV_DISCARD(init.data)						\
	DEV_DISCARD(init.rodata)					\
	CPU_DISCARD(init.data)						\
	CPU_DISCARD(init.rodata)					\
	MEM_DISCARD(init.data)						\
	MEM_DISCARD(init.rodata)

#define INIT_TEXT							\
	*(.init.text)							\
	DEV_DISCARD(init.text)						\
	CPU_DISCARD(init.text)						\
	MEM_DISCARD(init.text)

#define EXIT_DATA							\
	*(.exit.data)							\
	DEV_DISCARD(exit.data)						\
	DEV_DISCARD(exit.rodata)					\
	CPU_DISCARD(exit.data)						\
	CPU_DISCARD(exit.rodata)					\
	MEM_DISCARD(exit.data)						\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT							\
	*(.exit.text)							\
	DEV_DISCARD(exit.text)						\
	CPU_DISCARD(exit.text)						\
	MEM_DISCARD(exit.text)
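
/*
 * Sketch of the usual wiring in an arch script (alignment directives
 * omitted; the init sections are freed after boot):
 *
 *	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { INIT_TEXT }
 *	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { INIT_DATA }
 *	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { EXIT_TEXT }
 *	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { EXIT_DATA }
 */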

		/* DWARF debug sections.
		 * Symbols in the DWARF debugging sections are relative to
		 * the beginning of the section, so we begin them at 0.
		 */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }

		/* Stabs debugging sections.  */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___bug_table) = .;		\
		*(__bug_table)						\
		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
	}
#else
#define BUG_TABLE
#endif
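
/*
 * A minimal lookup sketch over the bug table (the bug_addr field name
 * assumes !CONFIG_GENERIC_BUG_RELATIVE_POINTERS; lib/bug.c is the real
 * consumer):
 *
 *	extern const struct bug_entry __start___bug_table[];
 *	extern const struct bug_entry __stop___bug_table[];
 *	const struct bug_entry *bug;
 *
 *	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
 *		if (bug->bug_addr == bugaddr)
 *			return bug;
 */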

#ifdef CONFIG_PM_TRACE
#define TRACEDATA							\
	. = ALIGN(4);							\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__tracedata_start) = .;			\
		*(.tracedata)						\
		VMLINUX_SYMBOL(__tracedata_end) = .;			\
	}
#else
#define TRACEDATA
#endif

#define NOTES								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start_notes) = .;			\
		*(.note.*)						\
		VMLINUX_SYMBOL(__stop_notes) = .;			\
	}
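
/*
 * NOTES is usually given its own program header so tools can locate
 * ELF notes such as the build ID, e.g. (a sketch, assuming a PHDRS
 * clause that declares both headers):
 *
 *	NOTES :text :note
 */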

#define INITCALLS							\
	*(.initcallearly.init)						\
	VMLINUX_SYMBOL(__early_initcall_end) = .;			\
	*(.initcall0.init)						\
	*(.initcall0s.init)						\
	*(.initcall1.init)						\
	*(.initcall1s.init)						\
	*(.initcall2.init)						\
	*(.initcall2s.init)						\
	*(.initcall3.init)						\
	*(.initcall3s.init)						\
	*(.initcall4.init)						\
	*(.initcall4s.init)						\
	*(.initcall5.init)						\
	*(.initcall5s.init)						\
	*(.initcallrootfs.init)						\
	*(.initcall6.init)						\
	*(.initcall6s.init)						\
	*(.initcall7.init)						\
	*(.initcall7s.init)
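
/*
 * Sketch of typical use: the arch script brackets INITCALLS with the
 * symbols that do_initcalls() walks at boot, in priority order:
 *
 *	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
 *		VMLINUX_SYMBOL(__initcall_start) = .;
 *		INITCALLS
 *		VMLINUX_SYMBOL(__initcall_end) = .;
 *	}
 */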

/**
 * PERCPU_VADDR - define output section for percpu area
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to the output section for the percpu area.  If
 * @vaddr is not blank, it specifies an explicit base address and all
 * percpu symbols will be offset from the given address.  If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank.  Be warned that
 * the output PHDR is sticky: if @phdr is specified, the next output
 * section in the linker script will go there too.  @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU().
 */
#define PERCPU_VADDR(vaddr, phdr)					\
	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
	.data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)		\
				- LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
		*(.data.percpu.first)					\
		*(.data.percpu.page_aligned)				\
		*(.data.percpu)						\
		*(.data.percpu.shared_aligned)				\
		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
	} phdr								\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
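
/*
 * Example (a sketch): x86_64 SMP builds map the percpu area at virtual
 * address 0 in a dedicated program header, roughly
 *
 *	PERCPU_VADDR(0, :percpu)
 *
 * so that each percpu symbol becomes a small offset added to a
 * per-CPU base address.
 */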

/**
 * PERCPU - define output section for percpu area, simple version
 * @align: required alignment
 *
 * Aligns to @align and outputs the output section for the percpu area.
 * This macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
 * __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
 * that __per_cpu_load is defined as a relative symbol against
 * .data.percpu, which is required for the relocatable x86_32
 * configuration.
 */
#define PERCPU(align)							\
	. = ALIGN(align);						\
	.data.percpu	: AT(ADDR(.data.percpu) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
		*(.data.percpu.first)					\
		*(.data.percpu.page_aligned)				\
		*(.data.percpu)						\
		*(.data.percpu.shared_aligned)				\
		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
	}
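
/*
 * Example (a sketch): architectures without special percpu placement
 * simply page-align the area:
 *
 *	PERCPU(PAGE_SIZE)
 *
 * Here __per_cpu_load and __per_cpu_start coincide, as described above.
 */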