#include <linux/section-names.h>

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif

/* Align . to an 8 byte boundary; this equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime).
 */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec)    *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif

#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()    VMLINUX_SYMBOL(__start_mcount_loc) = .; \
                        *(__mcount_loc) \
                        VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()        VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
                                *(_ftrace_annotated_branch) \
                                VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()        VMLINUX_SYMBOL(__start_branch_profile) = .; \
                                *(_ftrace_branch) \
                                VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_EVENT_TRACER
#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \
                        *(_ftrace_events) \
                        VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
                        *(__trace_printk_fmt) /* trace_printk() format pointers */ \
                        VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#else
#define TRACE_PRINTKS()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
                         *(__syscalls_metadata) \
                         VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif

/* .data section */
#define DATA_DATA \
        *(.data) \
        *(.ref.data) \
        DEV_KEEP(init.data) \
        DEV_KEEP(exit.data) \
        CPU_KEEP(init.data) \
        CPU_KEEP(exit.data) \
        MEM_KEEP(init.data) \
        MEM_KEEP(exit.data) \
        . = ALIGN(8); \
        VMLINUX_SYMBOL(__start___markers) = .; \
        *(__markers) \
        VMLINUX_SYMBOL(__stop___markers) = .; \
        . = ALIGN(32); \
        VMLINUX_SYMBOL(__start___tracepoints) = .; \
        *(__tracepoints) \
        VMLINUX_SYMBOL(__stop___tracepoints) = .; \
        /* implement dynamic printk debug */ \
        . = ALIGN(8); \
        VMLINUX_SYMBOL(__start___verbose) = .; \
        *(__verbose) \
        VMLINUX_SYMBOL(__stop___verbose) = .; \
        LIKELY_PROFILE() \
        BRANCH_PROFILE() \
        TRACE_PRINTKS() \
        FTRACE_EVENTS() \
        TRACE_SYSCALLS()
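
/*
 * Illustrative sketch (not taken from any particular architecture):
 * an arch's vmlinux.lds.S typically pulls DATA_DATA into its .data
 * output section, roughly like:
 *
 *	.data : AT(ADDR(.data) - LOAD_OFFSET) {
 *		DATA_DATA
 *		CONSTRUCTORS
 *	}
 *
 * CONSTRUCTORS here is the standard ld keyword; whether an arch
 * needs it depends on that arch's script.
 */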

#define RO_DATA(align) \
        . = ALIGN((align)); \
        .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_rodata) = .; \
                *(.rodata) *(.rodata.*) \
                *(__vermagic) /* Kernel version magic */ \
                *(__markers_strings) /* Markers: strings */ \
                *(__tracepoints_strings) /* Tracepoints: strings */ \
        } \
        \
        .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
                *(.rodata1) \
        } \
        \
        BUG_TABLE \
        \
        /* PCI quirks */ \
        .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
                *(.pci_fixup_early) \
                VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
                *(.pci_fixup_header) \
                VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
                *(.pci_fixup_final) \
                VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
                *(.pci_fixup_enable) \
                VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
                *(.pci_fixup_resume) \
                VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
                *(.pci_fixup_resume_early) \
                VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
                *(.pci_fixup_suspend) \
                VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
        } \
        \
        /* Built-in firmware blobs */ \
        .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_builtin_fw) = .; \
                *(.builtin_fw) \
                VMLINUX_SYMBOL(__end_builtin_fw) = .; \
        } \
        \
        /* RapidIO route ops */ \
        .rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_rio_route_ops) = .; \
                *(.rio_route_ops) \
                VMLINUX_SYMBOL(__end_rio_route_ops) = .; \
        } \
        \
        TRACEDATA \
        \
        /* Kernel symbol table: Normal symbols */ \
        __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab) = .; \
                *(__ksymtab) \
                VMLINUX_SYMBOL(__stop___ksymtab) = .; \
        } \
        \
        /* Kernel symbol table: GPL-only symbols */ \
        __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
                *(__ksymtab_gpl) \
                VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
        } \
        \
        /* Kernel symbol table: Normal unused symbols */ \
        __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
                *(__ksymtab_unused) \
                VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
        } \
        \
        /* Kernel symbol table: GPL-only unused symbols */ \
        __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
                *(__ksymtab_unused_gpl) \
                VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
        } \
        \
        /* Kernel symbol table: GPL-future-only symbols */ \
        __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
                *(__ksymtab_gpl_future) \
                VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
        } \
        \
        /* Kernel symbol table: Normal symbols */ \
        __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab) = .; \
                *(__kcrctab) \
                VMLINUX_SYMBOL(__stop___kcrctab) = .; \
        } \
        \
        /* Kernel symbol table: GPL-only symbols */ \
        __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
                *(__kcrctab_gpl) \
                VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
        } \
        \
        /* Kernel symbol table: Normal unused symbols */ \
        __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
                *(__kcrctab_unused) \
                VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
        } \
        \
        /* Kernel symbol table: GPL-only unused symbols */ \
        __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
                *(__kcrctab_unused_gpl) \
                VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
        } \
        \
        /* Kernel symbol table: GPL-future-only symbols */ \
        __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
                *(__kcrctab_gpl_future) \
                VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
        } \
        \
        /* Kernel symbol table: strings */ \
        __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
                *(__ksymtab_strings) \
        } \
        \
        /* __*init sections */ \
        __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
                *(.ref.rodata) \
                MCOUNT_REC() \
                DEV_KEEP(init.rodata) \
                DEV_KEEP(exit.rodata) \
                CPU_KEEP(init.rodata) \
                CPU_KEEP(exit.rodata) \
                MEM_KEEP(init.rodata) \
                MEM_KEEP(exit.rodata) \
        } \
        \
        /* Built-in module parameters. */ \
        __param : AT(ADDR(__param) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___param) = .; \
                *(__param) \
                VMLINUX_SYMBOL(__stop___param) = .; \
                . = ALIGN((align)); \
                VMLINUX_SYMBOL(__end_rodata) = .; \
        } \
        . = ALIGN((align));

/* RODATA is provided for backward compatibility.
 * All archs are supposed to use RO_DATA(). */
#define RODATA RO_DATA(4096)
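
/*
 * Illustrative sketch: an arch script normally just invokes RO_DATA()
 * with the alignment it wants, e.g.
 *
 *	RO_DATA(4096)
 *
 * to emit .rodata and the companion sections above aligned to a 4 KiB
 * page. Older scripts may still spell this as RODATA.
 */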

#define SECURITY_INIT \
        .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__security_initcall_start) = .; \
                *(.security_initcall.init) \
                VMLINUX_SYMBOL(__security_initcall_end) = .; \
        }

/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map. */
#define TEXT_TEXT \
        ALIGN_FUNCTION(); \
        *(.text.hot) \
        *(.text) \
        *(.ref.text) \
        DEV_KEEP(init.text) \
        DEV_KEEP(exit.text) \
        CPU_KEEP(init.text) \
        CPU_KEEP(exit.text) \
        MEM_KEEP(init.text) \
        MEM_KEEP(exit.text) \
        *(.text.unlikely)

/* sched.text is aligned to function alignment to ensure we get the
 * same address even on the second ld pass when generating System.map. */
#define SCHED_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__sched_text_start) = .; \
        *(.sched.text) \
        VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we get the
 * same address even on the second ld pass when generating System.map. */
#define LOCK_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__lock_text_start) = .; \
        *(.spinlock.text) \
        VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__kprobes_text_start) = .; \
        *(.kprobes.text) \
        VMLINUX_SYMBOL(__kprobes_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__irqentry_text_start) = .; \
        *(.irqentry.text) \
        VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(HEAD_TEXT_SECTION)
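
/*
 * Illustrative sketch: the text helpers above are meant to be composed
 * inside an arch's .text output section, roughly:
 *
 *	.text : AT(ADDR(.text) - LOAD_OFFSET) {
 *		TEXT_TEXT
 *		SCHED_TEXT
 *		LOCK_TEXT
 *		KPROBES_TEXT
 *		IRQENTRY_TEXT
 *	}
 *
 * Each helper starts with ALIGN_FUNCTION() so its marker symbols land
 * on function-alignment boundaries.
 */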

/* init and exit section handling */
#define INIT_DATA \
        *(.init.data) \
        DEV_DISCARD(init.data) \
        DEV_DISCARD(init.rodata) \
        CPU_DISCARD(init.data) \
        CPU_DISCARD(init.rodata) \
        MEM_DISCARD(init.data) \
        MEM_DISCARD(init.rodata)

#define INIT_TEXT \
        *(.init.text) \
        DEV_DISCARD(init.text) \
        CPU_DISCARD(init.text) \
        MEM_DISCARD(init.text)

#define EXIT_DATA \
        *(.exit.data) \
        DEV_DISCARD(exit.data) \
        DEV_DISCARD(exit.rodata) \
        CPU_DISCARD(exit.data) \
        CPU_DISCARD(exit.rodata) \
        MEM_DISCARD(exit.data) \
        MEM_DISCARD(exit.rodata)

#define EXIT_TEXT \
        *(.exit.text) \
        DEV_DISCARD(exit.text) \
        CPU_DISCARD(exit.text) \
        MEM_DISCARD(exit.text)
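
/*
 * Illustrative sketch: INIT_TEXT/INIT_DATA typically go into output
 * sections that are freed after boot, while the EXIT_* input that was
 * not kept is usually routed to /DISCARD/ by the arch script, e.g.:
 *
 *	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
 *		_sinittext = .;
 *		INIT_TEXT
 *		_einittext = .;
 *	}
 *
 *	/DISCARD/ : {
 *		EXIT_TEXT
 *		EXIT_DATA
 *	}
 *
 * _sinittext/_einittext are the conventional marker names, but they
 * are the arch script's business and are not defined here.
 */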

/* DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section, so we begin them at 0. */
#define DWARF_DEBUG \
        /* DWARF 1 */ \
        .debug 0 : { *(.debug) } \
        .line 0 : { *(.line) } \
        /* GNU DWARF 1 extensions */ \
        .debug_srcinfo 0 : { *(.debug_srcinfo) } \
        .debug_sfnames 0 : { *(.debug_sfnames) } \
        /* DWARF 1.1 and DWARF 2 */ \
        .debug_aranges 0 : { *(.debug_aranges) } \
        .debug_pubnames 0 : { *(.debug_pubnames) } \
        /* DWARF 2 */ \
        .debug_info 0 : { *(.debug_info \
        .gnu.linkonce.wi.*) } \
        .debug_abbrev 0 : { *(.debug_abbrev) } \
        .debug_line 0 : { *(.debug_line) } \
        .debug_frame 0 : { *(.debug_frame) } \
        .debug_str 0 : { *(.debug_str) } \
        .debug_loc 0 : { *(.debug_loc) } \
        .debug_macinfo 0 : { *(.debug_macinfo) } \
        /* SGI/MIPS DWARF 2 extensions */ \
        .debug_weaknames 0 : { *(.debug_weaknames) } \
        .debug_funcnames 0 : { *(.debug_funcnames) } \
        .debug_typenames 0 : { *(.debug_typenames) } \
        .debug_varnames 0 : { *(.debug_varnames) }

/* Stabs debugging sections. */
#define STABS_DEBUG \
        .stab 0 : { *(.stab) } \
        .stabstr 0 : { *(.stabstr) } \
        .stab.excl 0 : { *(.stab.excl) } \
        .stab.exclstr 0 : { *(.stab.exclstr) } \
        .stab.index 0 : { *(.stab.index) } \
        .stab.indexstr 0 : { *(.stab.indexstr) } \
        .comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE \
        . = ALIGN(8); \
        __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___bug_table) = .; \
                *(__bug_table) \
                VMLINUX_SYMBOL(__stop___bug_table) = .; \
        }
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
        . = ALIGN(4); \
        .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__tracedata_start) = .; \
                *(.tracedata) \
                VMLINUX_SYMBOL(__tracedata_end) = .; \
        }
#else
#define TRACEDATA
#endif

#define NOTES \
        .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_notes) = .; \
                *(.note.*) \
                VMLINUX_SYMBOL(__stop_notes) = .; \
        }

#define INITCALLS \
        *(.initcallearly.init) \
        VMLINUX_SYMBOL(__early_initcall_end) = .; \
        *(.initcall0.init) \
        *(.initcall0s.init) \
        *(.initcall1.init) \
        *(.initcall1s.init) \
        *(.initcall2.init) \
        *(.initcall2s.init) \
        *(.initcall3.init) \
        *(.initcall3s.init) \
        *(.initcall4.init) \
        *(.initcall4s.init) \
        *(.initcall5.init) \
        *(.initcall5s.init) \
        *(.initcallrootfs.init) \
        *(.initcall6.init) \
        *(.initcall6s.init) \
        *(.initcall7.init) \
        *(.initcall7s.init)
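
/*
 * Illustrative sketch: arch scripts wrap INITCALLS between start/end
 * markers that the boot code walks, roughly:
 *
 *	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
 *		VMLINUX_SYMBOL(__initcall_start) = .;
 *		INITCALLS
 *		VMLINUX_SYMBOL(__initcall_end) = .;
 *	}
 */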

/**
 * PERCPU_VADDR - define output section for percpu area
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area. If @vaddr
 * is not blank, it specifies explicit base address and all percpu
 * symbols will be offset from the given address. If blank, @vaddr
 * always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank. Be warned that
 * output PHDR is sticky. If @phdr is specified, the next output
 * section in the linker script will go there too. @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU().
 */
#define PERCPU_VADDR(vaddr, phdr) \
        VMLINUX_SYMBOL(__per_cpu_load) = .; \
        .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
                                - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__per_cpu_start) = .; \
                *(.data.percpu.first) \
                *(.data.percpu.page_aligned) \
                *(.data.percpu) \
                *(.data.percpu.shared_aligned) \
                VMLINUX_SYMBOL(__per_cpu_end) = .; \
        } phdr \
        . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);

/**
 * PERCPU - define output section for percpu area, simple version
 * @align: required alignment
 *
 * Align to @align and define the output section for the percpu area.
 * This macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load
 * and __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
 * that __per_cpu_load is defined as a relative symbol against
 * .data.percpu, which is required for relocatable x86_32
 * configurations.
 */
#define PERCPU(align) \
        . = ALIGN(align); \
        .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__per_cpu_load) = .; \
                VMLINUX_SYMBOL(__per_cpu_start) = .; \
                *(.data.percpu.first) \
                *(.data.percpu.page_aligned) \
                *(.data.percpu) \
                *(.data.percpu.shared_aligned) \
                VMLINUX_SYMBOL(__per_cpu_end) = .; \
        }
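
/*
 * Illustrative sketch: most architectures take the simple route, e.g.
 *
 *	PERCPU(PAGE_SIZE)
 *
 * in their vmlinux.lds.S, while an arch that needs the percpu area at
 * a fixed address (x86_64 SMP) uses something like
 * PERCPU_VADDR(0, :percpu) together with a dedicated PHDR.
 */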