/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of symbols.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data.
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#include <linux/export.h>

/* Align . to an 8 byte boundary, which equals the maximum function
 * alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/*
 * Align to a 32 byte boundary, equal to the
 * alignment gcc 4.5 uses for a struct.
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime).
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	. = ALIGN(8);				\
			VMLINUX_SYMBOL(__start_mcount_loc) = .;	\
			*(__mcount_loc)				\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch)	\
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .; \
				*(_ftrace_branch)		\
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif
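/*
 * Example: consuming one of the table sections above from C.  Each table
 * (here __mcount_loc) is bounded by a __start_<name> and __stop_<name>
 * symbol pair that C code can declare as extern arrays and iterate.
 * A minimal sketch; the walk_mcount_loc() helper and use_call_site()
 * are illustrative, not part of this header:
 *
 *	extern unsigned long __start_mcount_loc[];
 *	extern unsigned long __stop_mcount_loc[];
 *
 *	static void walk_mcount_loc(void)
 *	{
 *		unsigned long *p;
 *
 *		// Each entry holds the address of one mcount call site.
 *		for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
 *			use_call_site(*p);	// hypothetical consumer
 *	}
 */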
#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	. = ALIGN(8);				\
			VMLINUX_SYMBOL(__start_ftrace_events) = .; \
			*(_ftrace_events)			\
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()	 VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
			 *(__trace_printk_fmt) /* trace_printk() format pointers */ \
			 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
			 *(__tracepoint_str) /* tracepoint strings */ \
			 VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8);				\
			 VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
			 *(__syscalls_metadata)			\
			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif

#ifdef CONFIG_CLKSRC_OF
#define CLKSRC_OF_TABLES() . = ALIGN(8);			\
			   VMLINUX_SYMBOL(__clksrc_of_table) = .; \
			   *(__clksrc_of_table)			\
			   *(__clksrc_of_table_end)
#else
#define CLKSRC_OF_TABLES()
#endif

#ifdef CONFIG_IRQCHIP
#define IRQCHIP_OF_MATCH_TABLE()				\
	. = ALIGN(8);						\
	VMLINUX_SYMBOL(__irqchip_begin) = .;			\
	*(__irqchip_of_table)					\
	*(__irqchip_of_end)
#else
#define IRQCHIP_OF_MATCH_TABLE()
#endif

#ifdef CONFIG_COMMON_CLK
#define CLK_OF_TABLES()	. = ALIGN(8);				\
			VMLINUX_SYMBOL(__clk_of_table) = .;	\
			*(__clk_of_table)			\
			*(__clk_of_table_end)
#else
#define CLK_OF_TABLES()
#endif

#ifdef CONFIG_OF_RESERVED_MEM
#define RESERVEDMEM_OF_TABLES()					\
	. = ALIGN(8);						\
	VMLINUX_SYMBOL(__reservedmem_of_table) = .;		\
	*(__reservedmem_of_table)				\
	*(__reservedmem_of_table_end)
#else
#define RESERVEDMEM_OF_TABLES()
#endif

#ifdef CONFIG_SMP
#define CPU_METHOD_OF_TABLES() . = ALIGN(8);			\
			   VMLINUX_SYMBOL(__cpu_method_of_table_begin) = .; \
			   *(__cpu_method_of_table)		\
			   VMLINUX_SYMBOL(__cpu_method_of_table_end) = .;
#else
#define CPU_METHOD_OF_TABLES()
#endif

#define KERNEL_DTB()						\
	STRUCT_ALIGN();						\
	VMLINUX_SYMBOL(__dtb_start) = .;			\
	*(.dtb.init.rodata)					\
	VMLINUX_SYMBOL(__dtb_end) = .;

/* .data section */
#define DATA_DATA						\
	*(.data)						\
	*(.ref.data)						\
	*(.data..shared_aligned) /* percpu related */		\
	MEM_KEEP(init.data)					\
	MEM_KEEP(exit.data)					\
	*(.data.unlikely)					\
	STRUCT_ALIGN();						\
	*(__tracepoints)					\
	/* implement dynamic printk debug */			\
	. = ALIGN(8);						\
	VMLINUX_SYMBOL(__start___jump_table) = .;		\
	*(__jump_table)						\
	VMLINUX_SYMBOL(__stop___jump_table) = .;		\
	. = ALIGN(8);						\
	VMLINUX_SYMBOL(__start___verbose) = .;			\
	*(__verbose)						\
	VMLINUX_SYMBOL(__stop___verbose) = .;			\
	LIKELY_PROFILE()					\
	BRANCH_PROFILE()					\
	TRACE_PRINTKS()						\
	TRACEPOINT_STR()

/*
 * Data section helpers
 */
#define NOSAVE_DATA						\
	. = ALIGN(PAGE_SIZE);					\
	VMLINUX_SYMBOL(__nosave_begin) = .;			\
	*(.data..nosave)					\
	. = ALIGN(PAGE_SIZE);					\
	VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align)				\
	. = ALIGN(page_align);					\
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align)					\
	. = ALIGN(align);					\
	*(.data..read_mostly)					\
	. = ALIGN(align);
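/*
 * Example: using the [__nosave_begin, __nosave_end] markers defined by
 * NOSAVE_DATA above.  Hibernation code excludes this range from the
 * suspend image; a minimal sketch (the nosave_address() helper is
 * illustrative, not the actual kernel API):
 *
 *	extern char __nosave_begin[], __nosave_end[];
 *
 *	static bool nosave_address(unsigned long addr)
 *	{
 *		// Data placed in .data..nosave is neither saved to the
 *		// hibernation image nor restored on resume.
 *		return addr >= (unsigned long)__nosave_begin &&
 *		       addr <  (unsigned long)__nosave_end;
 *	}
 */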
#define CACHELINE_ALIGNED_DATA(align)				\
	. = ALIGN(align);					\
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align)					\
	. = ALIGN(align);					\
	*(.data..init_task)
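/*
 * Example: the input sections collected by the helpers above are fed by
 * section attributes on the C side.  A sketch of the usual pattern (the
 * kernel defines such markers in headers like <linux/cache.h> and
 * <linux/init_task.h>; the exact definitions vary by architecture and
 * config, and sysctl_foo is a hypothetical variable):
 *
 *	#define __read_mostly \
 *		__attribute__((__section__(".data..read_mostly")))
 *	#define __init_task_data \
 *		__attribute__((__section__(".data..init_task")))
 *
 *	static int sysctl_foo __read_mostly = 1;
 */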
/*
 * Read only Data
 */
#define RO_DATA_SECTION(align)					\
	. = ALIGN((align));					\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_rodata) = .;		\
		*(.rodata) *(.rodata.*)				\
		*(__vermagic)	/* Kernel version magic */	\
		. = ALIGN(8);					\
		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;	\
		*(__tracepoints_ptrs) /* Tracepoints: pointer array */ \
		VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .;	\
		*(__tracepoints_strings) /* Tracepoints: strings */ \
	}							\
								\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {	\
		*(.rodata1)					\
	}							\
								\
	BUG_TABLE						\
								\
	/* PCI quirks */					\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;	\
		*(.pci_fixup_early)				\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;	\
		*(.pci_fixup_header)				\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;	\
		*(.pci_fixup_final)				\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;	\
		*(.pci_fixup_enable)				\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;	\
		*(.pci_fixup_resume)				\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
		*(.pci_fixup_resume_early)			\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;	\
		*(.pci_fixup_suspend)				\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;	\
	}							\
								\
	/* Built-in firmware blobs */				\
	.builtin_fw       : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_builtin_fw) = .;		\
		*(.builtin_fw)					\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;		\
	}							\
								\
	TRACEDATA						\
								\
	/* Kernel symbol table: Normal symbols */		\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab) = .;		\
		*(SORT(___ksymtab+*))				\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;		\
	}							\
								\
	/* Kernel symbol table: GPL-only symbols */		\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;	\
		*(SORT(___ksymtab_gpl+*))			\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;	\
	}							\
								\
	/* Kernel symbol table: Normal unused symbols */	\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;	\
		*(SORT(___ksymtab_unused+*))			\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;	\
	}							\
								\
	/* Kernel symbol table: GPL-only unused symbols */	\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
		*(SORT(___ksymtab_unused_gpl+*))		\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
	}							\
								\
	/* Kernel symbol table: GPL-future-only symbols */	\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
		*(SORT(___ksymtab_gpl_future+*))		\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
	}							\
								\
	/* Kernel symbol table: Normal symbols */		\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab) = .;		\
		*(SORT(___kcrctab+*))				\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;		\
	}							\
								\
	/* Kernel symbol table: GPL-only symbols */		\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;	\
		*(SORT(___kcrctab_gpl+*))			\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;	\
	}							\
								\
	/* Kernel symbol table: Normal unused symbols */	\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;	\
		*(SORT(___kcrctab_unused+*))			\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;	\
	}							\
								\
	/* Kernel symbol table: GPL-only unused symbols */	\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
		*(SORT(___kcrctab_unused_gpl+*))		\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
	}							\
								\
	/* Kernel symbol table: GPL-future-only symbols */	\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
		*(SORT(___kcrctab_gpl_future+*))		\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
	}							\
								\
	/* Kernel symbol table: strings */			\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)				\
	}							\
								\
	/* __*init sections */					\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
		*(.ref.rodata)					\
		MEM_KEEP(init.rodata)				\
		MEM_KEEP(exit.rodata)				\
	}							\
								\
	/* Built-in module parameters. */			\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___param) = .;		\
		*(__param)					\
		VMLINUX_SYMBOL(__stop___param) = .;		\
	}							\
								\
	/* Built-in module versions. */				\
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___modver) = .;		\
		*(__modver)					\
		VMLINUX_SYMBOL(__stop___modver) = .;		\
		. = ALIGN((align));				\
		VMLINUX_SYMBOL(__end_rodata) = .;		\
	}							\
	. = ALIGN((align));

/* RODATA & RO_DATA provided for backward compatibility.
 * All archs are supposed to use RO_DATA(). */
#define RODATA          RO_DATA_SECTION(4096)
#define RO_DATA(align)  RO_DATA_SECTION(align)

#define SECURITY_INIT						\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;	\
		*(.security_initcall.init)			\
		VMLINUX_SYMBOL(__security_initcall_end) = .;	\
	}
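/*
 * Example: walking the built-in module parameters collected in the
 * __param section above.  This is roughly what early boot argument
 * parsing does; a sketch (dump_params() is illustrative, and struct
 * kernel_param is defined in <linux/moduleparam.h>):
 *
 *	extern const struct kernel_param __start___param[];
 *	extern const struct kernel_param __stop___param[];
 *
 *	static void dump_params(void)
 *	{
 *		const struct kernel_param *p;
 *
 *		// Each entry describes one module_param()/core_param().
 *		for (p = __start___param; p < __stop___param; p++)
 *			printk("param: %s\n", p->name);
 *	}
 */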
/* .text section.  Align to function alignment to avoid address changes
 * during the second ld pass when generating System.map. */
#define TEXT_TEXT						\
		ALIGN_FUNCTION();				\
		*(.text.hot)					\
		*(.text)					\
		*(.ref.text)					\
	MEM_KEEP(init.text)					\
	MEM_KEEP(exit.text)					\
		*(.text.unlikely)


/* sched.text is aligned to function alignment to ensure we have the
 * same address even at the second ld pass when generating System.map. */
#define SCHED_TEXT						\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__sched_text_start) = .;		\
		*(.sched.text)					\
		VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we have the
 * same address even at the second ld pass when generating System.map. */
#define LOCK_TEXT						\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__lock_text_start) = .;		\
		*(.spinlock.text)				\
		VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT						\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;	\
		*(.kprobes.text)				\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;

#define ENTRY_TEXT						\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__entry_text_start) = .;		\
		*(.entry.text)					\
		VMLINUX_SYMBOL(__entry_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT						\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__irqentry_text_start) = .;	\
		*(.irqentry.text)				\
		VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION					\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {	\
		HEAD_TEXT					\
	}

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)					\
	. = ALIGN(align);					\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ex_table) = .;		\
		*(__ex_table)					\
		VMLINUX_SYMBOL(__stop___ex_table) = .;		\
	}

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)				\
	. = ALIGN(align);					\
	.data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
		INIT_TASK_DATA(align)				\
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);				\
			VMLINUX_SYMBOL(__ctors_start) = .;	\
			*(.ctors)				\
			*(.init_array)				\
			VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA						\
	*(.init.data)						\
	MEM_DISCARD(init.data)					\
	KERNEL_CTORS()						\
	MCOUNT_REC()						\
	*(.init.rodata)						\
	FTRACE_EVENTS()						\
	TRACE_SYSCALLS()					\
	MEM_DISCARD(init.rodata)				\
	CLK_OF_TABLES()						\
	RESERVEDMEM_OF_TABLES()					\
	CLKSRC_OF_TABLES()					\
	CPU_METHOD_OF_TABLES()					\
	KERNEL_DTB()						\
	IRQCHIP_OF_MATCH_TABLE()

#define INIT_TEXT						\
	*(.init.text)						\
	MEM_DISCARD(init.text)

#define EXIT_DATA						\
	*(.exit.data)						\
	MEM_DISCARD(exit.data)					\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT						\
	*(.exit.text)						\
	MEM_DISCARD(exit.text)

#define EXIT_CALL						\
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)					\
	. = ALIGN(sbss_align);					\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {			\
		*(.sbss)					\
		*(.scommon)					\
	}
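/*
 * Example: the exception table declared by EXCEPTION_TABLE() above is
 * searched on a fault to find a fixup for the faulting instruction.
 * Conceptually (the real helper is search_extable() in lib/extable.c,
 * and the entry layout is architecture specific):
 *
 *	extern struct exception_table_entry __start___ex_table[];
 *	extern struct exception_table_entry __stop___ex_table[];
 *
 *	const struct exception_table_entry *find_fixup(unsigned long ip)
 *	{
 *		struct exception_table_entry *e;
 *
 *		// The table is sorted, so the real code uses a binary
 *		// search; a linear scan shows the idea.
 *		for (e = __start___ex_table; e < __stop___ex_table; e++)
 *			if (e->insn == ip)	// field name varies per arch
 *				return e;
 *		return NULL;
 *	}
 */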
/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align)						\
	. = ALIGN(bss_align);					\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {			\
		BSS_FIRST_SECTIONS				\
		*(.bss..page_aligned)				\
		*(.dynbss)					\
		*(.bss)						\
		*(COMMON)					\
	}

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section, so we begin them at 0.
 */
#define DWARF_DEBUG						\
		/* DWARF 1 */					\
		.debug          0 : { *(.debug) }		\
		.line           0 : { *(.line) }		\
		/* GNU DWARF 1 extensions */			\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }	\
		.debug_sfnames  0 : { *(.debug_sfnames) }	\
		/* DWARF 1.1 and DWARF 2 */			\
		.debug_aranges  0 : { *(.debug_aranges) }	\
		.debug_pubnames 0 : { *(.debug_pubnames) }	\
		/* DWARF 2 */					\
		.debug_info     0 : { *(.debug_info		\
				.gnu.linkonce.wi.*) }		\
		.debug_abbrev   0 : { *(.debug_abbrev) }	\
		.debug_line     0 : { *(.debug_line) }		\
		.debug_frame    0 : { *(.debug_frame) }		\
		.debug_str      0 : { *(.debug_str) }		\
		.debug_loc      0 : { *(.debug_loc) }		\
		.debug_macinfo  0 : { *(.debug_macinfo) }	\
		/* SGI/MIPS DWARF 2 extensions */		\
		.debug_weaknames 0 : { *(.debug_weaknames) }	\
		.debug_funcnames 0 : { *(.debug_funcnames) }	\
		.debug_typenames 0 : { *(.debug_typenames) }	\
		.debug_varnames  0 : { *(.debug_varnames) }

/* Stabs debugging sections. */
#define STABS_DEBUG						\
		.stab 0 : { *(.stab) }				\
		.stabstr 0 : { *(.stabstr) }			\
		.stab.excl 0 : { *(.stab.excl) }		\
		.stab.exclstr 0 : { *(.stab.exclstr) }		\
		.stab.index 0 : { *(.stab.index) }		\
		.stab.indexstr 0 : { *(.stab.indexstr) }	\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE						\
	. = ALIGN(8);						\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___bug_table) = .;	\
		*(__bug_table)					\
		VMLINUX_SYMBOL(__stop___bug_table) = .;		\
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA						\
	. = ALIGN(4);						\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__tracedata_start) = .;		\
		*(.tracedata)					\
		VMLINUX_SYMBOL(__tracedata_end) = .;		\
	}
#else
#define TRACEDATA
#endif

#define NOTES							\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_notes) = .;		\
		*(.note.*)					\
		VMLINUX_SYMBOL(__stop_notes) = .;		\
	}
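/*
 * Example: resolving a BUG() address via the __bug_table section above.
 * A sketch of what lib/bug.c's find_bug() does (bug_addr() is a
 * stand-in; how the address is stored depends on
 * CONFIG_GENERIC_BUG_RELATIVE_POINTERS):
 *
 *	extern const struct bug_entry __start___bug_table[];
 *	extern const struct bug_entry __stop___bug_table[];
 *
 *	const struct bug_entry *find_bug(unsigned long bugaddr)
 *	{
 *		const struct bug_entry *bug;
 *
 *		// Each BUG()/WARN() site contributes one bug_entry.
 *		for (bug = __start___bug_table; bug < __stop___bug_table; bug++)
 *			if (bug_addr(bug) == bugaddr)
 *				return bug;
 *		return NULL;	// the real code falls back to modules
 *	}
 */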
#define INIT_SETUP(initsetup_align)				\
		. = ALIGN(initsetup_align);			\
		VMLINUX_SYMBOL(__setup_start) = .;		\
		*(.init.setup)					\
		VMLINUX_SYMBOL(__setup_end) = .;

#define INIT_CALLS_LEVEL(level)					\
		VMLINUX_SYMBOL(__initcall##level##_start) = .;	\
		*(.initcall##level##.init)			\
		*(.initcall##level##s.init)

#define INIT_CALLS						\
		VMLINUX_SYMBOL(__initcall_start) = .;		\
		*(.initcallearly.init)				\
		INIT_CALLS_LEVEL(0)				\
		INIT_CALLS_LEVEL(1)				\
		INIT_CALLS_LEVEL(2)				\
		INIT_CALLS_LEVEL(3)				\
		INIT_CALLS_LEVEL(4)				\
		INIT_CALLS_LEVEL(5)				\
		INIT_CALLS_LEVEL(rootfs)			\
		INIT_CALLS_LEVEL(6)				\
		INIT_CALLS_LEVEL(7)				\
		VMLINUX_SYMBOL(__initcall_end) = .;

#define CON_INITCALL						\
		VMLINUX_SYMBOL(__con_initcall_start) = .;	\
		*(.con_initcall.init)				\
		VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL					\
		VMLINUX_SYMBOL(__security_initcall_start) = .;	\
		*(.security_initcall.init)			\
		VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS						\
	. = ALIGN(4);						\
	VMLINUX_SYMBOL(__initramfs_start) = .;			\
	*(.init.ramfs)						\
	. = ALIGN(8);						\
	*(.init.ramfs.info)
#else
#define INIT_RAM_FS
#endif

/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc.  DISCARDS must be the last of the output
 * section definitions so that such archs put those in earlier section
 * definitions.
 */
#define DISCARDS						\
	/DISCARD/ : {						\
	EXIT_TEXT						\
	EXIT_DATA						\
	EXIT_CALL						\
	*(.discard)						\
	*(.discard.*)						\
	}

/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline)					\
	VMLINUX_SYMBOL(__per_cpu_start) = .;			\
	*(.data..percpu..first)					\
	. = ALIGN(PAGE_SIZE);					\
	*(.data..percpu..page_aligned)				\
	. = ALIGN(cacheline);					\
	*(.data..percpu..readmostly)				\
	. = ALIGN(cacheline);					\
	*(.data..percpu)					\
	*(.data..percpu..shared_aligned)			\
	VMLINUX_SYMBOL(__per_cpu_end) = .;
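/*
 * Example: the [__per_cpu_start, __per_cpu_end] range delimited by
 * PERCPU_INPUT() is only a template; each possible CPU gets its own
 * copy, and per-CPU accesses add that CPU's offset.  A simplified
 * sketch of a setup_per_cpu_areas()-style loop (alloc_percpu_chunk()
 * is illustrative, not a real allocator):
 *
 *	extern char __per_cpu_start[], __per_cpu_end[];
 *
 *	static void copy_percpu_template(void)
 *	{
 *		size_t size = __per_cpu_end - __per_cpu_start;
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu) {
 *			char *ptr = alloc_percpu_chunk(cpu, size);
 *
 *			// Seed this CPU's area from the linked template.
 *			memcpy(ptr, __per_cpu_start, size);
 *			__per_cpu_offset[cpu] = ptr - __per_cpu_start;
 *		}
 *	}
 */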
/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to the output section for the percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies an explicit base address and all
 * percpu symbols will be offset from the given address.  If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank.  Be warned that
 * output PHDR is sticky.  If @phdr is specified, the next output
 * section in the linker script will go there too.  @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr)			\
	VMLINUX_SYMBOL(__per_cpu_load) = .;			\
	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)	\
				- LOAD_OFFSET) {		\
		PERCPU_INPUT(cacheline)				\
	} phdr							\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);

/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Align to PAGE_SIZE and output the output section for the percpu area.
 * This macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
 * __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for relocatable x86_32 configurations.
 */
#define PERCPU_SECTION(cacheline)				\
	. = ALIGN(PAGE_SIZE);					\
	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__per_cpu_load) = .;		\
		PERCPU_INPUT(cacheline)				\
	}


/*
 * Definitions of the high level *_SECTION macros.
 * They will fit only a subset of the architectures.
 */


/*
 * Writable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically/always smaller than a PAGE_SIZE, so
 * the sections that have this restriction (or a similar one)
 * are located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page-aligned data is not used.
 */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask)	\
	. = ALIGN(PAGE_SIZE);					\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {			\
		INIT_TASK_DATA(inittask)			\
		NOSAVE_DATA					\
		PAGE_ALIGNED_DATA(pagealigned)			\
		CACHELINE_ALIGNED_DATA(cacheline)		\
		READ_MOSTLY_DATA(cacheline)			\
		DATA_DATA					\
		CONSTRUCTORS					\
	}

#define INIT_TEXT_SECTION(inittext_align)			\
	. = ALIGN(inittext_align);				\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(_sinittext) = .;			\
		INIT_TEXT					\
		VMLINUX_SYMBOL(_einittext) = .;			\
	}

#define INIT_DATA_SECTION(initsetup_align)			\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {	\
		INIT_DATA					\
		INIT_SETUP(initsetup_align)			\
		INIT_CALLS					\
		CON_INITCALL					\
		SECURITY_INITCALL				\
		INIT_RAM_FS					\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)		\
	. = ALIGN(sbss_align);					\
	VMLINUX_SYMBOL(__bss_start) = .;			\
	SBSS(sbss_align)					\
	BSS(bss_align)						\
	. = ALIGN(stop_align);					\
	VMLINUX_SYMBOL(__bss_stop) = .;
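/*
 * Example: BSS_SECTION() above defines __bss_start and __bss_stop,
 * which early architecture boot code uses to zero the bss before any C
 * code relies on it.  A minimal sketch (clear_bss() is the conventional
 * name in some architectures' head code):
 *
 *	extern char __bss_start[], __bss_stop[];
 *
 *	static void clear_bss(void)
 *	{
 *		// Uninitialized data must read as zero per the C ABI.
 *		memset(__bss_start, 0, __bss_stop - __bss_start);
 *	}
 */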