/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#ifndef SYMBOL_PREFIX
#define VMLINUX_SYMBOL(sym) sym
#else
#define PASTE2(x,y) x##y
#define PASTE(x,y) PASTE2(x,y)
#define VMLINUX_SYMBOL(sym) PASTE(SYMBOL_PREFIX, sym)
#endif

/* Align . to an 8 byte boundary; this equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/*
 * Align to a 32 byte boundary equal to the
 * alignment gcc 4.5 uses for a struct
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/* The actual configuration determines if the init/exit sections
 * are handled as text/data or they can be discarded (which
 * often happens at runtime)
 */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec)    *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif

#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	. = ALIGN(8);				\
			VMLINUX_SYMBOL(__start_mcount_loc) = .;	\
			*(__mcount_loc)				\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch)		\
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .; \
				*(_ftrace_branch)			\
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif
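/*
 * A minimal sketch (not part of this header) of how start/stop marker
 * pairs such as the ones emitted by MCOUNT_REC() above are typically
 * consumed from C: the two symbols delimit an array of recorded
 * mcount call sites. register_mcount_callsite() is a hypothetical
 * helper used only for illustration:
 *
 *	extern unsigned long __start_mcount_loc[];
 *	extern unsigned long __stop_mcount_loc[];
 *	unsigned long *p;
 *
 *	for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
 *		register_mcount_callsite(*p);
 */
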
#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	. = ALIGN(8);				\
			VMLINUX_SYMBOL(__start_ftrace_events) = .; \
			*(_ftrace_events)			\
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()	VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
			*(__trace_printk_fmt) /* Trace_printk fmt pointers */ \
			VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#else
#define TRACE_PRINTKS()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8);				\
			 VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
			 *(__syscalls_metadata)			\
			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif

#ifdef CONFIG_CLKSRC_OF
#define CLKSRC_OF_TABLES() . = ALIGN(8);			\
			   VMLINUX_SYMBOL(__clksrc_of_table) = .; \
			   *(__clksrc_of_table)			\
			   *(__clksrc_of_table_end)
#else
#define CLKSRC_OF_TABLES()
#endif

#ifdef CONFIG_IRQCHIP
#define IRQCHIP_OF_MATCH_TABLE()				\
	. = ALIGN(8);						\
	VMLINUX_SYMBOL(__irqchip_begin) = .;			\
	*(__irqchip_of_table)					\
	*(__irqchip_of_end)
#else
#define IRQCHIP_OF_MATCH_TABLE()
#endif

#ifdef CONFIG_COMMON_CLK
#define CLK_OF_TABLES()	. = ALIGN(8);				\
			VMLINUX_SYMBOL(__clk_of_table) = .;	\
			*(__clk_of_table)			\
			*(__clk_of_table_end)
#else
#define CLK_OF_TABLES()
#endif

#define KERNEL_DTB()						\
	STRUCT_ALIGN();						\
	VMLINUX_SYMBOL(__dtb_start) = .;			\
	*(.dtb.init.rodata)					\
	VMLINUX_SYMBOL(__dtb_end) = .;

/* .data section */
#define DATA_DATA						\
	*(.data)						\
	*(.ref.data)						\
	*(.data..shared_aligned) /* percpu related */		\
	DEV_KEEP(init.data)					\
	DEV_KEEP(exit.data)					\
	CPU_KEEP(init.data)					\
	CPU_KEEP(exit.data)					\
	MEM_KEEP(init.data)					\
	MEM_KEEP(exit.data)					\
	*(.data.unlikely)					\
	STRUCT_ALIGN();						\
	*(__tracepoints)					\
	/* implement dynamic printk debug */			\
	. = ALIGN(8);						\
	VMLINUX_SYMBOL(__start___jump_table) = .;		\
	*(__jump_table)						\
	VMLINUX_SYMBOL(__stop___jump_table) = .;		\
	. = ALIGN(8);						\
	VMLINUX_SYMBOL(__start___verbose) = .;			\
	*(__verbose)						\
	VMLINUX_SYMBOL(__stop___verbose) = .;			\
	LIKELY_PROFILE()					\
	BRANCH_PROFILE()					\
	TRACE_PRINTKS()

/*
 * Data section helpers
 */
#define NOSAVE_DATA						\
	. = ALIGN(PAGE_SIZE);					\
	VMLINUX_SYMBOL(__nosave_begin) = .;			\
	*(.data..nosave)					\
	. = ALIGN(PAGE_SIZE);					\
	VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align)				\
	. = ALIGN(page_align);					\
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align)					\
	. = ALIGN(align);					\
	*(.data..read_mostly)					\
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align)				\
	. = ALIGN(align);					\
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align)					\
	. = ALIGN(align);					\
	*(.data..init_task)
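/*
 * Illustrative sketch (an assumption, not defined in this header) of
 * how C code lands data in the .data..nosave input section collected
 * by NOSAVE_DATA above, via a section attribute along the lines of:
 *
 *	#define __nosavedata __attribute__((__section__(".data..nosave")))
 *	static int resume_marker __nosavedata;
 *
 * Hibernation code can then skip the [__nosave_begin, __nosave_end]
 * region when saving and restoring the image.
 */
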
/*
 * Read only Data
 */
#define RO_DATA_SECTION(align)					\
	. = ALIGN((align));					\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_rodata) = .;		\
		*(.rodata) *(.rodata.*)				\
		*(__vermagic)	/* Kernel version magic */	\
		. = ALIGN(8);					\
		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;	\
		*(__tracepoints_ptrs) /* Tracepoints: pointer array */ \
		VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .;	\
		*(__tracepoints_strings)/* Tracepoints: strings */ \
	}							\
								\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {	\
		*(.rodata1)					\
	}							\
								\
	BUG_TABLE						\
								\
	/* PCI quirks */					\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;	\
		*(.pci_fixup_early)				\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;	\
		*(.pci_fixup_header)				\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;	\
		*(.pci_fixup_final)				\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;	\
		*(.pci_fixup_enable)				\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;	\
		*(.pci_fixup_resume)				\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
		*(.pci_fixup_resume_early)			\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;	\
		*(.pci_fixup_suspend)				\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;	\
	}							\
								\
	/* Built-in firmware blobs */				\
	.builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_builtin_fw) = .;		\
		*(.builtin_fw)					\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;		\
	}							\
								\
	/* RapidIO route ops */					\
	.rio_ops        : AT(ADDR(.rio_ops) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_rio_switch_ops) = .;	\
		*(.rio_switch_ops)				\
		VMLINUX_SYMBOL(__end_rio_switch_ops) = .;	\
	}							\
								\
	TRACEDATA						\
								\
	/* Kernel symbol table: Normal symbols */		\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab) = .;		\
		*(SORT(___ksymtab+*))				\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;		\
	}							\
								\
	/* Kernel symbol table: GPL-only symbols */		\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;	\
		*(SORT(___ksymtab_gpl+*))			\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;	\
	}							\
								\
	/* Kernel symbol table: Normal unused symbols */	\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;	\
		*(SORT(___ksymtab_unused+*))			\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;	\
	}							\
								\
	/* Kernel symbol table: GPL-only unused symbols */	\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
		*(SORT(___ksymtab_unused_gpl+*))		\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
	}							\
								\
	/* Kernel symbol table: GPL-future-only symbols */	\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
		*(SORT(___ksymtab_gpl_future+*))		\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
	}							\
								\
	/* Kernel symbol table: Normal symbols */		\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab) = .;		\
		*(SORT(___kcrctab+*))				\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;		\
	}							\
								\
	/* Kernel symbol table: GPL-only symbols */		\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;	\
		*(SORT(___kcrctab_gpl+*))			\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;	\
	}							\
								\
	/* Kernel symbol table: Normal unused symbols */	\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;	\
		*(SORT(___kcrctab_unused+*))			\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;	\
	}							\
								\
	/* Kernel symbol table: GPL-only unused symbols */	\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
		*(SORT(___kcrctab_unused_gpl+*))		\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
	}							\
								\
	/* Kernel symbol table: GPL-future-only symbols */	\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
		*(SORT(___kcrctab_gpl_future+*))		\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
	}							\
								\
	/* Kernel symbol table: strings */			\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)				\
	}							\
								\
	/* __*init sections */					\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {	\
		*(.ref.rodata)					\
		DEV_KEEP(init.rodata)				\
		DEV_KEEP(exit.rodata)				\
		CPU_KEEP(init.rodata)				\
		CPU_KEEP(exit.rodata)				\
		MEM_KEEP(init.rodata)				\
		MEM_KEEP(exit.rodata)				\
	}							\
								\
	/* Built-in module parameters. */			\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___param) = .;		\
		*(__param)					\
		VMLINUX_SYMBOL(__stop___param) = .;		\
	}							\
								\
	/* Built-in module versions. */				\
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___modver) = .;		\
		*(__modver)					\
		VMLINUX_SYMBOL(__stop___modver) = .;		\
		. = ALIGN((align));				\
		VMLINUX_SYMBOL(__end_rodata) = .;		\
	}							\
	. = ALIGN((align));
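/*
 * Illustrative sketch: the *(SORT(___ksymtab+*)) input-section patterns
 * above match the per-symbol sections emitted by EXPORT_SYMBOL();
 * roughly (simplified from include/linux/export.h):
 *
 *	static const struct kernel_symbol __ksymtab_foo
 *	__attribute__((section("___ksymtab" "+" "foo"), used))
 *	= { (unsigned long)&foo, __kstrtab_foo };
 *
 * Sorting by input-section name at link time leaves the table ordered
 * by symbol name, so the module loader can binary-search it.
 */
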
/* RODATA & RO_DATA provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA          RO_DATA_SECTION(4096)
#define RO_DATA(align)  RO_DATA_SECTION(align)
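/*
 * Illustrative usage (an assumed arch script, not defined here): an
 * architecture's vmlinux.lds.S would typically place read-only data
 * with page granularity so it can later be write-protected:
 *
 *	. = ALIGN(PAGE_SIZE);
 *	RO_DATA(PAGE_SIZE)
 */
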
#define SECURITY_INIT						\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;	\
		*(.security_initcall.init)			\
		VMLINUX_SYMBOL(__security_initcall_end) = .;	\
	}

/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map */
#define TEXT_TEXT						\
	ALIGN_FUNCTION();					\
	*(.text.hot)						\
	*(.text)						\
	*(.ref.text)						\
	DEV_KEEP(init.text)					\
	DEV_KEEP(exit.text)					\
	CPU_KEEP(init.text)					\
	CPU_KEEP(exit.text)					\
	MEM_KEEP(init.text)					\
	MEM_KEEP(exit.text)					\
	*(.text.unlikely)


/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define SCHED_TEXT						\
	ALIGN_FUNCTION();					\
	VMLINUX_SYMBOL(__sched_text_start) = .;			\
	*(.sched.text)						\
	VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define LOCK_TEXT						\
	ALIGN_FUNCTION();					\
	VMLINUX_SYMBOL(__lock_text_start) = .;			\
	*(.spinlock.text)					\
	VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT						\
	ALIGN_FUNCTION();					\
	VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
	*(.kprobes.text)					\
	VMLINUX_SYMBOL(__kprobes_text_end) = .;

#define ENTRY_TEXT						\
	ALIGN_FUNCTION();					\
	VMLINUX_SYMBOL(__entry_text_start) = .;			\
	*(.entry.text)						\
	VMLINUX_SYMBOL(__entry_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT						\
	ALIGN_FUNCTION();					\
	VMLINUX_SYMBOL(__irqentry_text_start) = .;		\
	*(.irqentry.text)					\
	VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION					\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {	\
		HEAD_TEXT					\
	}

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)					\
	. = ALIGN(align);					\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ex_table) = .;		\
		*(__ex_table)					\
		VMLINUX_SYMBOL(__stop___ex_table) = .;		\
	}

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)				\
	. = ALIGN(align);					\
	.data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
		INIT_TASK_DATA(align)				\
	}
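/*
 * A minimal consumer sketch for EXCEPTION_TABLE() above: the fault
 * fixup code searches the table delimited by the two symbols, roughly
 * as in kernel/extable.c:
 *
 *	extern struct exception_table_entry __start___ex_table[];
 *	extern struct exception_table_entry __stop___ex_table[];
 *
 *	e = search_extable(__start___ex_table,
 *			   __stop___ex_table - 1, faulting_addr);
 */
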
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);			   \
			VMLINUX_SYMBOL(__ctors_start) = .; \
			*(.ctors)			   \
			VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA						\
	*(.init.data)						\
	DEV_DISCARD(init.data)					\
	CPU_DISCARD(init.data)					\
	MEM_DISCARD(init.data)					\
	KERNEL_CTORS()						\
	MCOUNT_REC()						\
	*(.init.rodata)						\
	FTRACE_EVENTS()						\
	TRACE_SYSCALLS()					\
	DEV_DISCARD(init.rodata)				\
	CPU_DISCARD(init.rodata)				\
	MEM_DISCARD(init.rodata)				\
	CLK_OF_TABLES()						\
	CLKSRC_OF_TABLES()					\
	KERNEL_DTB()						\
	IRQCHIP_OF_MATCH_TABLE()

#define INIT_TEXT						\
	*(.init.text)						\
	DEV_DISCARD(init.text)					\
	CPU_DISCARD(init.text)					\
	MEM_DISCARD(init.text)

#define EXIT_DATA						\
	*(.exit.data)						\
	DEV_DISCARD(exit.data)					\
	DEV_DISCARD(exit.rodata)				\
	CPU_DISCARD(exit.data)					\
	CPU_DISCARD(exit.rodata)				\
	MEM_DISCARD(exit.data)					\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT						\
	*(.exit.text)						\
	DEV_DISCARD(exit.text)					\
	CPU_DISCARD(exit.text)					\
	MEM_DISCARD(exit.text)

#define EXIT_CALL						\
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)					\
	. = ALIGN(sbss_align);					\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {			\
		*(.sbss)					\
		*(.scommon)					\
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align)						\
	. = ALIGN(bss_align);					\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {			\
		BSS_FIRST_SECTIONS				\
		*(.bss..page_aligned)				\
		*(.dynbss)					\
		*(.bss)						\
		*(COMMON)					\
	}
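/*
 * Hypothetical override example for BSS_FIRST_SECTIONS above: an arch
 * that must keep certain objects at the very start of .bss could
 * define, before including this header (the section name below is
 * made up for illustration):
 *
 *	#define BSS_FIRST_SECTIONS *(.bss..arch_first)
 */
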
/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG						\
	/* DWARF 1 */						\
	.debug          0 : { *(.debug) }			\
	.line           0 : { *(.line) }			\
	/* GNU DWARF 1 extensions */				\
	.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
	.debug_sfnames  0 : { *(.debug_sfnames) }		\
	/* DWARF 1.1 and DWARF 2 */				\
	.debug_aranges  0 : { *(.debug_aranges) }		\
	.debug_pubnames 0 : { *(.debug_pubnames) }		\
	/* DWARF 2 */						\
	.debug_info     0 : { *(.debug_info			\
			.gnu.linkonce.wi.*) }			\
	.debug_abbrev   0 : { *(.debug_abbrev) }		\
	.debug_line     0 : { *(.debug_line) }			\
	.debug_frame    0 : { *(.debug_frame) }			\
	.debug_str      0 : { *(.debug_str) }			\
	.debug_loc      0 : { *(.debug_loc) }			\
	.debug_macinfo  0 : { *(.debug_macinfo) }		\
	/* SGI/MIPS DWARF 2 extensions */			\
	.debug_weaknames 0 : { *(.debug_weaknames) }		\
	.debug_funcnames 0 : { *(.debug_funcnames) }		\
	.debug_typenames 0 : { *(.debug_typenames) }		\
	.debug_varnames  0 : { *(.debug_varnames) }

/* Stabs debugging sections. */
#define STABS_DEBUG						\
	.stab 0 : { *(.stab) }					\
	.stabstr 0 : { *(.stabstr) }				\
	.stab.excl 0 : { *(.stab.excl) }			\
	.stab.exclstr 0 : { *(.stab.exclstr) }			\
	.stab.index 0 : { *(.stab.index) }			\
	.stab.indexstr 0 : { *(.stab.indexstr) }		\
	.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE						\
	. = ALIGN(8);						\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___bug_table) = .;	\
		*(__bug_table)					\
		VMLINUX_SYMBOL(__stop___bug_table) = .;		\
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA						\
	. = ALIGN(4);						\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__tracedata_start) = .;		\
		*(.tracedata)					\
		VMLINUX_SYMBOL(__tracedata_end) = .;		\
	}
#else
#define TRACEDATA
#endif

#define NOTES							\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_notes) = .;		\
		*(.note.*)					\
		VMLINUX_SYMBOL(__stop_notes) = .;		\
	}

#define INIT_SETUP(initsetup_align)				\
	. = ALIGN(initsetup_align);				\
	VMLINUX_SYMBOL(__setup_start) = .;			\
	*(.init.setup)						\
	VMLINUX_SYMBOL(__setup_end) = .;

#define INIT_CALLS_LEVEL(level)					\
	VMLINUX_SYMBOL(__initcall##level##_start) = .;		\
	*(.initcall##level##.init)				\
	*(.initcall##level##s.init)

#define INIT_CALLS						\
	VMLINUX_SYMBOL(__initcall_start) = .;			\
	*(.initcallearly.init)					\
	INIT_CALLS_LEVEL(0)					\
	INIT_CALLS_LEVEL(1)					\
	INIT_CALLS_LEVEL(2)					\
	INIT_CALLS_LEVEL(3)					\
	INIT_CALLS_LEVEL(4)					\
	INIT_CALLS_LEVEL(5)					\
	INIT_CALLS_LEVEL(rootfs)				\
	INIT_CALLS_LEVEL(6)					\
	INIT_CALLS_LEVEL(7)					\
	VMLINUX_SYMBOL(__initcall_end) = .;

#define CON_INITCALL						\
	VMLINUX_SYMBOL(__con_initcall_start) = .;		\
	*(.con_initcall.init)					\
	VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL					\
	VMLINUX_SYMBOL(__security_initcall_start) = .;		\
	*(.security_initcall.init)				\
	VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS						\
	. = ALIGN(4);						\
	VMLINUX_SYMBOL(__initramfs_start) = .;			\
	*(.init.ramfs)						\
	. = ALIGN(8);						\
	*(.init.ramfs.info)
#else
#define INIT_RAM_FS
#endif

/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc.  DISCARDS must be the last of output
 * section definitions so that such archs put those in earlier section
 * definitions.
 */
#define DISCARDS						\
	/DISCARD/ : {						\
	EXIT_TEXT						\
	EXIT_DATA						\
	EXIT_CALL						\
	*(.discard)						\
	*(.discard.*)						\
	}
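/*
 * Illustrative sketch of how the initcall symbols laid out by
 * INIT_CALLS/INIT_CALLS_LEVEL() above are walked at boot, loosely
 * following init/main.c:
 *
 *	extern initcall_t __initcall_start[], __initcall_end[];
 *	initcall_t *fn;
 *
 *	for (fn = __initcall_start; fn < __initcall_end; fn++)
 *		do_one_initcall(*fn);
 *
 * (The real code walks one level at a time using the per-level
 * __initcall<level>_start symbols.)
 */
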
/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline)					\
	VMLINUX_SYMBOL(__per_cpu_start) = .;			\
	*(.data..percpu..first)					\
	. = ALIGN(PAGE_SIZE);					\
	*(.data..percpu..page_aligned)				\
	. = ALIGN(cacheline);					\
	*(.data..percpu..readmostly)				\
	. = ALIGN(cacheline);					\
	*(.data..percpu)					\
	*(.data..percpu..shared_aligned)			\
	VMLINUX_SYMBOL(__per_cpu_end) = .;

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies explicit base address and all
 * percpu symbols will be offset from the given address.  If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank.  Be warned that
 * output PHDR is sticky.  If @phdr is specified, the next output
 * section in the linker script will go there too.  @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr)			\
	VMLINUX_SYMBOL(__per_cpu_load) = .;			\
	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)	\
				- LOAD_OFFSET) {		\
		PERCPU_INPUT(cacheline)				\
	} phdr							\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);

/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Aligns to PAGE_SIZE and defines the output section for the percpu
 * area.  This macro doesn't manipulate @vaddr or @phdr, and
 * __per_cpu_load and __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for relocatable x86_32 configuration.
 */
#define PERCPU_SECTION(cacheline)				\
	. = ALIGN(PAGE_SIZE);					\
	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__per_cpu_load) = .;		\
		PERCPU_INPUT(cacheline)				\
	}
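/*
 * Illustrative usage (assumed arch scripts, not defined here): most
 * architectures take the simple form in their vmlinux.lds.S:
 *
 *	PERCPU_SECTION(L1_CACHE_BYTES)
 *
 * while x86_64 SMP has used the explicit-address variant along the
 * lines of PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu) so that
 * percpu symbols are zero-based (see the __per_cpu_load note above).
 */
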
/*
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 */


/*
 * Writeable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically (and always at least) smaller than PAGE_SIZE,
 * so the sections with a cacheline (or similar) alignment restriction
 * are located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * use 0 as page_align if page_aligned data is not used */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask)	\
	. = ALIGN(PAGE_SIZE);					\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {			\
		INIT_TASK_DATA(inittask)			\
		NOSAVE_DATA					\
		PAGE_ALIGNED_DATA(pagealigned)			\
		CACHELINE_ALIGNED_DATA(cacheline)		\
		READ_MOSTLY_DATA(cacheline)			\
		DATA_DATA					\
		CONSTRUCTORS					\
	}

#define INIT_TEXT_SECTION(inittext_align)			\
	. = ALIGN(inittext_align);				\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(_sinittext) = .;			\
		INIT_TEXT					\
		VMLINUX_SYMBOL(_einittext) = .;			\
	}

#define INIT_DATA_SECTION(initsetup_align)			\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {	\
		INIT_DATA					\
		INIT_SETUP(initsetup_align)			\
		INIT_CALLS					\
		CON_INITCALL					\
		SECURITY_INITCALL				\
		INIT_RAM_FS					\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)		\
	. = ALIGN(sbss_align);					\
	VMLINUX_SYMBOL(__bss_start) = .;			\
	SBSS(sbss_align)					\
	BSS(bss_align)						\
	. = ALIGN(stop_align);					\
	VMLINUX_SYMBOL(__bss_stop) = .;
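/*
 * Illustrative consumer sketch for BSS_SECTION() above: early boot
 * code commonly zeroes the region delimited by the two symbols,
 * roughly:
 *
 *	extern char __bss_start[], __bss_stop[];
 *
 *	memset(__bss_start, 0, __bss_stop - __bss_start);
 */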