/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA(PAGE_SIZE)
 *	RW_DATA(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 *	// __init_begin and __init_end should be page aligned, so that we can
 *	// free the whole .init memory
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

/*
 * Only some architectures want to have the .notes segment visible in
 * a separate PT_NOTE ELF Program Header. When this happens, it needs
 * to be visible in both the kernel text's PT_LOAD and the PT_NOTE
 * Program Headers. In this case, though, the PT_LOAD needs to be made
 * the default again so that all the following sections don't also end
 * up in the PT_NOTE Program Header.
 */
#ifdef EMITS_PT_NOTE
#define NOTES_HEADERS		:text :note
#define NOTES_HEADERS_RESTORE	__restore_ph : { *(.__restore_ph) } :text
#else
#define NOTES_HEADERS
#define NOTES_HEADERS_RESTORE
#endif

/*
 * Some architectures have non-executable read-only exception tables.
 * They can be added to the RO_DATA segment by specifying their desired
 * alignment.
 */
#ifdef RO_EXCEPTION_TABLE_ALIGN
#define RO_EXCEPTION_TABLE	EXCEPTION_TABLE(RO_EXCEPTION_TABLE_ALIGN)
#else
#define RO_EXCEPTION_TABLE
#endif

/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/*
 * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
 * generates .data.identifier sections, which need to be pulled in with
 * .data. We don't want to pull in .data..other sections, which Linux
 * has defined. Same for text and bss.
 *
 * RODATA_MAIN is not used because existing code already defines .rodata.x
 * sections to be brought in with rodata.
 */
#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
#else
#define TEXT_MAIN .text
#define DATA_MAIN .data
#define SDATA_MAIN .sdata
#define RODATA_MAIN .rodata
#define BSS_MAIN .bss
#define SBSS_MAIN .sbss
#endif

/*
 * Align to a 32 byte boundary equal to the
 * alignment gcc 4.5 uses for a struct
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
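
/*
 * Illustrative sketch (not part of this file): with dead code
 * elimination enabled, the compiler places each symbol in its own
 * input section, e.g. for a hypothetical foo.c
 *
 *	int foo_count;			// emitted as .data.foo_count
 *	void foo_handler(void) { }	// emitted as .text.foo_handler
 *
 * The TEXT_MAIN/DATA_MAIN patterns above then collect these back into
 * .text/.data, while sections in the kernel's double-dot namespace
 * (e.g. .data..percpu) deliberately stay unmatched and are placed
 * explicitly elsewhere in this file.
 */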

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data, or whether they can be discarded (which
 * often happens at runtime).
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * The ftrace call sites are logged to a section whose name depends on the
 * compiler option used. A given kernel image will only use one, AKA
 * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
 * dependencies for FTRACE_CALLSITE_SECTION's definition.
 *
 * Need to also make ftrace_stub_graph point to ftrace_stub
 * so that the same stub location may have different protocols
 * and not mess up with C verifiers.
 */
#define MCOUNT_REC()	. = ALIGN(8);				\
			__start_mcount_loc = .;			\
			KEEP(*(__mcount_loc))			\
			KEEP(*(__patchable_function_entries))	\
			__stop_mcount_loc = .;			\
			ftrace_stub_graph = ftrace_stub;
#else
# ifdef CONFIG_FUNCTION_TRACER
#  define MCOUNT_REC()	ftrace_stub_graph = ftrace_stub;
# else
#  define MCOUNT_REC()
# endif
#endif

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	__start_annotated_branch_profile = .;	\
				KEEP(*(_ftrace_annotated_branch))	\
				__stop_annotated_branch_profile = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	__start_branch_profile = .;		\
				KEEP(*(_ftrace_branch))			\
				__stop_branch_profile = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST()	. = ALIGN(8);				\
				__start_kprobe_blacklist = .;		\
				KEEP(*(_kprobe_blacklist))		\
				__stop_kprobe_blacklist = .;
#else
#define KPROBE_BLACKLIST()
#endif

#ifdef CONFIG_FUNCTION_ERROR_INJECTION
#define ERROR_INJECT_WHITELIST()	STRUCT_ALIGN();			\
			__start_error_injection_whitelist = .;		\
			KEEP(*(_error_injection_whitelist))		\
			__stop_error_injection_whitelist = .;
#else
#define ERROR_INJECT_WHITELIST()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	. = ALIGN(8);					\
			__start_ftrace_events = .;			\
			KEEP(*(_ftrace_events))				\
			__stop_ftrace_events = .;			\
			__start_ftrace_eval_maps = .;			\
			KEEP(*(_ftrace_eval_map))			\
			__stop_ftrace_eval_maps = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()	 __start___trace_bprintk_fmt = .;		\
			 KEEP(*(__trace_printk_fmt)) /* trace_printk() format pointers */ \
			 __stop___trace_bprintk_fmt = .;
#define TRACEPOINT_STR() __start___tracepoint_str = .;			\
			 KEEP(*(__tracepoint_str)) /* tracepoint string pointers */ \
			 __stop___tracepoint_str = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8);					\
			 __start_syscalls_metadata = .;			\
			 KEEP(*(__syscalls_metadata))			\
			 __stop_syscalls_metadata = .;
#else
#define TRACE_SYSCALLS()
#endif
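
/*
 * All of the table helpers above follow the same pattern: KEEP() a set
 * of input sections between a begin/end symbol pair so that C code can
 * walk the result as an array. A minimal sketch of the consuming side,
 * using KPROBE_BLACKLIST() as the example (simplified; the real walk
 * lives in kernel/kprobes.c, and blacklist_one() is hypothetical):
 *
 *	extern unsigned long __start_kprobe_blacklist[];
 *	extern unsigned long __stop_kprobe_blacklist[];
 *	unsigned long *entry;
 *
 *	for (entry = __start_kprobe_blacklist;
 *	     entry < __stop_kprobe_blacklist; entry++)
 *		blacklist_one(*entry);
 */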
#ifdef CONFIG_BPF_EVENTS
#define BPF_RAW_TP() STRUCT_ALIGN();					\
			 __start__bpf_raw_tp = .;			\
			 KEEP(*(__bpf_raw_tp_map))			\
			 __stop__bpf_raw_tp = .;
#else
#define BPF_RAW_TP()
#endif

#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() . = ALIGN(8);					\
			 __earlycon_table = .;				\
			 KEEP(*(__earlycon_table))			\
			 __earlycon_table_end = .;
#else
#define EARLYCON_TABLE()
#endif

#ifdef CONFIG_SECURITY
#define LSM_TABLE()	. = ALIGN(8);					\
			__start_lsm_info = .;				\
			KEEP(*(.lsm_info.init))				\
			__end_lsm_info = .;
#define EARLY_LSM_TABLE()	. = ALIGN(8);				\
			__start_early_lsm_info = .;			\
			KEEP(*(.early_lsm_info.init))			\
			__end_early_lsm_info = .;
#else
#define LSM_TABLE()
#define EARLY_LSM_TABLE()
#endif

#define ___OF_TABLE(cfg, name)	_OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name)	___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name)	__OF_TABLE(IS_ENABLED(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name)						\
	. = ALIGN(8);							\
	__##name##_of_table = .;					\
	KEEP(*(__##name##_of_table))					\
	KEEP(*(__##name##_of_table_end))

#define TIMER_OF_TABLES()	OF_TABLE(CONFIG_TIMER_OF, timer)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES()		OF_TABLE(CONFIG_COMMON_CLK, clk)
#define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)

#ifdef CONFIG_ACPI
#define ACPI_PROBE_TABLE(name)						\
	. = ALIGN(8);							\
	__##name##_acpi_probe_table = .;				\
	KEEP(*(__##name##_acpi_probe_table))				\
	__##name##_acpi_probe_table_end = .;
#else
#define ACPI_PROBE_TABLE(name)
#endif

#ifdef CONFIG_THERMAL
#define THERMAL_TABLE(name)						\
	. = ALIGN(8);							\
	__##name##_thermal_table = .;					\
	KEEP(*(__##name##_thermal_table))				\
	__##name##_thermal_table_end = .;
#else
#define THERMAL_TABLE(name)
#endif

#define KERNEL_DTB()							\
	STRUCT_ALIGN();							\
	__dtb_start = .;						\
	KEEP(*(.dtb.init.rodata))					\
	__dtb_end = .;

/*
 * .data section
 */
#define DATA_DATA							\
	*(.xiptext)							\
	*(DATA_MAIN)							\
	*(.ref.data)							\
	*(.data..shared_aligned) /* percpu related */			\
	MEM_KEEP(init.data*)						\
	MEM_KEEP(exit.data*)						\
	*(.data.unlikely)						\
	__start_once = .;						\
	*(.data.once)							\
	__end_once = .;							\
	STRUCT_ALIGN();							\
	*(__tracepoints)						\
	/* implement dynamic printk debug */				\
	. = ALIGN(8);							\
	__start___verbose = .;						\
	KEEP(*(__verbose))						\
	__stop___verbose = .;						\
	LIKELY_PROFILE()						\
	BRANCH_PROFILE()						\
	TRACE_PRINTKS()							\
	BPF_RAW_TP()							\
	TRACEPOINT_STR()

/*
 * Data section helpers
 */
#define NOSAVE_DATA							\
	. = ALIGN(PAGE_SIZE);						\
	__nosave_begin = .;						\
	*(.data..nosave)						\
	. = ALIGN(PAGE_SIZE);						\
	__nosave_end = .;

#define PAGE_ALIGNED_DATA(page_align)					\
	. = ALIGN(page_align);						\
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align)						\
	. = ALIGN(align);						\
	*(.data..read_mostly)						\
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align)					\
	. = ALIGN(align);						\
	*(.data..cacheline_aligned)
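
/*
 * The input sections matched above are populated from C via section
 * attributes. An illustrative sketch (the kernel wraps this in helper
 * macros such as __read_mostly; exact spellings vary by version):
 *
 *	static struct foo_state state
 *		__attribute__((__section__(".data..read_mostly")));
 *
 * READ_MOSTLY_DATA() then groups such objects together so that rarely
 * written data does not share cachelines with frequently written data.
 */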

#define INIT_TASK_DATA(align)						\
	. = ALIGN(align);						\
	__start_init_task = .;						\
	init_thread_union = .;						\
	init_stack = .;							\
	KEEP(*(.data..init_task))					\
	KEEP(*(.data..init_thread_info))				\
	. = __start_init_task + THREAD_SIZE;				\
	__end_init_task = .;

#define JUMP_TABLE_DATA							\
	. = ALIGN(8);							\
	__start___jump_table = .;					\
	KEEP(*(__jump_table))						\
	__stop___jump_table = .;

/*
 * Allow architectures to handle ro_after_init data on their
 * own by defining an empty RO_AFTER_INIT_DATA.
 */
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA						\
	__start_ro_after_init = .;					\
	*(.data..ro_after_init)						\
	JUMP_TABLE_DATA							\
	__end_ro_after_init = .;
#endif

/*
 * Read only Data
 */
#define RO_DATA(align)							\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		__start_rodata = .;					\
		*(.rodata) *(.rodata.*)					\
		RO_AFTER_INIT_DATA	/* Read only after init */	\
		. = ALIGN(8);						\
		__start___tracepoints_ptrs = .;				\
		KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
		__stop___tracepoints_ptrs = .;				\
		*(__tracepoints_strings)/* Tracepoints: strings */	\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		__start_pci_fixups_early = .;				\
		KEEP(*(.pci_fixup_early))				\
		__end_pci_fixups_early = .;				\
		__start_pci_fixups_header = .;				\
		KEEP(*(.pci_fixup_header))				\
		__end_pci_fixups_header = .;				\
		__start_pci_fixups_final = .;				\
		KEEP(*(.pci_fixup_final))				\
		__end_pci_fixups_final = .;				\
		__start_pci_fixups_enable = .;				\
		KEEP(*(.pci_fixup_enable))				\
		__end_pci_fixups_enable = .;				\
		__start_pci_fixups_resume = .;				\
		KEEP(*(.pci_fixup_resume))				\
		__end_pci_fixups_resume = .;				\
		__start_pci_fixups_resume_early = .;			\
		KEEP(*(.pci_fixup_resume_early))			\
		__end_pci_fixups_resume_early = .;			\
		__start_pci_fixups_suspend = .;				\
		KEEP(*(.pci_fixup_suspend))				\
		__end_pci_fixups_suspend = .;				\
		__start_pci_fixups_suspend_late = .;			\
		KEEP(*(.pci_fixup_suspend_late))			\
		__end_pci_fixups_suspend_late = .;			\
	}								\
									\
	/* Built-in firmware blobs */					\
	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {		\
		__start_builtin_fw = .;					\
		KEEP(*(.builtin_fw))					\
		__end_builtin_fw = .;					\
	}								\
									\
	TRACEDATA							\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		__start___ksymtab = .;					\
		KEEP(*(SORT(___ksymtab+*)))				\
		__stop___ksymtab = .;					\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		__start___ksymtab_gpl = .;				\
		KEEP(*(SORT(___ksymtab_gpl+*)))				\
		__stop___ksymtab_gpl = .;				\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		__start___ksymtab_unused = .;				\
		KEEP(*(SORT(___ksymtab_unused+*)))			\
		__stop___ksymtab_unused = .;				\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		__start___ksymtab_unused_gpl = .;			\
		KEEP(*(SORT(___ksymtab_unused_gpl+*)))			\
		__stop___ksymtab_unused_gpl = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		__start___ksymtab_gpl_future = .;			\
		KEEP(*(SORT(___ksymtab_gpl_future+*)))			\
		__stop___ksymtab_gpl_future = .;			\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		__start___kcrctab = .;					\
		KEEP(*(SORT(___kcrctab+*)))				\
		__stop___kcrctab = .;					\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		__start___kcrctab_gpl = .;				\
		KEEP(*(SORT(___kcrctab_gpl+*)))				\
		__stop___kcrctab_gpl = .;				\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		__start___kcrctab_unused = .;				\
		KEEP(*(SORT(___kcrctab_unused+*)))			\
		__stop___kcrctab_unused = .;				\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		__start___kcrctab_unused_gpl = .;			\
		KEEP(*(SORT(___kcrctab_unused_gpl+*)))			\
		__stop___kcrctab_unused_gpl = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		__start___kcrctab_gpl_future = .;			\
		KEEP(*(SORT(___kcrctab_gpl_future+*)))			\
		__stop___kcrctab_gpl_future = .;			\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
		MEM_KEEP(init.rodata)					\
		MEM_KEEP(exit.rodata)					\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		__start___param = .;					\
		KEEP(*(__param))					\
		__stop___param = .;					\
	}								\
									\
	/* Built-in module versions. */					\
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {			\
		__start___modver = .;					\
		KEEP(*(__modver))					\
		__stop___modver = .;					\
	}								\
									\
	RO_EXCEPTION_TABLE						\
	NOTES								\
	BTF								\
									\
	. = ALIGN((align));						\
	__end_rodata = .;
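
/*
 * Typical use in an architecture's vmlinux.lds.S (illustrative only;
 * real scripts interleave arch-specific sections):
 *
 *	RO_DATA(PAGE_SIZE)
 *
 * Page alignment at both ends lets architectures write-protect the
 * whole [__start_rodata, __end_rodata] range after init.
 */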

/*
 * .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map.
 *
 * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
 * code elimination is enabled, so these sections should be converted
 * to use ".." first.
 */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text.hot TEXT_MAIN .text.fixup .text.unlikely)	\
		*(.text..refcount)					\
		*(.ref.text)						\
	MEM_KEEP(init.text*)						\
	MEM_KEEP(exit.text*)


/* .sched.text is aligned to function alignment to ensure we have the
 * same address even at the second ld pass when generating System.map. */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		__sched_text_start = .;					\
		*(.sched.text)						\
		__sched_text_end = .;

/* .spinlock.text is aligned to function alignment to ensure we have the
 * same address even at the second ld pass when generating System.map. */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		__lock_text_start = .;					\
		*(.spinlock.text)					\
		__lock_text_end = .;

#define CPUIDLE_TEXT							\
		ALIGN_FUNCTION();					\
		__cpuidle_text_start = .;				\
		*(.cpuidle.text)					\
		__cpuidle_text_end = .;

#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		__kprobes_text_start = .;				\
		*(.kprobes.text)					\
		__kprobes_text_end = .;

#define ENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		__entry_text_start = .;					\
		*(.entry.text)						\
		__entry_text_end = .;

#define IRQENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		__irqentry_text_start = .;				\
		*(.irqentry.text)					\
		__irqentry_text_end = .;

#define SOFTIRQENTRY_TEXT						\
		ALIGN_FUNCTION();					\
		__softirqentry_text_start = .;				\
		*(.softirqentry.text)					\
		__softirqentry_text_end = .;

/* Section used for early init (in .S files) */
#define HEAD_TEXT  KEEP(*(.head.text))

#define HEAD_TEXT_SECTION						\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {		\
		HEAD_TEXT						\
	}

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)						\
	. = ALIGN(align);						\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
		__start___ex_table = .;					\
		KEEP(*(__ex_table))					\
		__stop___ex_table = .;					\
	}

/*
 * .BTF
 */
#ifdef CONFIG_DEBUG_INFO_BTF
#define BTF								\
	.BTF : AT(ADDR(.BTF) - LOAD_OFFSET) {				\
		__start_BTF = .;					\
		*(.BTF)							\
		__stop_BTF = .;						\
	}
#else
#define BTF
#endif

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)					\
	. = ALIGN(align);						\
	.data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) {	\
		INIT_TASK_DATA(align)					\
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);			   \
			__ctors_start = .;		   \
			KEEP(*(.ctors))			   \
			KEEP(*(SORT(.init_array.*)))	   \
			KEEP(*(.init_array))		   \
			__ctors_end = .;
#else
#define KERNEL_CTORS()
#endif
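
/*
 * Sketch of how the constructor table built by KERNEL_CTORS() is run,
 * simplified from do_ctors() in init/main.c:
 *
 *	typedef void (*ctor_fn_t)(void);
 *	extern ctor_fn_t __ctors_start[], __ctors_end[];
 *	ctor_fn_t *fn;
 *
 *	for (fn = __ctors_start; fn < __ctors_end; fn++)
 *		(*fn)();
 */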
/* init and exit section handling */
#define INIT_DATA							\
	KEEP(*(SORT(___kentry+*)))					\
	*(.init.data init.data.*)					\
	MEM_DISCARD(init.data*)						\
	KERNEL_CTORS()							\
	MCOUNT_REC()							\
	*(.init.rodata .init.rodata.*)					\
	FTRACE_EVENTS()							\
	TRACE_SYSCALLS()						\
	KPROBE_BLACKLIST()						\
	ERROR_INJECT_WHITELIST()					\
	MEM_DISCARD(init.rodata)					\
	CLK_OF_TABLES()							\
	RESERVEDMEM_OF_TABLES()						\
	TIMER_OF_TABLES()						\
	CPU_METHOD_OF_TABLES()						\
	CPUIDLE_METHOD_OF_TABLES()					\
	KERNEL_DTB()							\
	IRQCHIP_OF_MATCH_TABLE()					\
	ACPI_PROBE_TABLE(irqchip)					\
	ACPI_PROBE_TABLE(timer)						\
	THERMAL_TABLE(governor)						\
	EARLYCON_TABLE()						\
	LSM_TABLE()							\
	EARLY_LSM_TABLE()

#define INIT_TEXT							\
	*(.init.text .init.text.*)					\
	*(.text.startup)						\
	MEM_DISCARD(init.text*)

#define EXIT_DATA							\
	*(.exit.data .exit.data.*)					\
	*(.fini_array .fini_array.*)					\
	*(.dtors .dtors.*)						\
	MEM_DISCARD(exit.data*)						\
	MEM_DISCARD(exit.rodata*)

#define EXIT_TEXT							\
	*(.exit.text)							\
	*(.text.exit)							\
	MEM_DISCARD(exit.text)

#define EXIT_CALL							\
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)						\
	. = ALIGN(sbss_align);						\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {				\
		*(.dynsbss)						\
		*(SBSS_MAIN)						\
		*(.scommon)						\
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align)							\
	. = ALIGN(bss_align);						\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {				\
		BSS_FIRST_SECTIONS					\
		*(.bss..page_aligned)					\
		*(.dynbss)						\
		*(BSS_MAIN)						\
		*(COMMON)						\
	}
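
/*
 * Illustrative sketch: an object lands in the .bss..page_aligned slot
 * matched above via a section attribute (the kernel provides wrapper
 * macros for this; exact spellings vary by version):
 *
 *	static char boot_stack[PAGE_SIZE]
 *		__attribute__((__section__(".bss..page_aligned"),
 *			       __aligned__(PAGE_SIZE)));
 */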

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		.debug_pubtypes 0 : { *(.debug_pubtypes) }		\
		/* DWARF 3 */						\
		.debug_ranges	0 : { *(.debug_ranges) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }		\
		/* GNU DWARF 2 extensions */				\
		.debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) }	\
		.debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) }	\
		/* DWARF 4 */						\
		.debug_types	0 : { *(.debug_types) }			\
		/* DWARF 5 */						\
		.debug_macro	0 : { *(.debug_macro) }			\
		.debug_addr	0 : { *(.debug_addr) }

/* Stabs debugging sections. */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		__start___bug_table = .;				\
		KEEP(*(__bug_table))					\
		__stop___bug_table = .;					\
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_UNWINDER_ORC
#define ORC_UNWIND_TABLE						\
	. = ALIGN(4);							\
	.orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) {	\
		__start_orc_unwind_ip = .;				\
		KEEP(*(.orc_unwind_ip))					\
		__stop_orc_unwind_ip = .;				\
	}								\
	. = ALIGN(2);							\
	.orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) {		\
		__start_orc_unwind = .;					\
		KEEP(*(.orc_unwind))					\
		__stop_orc_unwind = .;					\
	}								\
	. = ALIGN(4);							\
	.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) {		\
		orc_lookup = .;						\
		. += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) /	\
			LOOKUP_BLOCK_SIZE) + 1) * 4;			\
		orc_lookup_end = .;					\
	}
#else
#define ORC_UNWIND_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA							\
	. = ALIGN(4);							\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
		__tracedata_start = .;					\
		KEEP(*(.tracedata))					\
		__tracedata_end = .;					\
	}
#else
#define TRACEDATA
#endif

#define NOTES								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		__start_notes = .;					\
		KEEP(*(.note.*))					\
		__stop_notes = .;					\
	} NOTES_HEADERS							\
	NOTES_HEADERS_RESTORE

#define INIT_SETUP(initsetup_align)					\
		. = ALIGN(initsetup_align);				\
		__setup_start = .;					\
		KEEP(*(.init.setup))					\
		__setup_end = .;
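
/*
 * .init.setup collects the struct obs_kernel_param entries emitted by
 * the __setup() macro; a sketch of the matching walk, simplified from
 * obsolete_checksetup() in init/main.c:
 *
 *	extern struct obs_kernel_param __setup_start[], __setup_end[];
 *	struct obs_kernel_param *p;
 *
 *	for (p = __setup_start; p < __setup_end; p++)
 *		if (parameqn(line, p->str, strlen(p->str)))
 *			p->setup_func(line + strlen(p->str));
 */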
#define INIT_CALLS_LEVEL(level)						\
		__initcall##level##_start = .;				\
		KEEP(*(.initcall##level##.init))			\
		KEEP(*(.initcall##level##s.init))			\

#define INIT_CALLS							\
		__initcall_start = .;					\
		KEEP(*(.initcallearly.init))				\
		INIT_CALLS_LEVEL(0)					\
		INIT_CALLS_LEVEL(1)					\
		INIT_CALLS_LEVEL(2)					\
		INIT_CALLS_LEVEL(3)					\
		INIT_CALLS_LEVEL(4)					\
		INIT_CALLS_LEVEL(5)					\
		INIT_CALLS_LEVEL(rootfs)				\
		INIT_CALLS_LEVEL(6)					\
		INIT_CALLS_LEVEL(7)					\
		__initcall_end = .;

#define CON_INITCALL							\
		__con_initcall_start = .;				\
		KEEP(*(.con_initcall.init))				\
		__con_initcall_end = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS							\
	. = ALIGN(4);							\
	__initramfs_start = .;						\
	KEEP(*(.init.ramfs))						\
	. = ALIGN(8);							\
	KEEP(*(.init.ramfs.info))
#else
#define INIT_RAM_FS
#endif

/*
 * Memory encryption operates on a page basis. Since we need to clear
 * the memory encryption mask for this section, it needs to be aligned
 * on a page boundary and be a page-size multiple in length.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define PERCPU_DECRYPTED_SECTION					\
	. = ALIGN(PAGE_SIZE);						\
	*(.data..percpu..decrypted)					\
	. = ALIGN(PAGE_SIZE);
#else
#define PERCPU_DECRYPTED_SECTION
#endif


/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc. DISCARDS must be the last of output
 * section definitions so that such archs put those in earlier section
 * definitions.
 */
#ifdef RUNTIME_DISCARD_EXIT
#define EXIT_DISCARDS
#else
#define EXIT_DISCARDS							\
	EXIT_TEXT							\
	EXIT_DATA
#endif

#define DISCARDS							\
	/DISCARD/ : {							\
		EXIT_DISCARDS						\
		EXIT_CALL						\
		*(.discard)						\
		*(.discard.*)						\
		*(.modinfo)						\
	}

/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline)						\
	__per_cpu_start = .;						\
	*(.data..percpu..first)						\
	. = ALIGN(PAGE_SIZE);						\
	*(.data..percpu..page_aligned)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu..read_mostly)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu)						\
	*(.data..percpu..shared_aligned)				\
	PERCPU_DECRYPTED_SECTION					\
	__per_cpu_end = .;
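
/*
 * [__per_cpu_start, __per_cpu_end) delimits only the initial template:
 * at boot each possible CPU gets its own copy, reached via a per-CPU
 * offset. Rough sketch (the real setup lives in mm/percpu.c;
 * pcpu_base() here is hypothetical):
 *
 *	size_t size = __per_cpu_end - __per_cpu_start;
 *
 *	for_each_possible_cpu(cpu)
 *		memcpy(pcpu_base(cpu), __per_cpu_start, size);
 */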

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies explicit base address and all
 * percpu symbols will be offset from the given address. If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank. Be warned that
 * output PHDR is sticky. If @phdr is specified, the next output
 * section in the linker script will go there too. @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr)				\
	__per_cpu_load = .;						\
	.data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) {	\
		PERCPU_INPUT(cacheline)					\
	} phdr								\
	. = __per_cpu_load + SIZEOF(.data..percpu);

/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Aligns to PAGE_SIZE and outputs the output section for the percpu
 * area. This macro doesn't manipulate @vaddr or @phdr, and
 * __per_cpu_load and __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for the relocatable x86_32
 * configuration.
 */
#define PERCPU_SECTION(cacheline)					\
	. = ALIGN(PAGE_SIZE);						\
	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
		__per_cpu_load = .;					\
		PERCPU_INPUT(cacheline)					\
	}


/*
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 */


/*
 * Writeable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically, if not always, smaller than a PAGE_SIZE,
 * so the sections with this (or a similar) restriction are located
 * before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page_aligned data is not used.
 */
#define RW_DATA(cacheline, pagealigned, inittask)			\
	. = ALIGN(PAGE_SIZE);						\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {				\
		INIT_TASK_DATA(inittask)				\
		NOSAVE_DATA						\
		PAGE_ALIGNED_DATA(pagealigned)				\
		CACHELINE_ALIGNED_DATA(cacheline)			\
		READ_MOSTLY_DATA(cacheline)				\
		DATA_DATA						\
		CONSTRUCTORS						\
	}								\
	BUG_TABLE

#define INIT_TEXT_SECTION(inittext_align)				\
	. = ALIGN(inittext_align);					\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {		\
		_sinittext = .;						\
		INIT_TEXT						\
		_einittext = .;						\
	}

#define INIT_DATA_SECTION(initsetup_align)				\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {		\
		INIT_DATA						\
		INIT_SETUP(initsetup_align)				\
		INIT_CALLS						\
		CON_INITCALL						\
		INIT_RAM_FS						\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)			\
	. = ALIGN(sbss_align);						\
	__bss_start = .;						\
	SBSS(sbss_align)						\
	BSS(bss_align)							\
	. = ALIGN(stop_align);						\
	__bss_stop = .;
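
/*
 * Example of the contract BSS_SECTION() establishes: early boot code
 * (usually assembly) clears the region before C runs, conceptually
 *
 *	memset(__bss_start, 0, __bss_stop - __bss_start);
 *
 * The stop_align argument exists so that architectures can pad the end
 * and clear bss with wide, aligned stores.
 */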