/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA(PAGE_SIZE)
 *	RW_DATA(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *	ELF_DETAILS
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 *	// __init_begin and __init_end should be page aligned, so that we can
 *	// free the whole .init memory
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
 */

/* Offset between an output section's virtual and load address; 0 unless
 * the architecture's linker script overrides it. */
#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

/*
 * Only some architectures want to have the .notes segment visible in
 * a separate PT_NOTE ELF Program Header. When this happens, it needs
 * to be visible in both the kernel text's PT_LOAD and the PT_NOTE
 * Program Headers. In this case, though, the PT_LOAD needs to be made
 * the default again so that all the following sections don't also end
 * up in the PT_NOTE Program Header.
 */
#ifdef EMITS_PT_NOTE
#define NOTES_HEADERS		:text :note
#define NOTES_HEADERS_RESTORE	__restore_ph : { *(.__restore_ph) } :text
#else
#define NOTES_HEADERS
#define NOTES_HEADERS_RESTORE
#endif

/*
 * Some architectures have non-executable read-only exception tables.
 * They can be added to the RO_DATA segment by specifying their desired
 * alignment.
 */
#ifdef RO_EXCEPTION_TABLE_ALIGN
#define RO_EXCEPTION_TABLE	EXCEPTION_TABLE(RO_EXCEPTION_TABLE_ALIGN)
#else
#define RO_EXCEPTION_TABLE
#endif

/* Align . to the function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(CONFIG_FUNCTION_ALIGNMENT)

/*
 * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
 * generates .data.identifier sections, which need to be pulled in with
 * .data. We don't want to pull in .data..other sections, which Linux
 * has defined. Same for text and bss.
 *
 * With LTO_CLANG, the linker also splits sections by default, so we need
 * these macros to combine the sections during the final link.
 *
 * RODATA_MAIN is not used because existing code already defines .rodata.x
 * sections to be brought in with rodata.
 */
#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG)
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..L* .bss..compoundliteral*
#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
#else
#define TEXT_MAIN .text
#define DATA_MAIN .data
#define SDATA_MAIN .sdata
#define RODATA_MAIN .rodata
#define BSS_MAIN .bss
#define SBSS_MAIN .sbss
#endif

/*
 * GCC 4.5 and later have a 32 bytes section alignment for structures.
 * Except GCC 4.9, that feels the need to align on 64 bytes.
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/*
 * The order of the sched class addresses is important, as it is
 * used to determine the order of the priority of each sched class in
 * relation to each other.  Highest priority (__sched_class_highest)
 * comes first; the two boundary symbols bracket the array.
 */
#define SCHED_DATA				\
	STRUCT_ALIGN();				\
	__sched_class_highest = .;		\
	*(__stop_sched_class)			\
	*(__dl_sched_class)			\
	*(__rt_sched_class)			\
	*(__fair_sched_class)			\
	*(__idle_sched_class)			\
	__sched_class_lowest = .;

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or they can be discarded (which
 * often happens at runtime)
 */

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
#define KEEP_PATCHABLE		KEEP(*(__patchable_function_entries))
#define PATCHABLE_DISCARDS
#else
#define KEEP_PATCHABLE
#define PATCHABLE_DISCARDS	*(__patchable_function_entries)
#endif

#ifndef CONFIG_ARCH_SUPPORTS_CFI_CLANG
/*
 * Simply points to ftrace_stub, but with the proper protocol.
 * Defined by the linker script in linux/vmlinux.lds.h
 */
#define FTRACE_STUB_HACK	ftrace_stub_graph = ftrace_stub;
#else
#define FTRACE_STUB_HACK
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * The ftrace call sites are logged to a section whose name depends on the
 * compiler option used. A given kernel image will only use one, AKA
 * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
 * dependencies for FTRACE_CALLSITE_SECTION's definition.
 *
 * ftrace_ops_list_func will be defined as arch_ftrace_ops_list_func
 * as some archs will have a different prototype for that function
 * but ftrace_ops_list_func() will have a single prototype.
 */
#define MCOUNT_REC()	. = ALIGN(8);				\
			__start_mcount_loc = .;			\
			KEEP(*(__mcount_loc))			\
			KEEP_PATCHABLE				\
			__stop_mcount_loc = .;			\
			FTRACE_STUB_HACK			\
			ftrace_ops_list_func = arch_ftrace_ops_list_func;
#else
# ifdef CONFIG_FUNCTION_TRACER
#  define MCOUNT_REC()	FTRACE_STUB_HACK			\
			ftrace_ops_list_func = arch_ftrace_ops_list_func;
# else
#  define MCOUNT_REC()
# endif
#endif

/*
 * Emit a pair of marker symbols around the KEEP()'d input section _sec_
 * so C code can walk the records between them.  PRE_LABEL puts the
 * _BEGIN_/_END_ prefix before _label_ (e.g. __start_foo/__stop_foo);
 * POST_LABEL appends it (e.g. foo_start/foo_end).
 */
#define BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_)	\
	_BEGIN_##_label_ = .;						\
	KEEP(*(_sec_))							\
	_END_##_label_ = .;

#define BOUNDED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_)	\
	_label_##_BEGIN_ = .;						\
	KEEP(*(_sec_))							\
	_label_##_END_ = .;

/* Common case: __start##_label_ .. __stop##_label_ around _sec_. */
#define BOUNDED_SECTION_BY(_sec_, _label_)				\
	BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, __start, __stop)

#define BOUNDED_SECTION(_sec)	 BOUNDED_SECTION_BY(_sec, _sec)

/*
 * Like BOUNDED_SECTION_*, but additionally emit an _HDR_-prefixed symbol
 * in front of any .gnu.linkonce.<sec> header records preceding the
 * bounded payload.
 */
#define HEADERED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_, _HDR_) \
	_HDR_##_label_	= .;						\
	KEEP(*(.gnu.linkonce.##_sec_))					\
	BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_)

#define HEADERED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_, _HDR_) \
	_label_##_HDR_ = .;						\
	KEEP(*(.gnu.linkonce.##_sec_))					\
	BOUNDED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_)

#define HEADERED_SECTION_BY(_sec_, _label_)				\
	HEADERED_SECTION_PRE_LABEL(_sec_, _label_, __start, __stop)

#define HEADERED_SECTION(_sec)	 HEADERED_SECTION_BY(_sec, _sec)

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()						\
	BOUNDED_SECTION_BY(_ftrace_annotated_branch, _annotated_branch_profile)
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()					\
	BOUNDED_SECTION_BY(_ftrace_branch, _branch_profile)
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST()				\
	. = ALIGN(8);					\
	BOUNDED_SECTION(_kprobe_blacklist)
#else
#define KPROBE_BLACKLIST()
#endif

#ifdef CONFIG_FUNCTION_ERROR_INJECTION
#define ERROR_INJECT_WHITELIST()			\
	STRUCT_ALIGN();					\
	BOUNDED_SECTION(_error_injection_whitelist)
#else
#define ERROR_INJECT_WHITELIST()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()							\
	. = ALIGN(8);							\
	BOUNDED_SECTION(_ftrace_events)					\
	BOUNDED_SECTION_BY(_ftrace_eval_map, _ftrace_eval_maps)
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()		BOUNDED_SECTION_BY(__trace_printk_fmt, ___trace_bprintk_fmt)
#define TRACEPOINT_STR()	BOUNDED_SECTION_BY(__tracepoint_str, ___tracepoint_str)
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS()					\
	. = ALIGN(8);						\
	BOUNDED_SECTION_BY(__syscalls_metadata, _syscalls_metadata)
#else
#define TRACE_SYSCALLS()
#endif

#ifdef CONFIG_BPF_EVENTS
#define BPF_RAW_TP() STRUCT_ALIGN();				\
	BOUNDED_SECTION_BY(__bpf_raw_tp_map, __bpf_raw_tp)
#else
#define BPF_RAW_TP()
#endif

#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE()						\
	. = ALIGN(8);							\
	BOUNDED_SECTION_POST_LABEL(__earlycon_table, __earlycon_table, , _end)
#else
#define EARLYCON_TABLE()
#endif

#ifdef CONFIG_SECURITY
#define LSM_TABLE()					\
	. = ALIGN(8);					\
	BOUNDED_SECTION_PRE_LABEL(.lsm_info.init, _lsm_info, __start, __end)

#define EARLY_LSM_TABLE()				\
	. = ALIGN(8);					\
	BOUNDED_SECTION_PRE_LABEL(.early_lsm_info.init, _early_lsm_info, __start, __end)
#else
#define LSM_TABLE()
#define EARLY_LSM_TABLE()
#endif

/*
 * OF_TABLE(cfg, name) expands to the __<name>_of_table records only when
 * IS_ENABLED(cfg) evaluates to 1; the two indirection levels force macro
 * expansion of the IS_ENABLED() result before pasting.
 */
#define ___OF_TABLE(cfg, name)	_OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name)	___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name)	__OF_TABLE(IS_ENABLED(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name)						\
	. = ALIGN(8);							\
	__##name##_of_table = .;					\
	KEEP(*(__##name##_of_table))					\
	KEEP(*(__##name##_of_table_end))

#define TIMER_OF_TABLES()	OF_TABLE(CONFIG_TIMER_OF, timer)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES()		OF_TABLE(CONFIG_COMMON_CLK, clk)
#define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)

#ifdef CONFIG_ACPI
#define ACPI_PROBE_TABLE(name)						\
	. = ALIGN(8);							\
	BOUNDED_SECTION_POST_LABEL(__##name##_acpi_probe_table,		\
				   __##name##_acpi_probe_table,, _end)
#else
#define ACPI_PROBE_TABLE(name)
#endif

#ifdef CONFIG_THERMAL
#define THERMAL_TABLE(name)						\
	. = ALIGN(8);							\
	BOUNDED_SECTION_POST_LABEL(__##name##_thermal_table,		\
				   __##name##_thermal_table,, _end)
#else
#define THERMAL_TABLE(name)
#endif

#define KERNEL_DTB()							\
	STRUCT_ALIGN();							\
	__dtb_start = .;						\
	KEEP(*(.dtb.init.rodata))					\
	__dtb_end = .;

/*
 * .data section
 */
#define DATA_DATA							\
	*(.xiptext)							\
	*(DATA_MAIN)							\
	*(.data..decrypted)						\
	*(.ref.data)							\
	*(.data..shared_aligned) /* percpu related */			\
	*(.data..unlikely)						\
	__start_once = .;						\
	*(.data..once)							\
	__end_once = .;							\
	STRUCT_ALIGN();							\
	*(__tracepoints)						\
	/* implement dynamic printk debug */				\
	. = ALIGN(8);							\
	BOUNDED_SECTION_BY(__dyndbg_classes, ___dyndbg_classes)		\
	BOUNDED_SECTION_BY(__dyndbg, ___dyndbg)				\
	LIKELY_PROFILE()						\
	BRANCH_PROFILE()						\
	TRACE_PRINTKS()							\
	BPF_RAW_TP()							\
	TRACEPOINT_STR()

/*
 * Data section helpers
 */
#define NOSAVE_DATA							\
	. = ALIGN(PAGE_SIZE);						\
	__nosave_begin = .;						\
	*(.data..nosave)						\
	. = ALIGN(PAGE_SIZE);						\
	__nosave_end = .;

#define PAGE_ALIGNED_DATA(page_align)					\
	. = ALIGN(page_align);						\
	*(.data..page_aligned)						\
	. = ALIGN(page_align);

#define READ_MOSTLY_DATA(align)						\
	. = ALIGN(align);						\
	*(.data..read_mostly)						\
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align)					\
	. = ALIGN(align);						\
	*(.data..cacheline_aligned)

/* Reserve exactly THREAD_SIZE for the initial task/stack, regardless of
 * the actual size of the kept input sections. */
#define INIT_TASK_DATA(align)						\
	. = ALIGN(align);						\
	__start_init_task = .;						\
	init_thread_union = .;						\
	init_stack = .;							\
	KEEP(*(.data..init_task))					\
	KEEP(*(.data..init_thread_info))				\
	. = __start_init_task + THREAD_SIZE;				\
	__end_init_task = .;

#define JUMP_TABLE_DATA							\
	. = ALIGN(8);							\
	BOUNDED_SECTION_BY(__jump_table, ___jump_table)

#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
#define STATIC_CALL_DATA						\
	. = ALIGN(8);							\
	BOUNDED_SECTION_BY(.static_call_sites, _static_call_sites)	\
	BOUNDED_SECTION_BY(.static_call_tramp_key, _static_call_tramp_key)
#else
#define STATIC_CALL_DATA
#endif

/*
 * Allow architectures to handle ro_after_init data on their
 * own by defining an empty RO_AFTER_INIT_DATA.
 */
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA						\
	. = ALIGN(8);							\
	__start_ro_after_init = .;					\
	*(.data..ro_after_init)						\
	JUMP_TABLE_DATA							\
	STATIC_CALL_DATA						\
	__end_ro_after_init = .;
#endif

/*
 * .kcfi_traps contains a list of KCFI trap locations.
 */
#ifndef KCFI_TRAPS
#ifdef CONFIG_ARCH_USES_CFI_TRAPS
#define KCFI_TRAPS							\
	__kcfi_traps : AT(ADDR(__kcfi_traps) - LOAD_OFFSET) {		\
		BOUNDED_SECTION_BY(.kcfi_traps, ___kcfi_traps)		\
	}
#else
#define KCFI_TRAPS
#endif
#endif

/*
 * Read only Data
 */
#define RO_DATA(align)							\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		__start_rodata = .;					\
		*(.rodata) *(.rodata.*)					\
		SCHED_DATA						\
		RO_AFTER_INIT_DATA	/* Read only after init */	\
		. = ALIGN(8);						\
		BOUNDED_SECTION_BY(__tracepoints_ptrs, ___tracepoints_ptrs) \
		*(__tracepoints_strings)/* Tracepoints: strings */	\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		BOUNDED_SECTION_PRE_LABEL(.pci_fixup_early,  _pci_fixups_early,  __start, __end) \
		BOUNDED_SECTION_PRE_LABEL(.pci_fixup_header, _pci_fixups_header, __start, __end) \
		BOUNDED_SECTION_PRE_LABEL(.pci_fixup_final,  _pci_fixups_final,  __start, __end) \
		BOUNDED_SECTION_PRE_LABEL(.pci_fixup_enable, _pci_fixups_enable, __start, __end) \
		BOUNDED_SECTION_PRE_LABEL(.pci_fixup_resume, _pci_fixups_resume, __start, __end) \
		BOUNDED_SECTION_PRE_LABEL(.pci_fixup_suspend, _pci_fixups_suspend, __start, __end) \
		BOUNDED_SECTION_PRE_LABEL(.pci_fixup_resume_early, _pci_fixups_resume_early, __start, __end) \
		BOUNDED_SECTION_PRE_LABEL(.pci_fixup_suspend_late, _pci_fixups_suspend_late, __start, __end) \
	}								\
									\
	FW_LOADER_BUILT_IN_DATA						\
	TRACEDATA							\
									\
	PRINTK_INDEX							\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		__start___ksymtab = .;					\
		KEEP(*(SORT(___ksymtab+*)))				\
		__stop___ksymtab = .;					\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		__start___ksymtab_gpl = .;				\
		KEEP(*(SORT(___ksymtab_gpl+*)))				\
		__stop___ksymtab_gpl = .;				\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		__start___kcrctab = .;					\
		KEEP(*(SORT(___kcrctab+*)))				\
		__stop___kcrctab = .;					\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		__start___kcrctab_gpl = .;				\
		KEEP(*(SORT(___kcrctab_gpl+*)))				\
		__stop___kcrctab_gpl = .;				\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		BOUNDED_SECTION_BY(__param, ___param)			\
	}								\
									\
	/* Built-in module versions. */					\
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {			\
		BOUNDED_SECTION_BY(__modver, ___modver)			\
	}								\
									\
	KCFI_TRAPS							\
									\
	RO_EXCEPTION_TABLE						\
	NOTES								\
	BTF								\
									\
	. = ALIGN((align));						\
	__end_rodata = .;


/*
 * Non-instrumentable text section
 */
#define NOINSTR_TEXT							\
		ALIGN_FUNCTION();					\
		__noinstr_text_start = .;				\
		*(.noinstr.text)					\
		__cpuidle_text_start = .;				\
		*(.cpuidle.text)					\
		__cpuidle_text_end = .;					\
		__noinstr_text_end = .;

/*
 * .text section. Map to function alignment to avoid address changes
 * during second ld run in second ld pass when generating System.map
 *
 * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
 * code elimination is enabled, so these sections should be converted
 * to use ".." first.
 */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text.hot .text.hot.*)				\
		*(TEXT_MAIN .text.fixup)				\
		*(.text.unlikely .text.unlikely.*)			\
		*(.text.unknown .text.unknown.*)			\
		NOINSTR_TEXT						\
		*(.ref.text)						\
		*(.text.asan.* .text.tsan.*)


/* sched.text is aligned to the function alignment to ensure we have the
 * same address even at the second ld pass when generating System.map */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		__sched_text_start = .;					\
		*(.sched.text)						\
		__sched_text_end = .;

/* spinlock.text is aligned to the function alignment to ensure we have
 * the same address even at the second ld pass when generating System.map */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		__lock_text_start = .;					\
		*(.spinlock.text)					\
		__lock_text_end = .;

#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		__kprobes_text_start = .;				\
		*(.kprobes.text)					\
		__kprobes_text_end = .;

#define ENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		__entry_text_start = .;					\
		*(.entry.text)						\
		__entry_text_end = .;

#define IRQENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		__irqentry_text_start = .;				\
		*(.irqentry.text)					\
		__irqentry_text_end = .;

#define SOFTIRQENTRY_TEXT						\
		ALIGN_FUNCTION();					\
		__softirqentry_text_start = .;				\
		*(.softirqentry.text)					\
		__softirqentry_text_end = .;

#define STATIC_CALL_TEXT						\
		ALIGN_FUNCTION();					\
		__static_call_text_start = .;				\
		*(.static_call.text)					\
		__static_call_text_end = .;

/* Section used for early init (in .S files) */
#define HEAD_TEXT  KEEP(*(.head.text))

#define HEAD_TEXT_SECTION						\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {		\
		HEAD_TEXT						\
	}

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)						\
	. = ALIGN(align);						\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
		BOUNDED_SECTION_BY(__ex_table, ___ex_table)		\
	}

/*
 * .BTF
 */
#ifdef CONFIG_DEBUG_INFO_BTF
#define BTF								\
	.BTF : AT(ADDR(.BTF) - LOAD_OFFSET) {				\
		BOUNDED_SECTION_BY(.BTF, _BTF)				\
	}								\
	. = ALIGN(4);							\
	.BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) {			\
		*(.BTF_ids)						\
	}
#else
#define BTF
#endif

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)					\
	. = ALIGN(align);						\
	.data..init_task :  AT(ADDR(.data..init_task) - LOAD_OFFSET) {	\
		INIT_TASK_DATA(align)					\
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);			   \
			__ctors_start = .;		   \
			KEEP(*(SORT(.ctors.*)))		   \
			KEEP(*(.ctors))			   \
			KEEP(*(SORT(.init_array.*)))	   \
			KEEP(*(.init_array))		   \
			__ctors_end = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA							\
	KEEP(*(SORT(___kentry+*)))					\
	*(.init.data .init.data.*)					\
	KERNEL_CTORS()							\
	MCOUNT_REC()							\
	*(.init.rodata .init.rodata.*)					\
	FTRACE_EVENTS()							\
	TRACE_SYSCALLS()						\
	KPROBE_BLACKLIST()						\
	ERROR_INJECT_WHITELIST()					\
	CLK_OF_TABLES()							\
	RESERVEDMEM_OF_TABLES()						\
	TIMER_OF_TABLES()						\
	CPU_METHOD_OF_TABLES()						\
	CPUIDLE_METHOD_OF_TABLES()					\
	KERNEL_DTB()							\
	IRQCHIP_OF_MATCH_TABLE()					\
	ACPI_PROBE_TABLE(irqchip)					\
	ACPI_PROBE_TABLE(timer)						\
	THERMAL_TABLE(governor)						\
	EARLYCON_TABLE()						\
	LSM_TABLE()							\
	EARLY_LSM_TABLE()						\
	KUNIT_TABLE()

#define INIT_TEXT							\
	*(.init.text .init.text.*)					\
	*(.text.startup)

#define EXIT_DATA							\
	*(.exit.data .exit.data.*)					\
	*(.fini_array .fini_array.*)					\
	*(.dtors .dtors.*)						\

#define EXIT_TEXT							\
	*(.exit.text)							\
	*(.text.exit)							\

#define EXIT_CALL							\
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)						\
	. = ALIGN(sbss_align);						\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {				\
		*(.dynsbss)						\
		*(SBSS_MAIN)						\
		*(.scommon)						\
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align)							\
	. = ALIGN(bss_align);						\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {				\
		BSS_FIRST_SECTIONS					\
		. = ALIGN(PAGE_SIZE);					\
		*(.bss..page_aligned)					\
		. = ALIGN(PAGE_SIZE);					\
		*(.dynbss)						\
		*(BSS_MAIN)						\
		*(COMMON)						\
	}

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG							\
	/* DWARF 1 */							\
	.debug          0 : { *(.debug) }				\
	.line           0 : { *(.line) }				\
	/* GNU DWARF 1 extensions */					\
	.debug_srcinfo  0 : { *(.debug_srcinfo) }			\
	.debug_sfnames  0 : { *(.debug_sfnames) }			\
	/* DWARF 1.1 and DWARF 2 */					\
	.debug_aranges  0 : { *(.debug_aranges) }			\
	.debug_pubnames 0 : { *(.debug_pubnames) }			\
	/* DWARF 2 */							\
	.debug_info     0 : { *(.debug_info				\
		.gnu.linkonce.wi.*) }					\
	.debug_abbrev   0 : { *(.debug_abbrev) }			\
	.debug_line     0 : { *(.debug_line) }				\
	.debug_frame    0 : { *(.debug_frame) }				\
	.debug_str      0 : { *(.debug_str) }				\
	.debug_loc      0 : { *(.debug_loc) }				\
	.debug_macinfo  0 : { *(.debug_macinfo) }			\
	.debug_pubtypes 0 : { *(.debug_pubtypes) }			\
	/* DWARF 3 */							\
	.debug_ranges	0 : { *(.debug_ranges) }			\
	/* SGI/MIPS DWARF 2 extensions */				\
	.debug_weaknames 0 : { *(.debug_weaknames) }			\
	.debug_funcnames 0 : { *(.debug_funcnames) }			\
	.debug_typenames 0 : { *(.debug_typenames) }			\
	.debug_varnames  0 : { *(.debug_varnames) }			\
	/* GNU DWARF 2 extensions */					\
	.debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) }		\
	.debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) }		\
	/* DWARF 4 */							\
	.debug_types	0 : { *(.debug_types) }				\
	/* DWARF 5 */							\
	.debug_addr	0 : { *(.debug_addr) }				\
	.debug_line_str	0 : { *(.debug_line_str) }			\
	.debug_loclists	0 : { *(.debug_loclists) }			\
	.debug_macro	0 : { *(.debug_macro) }				\
	.debug_names	0 : { *(.debug_names) }				\
	.debug_rnglists	0 : { *(.debug_rnglists) }			\
	.debug_str_offsets	0 : { *(.debug_str_offsets) }

/* Stabs debugging sections. */
#define STABS_DEBUG							\
	.stab 0 : { *(.stab) }						\
	.stabstr 0 : { *(.stabstr) }					\
	.stab.excl 0 : { *(.stab.excl) }				\
	.stab.exclstr 0 : { *(.stab.exclstr) }				\
	.stab.index 0 : { *(.stab.index) }				\
	.stab.indexstr 0 : { *(.stab.indexstr) }

/* Required sections not related to debugging. */
#define ELF_DETAILS							\
	.comment 0 : { *(.comment) }					\
	.symtab 0 : { *(.symtab) }					\
	.strtab 0 : { *(.strtab) }					\
	.shstrtab 0 : { *(.shstrtab) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		BOUNDED_SECTION_BY(__bug_table, ___bug_table)		\
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_UNWINDER_ORC
#define ORC_UNWIND_TABLE						\
	.orc_header : AT(ADDR(.orc_header) - LOAD_OFFSET) {		\
		BOUNDED_SECTION_BY(.orc_header, _orc_header)		\
	}								\
	. = ALIGN(4);							\
	.orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) {	\
		BOUNDED_SECTION_BY(.orc_unwind_ip, _orc_unwind_ip)	\
	}								\
	. = ALIGN(2);							\
	.orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) {		\
		BOUNDED_SECTION_BY(.orc_unwind, _orc_unwind)		\
	}								\
	text_size = _etext - _stext;					\
	. = ALIGN(4);							\
	.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) {		\
		orc_lookup = .;						\
		/* one 4-byte slot per LOOKUP_BLOCK_SIZE of text, plus	\
		 * one terminator slot */				\
		. += (((text_size + LOOKUP_BLOCK_SIZE - 1) /		\
			LOOKUP_BLOCK_SIZE) + 1) * 4;			\
		orc_lookup_end = .;					\
	}
#else
#define ORC_UNWIND_TABLE
#endif

/* Built-in firmware blobs */
#ifdef CONFIG_FW_LOADER
#define FW_LOADER_BUILT_IN_DATA						\
	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) {	\
		BOUNDED_SECTION_PRE_LABEL(.builtin_fw, _builtin_fw, __start, __end) \
	}
#else
#define FW_LOADER_BUILT_IN_DATA
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA							\
	. = ALIGN(4);							\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
		BOUNDED_SECTION_POST_LABEL(.tracedata, __tracedata, _start, _end) \
	}
#else
#define TRACEDATA
#endif

#ifdef CONFIG_PRINTK_INDEX
#define PRINTK_INDEX							\
	.printk_index : AT(ADDR(.printk_index) - LOAD_OFFSET) {		\
		BOUNDED_SECTION_BY(.printk_index, _printk_index)	\
	}
#else
#define PRINTK_INDEX
#endif

/*
 * Discard .note.GNU-stack, which is emitted as PROGBITS by the compiler.
 * Otherwise, the type of .notes section would become PROGBITS instead of
 * NOTES.
 *
 * Also, discard .note.gnu.property, otherwise it forces the notes section
 * to be 8-byte aligned which causes alignment mismatches with the kernel's
 * custom 4-byte aligned notes.
 */
#define NOTES								\
	/DISCARD/ : {							\
		*(.note.GNU-stack)					\
		*(.note.gnu.property)					\
	}								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		BOUNDED_SECTION_BY(.note.*, _notes)			\
	} NOTES_HEADERS							\
	NOTES_HEADERS_RESTORE

#define INIT_SETUP(initsetup_align)					\
	. = ALIGN(initsetup_align);					\
	BOUNDED_SECTION_POST_LABEL(.init.setup, __setup, _start, _end)

#define INIT_CALLS_LEVEL(level)						\
	__initcall##level##_start = .;					\
	KEEP(*(.initcall##level##.init))				\
	KEEP(*(.initcall##level##s.init))				\

#define INIT_CALLS							\
	__initcall_start = .;						\
	KEEP(*(.initcallearly.init))					\
	INIT_CALLS_LEVEL(0)						\
	INIT_CALLS_LEVEL(1)						\
	INIT_CALLS_LEVEL(2)						\
	INIT_CALLS_LEVEL(3)						\
	INIT_CALLS_LEVEL(4)						\
	INIT_CALLS_LEVEL(5)						\
	INIT_CALLS_LEVEL(rootfs)					\
	INIT_CALLS_LEVEL(6)						\
	INIT_CALLS_LEVEL(7)						\
	__initcall_end = .;

#define CON_INITCALL							\
	BOUNDED_SECTION_POST_LABEL(.con_initcall.init, __con_initcall, _start, _end)

/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
#define KUNIT_TABLE()							\
	. = ALIGN(8);							\
	BOUNDED_SECTION_POST_LABEL(.kunit_test_suites, __kunit_suites, _start, _end)

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS							\
	. = ALIGN(4);							\
	__initramfs_start = .;						\
	KEEP(*(.init.ramfs))						\
	. = ALIGN(8);							\
	KEEP(*(.init.ramfs.info))
#else
#define INIT_RAM_FS
#endif

/*
 * Memory encryption operates on a page basis. Since we need to clear
 * the memory encryption mask for this section, it needs to be aligned
 * on a page boundary and be a page-size multiple in length.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define PERCPU_DECRYPTED_SECTION					\
	. = ALIGN(PAGE_SIZE);						\
	*(.data..percpu..decrypted)					\
	. = ALIGN(PAGE_SIZE);
#else
#define PERCPU_DECRYPTED_SECTION
#endif


/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc.  DISCARDS must be the last of output
 * section definitions so that such archs put those in earlier section
 * definitions.
 */
#ifdef RUNTIME_DISCARD_EXIT
#define EXIT_DISCARDS
#else
#define EXIT_DISCARDS							\
	EXIT_TEXT							\
	EXIT_DATA
#endif

/*
 * Clang's -fprofile-arcs, -fsanitize=kernel-address, and
 * -fsanitize=thread produce unwanted sections (.eh_frame
 * and .init_array.*), but CONFIG_CONSTRUCTORS wants to
 * keep any .init_array.* sections.
 * https://bugs.llvm.org/show_bug.cgi?id=46478
 */
#ifdef CONFIG_UNWIND_TABLES
#define DISCARD_EH_FRAME
#else
#define DISCARD_EH_FRAME	*(.eh_frame)
#endif
#if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN)
# ifdef CONFIG_CONSTRUCTORS
#  define SANITIZER_DISCARDS						\
	DISCARD_EH_FRAME
# else
#  define SANITIZER_DISCARDS						\
	*(.init_array) *(.init_array.*)					\
	DISCARD_EH_FRAME
# endif
#else
# define SANITIZER_DISCARDS
#endif

#define COMMON_DISCARDS							\
	SANITIZER_DISCARDS						\
	PATCHABLE_DISCARDS						\
	*(.discard)							\
	*(.discard.*)							\
	*(.export_symbol)						\
	*(.modinfo)							\
	/* ld.bfd warns about .gnu.version* even when not emitted */	\
	*(.gnu.version*)						\

#define DISCARDS							\
	/DISCARD/ : {							\
		EXIT_DISCARDS						\
		EXIT_CALL						\
		COMMON_DISCARDS						\
	}

/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline)						\
	__per_cpu_start = .;						\
	*(.data..percpu..first)						\
	. = ALIGN(PAGE_SIZE);						\
	*(.data..percpu..page_aligned)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu..read_mostly)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu)						\
	*(.data..percpu..shared_aligned)				\
	PERCPU_DECRYPTED_SECTION					\
	__per_cpu_end = .;

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies explicit base address and all
 * percpu symbols will be offset from the given address. If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank. Be warned that
 * output PHDR is sticky. If @phdr is specified, the next output
 * section in the linker script will go there too. @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr)				\
	__per_cpu_load = .;						\
	.data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) {	\
		PERCPU_INPUT(cacheline)					\
	} phdr								\
	. = __per_cpu_load + SIZEOF(.data..percpu);

/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Align to PAGE_SIZE and outputs the output section for the percpu area.
 * This macro doesn't manipulate @vaddr or @phdr and __per_cpu_load and
 * __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu which is required for relocatable x86_32 configuration.
 */
#define PERCPU_SECTION(cacheline)					\
	. = ALIGN(PAGE_SIZE);						\
	.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) {		\
		__per_cpu_load = .;					\
		PERCPU_INPUT(cacheline)					\
	}


/*
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 */


/*
 * Writable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically/always less than a PAGE_SIZE so
 * the sections that have this restriction (or similar)
 * are located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * use 0 as page_align if page_aligned data is not used */
#define RW_DATA(cacheline, pagealigned, inittask)			\
	. = ALIGN(PAGE_SIZE);						\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {				\
		INIT_TASK_DATA(inittask)				\
		NOSAVE_DATA						\
		PAGE_ALIGNED_DATA(pagealigned)				\
		CACHELINE_ALIGNED_DATA(cacheline)			\
		READ_MOSTLY_DATA(cacheline)				\
		DATA_DATA						\
		CONSTRUCTORS						\
	}								\
	BUG_TABLE							\

#define INIT_TEXT_SECTION(inittext_align)				\
	. = ALIGN(inittext_align);					\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {		\
		_sinittext = .;						\
		INIT_TEXT						\
		_einittext = .;						\
	}

#define INIT_DATA_SECTION(initsetup_align)				\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {		\
		INIT_DATA						\
		INIT_SETUP(initsetup_align)				\
		INIT_CALLS						\
		CON_INITCALL						\
		INIT_RAM_FS						\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)			\
	. = ALIGN(sbss_align);						\
	__bss_start = .;						\
	SBSS(sbss_align)						\
	BSS(bss_align)							\
	. = ALIGN(stop_align);						\
	__bss_stop = .;