/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 *	// __init_begin and __init_end should be page aligned, so that we can
 *	// free the whole .init memory
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data.
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#include <linux/export.h>

/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/*
 * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
 * generates .data.identifier sections, which need to be pulled in with
 * .data. We don't want to pull in .data..other sections, which Linux
 * has defined. Same for text and bss.
 */
#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
#else
#define TEXT_MAIN .text
#define DATA_MAIN .data
#define BSS_MAIN .bss
#endif

/*
 * Align to a 32 byte boundary, equal to the
 * alignment gcc 4.5 uses for a struct.
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/*
 * The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime).
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif

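/*
 * Expansion sketch: MEM_KEEP(init.data) token-pastes to *(.meminit.data).
 * With CONFIG_MEMORY_HOTPLUG the .meminit.data input sections are kept
 * in the permanent data section; without it, MEM_DISCARD(init.data)
 * routes them into the init sections instead, so they are freed along
 * with the rest of .init memory after boot.
 */
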
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	. = ALIGN(8);				\
			VMLINUX_SYMBOL(__start_mcount_loc) = .; \
			*(__mcount_loc)				\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch)	\
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .; \
				*(_ftrace_branch)		\
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST()	. = ALIGN(8);			\
				VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
				KEEP(*(_kprobe_blacklist))	\
				VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
#else
#define KPROBE_BLACKLIST()
#endif

#ifdef CONFIG_FUNCTION_ERROR_INJECTION
#define ERROR_INJECT_WHITELIST()	STRUCT_ALIGN();		\
			VMLINUX_SYMBOL(__start_error_injection_whitelist) = .; \
			KEEP(*(_error_injection_whitelist))	\
			VMLINUX_SYMBOL(__stop_error_injection_whitelist) = .;
#else
#define ERROR_INJECT_WHITELIST()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	. = ALIGN(8);				\
			VMLINUX_SYMBOL(__start_ftrace_events) = .; \
			KEEP(*(_ftrace_events))			\
			VMLINUX_SYMBOL(__stop_ftrace_events) = .; \
			VMLINUX_SYMBOL(__start_ftrace_eval_maps) = .; \
			KEEP(*(_ftrace_eval_map))		\
			VMLINUX_SYMBOL(__stop_ftrace_eval_maps) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()	 VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
			 KEEP(*(__trace_printk_fmt)) /* trace_printk fmt pointers */ \
			 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .;	\
			 KEEP(*(__tracepoint_str)) /* tracepoint string pointers */ \
			 VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8);				\
			 VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
			 KEEP(*(__syscalls_metadata))		\
			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif

#ifdef CONFIG_BPF_EVENTS
#define BPF_RAW_TP() STRUCT_ALIGN();				\
			 VMLINUX_SYMBOL(__start__bpf_raw_tp) = .; \
			 KEEP(*(__bpf_raw_tp_map))		\
			 VMLINUX_SYMBOL(__stop__bpf_raw_tp) = .;
#else
#define BPF_RAW_TP()
#endif

#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() . = ALIGN(8);				\
			 VMLINUX_SYMBOL(__earlycon_table) = .;	\
			 KEEP(*(__earlycon_table))		\
			 VMLINUX_SYMBOL(__earlycon_table_end) = .;
#else
#define EARLYCON_TABLE()
#endif

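/*
 * The __start_xxx/__stop_xxx pairs above delimit arrays that C code
 * walks at runtime. A minimal sketch of the consuming side, with a
 * hypothetical table name:
 *
 *	extern struct foo __start_foo_table[], __stop_foo_table[];
 *	struct foo *p;
 *
 *	for (p = __start_foo_table; p < __stop_foo_table; p++)
 *		handle(p);
 */
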
#define ___OF_TABLE(cfg, name)	_OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name)	___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name)	__OF_TABLE(IS_ENABLED(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name)					\
	. = ALIGN(8);						\
	VMLINUX_SYMBOL(__##name##_of_table) = .;		\
	KEEP(*(__##name##_of_table))				\
	KEEP(*(__##name##_of_table_end))

#define TIMER_OF_TABLES()	OF_TABLE(CONFIG_TIMER_OF, timer)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES()		OF_TABLE(CONFIG_COMMON_CLK, clk)
#define IOMMU_OF_TABLES()	OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)

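/*
 * Expansion sketch: with CONFIG_TIMER_OF=y, IS_ENABLED() evaluates to 1,
 * so TIMER_OF_TABLES() becomes _OF_TABLE_1(timer), i.e. roughly:
 *
 *	. = ALIGN(8);
 *	VMLINUX_SYMBOL(__timer_of_table) = .;
 *	KEEP(*(__timer_of_table))
 *	KEEP(*(__timer_of_table_end))
 *
 * With the option disabled, _OF_TABLE_0(timer) expands to nothing.
 */
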
#ifdef CONFIG_ACPI
#define ACPI_PROBE_TABLE(name)					\
	. = ALIGN(8);						\
	VMLINUX_SYMBOL(__##name##_acpi_probe_table) = .;	\
	KEEP(*(__##name##_acpi_probe_table))			\
	VMLINUX_SYMBOL(__##name##_acpi_probe_table_end) = .;
#else
#define ACPI_PROBE_TABLE(name)
#endif

#define KERNEL_DTB()						\
	STRUCT_ALIGN();						\
	VMLINUX_SYMBOL(__dtb_start) = .;			\
	KEEP(*(.dtb.init.rodata))				\
	VMLINUX_SYMBOL(__dtb_end) = .;

/*
 * .data section
 */
#define DATA_DATA						\
	*(.xiptext)						\
	*(DATA_MAIN)						\
	*(.ref.data)						\
	*(.data..shared_aligned) /* percpu related */		\
	MEM_KEEP(init.data)					\
	MEM_KEEP(exit.data)					\
	*(.data.unlikely)					\
	VMLINUX_SYMBOL(__start_once) = .;			\
	*(.data.once)						\
	VMLINUX_SYMBOL(__end_once) = .;				\
	STRUCT_ALIGN();						\
	*(__tracepoints)					\
	/* implement dynamic printk debug */			\
	. = ALIGN(8);						\
	VMLINUX_SYMBOL(__start___jump_table) = .;		\
	KEEP(*(__jump_table))					\
	VMLINUX_SYMBOL(__stop___jump_table) = .;		\
	. = ALIGN(8);						\
	VMLINUX_SYMBOL(__start___verbose) = .;			\
	KEEP(*(__verbose))					\
	VMLINUX_SYMBOL(__stop___verbose) = .;			\
	LIKELY_PROFILE()					\
	BRANCH_PROFILE()					\
	TRACE_PRINTKS()						\
	BPF_RAW_TP()						\
	TRACEPOINT_STR()

/*
 * Data section helpers
 */
#define NOSAVE_DATA						\
	. = ALIGN(PAGE_SIZE);					\
	VMLINUX_SYMBOL(__nosave_begin) = .;			\
	*(.data..nosave)					\
	. = ALIGN(PAGE_SIZE);					\
	VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align)				\
	. = ALIGN(page_align);					\
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align)					\
	. = ALIGN(align);					\
	*(.data..read_mostly)					\
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align)				\
	. = ALIGN(align);					\
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align)					\
	. = ALIGN(align);					\
	VMLINUX_SYMBOL(__start_init_task) = .;			\
	VMLINUX_SYMBOL(init_thread_union) = .;			\
	VMLINUX_SYMBOL(init_stack) = .;				\
	*(.data..init_task)					\
	*(.data..init_thread_info)				\
	. = VMLINUX_SYMBOL(__start_init_task) + THREAD_SIZE;	\
	VMLINUX_SYMBOL(__end_init_task) = .;

/*
 * Allow architectures to handle ro_after_init data on their
 * own by defining an empty RO_AFTER_INIT_DATA.
 */
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA					\
	VMLINUX_SYMBOL(__start_ro_after_init) = .;		\
	*(.data..ro_after_init)					\
	VMLINUX_SYMBOL(__end_ro_after_init) = .;
#endif

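/*
 * Illustrative (the attribute lives in C headers, not here): data is
 * routed into .data..ro_after_init with a section attribute roughly of
 * the form
 *
 *	#define __ro_after_init \
 *		__attribute__((__section__(".data..ro_after_init")))
 *
 * so it stays writable during boot and is write-protected together
 * with rodata once init completes.
 */
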
/*
 * Read-only data
 */
#define RO_DATA_SECTION(align)					\
	. = ALIGN((align));					\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_rodata) = .;		\
		*(.rodata) *(.rodata.*)				\
		RO_AFTER_INIT_DATA	/* Read only after init */ \
		KEEP(*(__vermagic))	/* Kernel version magic */ \
		. = ALIGN(8);					\
		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;	\
		KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
		VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .;	\
		*(__tracepoints_strings) /* Tracepoints: strings */ \
	}							\
								\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {	\
		*(.rodata1)					\
	}							\
								\
	/* PCI quirks */					\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;	\
		KEEP(*(.pci_fixup_early))			\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;	\
		KEEP(*(.pci_fixup_header))			\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;	\
		KEEP(*(.pci_fixup_final))			\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;	\
		KEEP(*(.pci_fixup_enable))			\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;	\
		KEEP(*(.pci_fixup_resume))			\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
		KEEP(*(.pci_fixup_resume_early))		\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;	\
		KEEP(*(.pci_fixup_suspend))			\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .; \
		KEEP(*(.pci_fixup_suspend_late))		\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \
	}							\
								\
	/* Built-in firmware blobs */				\
	.builtin_fw       : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_builtin_fw) = .;		\
		KEEP(*(.builtin_fw))				\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;		\
	}							\
								\
	TRACEDATA						\
								\
	/* Kernel symbol table: Normal symbols */		\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab) = .;		\
		KEEP(*(SORT(___ksymtab+*)))			\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;		\
	}							\
								\
	/* Kernel symbol table: GPL-only symbols */		\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;	\
		KEEP(*(SORT(___ksymtab_gpl+*)))			\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;	\
	}							\
								\
	/* Kernel symbol table: Normal unused symbols */	\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;	\
		KEEP(*(SORT(___ksymtab_unused+*)))		\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;	\
	}							\
								\
	/* Kernel symbol table: GPL-only unused symbols */	\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
		KEEP(*(SORT(___ksymtab_unused_gpl+*)))		\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
	}							\
								\
	/* Kernel symbol table: GPL-future-only symbols */	\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
		KEEP(*(SORT(___ksymtab_gpl_future+*)))		\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
	}							\
								\
	/* Kernel symbol table: Normal symbols */		\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab) = .;		\
		KEEP(*(SORT(___kcrctab+*)))			\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;		\
	}							\
								\
	/* Kernel symbol table: GPL-only symbols */		\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;	\
		KEEP(*(SORT(___kcrctab_gpl+*)))			\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;	\
	}							\
								\
	/* Kernel symbol table: Normal unused symbols */	\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;	\
		KEEP(*(SORT(___kcrctab_unused+*)))		\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;	\
	}							\
								\
	/* Kernel symbol table: GPL-only unused symbols */	\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
		KEEP(*(SORT(___kcrctab_unused_gpl+*)))		\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
	}							\
								\
	/* Kernel symbol table: GPL-future-only symbols */	\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
		KEEP(*(SORT(___kcrctab_gpl_future+*)))		\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
	}							\
								\
	/* Kernel symbol table: strings */			\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
		*(__ksymtab_strings)				\
	}							\
								\
	/* __*init sections */					\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {	\
		*(.ref.rodata)					\
		MEM_KEEP(init.rodata)				\
		MEM_KEEP(exit.rodata)				\
	}							\
								\
	/* Built-in module parameters. */			\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___param) = .;		\
		KEEP(*(__param))				\
		VMLINUX_SYMBOL(__stop___param) = .;		\
	}							\
								\
	/* Built-in module versions. */				\
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___modver) = .;		\
		KEEP(*(__modver))				\
		VMLINUX_SYMBOL(__stop___modver) = .;		\
		. = ALIGN((align));				\
		VMLINUX_SYMBOL(__end_rodata) = .;		\
	}							\
	. = ALIGN((align));

/*
 * RODATA & RO_DATA are provided for backward compatibility.
 * All architectures are supposed to use RO_DATA().
 */
#define RODATA          RO_DATA_SECTION(4096)
#define RO_DATA(align)  RO_DATA_SECTION(align)

#define SECURITY_INIT						\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;	\
		KEEP(*(.security_initcall.init))		\
		VMLINUX_SYMBOL(__security_initcall_end) = .;	\
	}

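/*
 * Illustrative: EXPORT_SYMBOL(foo) emits its export entry into a
 * per-symbol section named roughly "___ksymtab+foo" (the GPL variant
 * uses "___ksymtab_gpl+foo"), which is why the blocks above gather
 * their input with KEEP(*(SORT(___ksymtab+*))) style patterns.
 */
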
/*
 * .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map.
 *
 * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
 * code elimination is enabled, so these sections should be converted
 * to use ".." first.
 */
#define TEXT_TEXT						\
		ALIGN_FUNCTION();				\
		*(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
		*(.text..refcount)				\
		*(.ref.text)					\
	MEM_KEEP(init.text)					\
	MEM_KEEP(exit.text)

/*
 * sched.text is aligned to function alignment to ensure we have the
 * same address even at the second ld pass when generating System.map.
 */
#define SCHED_TEXT						\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__sched_text_start) = .;		\
		*(.sched.text)					\
		VMLINUX_SYMBOL(__sched_text_end) = .;

/*
 * spinlock.text is aligned to function alignment to ensure we have the
 * same address even at the second ld pass when generating System.map.
 */
#define LOCK_TEXT						\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__lock_text_start) = .;		\
		*(.spinlock.text)				\
		VMLINUX_SYMBOL(__lock_text_end) = .;

#define CPUIDLE_TEXT						\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__cpuidle_text_start) = .;	\
		*(.cpuidle.text)				\
		VMLINUX_SYMBOL(__cpuidle_text_end) = .;

#define KPROBES_TEXT						\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;	\
		*(.kprobes.text)				\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;

#define ENTRY_TEXT						\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__entry_text_start) = .;		\
		*(.entry.text)					\
		VMLINUX_SYMBOL(__entry_text_end) = .;

#define IRQENTRY_TEXT						\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__irqentry_text_start) = .;	\
		*(.irqentry.text)				\
		VMLINUX_SYMBOL(__irqentry_text_end) = .;

#define SOFTIRQENTRY_TEXT					\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__softirqentry_text_start) = .;	\
		*(.softirqentry.text)				\
		VMLINUX_SYMBOL(__softirqentry_text_end) = .;

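/*
 * Illustrative: functions are routed into these text sections with
 * section attributes, e.g. the scheduler marker is roughly
 *
 *	#define __sched __attribute__((__section__(".sched.text")))
 *
 * and in_sched_functions() then tests addresses against
 * [__sched_text_start, __sched_text_end) to hide scheduling functions
 * from wchan and stack traces.
 */
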
/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION					\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {	\
		HEAD_TEXT					\
	}

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)					\
	. = ALIGN(align);					\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ex_table) = .;		\
		KEEP(*(__ex_table))				\
		VMLINUX_SYMBOL(__stop___ex_table) = .;		\
	}

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)				\
	. = ALIGN(align);					\
	.data..init_task :  AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
		INIT_TASK_DATA(align)				\
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);			   \
			VMLINUX_SYMBOL(__ctors_start) = .; \
			KEEP(*(.ctors))			   \
			KEEP(*(SORT(.init_array.*)))	   \
			KEEP(*(.init_array))		   \
			VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA						\
	KEEP(*(SORT(___kentry+*)))				\
	*(.init.data)						\
	MEM_DISCARD(init.data)					\
	KERNEL_CTORS()						\
	MCOUNT_REC()						\
	*(.init.rodata)						\
	FTRACE_EVENTS()						\
	TRACE_SYSCALLS()					\
	KPROBE_BLACKLIST()					\
	ERROR_INJECT_WHITELIST()				\
	MEM_DISCARD(init.rodata)				\
	CLK_OF_TABLES()						\
	RESERVEDMEM_OF_TABLES()					\
	TIMER_OF_TABLES()					\
	IOMMU_OF_TABLES()					\
	CPU_METHOD_OF_TABLES()					\
	CPUIDLE_METHOD_OF_TABLES()				\
	KERNEL_DTB()						\
	IRQCHIP_OF_MATCH_TABLE()				\
	ACPI_PROBE_TABLE(irqchip)				\
	ACPI_PROBE_TABLE(timer)					\
	EARLYCON_TABLE()

#define INIT_TEXT						\
	*(.init.text)						\
	*(.text.startup)					\
	MEM_DISCARD(init.text)

#define EXIT_DATA						\
	*(.exit.data)						\
	*(.fini_array)						\
	*(.dtors)						\
	MEM_DISCARD(exit.data)					\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT						\
	*(.exit.text)						\
	*(.text.exit)						\
	MEM_DISCARD(exit.text)

#define EXIT_CALL						\
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)					\
	. = ALIGN(sbss_align);					\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {			\
		*(.dynsbss)					\
		*(.sbss)					\
		*(.scommon)					\
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align)						\
	. = ALIGN(bss_align);					\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {			\
		BSS_FIRST_SECTIONS				\
		*(.bss..page_aligned)				\
		*(.dynbss)					\
		*(BSS_MAIN)					\
		*(COMMON)					\
	}

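/*
 * Illustrative: .bss..page_aligned is fed from C via a helper roughly
 * of the form
 *
 *	#define __page_aligned_bss \
 *		__attribute__((__section__(".bss..page_aligned"))) \
 *		__aligned(PAGE_SIZE)
 *
 * which is why that input section is placed ahead of the generic
 * BSS_MAIN/COMMON entries above.
 */
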
/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG						\
		/* DWARF 1 */					\
		.debug          0 : { *(.debug) }		\
		.line           0 : { *(.line) }		\
		/* GNU DWARF 1 extensions */			\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }	\
		.debug_sfnames  0 : { *(.debug_sfnames) }	\
		/* DWARF 1.1 and DWARF 2 */			\
		.debug_aranges  0 : { *(.debug_aranges) }	\
		.debug_pubnames 0 : { *(.debug_pubnames) }	\
		/* DWARF 2 */					\
		.debug_info     0 : { *(.debug_info		\
				.gnu.linkonce.wi.*) }		\
		.debug_abbrev   0 : { *(.debug_abbrev) }	\
		.debug_line     0 : { *(.debug_line) }		\
		.debug_frame    0 : { *(.debug_frame) }		\
		.debug_str      0 : { *(.debug_str) }		\
		.debug_loc      0 : { *(.debug_loc) }		\
		.debug_macinfo  0 : { *(.debug_macinfo) }	\
		.debug_pubtypes 0 : { *(.debug_pubtypes) }	\
		/* DWARF 3 */					\
		.debug_ranges	0 : { *(.debug_ranges) }	\
		/* SGI/MIPS DWARF 2 extensions */		\
		.debug_weaknames 0 : { *(.debug_weaknames) }	\
		.debug_funcnames 0 : { *(.debug_funcnames) }	\
		.debug_typenames 0 : { *(.debug_typenames) }	\
		.debug_varnames  0 : { *(.debug_varnames) }	\
		/* GNU DWARF 2 extensions */			\
		.debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) } \
		.debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) } \
		/* DWARF 4 */					\
		.debug_types	0 : { *(.debug_types) }		\
		/* DWARF 5 */					\
		.debug_macro	0 : { *(.debug_macro) }		\
		.debug_addr	0 : { *(.debug_addr) }

/* Stabs debugging sections. */
#define STABS_DEBUG						\
		.stab 0 : { *(.stab) }				\
		.stabstr 0 : { *(.stabstr) }			\
		.stab.excl 0 : { *(.stab.excl) }		\
		.stab.exclstr 0 : { *(.stab.exclstr) }		\
		.stab.index 0 : { *(.stab.index) }		\
		.stab.indexstr 0 : { *(.stab.indexstr) }	\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE						\
	. = ALIGN(8);						\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___bug_table) = .;	\
		KEEP(*(__bug_table))				\
		VMLINUX_SYMBOL(__stop___bug_table) = .;		\
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_UNWINDER_ORC
#define ORC_UNWIND_TABLE					\
	. = ALIGN(4);						\
	.orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_orc_unwind_ip) = .;	\
		KEEP(*(.orc_unwind_ip))				\
		VMLINUX_SYMBOL(__stop_orc_unwind_ip) = .;	\
	}							\
	. = ALIGN(6);						\
	.orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_orc_unwind) = .;		\
		KEEP(*(.orc_unwind))				\
		VMLINUX_SYMBOL(__stop_orc_unwind) = .;		\
	}							\
	. = ALIGN(4);						\
	.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(orc_lookup) = .;			\
		. += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \
			LOOKUP_BLOCK_SIZE) + 1) * 4;		\
		VMLINUX_SYMBOL(orc_lookup_end) = .;		\
	}
#else
#define ORC_UNWIND_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA						\
	. = ALIGN(4);						\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__tracedata_start) = .;		\
		KEEP(*(.tracedata))				\
		VMLINUX_SYMBOL(__tracedata_end) = .;		\
	}
#else
#define TRACEDATA
#endif

#define NOTES							\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_notes) = .;		\
		*(.note.*)					\
		VMLINUX_SYMBOL(__stop_notes) = .;		\
	}

#define INIT_SETUP(initsetup_align)				\
		. = ALIGN(initsetup_align);			\
		VMLINUX_SYMBOL(__setup_start) = .;		\
		KEEP(*(.init.setup))				\
		VMLINUX_SYMBOL(__setup_end) = .;

#define INIT_CALLS_LEVEL(level)					\
		VMLINUX_SYMBOL(__initcall##level##_start) = .;	\
		KEEP(*(.initcall##level##.init))		\
		KEEP(*(.initcall##level##s.init))

#define INIT_CALLS						\
		VMLINUX_SYMBOL(__initcall_start) = .;		\
		KEEP(*(.initcallearly.init))			\
		INIT_CALLS_LEVEL(0)				\
		INIT_CALLS_LEVEL(1)				\
		INIT_CALLS_LEVEL(2)				\
		INIT_CALLS_LEVEL(3)				\
		INIT_CALLS_LEVEL(4)				\
		INIT_CALLS_LEVEL(5)				\
		INIT_CALLS_LEVEL(rootfs)			\
		INIT_CALLS_LEVEL(6)				\
		INIT_CALLS_LEVEL(7)				\
		VMLINUX_SYMBOL(__initcall_end) = .;

#define CON_INITCALL						\
		VMLINUX_SYMBOL(__con_initcall_start) = .;	\
		KEEP(*(.con_initcall.init))			\
		VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL					\
		VMLINUX_SYMBOL(__security_initcall_start) = .;	\
		KEEP(*(.security_initcall.init))		\
		VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS						\
	. = ALIGN(4);						\
	VMLINUX_SYMBOL(__initramfs_start) = .;			\
	KEEP(*(.init.ramfs))					\
	. = ALIGN(8);						\
	KEEP(*(.init.ramfs.info))
#else
#define INIT_RAM_FS
#endif

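/*
 * Illustrative: an initcall such as device_initcall(foo_init) places a
 * pointer to foo_init in .initcall6.init. INIT_CALLS above gathers the
 * levels in boot order (early, 0-5, rootfs, 6, 7), and init/main.c
 * then walks the [__initcall_start, __initcall_end) range level by
 * level at boot.
 */
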
/*
 * Memory encryption operates on a page basis. Since we need to clear
 * the memory encryption mask for this section, it needs to be aligned
 * on a page boundary and be a page-size multiple in length.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define PERCPU_DECRYPTED_SECTION				\
	. = ALIGN(PAGE_SIZE);					\
	*(.data..percpu..decrypted)				\
	. = ALIGN(PAGE_SIZE);
#else
#define PERCPU_DECRYPTED_SECTION
#endif


/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc. DISCARDS must therefore be the last of the
 * output section definitions, so that such archs can emit those
 * sections in earlier section definitions instead.
 */
#define DISCARDS						\
	/DISCARD/ : {						\
	EXIT_TEXT						\
	EXIT_DATA						\
	EXIT_CALL						\
	*(.discard)						\
	*(.discard.*)						\
	}

/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline)					\
	VMLINUX_SYMBOL(__per_cpu_start) = .;			\
	*(.data..percpu..first)					\
	. = ALIGN(PAGE_SIZE);					\
	*(.data..percpu..page_aligned)				\
	. = ALIGN(cacheline);					\
	*(.data..percpu..read_mostly)				\
	. = ALIGN(cacheline);					\
	*(.data..percpu)					\
	*(.data..percpu..shared_aligned)			\
	PERCPU_DECRYPTED_SECTION				\
	VMLINUX_SYMBOL(__per_cpu_end) = .;

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to the output section for the percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies an explicit base address and
 * all percpu symbols will be offset from the given address. If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank. Be warned that
 * output PHDR is sticky. If @phdr is specified, the next output
 * section in the linker script will go there too. @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr)			\
	VMLINUX_SYMBOL(__per_cpu_load) = .;			\
	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
				- LOAD_OFFSET) {		\
		PERCPU_INPUT(cacheline)				\
	} phdr							\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);

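/*
 * Hypothetical usage with an explicit base address and PHDR:
 *
 *	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 *
 * This places the percpu area at virtual address 0 and directs the
 * output section into a "percpu" program header (note the mandatory
 * leading colon on the PHDR argument).
 */
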
/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Align to PAGE_SIZE and output the section for the percpu area. This
 * macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
 * __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for relocatable x86_32 configurations.
 */
#define PERCPU_SECTION(cacheline)				\
	. = ALIGN(PAGE_SIZE);					\
	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__per_cpu_load) = .;		\
		PERCPU_INPUT(cacheline)				\
	}


/*
 * Definition of the high level *_SECTION macros.
 * They will fit only a subset of the architectures.
 */


/*
 * Writable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically (if not always) smaller than a PAGE_SIZE,
 * so the sections with that restriction (or a similar one) are placed
 * before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page-aligned data is not used.
 */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask)	\
	. = ALIGN(PAGE_SIZE);					\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {			\
		INIT_TASK_DATA(inittask)			\
		NOSAVE_DATA					\
		PAGE_ALIGNED_DATA(pagealigned)			\
		CACHELINE_ALIGNED_DATA(cacheline)		\
		READ_MOSTLY_DATA(cacheline)			\
		DATA_DATA					\
		CONSTRUCTORS					\
	}							\
	BUG_TABLE

#define INIT_TEXT_SECTION(inittext_align)			\
	. = ALIGN(inittext_align);				\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(_sinittext) = .;			\
		INIT_TEXT					\
		VMLINUX_SYMBOL(_einittext) = .;			\
	}

#define INIT_DATA_SECTION(initsetup_align)			\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {	\
		INIT_DATA					\
		INIT_SETUP(initsetup_align)			\
		INIT_CALLS					\
		CON_INITCALL					\
		SECURITY_INITCALL				\
		INIT_RAM_FS					\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)		\
	. = ALIGN(sbss_align);					\
	VMLINUX_SYMBOL(__bss_start) = .;			\
	SBSS(sbss_align)					\
	BSS(bss_align)						\
	. = ALIGN(stop_align);					\
	VMLINUX_SYMBOL(__bss_stop) = .;

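/*
 * Illustrative invocation from an architecture linker script (the
 * parameter values are a sample, not a recommendation):
 *
 *	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *	BSS_SECTION(0, 0, 0)
 */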