/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 *	// __init_begin and __init_end should be page aligned, so that we can
 *	// free the whole .init memory
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data.
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#include <linux/export.h>

/* Align . to an 8 byte boundary; this equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/*
 * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
 * generates .data.identifier sections, which need to be pulled in with
 * .data. We don't want to pull in .data..other sections, which Linux
 * has defined. Same for text and bss.
 */
#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
#else
#define TEXT_MAIN .text
#define DATA_MAIN .data
#define BSS_MAIN .bss
#endif
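
/*
 * For illustration (not part of the original source): with
 * -ffunction-sections/-fdata-sections the compiler emits each symbol into
 * its own section, e.g. a function foo() into .text.foo and a variable
 * bar into .data.bar.  The glob .data.[0-9a-zA-Z_]* matches those
 * compiler-generated names, but not kernel-defined sections such as
 * .data..percpu, because the character class cannot match the second '.'.
 */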
/*
 * Align to a 32 byte boundary equal to the
 * alignment gcc 4.5 uses for a struct
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime)
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	. = ALIGN(8);					\
			VMLINUX_SYMBOL(__start_mcount_loc) = .;		\
			*(__mcount_loc)					\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch)		\
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .; \
				*(_ftrace_branch)			\
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST()	. = ALIGN(8);				\
				VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
				KEEP(*(_kprobe_blacklist))		\
				VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
#else
#define KPROBE_BLACKLIST()
#endif

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
#define ERROR_INJECT_LIST()	. = ALIGN(8);				\
				VMLINUX_SYMBOL(__start_kprobe_error_inject_list) = .; \
				KEEP(*(_kprobe_error_inject_list))	\
				VMLINUX_SYMBOL(__stop_kprobe_error_inject_list) = .;
#else
#define ERROR_INJECT_LIST()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	. = ALIGN(8);					\
			VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
			KEEP(*(_ftrace_events))				\
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;	\
			VMLINUX_SYMBOL(__start_ftrace_eval_maps) = .;	\
			KEEP(*(_ftrace_eval_map))			\
			VMLINUX_SYMBOL(__stop_ftrace_eval_maps) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()	 VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
			 KEEP(*(__trace_printk_fmt)) /* trace_printk() fmt pointers */ \
			 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
			 KEEP(*(__tracepoint_str)) /* tracepoint string pointers */ \
			 VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8);					\
			 VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
			 KEEP(*(__syscalls_metadata))			\
			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif

#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() STRUCT_ALIGN();				\
			 VMLINUX_SYMBOL(__earlycon_table) = .;		\
			 KEEP(*(__earlycon_table))			\
			 VMLINUX_SYMBOL(__earlycon_table_end) = .;
#else
#define EARLYCON_TABLE()
#endif

#define ___OF_TABLE(cfg, name)	_OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name)	___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name)	__OF_TABLE(IS_ENABLED(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name)						\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__##name##_of_table) = .;			\
	KEEP(*(__##name##_of_table))					\
	KEEP(*(__##name##_of_table_end))
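
/*
 * For illustration (not part of the original source): OF_TABLE() expands
 * IS_ENABLED(cfg) to 0 or 1 and dispatches on the result.  For example,
 * OF_TABLE(CONFIG_TIMER_OF, timer) becomes, when CONFIG_TIMER_OF is set:
 *
 *	. = ALIGN(8);
 *	VMLINUX_SYMBOL(__timer_of_table) = .;
 *	KEEP(*(__timer_of_table))
 *	KEEP(*(__timer_of_table_end))
 *
 * and expands to nothing when it is not set.
 */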
#define TIMER_OF_TABLES()	OF_TABLE(CONFIG_TIMER_OF, timer)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES()		OF_TABLE(CONFIG_COMMON_CLK, clk)
#define IOMMU_OF_TABLES()	OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)

#ifdef CONFIG_ACPI
#define ACPI_PROBE_TABLE(name)						\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__##name##_acpi_probe_table) = .;		\
	KEEP(*(__##name##_acpi_probe_table))				\
	VMLINUX_SYMBOL(__##name##_acpi_probe_table_end) = .;
#else
#define ACPI_PROBE_TABLE(name)
#endif

#define KERNEL_DTB()							\
	STRUCT_ALIGN();							\
	VMLINUX_SYMBOL(__dtb_start) = .;				\
	KEEP(*(.dtb.init.rodata))					\
	VMLINUX_SYMBOL(__dtb_end) = .;

/*
 * .data section
 */
#define DATA_DATA							\
	*(.xiptext)							\
	*(DATA_MAIN)							\
	*(.ref.data)							\
	*(.data..shared_aligned) /* percpu related */			\
	MEM_KEEP(init.data)						\
	MEM_KEEP(exit.data)						\
	*(.data.unlikely)						\
	VMLINUX_SYMBOL(__start_once) = .;				\
	*(.data.once)							\
	VMLINUX_SYMBOL(__end_once) = .;					\
	STRUCT_ALIGN();							\
	*(__tracepoints)						\
	/* implement dynamic printk debug */				\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___jump_table) = .;			\
	KEEP(*(__jump_table))						\
	VMLINUX_SYMBOL(__stop___jump_table) = .;			\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___verbose) = .;				\
	KEEP(*(__verbose))						\
	VMLINUX_SYMBOL(__stop___verbose) = .;				\
	LIKELY_PROFILE()						\
	BRANCH_PROFILE()						\
	TRACE_PRINTKS()							\
	TRACEPOINT_STR()

/*
 * Data section helpers
 */
#define NOSAVE_DATA							\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_begin) = .;				\
	*(.data..nosave)						\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align)					\
	. = ALIGN(page_align);						\
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align)						\
	. = ALIGN(align);						\
	*(.data..read_mostly)						\
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align)					\
	. = ALIGN(align);						\
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align)						\
	. = ALIGN(align);						\
	VMLINUX_SYMBOL(__start_init_task) = .;				\
	*(.data..init_task)						\
	VMLINUX_SYMBOL(__end_init_task) = .;

/*
 * Allow architectures to handle ro_after_init data on their
 * own by defining an empty RO_AFTER_INIT_DATA.
 */
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA						\
	VMLINUX_SYMBOL(__start_ro_after_init) = .;			\
	*(.data..ro_after_init)						\
	VMLINUX_SYMBOL(__end_ro_after_init) = .;
#endif
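
/*
 * Illustrative note (summary, not from the original source): variables
 * marked __ro_after_init, e.g. a hypothetical
 *
 *	static int table_size __ro_after_init;
 *
 * are placed in .data..ro_after_init and collected here; the range
 * [__start_ro_after_init, __end_ro_after_init] is intended to be made
 * read-only once init completes.
 */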
/*
 * Read only Data
 */
#define RO_DATA_SECTION(align)						\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rodata) = .;			\
		*(.rodata) *(.rodata.*)					\
		RO_AFTER_INIT_DATA	/* Read only after init */	\
		KEEP(*(__vermagic))	/* Kernel version magic */	\
		. = ALIGN(8);						\
		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;		\
		KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
		VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .;		\
		*(__tracepoints_strings) /* Tracepoints: strings */	\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
		KEEP(*(.pci_fixup_early))				\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
		KEEP(*(.pci_fixup_header))				\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
		KEEP(*(.pci_fixup_final))				\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
		KEEP(*(.pci_fixup_enable))				\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
		KEEP(*(.pci_fixup_resume))				\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
		KEEP(*(.pci_fixup_resume_early))			\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
		KEEP(*(.pci_fixup_suspend))				\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .;	\
		KEEP(*(.pci_fixup_suspend_late))			\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .;	\
	}								\
									\
	/* Built-in firmware blobs */					\
	.builtin_fw       : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_builtin_fw) = .;			\
		KEEP(*(.builtin_fw))					\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;			\
	}								\
									\
	TRACEDATA							\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
		KEEP(*(SORT(___ksymtab+*)))				\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
		KEEP(*(SORT(___ksymtab_gpl+*)))				\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
		KEEP(*(SORT(___ksymtab_unused+*)))			\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
		KEEP(*(SORT(___ksymtab_unused_gpl+*)))			\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
		KEEP(*(SORT(___ksymtab_gpl_future+*)))			\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
		KEEP(*(SORT(___kcrctab+*)))				\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
		KEEP(*(SORT(___kcrctab_gpl+*)))				\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
		KEEP(*(SORT(___kcrctab_unused+*)))			\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
		KEEP(*(SORT(___kcrctab_unused_gpl+*)))			\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
		KEEP(*(SORT(___kcrctab_gpl_future+*)))			\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
		MEM_KEEP(init.rodata)					\
		MEM_KEEP(exit.rodata)					\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___param) = .;			\
		KEEP(*(__param))					\
		VMLINUX_SYMBOL(__stop___param) = .;			\
	}								\
									\
	/* Built-in module versions. */					\
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___modver) = .;			\
		KEEP(*(__modver))					\
		VMLINUX_SYMBOL(__stop___modver) = .;			\
		. = ALIGN((align));					\
		VMLINUX_SYMBOL(__end_rodata) = .;			\
	}								\
	. = ALIGN((align));
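
/*
 * Background note (summary, not from the original source): EXPORT_SYMBOL()
 * and its variants emit one struct kernel_symbol per export into a
 * per-symbol input section named ___ksymtab+<symbol> (GPL variants use
 * ___ksymtab_gpl+<symbol>, and so on), which is why the output sections
 * above collect their input with KEEP(*(SORT(___ksymtab+*))).
 */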
/* RODATA & RO_DATA provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA          RO_DATA_SECTION(4096)
#define RO_DATA(align)  RO_DATA_SECTION(align)

#define SECURITY_INIT							\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		KEEP(*(.security_initcall.init))			\
		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
	}

/*
 * .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map.
 *
 * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
 * code elimination is enabled, so these sections should be converted
 * to use ".." first.
 */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text.hot TEXT_MAIN .text.fixup .text.unlikely)	\
		*(.text..refcount)					\
		*(.ref.text)						\
	MEM_KEEP(init.text)						\
	MEM_KEEP(exit.text)
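
/*
 * Background note (summary, not from the original source): code annotated
 * with __sched is compiled into .sched.text, which SCHED_TEXT below places
 * between __sched_text_start and __sched_text_end; in_sched_functions()
 * checks those bounds so scheduler internals can be skipped, e.g. when
 * computing a task's wchan.
 */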
/* sched.text is aligned to function alignment to ensure we get the same
 * address even on the second ld pass when generating System.map */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__sched_text_start) = .;			\
		*(.sched.text)						\
		VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we get the same
 * address even on the second ld pass when generating System.map */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__lock_text_start) = .;			\
		*(.spinlock.text)					\
		VMLINUX_SYMBOL(__lock_text_end) = .;

#define CPUIDLE_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__cpuidle_text_start) = .;		\
		*(.cpuidle.text)					\
		VMLINUX_SYMBOL(__cpuidle_text_end) = .;

#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
		*(.kprobes.text)					\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;

#define ENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__entry_text_start) = .;			\
		*(.entry.text)						\
		VMLINUX_SYMBOL(__entry_text_end) = .;

#define IRQENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__irqentry_text_start) = .;		\
		*(.irqentry.text)					\
		VMLINUX_SYMBOL(__irqentry_text_end) = .;

#define SOFTIRQENTRY_TEXT						\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__softirqentry_text_start) = .;		\
		*(.softirqentry.text)					\
		VMLINUX_SYMBOL(__softirqentry_text_end) = .;

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION						\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {		\
		HEAD_TEXT						\
	}

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)						\
	. = ALIGN(align);						\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ex_table) = .;			\
		KEEP(*(__ex_table))					\
		VMLINUX_SYMBOL(__stop___ex_table) = .;			\
	}
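
/*
 * Background note (summary, not from the original source): __ex_table
 * entries are emitted next to instructions that are allowed to fault (for
 * example by the uaccess macros); on a fault, the kernel searches
 * [__start___ex_table, __stop___ex_table] for the faulting address and
 * branches to the associated fixup code instead of oopsing.
 */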
/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)					\
	. = ALIGN(align);						\
	.data..init_task :  AT(ADDR(.data..init_task) - LOAD_OFFSET) {	\
		INIT_TASK_DATA(align)					\
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);			   \
			VMLINUX_SYMBOL(__ctors_start) = .; \
			KEEP(*(.ctors))			   \
			KEEP(*(SORT(.init_array.*)))	   \
			KEEP(*(.init_array))		   \
			VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA							\
	KEEP(*(SORT(___kentry+*)))					\
	*(.init.data)							\
	MEM_DISCARD(init.data)						\
	KERNEL_CTORS()							\
	MCOUNT_REC()							\
	*(.init.rodata)							\
	FTRACE_EVENTS()							\
	TRACE_SYSCALLS()						\
	KPROBE_BLACKLIST()						\
	ERROR_INJECT_LIST()						\
	MEM_DISCARD(init.rodata)					\
	CLK_OF_TABLES()							\
	RESERVEDMEM_OF_TABLES()						\
	TIMER_OF_TABLES()						\
	IOMMU_OF_TABLES()						\
	CPU_METHOD_OF_TABLES()						\
	CPUIDLE_METHOD_OF_TABLES()					\
	KERNEL_DTB()							\
	IRQCHIP_OF_MATCH_TABLE()					\
	ACPI_PROBE_TABLE(irqchip)					\
	ACPI_PROBE_TABLE(timer)						\
	ACPI_PROBE_TABLE(iort)						\
	EARLYCON_TABLE()

#define INIT_TEXT							\
	*(.init.text)							\
	*(.text.startup)						\
	MEM_DISCARD(init.text)

#define EXIT_DATA							\
	*(.exit.data)							\
	*(.fini_array)							\
	*(.dtors)							\
	MEM_DISCARD(exit.data)						\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT							\
	*(.exit.text)							\
	*(.text.exit)							\
	MEM_DISCARD(exit.text)

#define EXIT_CALL							\
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)						\
	. = ALIGN(sbss_align);						\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {				\
		*(.dynsbss)						\
		*(.sbss)						\
		*(.scommon)						\
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align)							\
	. = ALIGN(bss_align);						\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {				\
		BSS_FIRST_SECTIONS					\
		*(.bss..page_aligned)					\
		*(.dynbss)						\
		*(BSS_MAIN)						\
		*(COMMON)						\
	}
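
/*
 * Illustrative sketch (hypothetical, not from the original source): an
 * architecture that must keep a particular object at the very start of
 * .bss could define, before including this header:
 *
 *	#define BSS_FIRST_SECTIONS *(.bss..early_stack)
 *
 * The section name above is made up purely for illustration.
 */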
/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		.debug_pubtypes 0 : { *(.debug_pubtypes) }		\
		/* DWARF 3 */						\
		.debug_ranges	0 : { *(.debug_ranges) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }		\
		/* GNU DWARF 2 extensions */				\
		.debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) }	\
		.debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) }	\
		/* DWARF 4 */						\
		.debug_types	0 : { *(.debug_types) }			\
		/* DWARF 5 */						\
		.debug_macro	0 : { *(.debug_macro) }			\
		.debug_addr	0 : { *(.debug_addr) }

/* Stabs debugging sections. */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___bug_table) = .;		\
		KEEP(*(__bug_table))					\
		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_UNWINDER_ORC
#define ORC_UNWIND_TABLE						\
	. = ALIGN(4);							\
	.orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_orc_unwind_ip) = .;		\
		KEEP(*(.orc_unwind_ip))					\
		VMLINUX_SYMBOL(__stop_orc_unwind_ip) = .;		\
	}								\
	. = ALIGN(6);							\
	.orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_orc_unwind) = .;			\
		KEEP(*(.orc_unwind))					\
		VMLINUX_SYMBOL(__stop_orc_unwind) = .;			\
	}								\
	. = ALIGN(4);							\
	.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(orc_lookup) = .;				\
		/* one 4-byte entry per LOOKUP_BLOCK_SIZE bytes of .text, rounded up, plus a terminator entry */ \
		. += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) /	\
			LOOKUP_BLOCK_SIZE) + 1) * 4;			\
		VMLINUX_SYMBOL(orc_lookup_end) = .;			\
	}
#else
#define ORC_UNWIND_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA							\
	. = ALIGN(4);							\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__tracedata_start) = .;			\
		KEEP(*(.tracedata))					\
		VMLINUX_SYMBOL(__tracedata_end) = .;			\
	}
#else
#define TRACEDATA
#endif

#define NOTES								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start_notes) = .;			\
		*(.note.*)						\
		VMLINUX_SYMBOL(__stop_notes) = .;			\
	}

#define INIT_SETUP(initsetup_align)					\
		. = ALIGN(initsetup_align);				\
		VMLINUX_SYMBOL(__setup_start) = .;			\
		KEEP(*(.init.setup))					\
		VMLINUX_SYMBOL(__setup_end) = .;

#define INIT_CALLS_LEVEL(level)						\
		VMLINUX_SYMBOL(__initcall##level##_start) = .;		\
		KEEP(*(.initcall##level##.init))			\
		KEEP(*(.initcall##level##s.init))

#define INIT_CALLS							\
		VMLINUX_SYMBOL(__initcall_start) = .;			\
		KEEP(*(.initcallearly.init))				\
		INIT_CALLS_LEVEL(0)					\
		INIT_CALLS_LEVEL(1)					\
		INIT_CALLS_LEVEL(2)					\
		INIT_CALLS_LEVEL(3)					\
		INIT_CALLS_LEVEL(4)					\
		INIT_CALLS_LEVEL(5)					\
		INIT_CALLS_LEVEL(rootfs)				\
		INIT_CALLS_LEVEL(6)					\
		INIT_CALLS_LEVEL(7)					\
		VMLINUX_SYMBOL(__initcall_end) = .;

#define CON_INITCALL							\
		VMLINUX_SYMBOL(__con_initcall_start) = .;		\
		KEEP(*(.con_initcall.init))				\
		VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL						\
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		KEEP(*(.security_initcall.init))			\
		VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS							\
	. = ALIGN(4);							\
	VMLINUX_SYMBOL(__initramfs_start) = .;				\
	KEEP(*(.init.ramfs))						\
	. = ALIGN(8);							\
	KEEP(*(.init.ramfs.info))
#else
#define INIT_RAM_FS
#endif
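
/*
 * Background note (summary, not from the original source): the initcall
 * macros in <linux/init.h> place a function pointer in the matching
 * .initcall<level>.init section; e.g. core_initcall() uses level 1 and
 * device_initcall()/module_init() use level 6, so do_initcalls() can run
 * them in the level order collected above.  The "rootfs" level between 5
 * and 6 exists so the initramfs is populated before device drivers probe.
 */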
/*
 * Memory encryption operates on a page basis. Since we need to clear
 * the memory encryption mask for this section, it needs to be aligned
 * on a page boundary and be a page-size multiple in length.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define PERCPU_DECRYPTED_SECTION					\
	. = ALIGN(PAGE_SIZE);						\
	*(.data..percpu..decrypted)					\
	. = ALIGN(PAGE_SIZE);
#else
#define PERCPU_DECRYPTED_SECTION
#endif


/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc.  DISCARDS must be the last of output
 * section definitions so that such archs put those in earlier section
 * definitions.
 */
#define DISCARDS							\
	/DISCARD/ : {							\
	EXIT_TEXT							\
	EXIT_DATA							\
	EXIT_CALL							\
	*(.discard)							\
	*(.discard.*)							\
	}

/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline)						\
	VMLINUX_SYMBOL(__per_cpu_start) = .;				\
	*(.data..percpu..first)						\
	. = ALIGN(PAGE_SIZE);						\
	*(.data..percpu..page_aligned)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu..read_mostly)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu)						\
	*(.data..percpu..shared_aligned)				\
	PERCPU_DECRYPTED_SECTION					\
	VMLINUX_SYMBOL(__per_cpu_end) = .;

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies explicit base address and all
 * percpu symbols will be offset from the given address. If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank. Be warned that
 * output PHDR is sticky. If @phdr is specified, the next output
 * section in the linker script will go there too. @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr)				\
	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)		\
				- LOAD_OFFSET) {			\
		PERCPU_INPUT(cacheline)					\
	} phdr								\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
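
/*
 * Illustrative sketch (assumption, not from the original source): an
 * architecture wanting a zero-based percpu area in its own program header
 * might write, in its vmlinux.lds.S:
 *
 *	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 *
 * with a "percpu" PHDR declared in its PHDRS clause.  Note the leading
 * colon required for @phdr, and that the PHDR stays in effect for
 * subsequent output sections unless overridden.
 */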
/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Aligns to PAGE_SIZE and outputs the output section for the percpu
 * area.  This macro doesn't manipulate @vaddr or @phdr, and
 * __per_cpu_load and __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for the relocatable x86_32
 * configuration.
 */
#define PERCPU_SECTION(cacheline)					\
	. = ALIGN(PAGE_SIZE);						\
	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
		PERCPU_INPUT(cacheline)					\
	}


/*
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 */


/*
 * Writable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically smaller than a PAGE_SIZE, so the sections
 * with that (or a similar) restriction are located before the ones
 * requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page_aligned data is not used.
 */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask)		\
	. = ALIGN(PAGE_SIZE);						\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {				\
		INIT_TASK_DATA(inittask)				\
		NOSAVE_DATA						\
		PAGE_ALIGNED_DATA(pagealigned)				\
		CACHELINE_ALIGNED_DATA(cacheline)			\
		READ_MOSTLY_DATA(cacheline)				\
		DATA_DATA						\
		CONSTRUCTORS						\
	}								\
	BUG_TABLE

#define INIT_TEXT_SECTION(inittext_align)				\
	. = ALIGN(inittext_align);					\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(_sinittext) = .;				\
		INIT_TEXT						\
		VMLINUX_SYMBOL(_einittext) = .;				\
	}

#define INIT_DATA_SECTION(initsetup_align)				\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {		\
		INIT_DATA						\
		INIT_SETUP(initsetup_align)				\
		INIT_CALLS						\
		CON_INITCALL						\
		SECURITY_INITCALL					\
		INIT_RAM_FS						\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)			\
	. = ALIGN(sbss_align);						\
	VMLINUX_SYMBOL(__bss_start) = .;				\
	SBSS(sbss_align)						\
	BSS(bss_align)							\
	. = ALIGN(stop_align);						\
	VMLINUX_SYMBOL(__bss_stop) = .;