/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 *	// __init_begin and __init_end should be page aligned, so that we can
 *	// free the whole .init memory
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#include <linux/export.h>

/* Align . to an 8 byte boundary; this equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/*
 * Align to a 32 byte boundary equal to the
 * alignment gcc 4.5 uses for a struct
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime)
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	. = ALIGN(8);					\
			VMLINUX_SYMBOL(__start_mcount_loc) = .;		\
			*(__mcount_loc)					\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch)		\
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .; \
				*(_ftrace_branch)			\
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST()	. = ALIGN(8);				\
				VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
				KEEP(*(_kprobe_blacklist))		\
				VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
#else
#define KPROBE_BLACKLIST()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	. = ALIGN(8);					\
			VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
			KEEP(*(_ftrace_events))				\
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;	\
			VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .;	\
			KEEP(*(_ftrace_enum_map))			\
			VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
#else
#define FTRACE_EVENTS()
#endif
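/*
 * Illustrative sketch (not part of this header): the __start_xxx/__stop_xxx
 * symbol pairs emitted by macros such as MCOUNT_REC() above are consumed
 * from C as array bounds.  The walker and its helper below are
 * hypothetical; the real consumer of __mcount_loc is ftrace_init():
 *
 *	extern unsigned long __start_mcount_loc[];
 *	extern unsigned long __stop_mcount_loc[];
 *
 *	static void __init example_walk_mcount(void)
 *	{
 *		unsigned long *site;
 *
 *		for (site = __start_mcount_loc;
 *		     site < __stop_mcount_loc; site++)
 *			record_mcount_site(*site);	// hypothetical helper
 *	}
 */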
#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()	VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
			KEEP(*(__trace_printk_fmt)) /* trace_printk() format strings */ \
			VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .;	\
			KEEP(*(__tracepoint_str)) /* tracepoint strings */ \
			VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8);					\
			 VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
			 KEEP(*(__syscalls_metadata))			\
			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif

#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() STRUCT_ALIGN();				\
			 VMLINUX_SYMBOL(__earlycon_table) = .;		\
			 KEEP(*(__earlycon_table))			\
			 VMLINUX_SYMBOL(__earlycon_table_end) = .;
#else
#define EARLYCON_TABLE()
#endif

#define ___OF_TABLE(cfg, name)	_OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name)	___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name)	__OF_TABLE(IS_ENABLED(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name)						\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__##name##_of_table) = .;			\
	KEEP(*(__##name##_of_table))					\
	KEEP(*(__##name##_of_table_end))

#define CLKSRC_OF_TABLES()	OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
#define CLKEVT_OF_TABLES()	OF_TABLE(CONFIG_CLKEVT_OF, clkevt)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES()		OF_TABLE(CONFIG_COMMON_CLK, clk)
#define IOMMU_OF_TABLES()	OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)

#ifdef CONFIG_ACPI
#define ACPI_PROBE_TABLE(name)						\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__##name##_acpi_probe_table) = .;		\
	KEEP(*(__##name##_acpi_probe_table))				\
	VMLINUX_SYMBOL(__##name##_acpi_probe_table_end) = .;
#else
#define ACPI_PROBE_TABLE(name)
#endif

#define KERNEL_DTB()							\
	STRUCT_ALIGN();							\
	VMLINUX_SYMBOL(__dtb_start) = .;				\
	KEEP(*(.dtb.init.rodata))					\
	VMLINUX_SYMBOL(__dtb_end) = .;

/*
 * .data section
 * The LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
 * generates .data.identifier sections that need to be pulled in with
 * .data.  We don't want to pull in .data..* sections, which have their
 * own requirements.  Same for bss.
 */
#define DATA_DATA							\
	*(.data .data.[0-9a-zA-Z_]*)					\
	*(.ref.data)							\
	*(.data..shared_aligned) /* percpu related */			\
	MEM_KEEP(init.data)						\
	MEM_KEEP(exit.data)						\
	*(.data.unlikely)						\
	STRUCT_ALIGN();							\
	*(__tracepoints)						\
	/* implement dynamic printk debug */				\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___jump_table) = .;			\
	KEEP(*(__jump_table))						\
	VMLINUX_SYMBOL(__stop___jump_table) = .;			\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___verbose) = .;				\
	KEEP(*(__verbose))						\
	VMLINUX_SYMBOL(__stop___verbose) = .;				\
	LIKELY_PROFILE()						\
	BRANCH_PROFILE()						\
	TRACE_PRINTKS()							\
	TRACEPOINT_STR()
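/*
 * Illustrative sketch (hypothetical names): the producer side of the
 * table pattern used throughout this header.  An object is dropped into
 * a named input section with the section attribute:
 *
 *	struct frob_desc {
 *		const char *name;
 *		int (*probe)(void);
 *	};
 *
 *	#define FROB_TABLE_ENTRY(n, fn)					\
 *		static const struct frob_desc __frob_##n		\
 *		__attribute__((__section__("__frob_table"), __used__))	\
 *		= { .name = #n, .probe = fn }
 *
 * and a matching KEEP(*(__frob_table)) between a start and a stop symbol,
 * as in the macros above, collects every entry so C code can iterate the
 * table.  "__frob_table" and FROB_TABLE_ENTRY() are made-up names for
 * this example; KEEP() protects the otherwise unreferenced entries from
 * --gc-sections.
 */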
/*
 * Data section helpers
 */
#define NOSAVE_DATA							\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_begin) = .;				\
	*(.data..nosave)						\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align)					\
	. = ALIGN(page_align);						\
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align)						\
	. = ALIGN(align);						\
	*(.data..read_mostly)						\
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align)					\
	. = ALIGN(align);						\
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align)						\
	. = ALIGN(align);						\
	VMLINUX_SYMBOL(__start_init_task) = .;				\
	*(.data..init_task)						\
	VMLINUX_SYMBOL(__end_init_task) = .;

/*
 * Allow architectures to handle ro_after_init data on their
 * own by defining an empty RO_AFTER_INIT_DATA.
 */
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA						\
	VMLINUX_SYMBOL(__start_ro_after_init) = .;			\
	*(.data..ro_after_init)						\
	VMLINUX_SYMBOL(__end_ro_after_init) = .;
#endif
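/*
 * Illustrative example: data lands in the sections above via attribute
 * macros rather than by hand.  Assuming the usual definitions of
 * __ro_after_init (<linux/cache.h>) and __read_mostly:
 *
 *	static unsigned long table_entries __ro_after_init;
 *	static struct kmem_cache *frob_cache __read_mostly;
 *
 * __ro_after_init data is written once during boot and then mapped
 * read-only; __read_mostly data is grouped so that rarely written
 * variables do not share cachelines with frequently written ones.
 */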
/*
 * Read-only data
 */
#define RO_DATA_SECTION(align)						\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rodata) = .;			\
		*(.rodata) *(.rodata.*)					\
		RO_AFTER_INIT_DATA	/* Read only after init */	\
		KEEP(*(__vermagic))	/* Kernel version magic */	\
		. = ALIGN(8);						\
		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;		\
		KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
		VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .;		\
		*(__tracepoints_strings)  /* Tracepoints: strings */	\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
		KEEP(*(.pci_fixup_early))				\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
		KEEP(*(.pci_fixup_header))				\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
		KEEP(*(.pci_fixup_final))				\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
		KEEP(*(.pci_fixup_enable))				\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
		KEEP(*(.pci_fixup_resume))				\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
		KEEP(*(.pci_fixup_resume_early))			\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
		KEEP(*(.pci_fixup_suspend))				\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .;	\
		KEEP(*(.pci_fixup_suspend_late))			\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .;	\
	}								\
									\
	/* Built-in firmware blobs */					\
	.builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_builtin_fw) = .;			\
		KEEP(*(.builtin_fw))					\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;			\
	}								\
									\
	TRACEDATA							\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
		KEEP(*(SORT(___ksymtab+*)))				\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
		KEEP(*(SORT(___ksymtab_gpl+*)))				\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
		KEEP(*(SORT(___ksymtab_unused+*)))			\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
		KEEP(*(SORT(___ksymtab_unused_gpl+*)))			\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
		KEEP(*(SORT(___ksymtab_gpl_future+*)))			\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
		KEEP(*(SORT(___kcrctab+*)))				\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
		KEEP(*(SORT(___kcrctab_gpl+*)))				\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
		KEEP(*(SORT(___kcrctab_unused+*)))			\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
		KEEP(*(SORT(___kcrctab_unused_gpl+*)))			\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
		KEEP(*(SORT(___kcrctab_gpl_future+*)))			\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
		MEM_KEEP(init.rodata)					\
		MEM_KEEP(exit.rodata)					\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___param) = .;			\
		KEEP(*(__param))					\
		VMLINUX_SYMBOL(__stop___param) = .;			\
	}								\
									\
	/* Built-in module versions. */					\
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___modver) = .;			\
		KEEP(*(__modver))					\
		VMLINUX_SYMBOL(__stop___modver) = .;			\
		. = ALIGN((align));					\
		VMLINUX_SYMBOL(__end_rodata) = .;			\
	}								\
	. = ALIGN((align));
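/*
 * Illustrative sketch (simplified from <linux/export.h> of this era):
 * EXPORT_SYMBOL() is what feeds the __ksymtab sections above.  Roughly,
 * each export emits a struct kernel_symbol into its own input section
 * named "___ksymtab+<symbol>", which SORT(___ksymtab+*) then collects
 * in sorted order so the module loader can binary-search the table:
 *
 *	struct kernel_symbol {
 *		unsigned long value;
 *		const char *name;
 *	};
 *
 *	// simplified; the real macro also emits a separate string-table
 *	// entry and, with MODVERSIONS, a CRC for __kcrctab
 *	#define EXPORT_SYMBOL(sym)					\
 *		static const struct kernel_symbol __ksymtab_##sym	\
 *		__used __attribute__((section("___ksymtab" "+" #sym)))	\
 *		= { (unsigned long)&sym, #sym }
 */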
/* RODATA & RO_DATA provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA          RO_DATA_SECTION(4096)
#define RO_DATA(align)  RO_DATA_SECTION(align)

#define SECURITY_INIT							\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		KEEP(*(.security_initcall.init))			\
		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
	}

/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map.
 * The LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections,
 * which generates .text.identifier sections that need to be pulled in
 * with .text, but some architectures define .text.foo sections that are
 * not intended to be pulled in here.
 * Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't
 * have conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text.hot .text .text.fixup .text.unlikely)		\
		*(.ref.text)						\
	MEM_KEEP(init.text)						\
	MEM_KEEP(exit.text)

/* .sched.text is aligned to function alignment to ensure we get the same
 * address even on the second ld pass when generating System.map */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__sched_text_start) = .;			\
		*(.sched.text)						\
		VMLINUX_SYMBOL(__sched_text_end) = .;

/* .spinlock.text is aligned to function alignment to ensure we get the
 * same address even on the second ld pass when generating System.map */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__lock_text_start) = .;			\
		*(.spinlock.text)					\
		VMLINUX_SYMBOL(__lock_text_end) = .;

#define CPUIDLE_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__cpuidle_text_start) = .;		\
		*(.cpuidle.text)					\
		VMLINUX_SYMBOL(__cpuidle_text_end) = .;

#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
		*(.kprobes.text)					\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;

#define ENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__entry_text_start) = .;			\
		*(.entry.text)						\
		VMLINUX_SYMBOL(__entry_text_end) = .;

#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
#define IRQENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__irqentry_text_start) = .;		\
		*(.irqentry.text)					\
		VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif

#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
#define SOFTIRQENTRY_TEXT						\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__softirqentry_text_start) = .;		\
		*(.softirqentry.text)					\
		VMLINUX_SYMBOL(__softirqentry_text_end) = .;
#else
#define SOFTIRQENTRY_TEXT
#endif

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION						\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {		\
		HEAD_TEXT						\
	}

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)						\
	. = ALIGN(align);						\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ex_table) = .;			\
		KEEP(*(__ex_table))					\
		VMLINUX_SYMBOL(__stop___ex_table) = .;			\
	}

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)					\
	. = ALIGN(align);						\
	.data..init_task :  AT(ADDR(.data..init_task) - LOAD_OFFSET) {	\
		INIT_TASK_DATA(align)					\
	}
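/*
 * Illustrative sketch: the exception table collected above is searched
 * at fault time to find a fixup for a faulting instruction.  Assuming
 * the simple absolute-address form of struct exception_table_entry used
 * by some architectures (the real kernel sorts the table at boot and
 * uses a binary search):
 *
 *	struct exception_table_entry {
 *		unsigned long insn, fixup;
 *	};
 *	extern struct exception_table_entry __start___ex_table[];
 *	extern struct exception_table_entry __stop___ex_table[];
 *
 *	static unsigned long example_find_fixup(unsigned long addr)
 *	{
 *		struct exception_table_entry *e;
 *
 *		for (e = __start___ex_table; e < __stop___ex_table; e++)
 *			if (e->insn == addr)
 *				return e->fixup;
 *		return 0;
 *	}
 */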
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);					\
			VMLINUX_SYMBOL(__ctors_start) = .;		\
			KEEP(*(.ctors))					\
			KEEP(*(SORT(.init_array.*)))			\
			KEEP(*(.init_array))				\
			VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA							\
	KEEP(*(SORT(___kentry+*)))					\
	*(.init.data)							\
	MEM_DISCARD(init.data)						\
	KERNEL_CTORS()							\
	MCOUNT_REC()							\
	*(.init.rodata)							\
	FTRACE_EVENTS()							\
	TRACE_SYSCALLS()						\
	KPROBE_BLACKLIST()						\
	MEM_DISCARD(init.rodata)					\
	CLK_OF_TABLES()							\
	RESERVEDMEM_OF_TABLES()						\
	CLKSRC_OF_TABLES()						\
	CLKEVT_OF_TABLES()						\
	IOMMU_OF_TABLES()						\
	CPU_METHOD_OF_TABLES()						\
	CPUIDLE_METHOD_OF_TABLES()					\
	KERNEL_DTB()							\
	IRQCHIP_OF_MATCH_TABLE()					\
	ACPI_PROBE_TABLE(irqchip)					\
	ACPI_PROBE_TABLE(clksrc)					\
	EARLYCON_TABLE()

#define INIT_TEXT							\
	*(.init.text)							\
	*(.text.startup)						\
	MEM_DISCARD(init.text)

#define EXIT_DATA							\
	*(.exit.data)							\
	*(.fini_array)							\
	*(.dtors)							\
	MEM_DISCARD(exit.data)						\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT							\
	*(.exit.text)							\
	*(.text.exit)							\
	MEM_DISCARD(exit.text)

#define EXIT_CALL							\
	*(.exitcall.exit)
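/*
 * Illustrative example: code and data reach the init/exit input sections
 * above through the attribute macros in <linux/init.h>, e.g.:
 *
 *	static int answer __initdata = 42;
 *
 *	static int __init frob_init(void)
 *	{
 *		pr_info("answer=%d\n", answer);
 *		return 0;
 *	}
 *	module_init(frob_init);
 *
 * When built in, frob_init() and answer live in .init.text/.init.data,
 * and the memory between __init_begin and __init_end is freed once boot
 * completes, so __init code must never be referenced afterwards.
 */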
/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)						\
	. = ALIGN(sbss_align);						\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {				\
		*(.sbss)						\
		*(.scommon)						\
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align)							\
	. = ALIGN(bss_align);						\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {				\
		BSS_FIRST_SECTIONS					\
		*(.bss..page_aligned)					\
		*(.dynbss)						\
		*(.bss .bss.[0-9a-zA-Z_]*)				\
		*(COMMON)						\
	}

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }

/* Stabs debugging sections. */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___bug_table) = .;		\
		KEEP(*(__bug_table))					\
		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA							\
	. = ALIGN(4);							\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__tracedata_start) = .;			\
		KEEP(*(.tracedata))					\
		VMLINUX_SYMBOL(__tracedata_end) = .;			\
	}
#else
#define TRACEDATA
#endif

#define NOTES								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start_notes) = .;			\
		*(.note.*)						\
		VMLINUX_SYMBOL(__stop_notes) = .;			\
	}

#define INIT_SETUP(initsetup_align)					\
		. = ALIGN(initsetup_align);				\
		VMLINUX_SYMBOL(__setup_start) = .;			\
		KEEP(*(.init.setup))					\
		VMLINUX_SYMBOL(__setup_end) = .;

#define INIT_CALLS_LEVEL(level)						\
		VMLINUX_SYMBOL(__initcall##level##_start) = .;		\
		KEEP(*(.initcall##level##.init))			\
		KEEP(*(.initcall##level##s.init))

#define INIT_CALLS							\
		VMLINUX_SYMBOL(__initcall_start) = .;			\
		KEEP(*(.initcallearly.init))				\
		INIT_CALLS_LEVEL(0)					\
		INIT_CALLS_LEVEL(1)					\
		INIT_CALLS_LEVEL(2)					\
		INIT_CALLS_LEVEL(3)					\
		INIT_CALLS_LEVEL(4)					\
		INIT_CALLS_LEVEL(5)					\
		INIT_CALLS_LEVEL(rootfs)				\
		INIT_CALLS_LEVEL(6)					\
		INIT_CALLS_LEVEL(7)					\
		VMLINUX_SYMBOL(__initcall_end) = .;

#define CON_INITCALL							\
		VMLINUX_SYMBOL(__con_initcall_start) = .;		\
		KEEP(*(.con_initcall.init))				\
		VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL						\
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		KEEP(*(.security_initcall.init))			\
		VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS							\
	. = ALIGN(4);							\
	VMLINUX_SYMBOL(__initramfs_start) = .;				\
	KEEP(*(.init.ramfs))						\
	. = ALIGN(8);							\
	KEEP(*(.init.ramfs.info))
#else
#define INIT_RAM_FS
#endif
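/*
 * Illustrative sketch: the level-ordered layout that INIT_CALLS builds
 * is what lets init/main.c run initcalls strictly by level.  A simplified
 * walk over the whole range (the real do_initcalls() iterates level by
 * level and also handles blacklisting and tracing):
 *
 *	typedef int (*initcall_t)(void);
 *	extern initcall_t __initcall_start[], __initcall_end[];
 *
 *	static void __init example_run_initcalls(void)
 *	{
 *		initcall_t *fn;
 *
 *		for (fn = __initcall_start; fn < __initcall_end; fn++)
 *			(*fn)();
 *	}
 */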
/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc.  DISCARDS must be the last of output
 * section definitions so that such archs put those in earlier section
 * definitions.
 */
#define DISCARDS							\
	/DISCARD/ : {							\
	EXIT_TEXT							\
	EXIT_DATA							\
	EXIT_CALL							\
	*(.discard)							\
	*(.discard.*)							\
	}

/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline)						\
	VMLINUX_SYMBOL(__per_cpu_start) = .;				\
	*(.data..percpu..first)						\
	. = ALIGN(PAGE_SIZE);						\
	*(.data..percpu..page_aligned)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu..read_mostly)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu)						\
	*(.data..percpu..shared_aligned)				\
	VMLINUX_SYMBOL(__per_cpu_end) = .;

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies explicit base address and all
 * percpu symbols will be offset from the given address.  If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank.  Be warned that
 * output PHDR is sticky.  If @phdr is specified, the next output
 * section in the linker script will go there too.  @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr)				\
	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)		\
				- LOAD_OFFSET) {			\
		PERCPU_INPUT(cacheline)					\
	} phdr								\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);

/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Aligns to PAGE_SIZE and outputs the output section for the percpu
 * area.  This macro doesn't manipulate @vaddr or @phdr, and
 * __per_cpu_load and __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for the relocatable x86_32
 * configuration.
 */
#define PERCPU_SECTION(cacheline)					\
	. = ALIGN(PAGE_SIZE);						\
	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
		PERCPU_INPUT(cacheline)					\
	}
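/*
 * Illustrative example: the .data..percpu layout above backs the
 * DEFINE_PER_CPU() machinery.  Each CPU gets its own copy of the
 * section at boot, and the accessors add that CPU's offset:
 *
 *	DEFINE_PER_CPU(unsigned long, frob_count);
 *
 *	static void frob_account(void)
 *	{
 *		this_cpu_inc(frob_count);	// current CPU's copy
 *	}
 *
 * per_cpu(frob_count, cpu) reads a specific CPU's instance; frob_count
 * itself names only the template copy placed in .data..percpu.
 */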
/*
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 */


/*
 * Writable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically smaller than a PAGE_SIZE, so the sections
 * with this (or a similar) restriction are located before the ones
 * requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page-aligned data is not used */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask)		\
	. = ALIGN(PAGE_SIZE);						\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {				\
		INIT_TASK_DATA(inittask)				\
		NOSAVE_DATA						\
		PAGE_ALIGNED_DATA(pagealigned)				\
		CACHELINE_ALIGNED_DATA(cacheline)			\
		READ_MOSTLY_DATA(cacheline)				\
		DATA_DATA						\
		CONSTRUCTORS						\
	}								\
	BUG_TABLE

#define INIT_TEXT_SECTION(inittext_align)				\
	. = ALIGN(inittext_align);					\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(_sinittext) = .;				\
		INIT_TEXT						\
		VMLINUX_SYMBOL(_einittext) = .;				\
	}

#define INIT_DATA_SECTION(initsetup_align)				\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {		\
		INIT_DATA						\
		INIT_SETUP(initsetup_align)				\
		INIT_CALLS						\
		CON_INITCALL						\
		SECURITY_INITCALL					\
		INIT_RAM_FS						\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)			\
	. = ALIGN(sbss_align);						\
	VMLINUX_SYMBOL(__bss_start) = .;				\
	SBSS(sbss_align)						\
	BSS(bss_align)							\
	. = ALIGN(stop_align);						\
	VMLINUX_SYMBOL(__bss_stop) = .;
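/*
 * Illustrative sketch: BSS_SECTION() brackets bss with __bss_start and
 * __bss_stop, which architectures use to zero bss early in boot, e.g.:
 *
 *	extern char __bss_start[], __bss_stop[];
 *
 *	static void __init example_clear_bss(void)
 *	{
 *		memset(__bss_start, 0, __bss_stop - __bss_start);
 *	}
 */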