/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated
 * at run time. Absolute symbols are not relocated. If a symbol value should
 * change when the kernel is relocated, make the symbol section relative and
 * put it inside the section definition.
 */

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#define RUNTIME_DISCARD_EXIT
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN	16

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/orc_lookup.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32-bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
#endif

jiffies = jiffies_64;

#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so we retain large page mappings for
 * boundaries spanning kernel text, rodata and data sections.
 *
 * However, the kernel identity mappings will have different RWX permissions
 * for the pages mapping the text and for the padding pages (which are freed)
 * at the end of the text section. Hence the kernel identity mappings will be
 * broken down into smaller pages. For 64-bit, kernel text and kernel identity
 * mappings are different, so we can enable protection checks as well as
 * retain 2MB large page mappings for kernel text.
 */
#define X86_ALIGN_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X86_ALIGN_RODATA_END					\
		. = ALIGN(HPAGE_SIZE);				\
		__end_rodata_hpage_align = .;			\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN	. = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END	. = ALIGN(PMD_SIZE);

/*
 * This section contains data which will be mapped as decrypted. Memory
 * encryption operates on a page basis. Make this section PMD-aligned
 * to avoid splitting the pages while mapping the section early.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted, to avoid exposing more than we wish.
 */
#define BSS_DECRYPTED						\
	. = ALIGN(PMD_SIZE);					\
	__start_bss_decrypted = .;				\
	*(.bss..decrypted);					\
	. = ALIGN(PAGE_SIZE);					\
	__start_bss_decrypted_unused = .;			\
	. = ALIGN(PMD_SIZE);					\
	__end_bss_decrypted = .;
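
/*
 * Usage sketch (illustrative, not part of this script): C code places data
 * into .bss..decrypted via the __bss_decrypted attribute from
 * asm/mem_encrypt.h, e.g. for a hypothetical structure shared with the
 * hypervisor:
 *
 *	static struct shared_info boot_shared_info __bss_decrypted;
 *
 * Everything placed this way is mapped decrypted under SME/SEV while the
 * rest of the kernel image stays encrypted.
 */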

#else

#define X86_ALIGN_RODATA_BEGIN
#define X86_ALIGN_RODATA_END					\
		. = ALIGN(PAGE_SIZE);				\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
#define BSS_DECRYPTED

#endif

PHDRS {
	text PT_LOAD FLAGS(5);          /* R_E */
	data PT_LOAD FLAGS(6);          /* RW_ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);        /* RW_ */
#endif
	init PT_LOAD FLAGS(7);          /* RWE */
#endif
	note PT_NOTE FLAGS(0);          /* ___ */
}

SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
	. = __START_KERNEL;
	phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif

	/* Text and read-only data */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		_stext = .;
		/* bootstrapping code */
		HEAD_TEXT
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		ALIGN_ENTRY_TEXT_BEGIN
		ENTRY_TEXT
		ALIGN_ENTRY_TEXT_END
		SOFTIRQENTRY_TEXT
		STATIC_CALL_TEXT
		*(.gnu.warning)

#ifdef CONFIG_RETPOLINE
		__indirect_thunk_start = .;
		*(.text.__x86.indirect_thunk)
		__indirect_thunk_end = .;
#endif
	} :text =0xcccc

	/* End of text section, which should occupy a whole number of pages */
	_etext = .;
	. = ALIGN(PAGE_SIZE);

	X86_ALIGN_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X86_ALIGN_RODATA_END

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

	BUG_TABLE

	ORC_UNWIND_TABLE

	. = ALIGN(PAGE_SIZE);
	__vvar_page = .;

	.vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
		/* work around gold bug 13023 */
		__vvar_beginning_hack = .;

		/* Place all vvars at the offsets in asm/vvar.h. */
#define EMIT_VVAR(name, offset)				\
		. = __vvar_beginning_hack + offset;	\
		*(.vvar_ ## name)
#include <asm/vvar.h>
#undef EMIT_VVAR

		/*
		 * Pad the rest of the page with zeros. Otherwise the loader
		 * can leave garbage here.
		 */
		. = __vvar_beginning_hack + PAGE_SIZE;
	} :data

	. = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
	ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
	       "per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif

	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	/*
	 * Section for code used exclusively before alternatives are run. All
	 * references to such code must be patched out by alternatives, normally
	 * by using the X86_FEATURE_ALWAYS CPU feature bit.
	 *
	 * See static_cpu_has() for an example.
	 */
	.altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
		*(.altinstr_aux)
	}
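
	/*
	 * Rough sketch of the pattern this supports (simplified from
	 * static_cpu_has() in asm/cpufeature.h; the exact code differs by
	 * kernel version). The call site initially jumps into .altinstr_aux,
	 * which tests the feature bit the slow way; once alternatives have
	 * run, the call site is patched into a direct jump and the code
	 * below becomes unreachable:
	 *
	 *	asm goto(ALTERNATIVE("jmp 6f", "jmp %l[t_no]", %P[feature])
	 *		".pushsection .altinstr_aux,\"ax\"\n"
	 *		"6: testb %[bitnum], %[cap_byte]\n"
	 *		"   jnz %l[t_yes]\n"
	 *		"   jmp %l[t_no]\n"
	 *		".popsection\n"
	 *		: : ... : : t_yes, t_no);
	 */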

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

#ifdef CONFIG_X86_INTEL_MID
	.x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
								LOAD_OFFSET) {
		__x86_intel_mid_dev_start = .;
		*(.x86_intel_mid_dev.init)
		__x86_intel_mid_dev_end = .;
	}
#endif

	/*
	 * Start address and size of operations which at runtime
	 * can be patched with virtualization-friendly instructions or
	 * bare-metal native ones. Think page table operations.
	 * Details in paravirt_types.h
	 */
	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}

#ifdef CONFIG_RETPOLINE
	/*
	 * List of instructions that call/jmp/jcc to retpoline thunks
	 * __x86_indirect_thunk_*(). These instructions can be patched along
	 * with alternatives, after which the section can be freed.
	 */
	. = ALIGN(8);
	.retpoline_sites : AT(ADDR(.retpoline_sites) - LOAD_OFFSET) {
		__retpoline_sites = .;
		*(.retpoline_sites)
		__retpoline_sites_end = .;
	}
#endif

	/*
	 * struct alt_instr entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	/*
	 * And here are the replacement instructions. The linker sticks
	 * them in as binary blobs. The .altinstructions section has enough
	 * data to get their address and length, to patch the kernel safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}

	/*
	 * struct iommu_table_entry entries are injected into this section.
	 * It is an array of IOMMU entries which is sorted at runtime in
	 * dependency order. After rootfs_initcall is complete
	 * this section can be safely removed.
	 */
	.iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) {
		__iommu_table = .;
		*(.iommu_table)
		__iommu_table_end = .;
	}

	. = ALIGN(8);
	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
		__apicdrivers = .;
		*(.apicdrivers);
		__apicdrivers_end = .;
	}

	. = ALIGN(8);
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}
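
	/*
	 * Sketch of where .smp_locks entries come from (simplified from
	 * LOCK_PREFIX in asm/alternative.h): each lock-prefixed instruction
	 * records the location of its lock prefix, so that alternatives can
	 * patch the prefixes out on uniprocessor kernels:
	 *
	 *	.pushsection .smp_locks, "a"
	 *	.balign 4
	 *	.long 671f - .		# relative offset of the lock prefix
	 *	.popsection
	 *	671:	lock; <insn>
	 */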

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		. = ALIGN(PAGE_SIZE);
		*(BSS_MAIN)
		BSS_DECRYPTED
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}

	/*
	 * The memory occupied from _text to here, __end_of_kernel_reserve, is
	 * automatically reserved in setup_arch(). Anything after here must be
	 * explicitly reserved using memblock_reserve() or it will be discarded
	 * and treated as available memory.
	 */
	__end_of_kernel_reserve = .;

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}
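
	/*
	 * Usage sketch (see RESERVE_BRK() in asm/setup.h; simplified): boot
	 * code reserves brk space at link time, e.g.
	 *
	 *	RESERVE_BRK(dmi_alloc, 65536);
	 *
	 * which contributes 64k to .brk_reservation; extend_brk() then hands
	 * the memory out during early boot, before the memblock allocator
	 * is available.
	 */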

	. = ALIGN(PAGE_SIZE);		/* keep VO_INIT_SIZE page aligned */
	_end = .;

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Early scratch/workarea section: Lives outside of the kernel proper
	 * (_text - _end).
	 *
	 * Resides after _end because even though the .brk section is after
	 * __end_of_kernel_reserve, the .brk section is later reserved as a
	 * part of the kernel. Since it is located after __end_of_kernel_reserve
	 * it will be discarded and become part of the available memory. As
	 * such, it can only be used by very early boot code and must not be
	 * needed afterwards.
	 *
	 * Currently used by SME for performing in-place encryption of the
	 * kernel during boot. Resides on a 2MB boundary to simplify the
	 * pagetable setup used for SME in-place encryption.
	 */
	. = ALIGN(HPAGE_SIZE);
	.init.scratch : AT(ADDR(.init.scratch) - LOAD_OFFSET) {
		__init_scratch_begin = .;
		*(.init.scratch)
		. = ALIGN(HPAGE_SIZE);
		__init_scratch_end = .;
	}
#endif

	STABS_DEBUG
	DWARF_DEBUG
	ELF_DETAILS

	DISCARDS

	/*
	 * Make sure that the .got.plt is either completely empty or it
	 * contains only the lazy dispatch entries.
	 */
	.got.plt (INFO) : { *(.got.plt) }
	ASSERT(SIZEOF(.got.plt) == 0 ||
#ifdef CONFIG_X86_64
	       SIZEOF(.got.plt) == 0x18,
#else
	       SIZEOF(.got.plt) == 0xc,
#endif
	       "Unexpected GOT/PLT entries detected!")

	/*
	 * Sections that should stay zero sized, which is safer to
	 * explicitly check instead of blindly discarding.
	 */
	.got : {
		*(.got) *(.igot.*)
	}
	ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")

	.plt : {
		*(.plt) *(.plt.*) *(.iplt)
	}
	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")

	.rel.dyn : {
		*(.rel.*) *(.rel_*)
	}
	ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!")

	.rela.dyn : {
		*(.rela.*) *(.rela_*)
	}
	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
}

/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_X86_64
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(fixed_percpu_data);
INIT_PER_CPU(irq_stack_backing_store);

#ifdef CONFIG_SMP
. = ASSERT((fixed_percpu_data == 0),
	   "fixed_percpu_data is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_64 */

#ifdef CONFIG_KEXEC_CORE
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
	   "kexec control code size is too big");
#endif