/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated at run
 * time. Absolute symbols are not relocated. If the symbol value should
 * change when the kernel is relocated, make the symbol section-relative and
 * put it inside the section definition.
 */

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#define RUNTIME_DISCARD_EXIT
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN	16

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/orc_lookup.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32-bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
#endif

jiffies = jiffies_64;

#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so we retain large page mappings for
 * boundaries spanning kernel text, rodata and data sections.
 *
 * However, the kernel identity mappings will have different RWX permissions
 * for the pages mapping the text section and for the (later freed) pages
 * padding it, so the kernel identity mappings are broken up into smaller
 * pages. On 64-bit, kernel text and kernel identity mappings are separate,
 * so we can enable protection checks as well as retain 2MB large page
 * mappings for kernel text.
 */
#define X86_ALIGN_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X86_ALIGN_RODATA_END				\
		. = ALIGN(HPAGE_SIZE);			\
		__end_rodata_hpage_align = .;		\
		__end_rodata_aligned = .;
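/*
 * A worked example of the alignment above (assuming the usual x86-64
 * HPAGE_SIZE of 2MB): ALIGN(HPAGE_SIZE) rounds the location counter up to
 * the next 2MB boundary, e.g. 0xffffffff81234567 -> 0xffffffff81400000,
 * so rodata begins and ends on PMD-mappable boundaries.
 */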
#define ALIGN_ENTRY_TEXT_BEGIN	. = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END	. = ALIGN(PMD_SIZE);

/*
 * This section contains data which will be mapped as decrypted. Memory
 * encryption operates on a page basis. Make this section PMD-aligned
 * to avoid splitting the pages while mapping the section early.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
 */
#define BSS_DECRYPTED					\
	. = ALIGN(PMD_SIZE);				\
	__start_bss_decrypted = .;			\
	*(.bss..decrypted);				\
	. = ALIGN(PAGE_SIZE);				\
	__start_bss_decrypted_unused = .;		\
	. = ALIGN(PMD_SIZE);				\
	__end_bss_decrypted = .;			\

#else

#define X86_ALIGN_RODATA_BEGIN
#define X86_ALIGN_RODATA_END				\
		. = ALIGN(PAGE_SIZE);			\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
#define BSS_DECRYPTED

#endif

PHDRS {
	text PT_LOAD FLAGS(5);          /* R_E */
	data PT_LOAD FLAGS(6);          /* RW_ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);        /* RW_ */
#endif
	init PT_LOAD FLAGS(7);          /* RWE */
#endif
	note PT_NOTE FLAGS(0);          /* ___ */
}
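/*
 * The FLAGS() values above are the ELF p_flags bits: PF_X = 1, PF_W = 2,
 * PF_R = 4. Hence 5 = R_E, 6 = RW_, 7 = RWE and 0 = ___, matching the
 * comments on each program header.
 */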
SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
	. = __START_KERNEL;
	phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif

	/* Text and read-only data */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		_stext = .;
		/* bootstrapping code */
		HEAD_TEXT
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		SOFTIRQENTRY_TEXT
#ifdef CONFIG_RETPOLINE
		__indirect_thunk_start = .;
		*(.text.__x86.indirect_thunk)
		*(.text.__x86.return_thunk)
		__indirect_thunk_end = .;
#endif
		STATIC_CALL_TEXT

		ALIGN_ENTRY_TEXT_BEGIN
#ifdef CONFIG_CPU_SRSO
		*(.text.__x86.rethunk_untrain)
#endif

		ENTRY_TEXT

#ifdef CONFIG_CPU_SRSO
		/*
		 * See the comment above srso_untrain_ret_alias()'s
		 * definition.
		 */
		. = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
		*(.text.__x86.rethunk_safe)
#endif
		ALIGN_ENTRY_TEXT_END
		*(.gnu.warning)

	} :text =0xcccc
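	/*
	 * Note on the "=0xcccc" fill above: gaps the linker leaves in .text
	 * are padded with 0xcc bytes (the INT3 opcode), so a stray jump into
	 * padding traps instead of executing leftover bytes.
	 */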
	/* End of text section, which should occupy whole number of pages */
	_etext = .;
	. = ALIGN(PAGE_SIZE);

	X86_ALIGN_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X86_ALIGN_RODATA_END

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

	BUG_TABLE

	ORC_UNWIND_TABLE

	. = ALIGN(PAGE_SIZE);
	__vvar_page = .;

	.vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
		/* work around gold bug 13023 */
		__vvar_beginning_hack = .;

		/* Place all vvars at the offsets in asm/vvar.h. */
#define EMIT_VVAR(name, offset)				\
		. = __vvar_beginning_hack + offset;	\
		*(.vvar_ ## name)
#include <asm/vvar.h>
#undef EMIT_VVAR

		/*
		 * Pad the rest of the page with zeros. Otherwise the loader
		 * can leave garbage here.
		 */
		. = __vvar_beginning_hack + PAGE_SIZE;
	} :data
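	/*
	 * For illustration (hypothetical name and offset): an asm/vvar.h
	 * entry that triggers EMIT_VVAR(foo, 0x80) expands under the
	 * definition above to:
	 *
	 *	. = __vvar_beginning_hack + 0x80;
	 *	*(.vvar_foo)
	 *
	 * pinning each vvar at its fixed offset within the page.
	 */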
	. = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
	ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
	       "per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif

	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	/*
	 * Section for code used exclusively before alternatives are run. All
	 * references to such code must be patched out by alternatives, normally
	 * by using the X86_FEATURE_ALWAYS CPU feature bit.
	 *
	 * See static_cpu_has() for an example.
	 */
	.altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
		*(.altinstr_aux)
	}

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

#ifdef CONFIG_X86_INTEL_MID
	.x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
								LOAD_OFFSET) {
		__x86_intel_mid_dev_start = .;
		*(.x86_intel_mid_dev.init)
		__x86_intel_mid_dev_end = .;
	}
#endif

	/*
	 * Start address and size of operations which can be patched at
	 * runtime with virtualization-friendly instructions or bare-metal
	 * native ones. Think page table operations.
	 * Details in paravirt_types.h.
	 */
	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}

#ifdef CONFIG_RETPOLINE
	/*
	 * List of instructions that call/jmp/jcc to retpoline thunks
	 * __x86_indirect_thunk_*(). These instructions can be patched along
	 * with alternatives, after which the section can be freed.
	 */
	. = ALIGN(8);
	.retpoline_sites : AT(ADDR(.retpoline_sites) - LOAD_OFFSET) {
		__retpoline_sites = .;
		*(.retpoline_sites)
		__retpoline_sites_end = .;
	}
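	/*
	 * Like the other annotation tables here, the section is bracketed by
	 * start/end symbols so C code can walk it as an array. Roughly (a
	 * sketch, not the exact kernel code):
	 *
	 *	extern s32 __retpoline_sites[], __retpoline_sites_end[];
	 *	apply_retpolines(__retpoline_sites, __retpoline_sites_end);
	 *
	 * See apply_retpolines() in arch/x86/kernel/alternative.c.
	 */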
	. = ALIGN(8);
	.return_sites : AT(ADDR(.return_sites) - LOAD_OFFSET) {
		__return_sites = .;
		*(.return_sites)
		__return_sites_end = .;
	}

	. = ALIGN(8);
	.call_sites : AT(ADDR(.call_sites) - LOAD_OFFSET) {
		__call_sites = .;
		*(.call_sites)
		__call_sites_end = .;
	}
#endif

#ifdef CONFIG_X86_KERNEL_IBT
	. = ALIGN(8);
	.ibt_endbr_seal : AT(ADDR(.ibt_endbr_seal) - LOAD_OFFSET) {
		__ibt_endbr_seal = .;
		*(.ibt_endbr_seal)
		__ibt_endbr_seal_end = .;
	}
#endif

#ifdef CONFIG_FINEIBT
	. = ALIGN(8);
	.cfi_sites : AT(ADDR(.cfi_sites) - LOAD_OFFSET) {
		__cfi_sites = .;
		*(.cfi_sites)
		__cfi_sites_end = .;
	}
#endif

	/*
	 * struct alt_instr entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	/*
	 * And here are the replacement instructions. The linker sticks them
	 * in as binary blobs. The .altinstructions section has enough data
	 * to get their address and length, to patch the kernel safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}
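	/*
	 * Rough sketch of how the two sections cooperate: each struct
	 * alt_instr in .altinstructions records the original instruction's
	 * location and length, the required CPU feature flag, and where its
	 * replacement bytes live in .altinstr_replacement; at boot,
	 * apply_alternatives() copies the replacement over the original
	 * when the feature is present.
	 */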
	. = ALIGN(8);
	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
		__apicdrivers = .;
		*(.apicdrivers);
		__apicdrivers_end = .;
	}

	. = ALIGN(8);
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		. = ALIGN(PAGE_SIZE);
		*(BSS_MAIN)
		BSS_DECRYPTED
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}

	/*
	 * The memory occupied from _text to here, __end_of_kernel_reserve, is
	 * automatically reserved in setup_arch(). Anything after here must be
	 * explicitly reserved using memblock_reserve() or it will be discarded
	 * and treated as available memory.
	 */
	__end_of_kernel_reserve = .;

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.bss..brk)		/* areas brk users have reserved */
		__brk_limit = .;
	}
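	/*
	 * Usage sketch for the brk area: early boot code reserves chunks of
	 * it with RESERVE_BRK(name, size) (see asm/setup.h), which emits a
	 * reservation into .bss..brk; extend_brk() then hands out that
	 * memory before the normal allocators are up.
	 */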
	. = ALIGN(PAGE_SIZE);		/* keep VO_INIT_SIZE page aligned */
	_end = .;

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Early scratch/workarea section: Lives outside of the kernel proper
	 * (_text - _end).
	 *
	 * Resides after _end because even though the .brk section is after
	 * __end_of_kernel_reserve, the .brk section is later reserved as a
	 * part of the kernel. Since it is located after __end_of_kernel_reserve
	 * it will be discarded and become part of the available memory. As
	 * such, it can only be used by very early boot code and must not be
	 * needed afterwards.
	 *
	 * Currently used by SME for performing in-place encryption of the
	 * kernel during boot. Resides on a 2MB boundary to simplify the
	 * pagetable setup used for SME in-place encryption.
	 */
	. = ALIGN(HPAGE_SIZE);
	.init.scratch : AT(ADDR(.init.scratch) - LOAD_OFFSET) {
		__init_scratch_begin = .;
		*(.init.scratch)
		. = ALIGN(HPAGE_SIZE);
		__init_scratch_end = .;
	}
#endif

	STABS_DEBUG
	DWARF_DEBUG
	ELF_DETAILS

	DISCARDS
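	/*
	 * Rationale for the checks below (summary): vmlinux is fully linked
	 * at a fixed virtual address, so any surviving GOT/PLT or run-time
	 * relocation content would indicate a toolchain or code generation
	 * problem; asserting the sections are (almost) empty catches that
	 * at link time rather than as a silent runtime failure.
	 */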
	/*
	 * Make sure that the .got.plt is either completely empty or it
	 * contains only the lazy dispatch entries.
	 */
	.got.plt (INFO) : { *(.got.plt) }
	ASSERT(SIZEOF(.got.plt) == 0 ||
#ifdef CONFIG_X86_64
	       SIZEOF(.got.plt) == 0x18,
#else
	       SIZEOF(.got.plt) == 0xc,
#endif
	       "Unexpected GOT/PLT entries detected!")

	/*
	 * Sections that should stay zero sized, which is safer to
	 * explicitly check instead of blindly discarding.
	 */
	.got : {
		*(.got) *(.igot.*)
	}
	ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")

	.plt : {
		*(.plt) *(.plt.*) *(.iplt)
	}
	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")

	.rel.dyn : {
		*(.rel.*) *(.rel_*)
	}
	ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!")

	.rela.dyn : {
		*(.rela.*) *(.rela_*)
	}
	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
}

/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_X86_64
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(fixed_percpu_data);
INIT_PER_CPU(irq_stack_backing_store);

#ifdef CONFIG_SMP
. = ASSERT((fixed_percpu_data == 0),
           "fixed_percpu_data is not at start of per-cpu area");
#endif

#ifdef CONFIG_RETHUNK
. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned");
. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
#endif

#ifdef CONFIG_CPU_SRSO
/*
 * GNU ld cannot do XOR until 2.41.
 * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
 *
 * LLVM lld cannot do XOR until lld-17.
 * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb
 *
 * Instead do: (A | B) - (A & B) in order to compute the XOR of the two
 * function addresses. This works because A & B sets exactly the bits the
 * two addresses share and A | B the bits where either is set, so the
 * subtraction never borrows and leaves exactly the differing bits,
 * i.e. A ^ B:
 */
. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) -
		(ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
		"SRSO function pair won't alias");
#endif

#endif /* CONFIG_X86_64 */