/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated at
 * run time. Absolute symbols are not relocated. If a symbol's value should
 * change when the kernel is relocated, make the symbol section-relative and
 * put it inside the section definition.
 */

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/orc_lookup.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386	/* in case the preprocessor is a 32-bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif

#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so we retain large page mappings across
 * the boundaries spanning kernel text, rodata and data sections.
 *
 * However, the kernel identity mappings will have different RWX permissions
 * for the pages mapping the text and for the padding pages (which are freed)
 * after the text section, so the identity mappings will be broken down into
 * smaller pages anyway. On 64-bit, kernel text and kernel identity mappings
 * are separate, so we can enable protection checks as well as retain 2MB
 * large page mappings for kernel text.
 */
#define X64_ALIGN_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X64_ALIGN_RODATA_END			\
		. = ALIGN(HPAGE_SIZE);		\
		__end_rodata_hpage_align = .;

#else

#define X64_ALIGN_RODATA_BEGIN
#define X64_ALIGN_RODATA_END

#endif

PHDRS {
	text PT_LOAD FLAGS(5);		/* R_E */
	data PT_LOAD FLAGS(6);		/* RW_ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);	/* RW_ */
#endif
	init PT_LOAD FLAGS(7);		/* RWE */
#endif
	note PT_NOTE FLAGS(0);		/* ___ */
}

SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
	. = __START_KERNEL;
	phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif

	/* Text and read-only data */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		_stext = .;
		/* bootstrapping code */
		HEAD_TEXT
		. = ALIGN(8);
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		ENTRY_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)
		/* End of text section */
		_etext = .;
	} :text = 0x9090
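
	/*
	 * A worked example of the addressing above, assuming the x86-64
	 * defaults (__START_KERNEL_map = 0xffffffff80000000 and
	 * CONFIG_PHYSICAL_START = 0x1000000): .text is linked at the
	 * virtual address 0xffffffff81000000, while AT(ADDR(.text) -
	 * LOAD_OFFSET) gives it the load address 0x1000000, the physical
	 * location of the image. The "= 0x9090" fill pads gaps in the
	 * text segment with NOP (0x90) bytes.
	 */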

	NOTES :text :note

	EXCEPTION_TABLE(16) :text = 0x9090

	/* .text should occupy a whole number of pages */
	. = ALIGN(PAGE_SIZE);
	X64_ALIGN_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X64_ALIGN_RODATA_END

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

	BUG_TABLE

	ORC_UNWIND_TABLE

	. = ALIGN(PAGE_SIZE);
	__vvar_page = .;

	.vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
		/* work around gold bug 13023 */
		__vvar_beginning_hack = .;

		/* Place all vvars at the offsets given in asm/vvar.h. */
#define EMIT_VVAR(name, offset)				\
		. = __vvar_beginning_hack + offset;	\
		*(.vvar_ ## name)
#define __VVAR_KERNEL_LDS
#include <asm/vvar.h>
#undef __VVAR_KERNEL_LDS
#undef EMIT_VVAR

		/*
		 * Pad the rest of the page with zeros. Otherwise the loader
		 * can leave garbage here.
		 */
		. = __vvar_beginning_hack + PAGE_SIZE;
	} :data

	. = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
	ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
	       "per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif

	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	/*
	 * Section for code used exclusively before alternatives are run. All
	 * references to such code must be patched out by alternatives, normally
	 * by using the X86_FEATURE_ALWAYS CPU feature bit.
	 *
	 * See static_cpu_has() for an example.
	 */
	.altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
		*(.altinstr_aux)
	}

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

#ifdef CONFIG_X86_INTEL_MID
	.x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
								LOAD_OFFSET) {
		__x86_intel_mid_dev_start = .;
		*(.x86_intel_mid_dev.init)
		__x86_intel_mid_dev_end = .;
	}
#endif

	/*
	 * Start address and size of operations which at runtime can be
	 * patched with virtualization-friendly instructions or bare-metal
	 * native ones. Think page table operations.
	 * Details in paravirt_types.h
	 */
	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}

	/*
	 * struct alt_instr entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
	. = ALIGN(8);
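
	/*
	 * An illustrative sketch of where these entries come from (the
	 * alternative() macro lives in asm/alternative.h, not in this
	 * script): C code of the form
	 *
	 *	alternative(oldinstr, newinstr, X86_FEATURE_XMM);
	 *
	 * emits the newinstr bytes into .altinstr_replacement and a
	 * struct alt_instr record into .altinstructions; at boot,
	 * apply_alternatives() walks __alt_instructions..__alt_instructions_end
	 * and patches the oldinstr site when the CPU has the feature.
	 */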
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	/*
	 * And here are the replacement instructions. The linker sticks them
	 * in as binary blobs. The .altinstructions section has enough data to
	 * get their address and length so the kernel can be patched safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}

	/*
	 * struct iommu_table_entry entries are injected into this section.
	 * It is an array of IOMMUs which gets sorted at run time according
	 * to its dependency order. After rootfs_initcall is complete
	 * this section can be safely removed.
	 */
	.iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) {
		__iommu_table = .;
		*(.iommu_table)
		__iommu_table_end = .;
	}

	. = ALIGN(8);
	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
		__apicdrivers = .;
		*(.apicdrivers);
		__apicdrivers_end = .;
	}

	. = ALIGN(8);
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions and .eh_frame
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init;
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		*(.bss)
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}

	. = ALIGN(PAGE_SIZE);		/* keep VO_INIT_SIZE page aligned */
	_end = .;

	STABS_DEBUG
	DWARF_DEBUG

	/* Sections to be discarded */
	DISCARDS
	/DISCARD/ : {
		*(.eh_frame)
	}
}


#ifdef CONFIG_X86_32
/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
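
/*
 * A note on how these are used (a sketch of the usual head_64.S usage): on
 * x86-64 SMP the per-CPU variables above are linked at zero-based offsets,
 * so "gdt_page" on its own is only an offset into the per-CPU area.
 * init_per_cpu__gdt_page adds __per_cpu_load, yielding the address of the
 * boot CPU's copy inside the kernel image, which early boot code can use
 * before the per-CPU areas and the %gs base have been set up.
 */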

/*
 * Build-time check on the image size:
 */
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_SMP
. = ASSERT((irq_stack_union == 0),
	   "irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC_CORE
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
	   "kexec control code size is too big");
#endif
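
/*
 * For reference (a sketch of how the assert above is fed, assuming the
 * usual relocate_kernel implementation): kexec_control_code_size is set in
 * relocate_kernel_{32,64}.S to the size of the relocate_kernel() blob that
 * gets copied into the kexec control page, and KEXEC_CONTROL_CODE_MAX_SIZE
 * from asm/kexec.h bounds it so that the copy is guaranteed to fit.
 */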