/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated at
 * run time. Absolute symbols are not relocated. If the symbol value should
 * change when the kernel is relocated, make the symbol section-relative and
 * put it inside the section definition.
 */

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32-bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif

PHDRS {
        text PT_LOAD FLAGS(5);          /* R_E */
        data PT_LOAD FLAGS(7);          /* RWE */
#ifdef CONFIG_X86_64
        user PT_LOAD FLAGS(7);          /* RWE */
#ifdef CONFIG_SMP
        percpu PT_LOAD FLAGS(7);        /* RWE */
#endif
        init PT_LOAD FLAGS(7);          /* RWE */
#endif
        note PT_NOTE FLAGS(0);          /* ___ */
}

SECTIONS
{
#ifdef CONFIG_X86_32
        . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
        phys_startup_32 = startup_32 - LOAD_OFFSET;
#else
        . = __START_KERNEL;
        phys_startup_64 = startup_64 - LOAD_OFFSET;
#endif

        /* Text and read-only data */

        /* bootstrapping code */
        .text.head : AT(ADDR(.text.head) - LOAD_OFFSET) {
                _text = .;
                *(.text.head)
        } :text = 0x9090

        /* The rest of the text */
        .text : AT(ADDR(.text) - LOAD_OFFSET) {
#ifdef CONFIG_X86_32
                /* not really needed, already page aligned */
                . = ALIGN(PAGE_SIZE);
                *(.text.page_aligned)
#endif
                . = ALIGN(8);
                _stext = .;
                TEXT_TEXT
                SCHED_TEXT
                LOCK_TEXT
                KPROBES_TEXT
                IRQENTRY_TEXT
                *(.fixup)
                *(.gnu.warning)
                /* End of text section */
                _etext = .;
        } :text = 0x9090

        NOTES :text :note
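        /*
         * A note on the "= 0x9090" fill value used on the text sections
         * above: the linker fills any gaps inside those output sections
         * with the given pattern, and 0x90 is the x86 NOP opcode, so the
         * padding decodes as harmless instructions if it is ever reached.
         */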
        /* Exception table */
        . = ALIGN(16);
        __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
                __start___ex_table = .;
                *(__ex_table)
                __stop___ex_table = .;
        } :text = 0x9090

        RO_DATA(PAGE_SIZE)

        /* Data */
        .data : AT(ADDR(.data) - LOAD_OFFSET) {
                /* Start of data section */
                _sdata = .;

                /* init_task */
                INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
                /* 32 bit has nosave before _edata */
                NOSAVE_DATA
#endif

                PAGE_ALIGNED_DATA(PAGE_SIZE)
                *(.data.idt)

                CACHELINE_ALIGNED_DATA(CONFIG_X86_L1_CACHE_BYTES)

                DATA_DATA
                CONSTRUCTORS

                /* rarely changed data like cpu maps */
                READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)

                /* End of data section */
                _edata = .;
        } :data

#ifdef CONFIG_X86_64

/* The vsyscall page sits at a fixed address: -10 MiB = 0xffffffffff600000 */
#define VSYSCALL_ADDR (-10*1024*1024)

#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data) + SIZEOF(.data) + \
                        PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define VSYSCALL_VIRT_ADDR ((ADDR(.data) + SIZEOF(.data) + \
                        PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)

        . = VSYSCALL_ADDR;
        .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) {
                *(.vsyscall_0)
        } :user

        __vsyscall_0 = VSYSCALL_VIRT_ADDR;

        . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
        .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
                *(.vsyscall_fn)
        }

        . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
        .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
                *(.vsyscall_gtod_data)
        }

        vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);

        .vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
                *(.vsyscall_clock)
        }
        vsyscall_clock = VVIRT(.vsyscall_clock);


        .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
                *(.vsyscall_1)
        }
        .vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
                *(.vsyscall_2)
        }

        .vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
                *(.vgetcpu_mode)
        }
        vgetcpu_mode = VVIRT(.vgetcpu_mode);

        . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
        .jiffies : AT(VLOAD(.jiffies)) {
                *(.jiffies)
        }
        jiffies = VVIRT(.jiffies);

        .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
                *(.vsyscall_3)
        }

        . = VSYSCALL_VIRT_ADDR + PAGE_SIZE;

#undef VSYSCALL_ADDR
#undef VSYSCALL_PHYS_ADDR
#undef VSYSCALL_VIRT_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT

#endif /* CONFIG_X86_64 */

        /* Init code and data - will be freed after init */
        . = ALIGN(PAGE_SIZE);
        .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
                __init_begin = .; /* paired with __init_end */
        }

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
        /*
         * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
         * output PHDR, so the next output section - .init.text - should
         * start another segment - init.
         */
        PERCPU_VADDR(0, :percpu)
#endif

        .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
                _sinittext = .;
                INIT_TEXT
                _einittext = .;
        }
#ifdef CONFIG_X86_64
        :init
#endif

        .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
                INIT_DATA
        }
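        /*
         * A sketch of how the __setup_start/__setup_end pair defined just
         * below is typically consumed on the C side (the real walker lives
         * in init/main.c; the details here are illustrative only):
         *
         *      extern struct obs_kernel_param __setup_start[], __setup_end[];
         *      struct obs_kernel_param *p;
         *
         *      for (p = __setup_start; p < __setup_end; p++)
         *              if (!strncmp(line, p->str, strlen(p->str)))
         *                      p->setup_func(line + strlen(p->str));
         */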
        . = ALIGN(16);
        .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
                __setup_start = .;
                *(.init.setup)
                __setup_end = .;
        }

        .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
                __initcall_start = .;
                INITCALLS
                __initcall_end = .;
        }

        .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
                __con_initcall_start = .;
                *(.con_initcall.init)
                __con_initcall_end = .;
        }

        .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
                __x86_cpu_dev_start = .;
                *(.x86_cpu_dev.init)
                __x86_cpu_dev_end = .;
        }

        SECURITY_INIT

        . = ALIGN(8);
        .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
                __parainstructions = .;
                *(.parainstructions)
                __parainstructions_end = .;
        }

        . = ALIGN(8);
        .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
                __alt_instructions = .;
                *(.altinstructions)
                __alt_instructions_end = .;
        }

        .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
                *(.altinstr_replacement)
        }

        /*
         * .exit.text is discarded at runtime, not link time, to deal with
         * references from .altinstructions and .eh_frame
         */
        .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
                EXIT_TEXT
        }

        .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
                EXIT_DATA
        }

#ifdef CONFIG_BLK_DEV_INITRD
        . = ALIGN(PAGE_SIZE);
        .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
                __initramfs_start = .;
                *(.init.ramfs)
                __initramfs_end = .;
        }
#endif

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
        PERCPU(PAGE_SIZE)
#endif

        . = ALIGN(PAGE_SIZE);

        /* freed after init ends here */
        .init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
                __init_end = .;
        }

        /*
         * smp_locks might be freed after init
         * start/end must be page aligned
         */
        . = ALIGN(PAGE_SIZE);
        .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
                __smp_locks = .;
                *(.smp_locks)
                __smp_locks_end = .;
                . = ALIGN(PAGE_SIZE);
        }

#ifdef CONFIG_X86_64
        .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
                NOSAVE_DATA
        }
#endif

        /* BSS */
        . = ALIGN(PAGE_SIZE);
        .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
                __bss_start = .;
                *(.bss.page_aligned)
                *(.bss)
                . = ALIGN(4);
                __bss_stop = .;
        }

        . = ALIGN(PAGE_SIZE);
        .brk : AT(ADDR(.brk) - LOAD_OFFSET) {
                __brk_base = .;
                . += 64 * 1024;         /* 64k alignment slop space */
                *(.brk_reservation)     /* areas brk users have reserved */
                __brk_limit = .;
        }

        .end : AT(ADDR(.end) - LOAD_OFFSET) {
                _end = .;
        }

        STABS_DEBUG
        DWARF_DEBUG

        /* Sections to be discarded */
        DISCARDS
        /DISCARD/ : { *(.eh_frame) }
}


#ifdef CONFIG_X86_32
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
           "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);

/*
 * Build-time check on the image size:
 */
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
           "kernel image bigger than KERNEL_IMAGE_SIZE");
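/*
 * For scale: in this era KERNEL_IMAGE_SIZE is 512 * 1024 * 1024
 * (0x20000000) on 64-bit, i.e. the whole image has to fit in the 512 MiB
 * kernel text mapping that starts at __START_KERNEL_map (check your
 * tree's asm/page_64_types.h before relying on the exact value).
 */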
#ifdef CONFIG_SMP
. = ASSERT((per_cpu__irq_stack_union == 0),
           "irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
           "kexec control code size is too big");
#endif
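
/*
 * A quick way to sanity-check the segment layout this script produces is
 * to inspect the program headers of the built image:
 *
 *      readelf -l vmlinux
 *
 * That should show one PT_LOAD entry per PHDRS declaration above (text,
 * data, and on 64-bit also user and init, plus percpu on SMP) together
 * with the PT_NOTE segment.
 */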