/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#ifdef CONFIG_XIP_KERNEL
#include "vmlinux-xip.lds.S"
#else

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#define PROC_INFO							\
	. = ALIGN(4);							\
	VMLINUX_SYMBOL(__proc_info_begin) = .;				\
	*(.proc.info.init)						\
	VMLINUX_SYMBOL(__proc_info_end) = .;

#define HYPERVISOR_TEXT							\
	VMLINUX_SYMBOL(__hyp_text_start) = .;				\
	*(.hyp.text)							\
	VMLINUX_SYMBOL(__hyp_text_end) = .;

#define IDMAP_TEXT							\
	ALIGN_FUNCTION();						\
	VMLINUX_SYMBOL(__idmap_text_start) = .;				\
	*(.idmap.text)							\
	VMLINUX_SYMBOL(__idmap_text_end) = .;				\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;			\
	*(.hyp.idmap.text)						\
	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;

#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x)		x
#else
#define ARM_CPU_DISCARD(x)	x
#define ARM_CPU_KEEP(x)
#endif

#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
	defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL)
#define ARM_EXIT_KEEP(x)	x
#define ARM_EXIT_DISCARD(x)
#else
#define ARM_EXIT_KEEP(x)
#define ARM_EXIT_DISCARD(x)	x
#endif

OUTPUT_ARCH(arm)
ENTRY(stext)

#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif

SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name.  There is no documented
	 * order of matching.
	 *
	 * unwind exit sections must be discarded before the rest of the
	 * unwind sections get included.
	 */
	/DISCARD/ : {
		*(.ARM.exidx.exit.text)
		*(.ARM.extab.exit.text)
		ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
		ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
		ARM_EXIT_DISCARD(EXIT_TEXT)
		ARM_EXIT_DISCARD(EXIT_DATA)
		EXIT_CALL
#ifndef CONFIG_MMU
		*(.text.fixup)
		*(__ex_table)
#endif
#ifndef CONFIG_SMP_ON_UP
		*(.alt.smp.init)
#endif
		*(.discard)
		*(.discard.*)
	}

	. = PAGE_OFFSET + TEXT_OFFSET;
	.head.text : {
		_text = .;
		HEAD_TEXT
	}

#ifdef CONFIG_DEBUG_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#endif

	.text : {			/* Real text segment		*/
		_stext = .;		/* Text and read-only data	*/
		IDMAP_TEXT
		__exception_text_start = .;
		*(.exception.text)
		__exception_text_end = .;
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		HYPERVISOR_TEXT
		KPROBES_TEXT
		*(.gnu.warning)
		*(.glue_7)
		*(.glue_7t)
		. = ALIGN(4);
		*(.got)			/* Global offset table		*/
		ARM_CPU_KEEP(PROC_INFO)
	}

#ifdef CONFIG_DEBUG_ALIGN_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#endif
	_etext = .;			/* End of text section */

	RO_DATA(PAGE_SIZE)

	. = ALIGN(4);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
#ifdef CONFIG_MMU
		*(__ex_table)
#endif
		__stop___ex_table = .;
	}

#ifdef CONFIG_ARM_UNWIND
	/*
	 * Stack unwinding tables
	 */
	. = ALIGN(8);
	.ARM.unwind_idx : {
		__start_unwind_idx = .;
		*(.ARM.exidx*)
		__stop_unwind_idx = .;
	}
	.ARM.unwind_tab : {
		__start_unwind_tab = .;
		*(.ARM.extab*)
		__stop_unwind_tab = .;
	}
#endif

	NOTES

#ifdef CONFIG_DEBUG_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#else
	. = ALIGN(PAGE_SIZE);
#endif
	__init_begin = .;

	/*
	 * The vectors and stubs are relocatable code, and the
	 * only thing that matters is their relative offsets
	 */
	__vectors_start = .;
	.vectors 0xffff0000 : AT(__vectors_start) {
		*(.vectors)
	}
	. = __vectors_start + SIZEOF(.vectors);
	__vectors_end = .;

	__stubs_start = .;
	.stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
		*(.stubs)
	}
	. = __stubs_start + SIZEOF(.stubs);
	__stubs_end = .;

	PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));

	INIT_TEXT_SECTION(8)
	.exit.text : {
		ARM_EXIT_KEEP(EXIT_TEXT)
	}
	.init.proc.info : {
		ARM_CPU_DISCARD(PROC_INFO)
	}
	.init.arch.info : {
		__arch_info_begin = .;
		*(.arch.info.init)
		__arch_info_end = .;
	}
	.init.tagtable : {
		__tagtable_begin = .;
		*(.taglist.init)
		__tagtable_end = .;
	}
#ifdef CONFIG_SMP_ON_UP
	.init.smpalt : {
		__smpalt_begin = .;
		*(.alt.smp.init)
		__smpalt_end = .;
	}
#endif
	.init.pv_table : {
		__pv_table_begin = .;
		*(.pv_table)
		__pv_table_end = .;
	}
	.init.data : {
		INIT_DATA
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		SECURITY_INITCALL
		INIT_RAM_FS
	}
	.exit.data : {
		ARM_EXIT_KEEP(EXIT_DATA)
	}

#ifdef CONFIG_SMP
	PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_DEBUG_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#else
	. = ALIGN(THREAD_SIZE);
#endif
	__init_end = .;
	__data_loc = .;

	.data : AT(__data_loc) {
		_data = .;		/* address in memory */
		_sdata = .;

		/*
		 * first, the init task union, aligned
		 * to an 8192 byte boundary.
		 */
		INIT_TASK_DATA(THREAD_SIZE)

		NOSAVE_DATA
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
		READ_MOSTLY_DATA(L1_CACHE_BYTES)

		/*
		 * and the usual data section
		 */
		DATA_DATA
		CONSTRUCTORS

		_edata = .;
	}
	_edata_loc = __data_loc + SIZEOF(.data);

#ifdef CONFIG_HAVE_TCM
	/*
	 * We align everything to a page boundary so we can
	 * free it after init has commenced and TCM contents have
	 * been copied to its destination.
	 */
	.tcm_start : {
		. = ALIGN(PAGE_SIZE);
		__tcm_start = .;
		__itcm_start = .;
	}

	/*
	 * Link these to the ITCM RAM
	 * Put VMA to the TCM address and LMA to the common RAM
	 * and we'll upload the contents from RAM to TCM and free
	 * the used RAM after that.
	 */
	.text_itcm ITCM_OFFSET : AT(__itcm_start)
	{
		__sitcm_text = .;
		*(.tcm.text)
		*(.tcm.rodata)
		. = ALIGN(4);
		__eitcm_text = .;
	}

	/*
	 * Reset the dot pointer, this is needed to create the
	 * relative __dtcm_start below (to be used as extern in code).
	 */
	. = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);

	.dtcm_start : {
		__dtcm_start = .;
	}

	/* TODO: add remainder of ITCM as well, that can be used for data! */
	.data_dtcm DTCM_OFFSET : AT(__dtcm_start)
	{
		. = ALIGN(4);
		__sdtcm_data = .;
		*(.tcm.data)
		. = ALIGN(4);
		__edtcm_data = .;
	}

	/* Reset the dot pointer or the linker gets confused */
	. = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);

	/* End marker for freeing TCM copy in linked object */
	.tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
		. = ALIGN(PAGE_SIZE);
		__tcm_end = .;
	}
#endif

	BSS_SECTION(0, 0, 0)
	_end = .;

	STABS_DEBUG
}

#ifdef CONFIG_DEBUG_RODATA
/*
 * Without CONFIG_DEBUG_ALIGN_RODATA, __start_rodata_section_aligned will
 * be the first section-aligned location after __start_rodata. Otherwise,
 * it will be equal to __start_rodata.
 */
__start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
#endif

/*
 * These must never be empty
 * If you have to comment these two assert statements out, your
 * binutils is too old (for other reasons as well)
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")

/*
 * The HYP init code can't be more than a page long,
 * and should not cross a page boundary.
 * The above comment applies as well.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
	"HYP init code too big or misaligned")

#endif /* CONFIG_XIP_KERNEL */