/*    Kernel link layout for various "sections"
 *
 *    Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 *    Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 *    Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
 *    Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
 *    Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
 *    Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org>
 *    Copyright (C) 2006 Helge Deller <deller@gmx.de>
 *
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <asm-generic/vmlinux.lds.h>
/* needed for the processor specific cache alignment size */
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* ld script to make hppa Linux kernel */
#ifndef CONFIG_64BIT
OUTPUT_FORMAT("elf32-hppa-linux")
OUTPUT_ARCH(hppa)
#else
OUTPUT_FORMAT("elf64-hppa-linux")
OUTPUT_ARCH(hppa:hppa2.0w)
#endif

ENTRY(_stext)
#ifndef CONFIG_64BIT
jiffies = jiffies_64 + 4;
#else
jiffies = jiffies_64;
#endif
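/* Note: parisc is big-endian, so on 32-bit the low word of the 64-bit
 * jiffies_64 counter lives at byte offset 4; the 32-bit 'jiffies'
 * symbol is overlaid on that word.  On 64-bit both symbols coincide. */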
SECTIONS
{

  . = KERNEL_BINARY_TEXT_START;

  _text = .;			/* Text and read-only data */
  .text ALIGN(16) : {
	TEXT_TEXT
	SCHED_TEXT
	LOCK_TEXT
	*(.text.do_softirq)
	*(.text.sys_exit)
	*(.text.do_sigaltstack)
	*(.text.do_fork)
	*(.text.*)
	*(.fixup)
	*(.lock.text)		/* out-of-line lock text */
	*(.gnu.warning)
	} = 0
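  /* the '= 0' is the output section fill value: any padding the
   * linker inserts inside .text is zero-filled */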

  _etext = .;			/* End of text section */

  RODATA

  BUG_TABLE

  /* writeable */
  . = ALIGN(ASM_PAGE_SIZE);	/* Make sure this is page aligned so
  				   that we can properly leave these
				   as writable */
  data_start = .;

  . = ALIGN(16);		/* Exception table */
  __start___ex_table = .;
  __ex_table : { *(__ex_table) }
  __stop___ex_table = .;
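  /* each __ex_table entry pairs the address of an instruction that may
   * legitimately fault (e.g. a user-space access) with a fixup address;
   * the fault handler searches the range between __start___ex_table and
   * __stop___ex_table to recover from such faults */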

  __start___unwind = .;         /* unwind info */
  .PARISC.unwind : { *(.PARISC.unwind) }
  __stop___unwind = .;
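  /* .PARISC.unwind collects the unwind descriptors emitted by the
   * assembler; the start/stop symbols bound the table consulted by the
   * kernel stack unwinder when producing backtraces */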

  /* rarely changed data like cpu maps */
  . = ALIGN(16);
  .data.read_mostly : { *(.data.read_mostly) }

  . = ALIGN(L1_CACHE_BYTES);
  .data : {			/* Data */
	DATA_DATA
	CONSTRUCTORS
	}

  . = ALIGN(L1_CACHE_BYTES);
  .data.cacheline_aligned : { *(.data.cacheline_aligned) }

  /* PA-RISC locks require 16-byte alignment */
  . = ALIGN(16);
  .data.lock_aligned : { *(.data.lock_aligned) }

  . = ALIGN(ASM_PAGE_SIZE);
  /* nosave data is really only used for software suspend...it's here
   * just in case we ever implement it */
  __nosave_begin = .;
  .data_nosave : { *(.data.nosave) }
  . = ALIGN(ASM_PAGE_SIZE);
  __nosave_end = .;
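  /* if software suspend is ever implemented, it would skip the
   * page-aligned region between __nosave_begin and __nosave_end when
   * saving and restoring the memory image */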

  _edata = .;			/* End of data section */

  __bss_start = .;		/* BSS */
  /* page table entries need to be PAGE_SIZE aligned */
  . = ALIGN(ASM_PAGE_SIZE);
  .data.vmpages : {
	*(.data.vm0.pmd)
	*(.data.vm0.pgd)
	*(.data.vm0.pte)
	}
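  /* the .data.vm0.* input sections hold the statically allocated
   * initial page-table levels (pgd, pmd and pte pages) used to
   * bootstrap the kernel mapping */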
  .bss : { *(.bss) *(COMMON) }
  __bss_stop = .;


  /* assembler code expects init_task to be 16k aligned */
  . = ALIGN(16384); 		/* init_task */
  .data.init_task : { *(.data.init_task) }
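  /* .data.init_task holds init_thread_union, the statically allocated
   * task_struct and kernel stack of the boot task */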

  /* The interrupt stack is currently partially coded, but not yet
   * implemented */
  . = ALIGN(16384);
  init_istack : { *(init_istack) }

#ifdef CONFIG_64BIT
  . = ALIGN(16);               /* Linkage tables */
  .opd : { *(.opd) } PROVIDE (__gp = .);
  .plt : { *(.plt) }
  .dlt : { *(.dlt) }
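  /* on 64-bit hppa, .opd holds the function descriptors and .plt/.dlt
   * the procedure and data linkage tables; __gp is the global data
   * pointer used for linkage-table-relative addressing */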
#endif

  /* reserve space for interrupt stack by aligning __init* to 16k */
  . = ALIGN(16384);
  __init_begin = .;
  .init.text : {
	_sinittext = .;
	*(.init.text)
	_einittext = .;
  }
  .init.data : { *(.init.data) }
  . = ALIGN(16);
  __setup_start = .;
  .init.setup : { *(.init.setup) }
  __setup_end = .;
  __initcall_start = .;
  .initcall.init : {
	INITCALLS
  }
  __initcall_end = .;
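  /* the initcall function pointers gathered between __initcall_start
   * and __initcall_end are run in order by do_initcalls() during boot
   * and then freed along with the rest of the init sections */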
  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  SECURITY_INIT
  /* alternate instruction replacement.  This is a mechanism x86 uses
   * to detect the CPU type and replace generic instruction sequences
   * with CPU specific ones.  We don't currently do this in PA, but
   * it seems like a good idea... */
  . = ALIGN(4);
  __alt_instructions = .;
  .altinstructions : { *(.altinstructions) }
  __alt_instructions_end = .;
  .altinstr_replacement : { *(.altinstr_replacement) }
  /* .exit.text is discarded at runtime, not link time, to deal with
   * references from .altinstructions and .eh_frame */
  .exit.text : { *(.exit.text) }
  .exit.data : { *(.exit.data) }
#ifdef CONFIG_BLK_DEV_INITRD
  . = ALIGN(ASM_PAGE_SIZE);
  __initramfs_start = .;
  .init.ramfs : { *(.init.ramfs) }
  __initramfs_end = .;
#endif

  PERCPU(ASM_PAGE_SIZE)
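  /* PERCPU() is provided by asm-generic/vmlinux.lds.h and lays out the
   * per-cpu data section, aligned here to a page boundary */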

  . = ALIGN(ASM_PAGE_SIZE);
  __init_end = .;
  /* freed after init ends here */

  _end = . ;

  /* Sections to be discarded */
  /DISCARD/ : {
	*(.exitcall.exit)
#ifdef CONFIG_64BIT
	/* temporary hack until binutils is fixed to not emit these
	 * for static binaries */
	*(.interp)
	*(.dynsym)
	*(.dynstr)
	*(.dynamic)
	*(.hash)
	*(.gnu.hash)
#endif
	}

  STABS_DEBUG
  .note 0 : { *(.note) }

}