/* xref: /openbmc/linux/arch/parisc/kernel/vmlinux.lds.S
 * (revision 96de0e252cedffad61b3cb5e05662c591898e69a)
 */
/*    Kernel link layout for various "sections"
 *
 *    Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 *    Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 *    Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
 *    Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
 *    Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
 *    Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org>
 *    Copyright (C) 2006 Helge Deller <deller@gmx.de>
 *
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <asm-generic/vmlinux.lds.h>
/* needed for the processor specific cache alignment size */
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* ld script to make hppa Linux kernel */
#ifndef CONFIG_64BIT
OUTPUT_FORMAT("elf32-hppa-linux")
OUTPUT_ARCH(hppa)
#else
OUTPUT_FORMAT("elf64-hppa-linux")
OUTPUT_ARCH(hppa:hppa2.0w)
#endif

ENTRY(_stext)
#ifndef CONFIG_64BIT
/* Alias "jiffies" to the low 32 bits of the 64-bit jiffies_64 counter.
 * PA-RISC is big-endian, so on a 32-bit kernel the low word sits 4 bytes
 * into the object; on 64-bit the symbols simply coincide. */
jiffies = jiffies_64 + 4;
#else
jiffies = jiffies_64;
#endif
SECTIONS
{

  . = KERNEL_BINARY_TEXT_START;

  _text = .;			/* Text and read-only data */
  .text ALIGN(16) : {
	TEXT_TEXT
	SCHED_TEXT
	LOCK_TEXT
	*(.text.do_softirq)
	*(.text.sys_exit)
	*(.text.do_sigaltstack)
	*(.text.do_fork)
	*(.text.*)
	*(.fixup)
	*(.lock.text)		/* out-of-line lock text */
	*(.gnu.warning)
	} = 0			/* fill gaps in .text with zero bytes */

  _etext = .;			/* End of text section */

  RODATA

  BUG_TABLE

  /* writeable */
  . = ALIGN(ASM_PAGE_SIZE);	/* Make sure this is page aligned so
  				   that we can properly leave these
				   as writable */
  data_start = .;

  . = ALIGN(16);		/* Exception table */
  __start___ex_table = .;
  __ex_table : { *(__ex_table) }
  __stop___ex_table = .;

  NOTES

  __start___unwind = .;         /* unwind info */
  .PARISC.unwind : { *(.PARISC.unwind) }
  __stop___unwind = .;

  /* rarely changed data like cpu maps */
  . = ALIGN(16);
  .data.read_mostly : { *(.data.read_mostly) }

  . = ALIGN(L1_CACHE_BYTES);
  .data : {			/* Data */
	DATA_DATA
	CONSTRUCTORS
	}

  . = ALIGN(L1_CACHE_BYTES);
  .data.cacheline_aligned : { *(.data.cacheline_aligned) }

  /* PA-RISC locks require 16-byte alignment */
  . = ALIGN(16);
  .data.lock_aligned : { *(.data.lock_aligned) }

  . = ALIGN(ASM_PAGE_SIZE);
  /* nosave data is really only used for software suspend...it's here
   * just in case we ever implement it */
  __nosave_begin = .;
  .data_nosave : { *(.data.nosave) }
  . = ALIGN(ASM_PAGE_SIZE);
  __nosave_end = .;

  _edata = .;			/* End of data section */

  __bss_start = .;		/* BSS */
  /* page table entries need to be PAGE_SIZE aligned */
  . = ALIGN(ASM_PAGE_SIZE);
  .data.vmpages : {
	*(.data.vm0.pmd)
	*(.data.vm0.pgd)
	*(.data.vm0.pte)
	}
  .bss : { *(.bss) *(COMMON) }
  __bss_stop = .;


  /* assembler code expects init_task to be 16k aligned */
  . = ALIGN(16384); 		/* init_task */
  .data.init_task : { *(.data.init_task) }

  /* The interrupt stack is currently partially coded, but not yet
   * implemented */
  . = ALIGN(16384);
  init_istack : { *(init_istack) }

#ifdef CONFIG_64BIT
  . = ALIGN(16);               /* Linkage tables */
  .opd : { *(.opd) } PROVIDE (__gp = .);
  .plt : { *(.plt) }
  .dlt : { *(.dlt) }
#endif

  /* reserve space for interrupt stack by aligning __init* to 16k */
  . = ALIGN(16384);
  __init_begin = .;
  .init.text : {
	_sinittext = .;
	*(.init.text)
	_einittext = .;
  }
  .init.data : { *(.init.data) }
  . = ALIGN(16);
  __setup_start = .;
  .init.setup : { *(.init.setup) }
  __setup_end = .;
  __initcall_start = .;
  .initcall.init : {
	INITCALLS
  }
  __initcall_end = .;
  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  SECURITY_INIT
  /* alternate instruction replacement.  This is a mechanism x86 uses
   * to detect the CPU type and replace generic instruction sequences
   * with CPU specific ones.  We don't currently do this in PA, but
   * it seems like a good idea... */
  . = ALIGN(4);
  __alt_instructions = .;
  .altinstructions : { *(.altinstructions) }
  __alt_instructions_end = .;
  .altinstr_replacement : { *(.altinstr_replacement) }
  /* .exit.text is discarded at runtime, not link time, to deal with references
     from .altinstructions and .eh_frame */
  .exit.text : { *(.exit.text) }
  .exit.data : { *(.exit.data) }
#ifdef CONFIG_BLK_DEV_INITRD
  . = ALIGN(ASM_PAGE_SIZE);
  __initramfs_start = .;
  .init.ramfs : { *(.init.ramfs) }
  __initramfs_end = .;
#endif

  PERCPU(ASM_PAGE_SIZE)

  . = ALIGN(ASM_PAGE_SIZE);
  __init_end = .;
  /* freed after init ends here */

  _end = . ;

  /* Sections to be discarded */
  /DISCARD/ : {
	*(.exitcall.exit)
#ifdef CONFIG_64BIT
	/* temporary hack until binutils is fixed to not emit these
	 for static binaries */
	*(.interp)
	*(.dynsym)
	*(.dynstr)
	*(.dynamic)
	*(.hash)
	*(.gnu.hash)
#endif
	}

  STABS_DEBUG
  .note 0 : { *(.note) }

}