xref: /openbmc/linux/arch/ia64/kernel/vmlinux.lds.S (revision b6dcefde)
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/pgtable.h>

#include <asm-generic/vmlinux.lds.h>

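/*
 * IVT_TEXT pulls in the interruption vector table (ivt.S) and brackets
 * it with __start_ivt_text/__end_ivt_text so the kernel can locate it.
 */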
#define IVT_TEXT							\
		VMLINUX_SYMBOL(__start_ivt_text) = .;			\
		*(.text.ivt)						\
		VMLINUX_SYMBOL(__end_ivt_text) = .;

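/* The output is 64-bit little-endian ia64; the ELF entry point is
   phys_start, the physical address of _start (computed below). */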
OUTPUT_FORMAT("elf64-ia64-little")
OUTPUT_ARCH(ia64)
ENTRY(phys_start)
jiffies = jiffies_64;
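
/*
 * Program headers: code, per-cpu data, and data each get their own
 * PT_LOAD segment; notes and the ia64 unwind table get dedicated
 * headers.
 */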
PHDRS {
  code   PT_LOAD;
  percpu PT_LOAD;
  data   PT_LOAD;
  note   PT_NOTE;
  unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
}
SECTIONS
{
  /* unwind exit sections must be discarded before the rest of the
     sections get included. */
  /DISCARD/ : {
	*(.IA_64.unwind.exit.text)
	*(.IA_64.unwind_info.exit.text)
	*(.comment)
	*(.note)
  }

  v = PAGE_OFFSET;	/* this symbol is here to make debugging easier... */
  phys_start = _start - LOAD_OFFSET;

  code : { } :code
  . = KERNEL_START;

  _text = .;
  _stext = .;

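  /* Kernel text: the IVT comes first, directly at KERNEL_START. */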
  .text : AT(ADDR(.text) - LOAD_OFFSET)
    {
	IVT_TEXT
	TEXT_TEXT
	SCHED_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	*(.gnu.linkonce.t*)
    }
  .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
	{ *(.text2) }
#ifdef CONFIG_SMP
  .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET)
	{ *(.text.lock) }
#endif
  _etext = .;

  /* Read-only data */

  NOTES :code :note		/* put .notes in text and mark in PT_NOTE  */
  code_continues : {} :code	/* switch back to regular program...  */

  EXCEPTION_TABLE(16)

  /* MCA (machine check abort) table */
  . = ALIGN(16);
  __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET)
	{
	  __start___mca_table = .;
	  *(__mca_table)
	  __stop___mca_table = .;
	}

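  /*
   * Patch lists: each .data.patch.* section records the addresses of
   * instructions that the kernel rewrites at boot (see patch.c).
   */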
  .data.patch.phys_stack_reg : AT(ADDR(.data.patch.phys_stack_reg) - LOAD_OFFSET)
	{
	  __start___phys_stack_reg_patchlist = .;
	  *(.data.patch.phys_stack_reg)
	  __end___phys_stack_reg_patchlist = .;
	}

  /* Global data */
  _data = .;

  /* Unwind info & table: */
  . = ALIGN(8);
  .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET)
	{ *(.IA_64.unwind_info*) }
  .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET)
	{
	  __start_unwind = .;
	  *(.IA_64.unwind*)
	  __end_unwind = .;
	} :code :unwind
  code_continues2 : {} :code

  RODATA

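  /* Official procedure descriptors: ia64 function pointers point at
     descriptors here rather than directly at code. */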
  .opd : AT(ADDR(.opd) - LOAD_OFFSET)
	{ *(.opd) }

  /* Initialization code and data: */

  . = ALIGN(PAGE_SIZE);
  __init_begin = .;

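  /* Everything between __init_begin and __init_end is freed once boot
     completes. */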
  INIT_TEXT_SECTION(PAGE_SIZE)
  INIT_DATA_SECTION(16)

  .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
	{
	  __start___vtop_patchlist = .;
	  *(.data.patch.vtop)
	  __end___vtop_patchlist = .;
	}

  .data.patch.rse : AT(ADDR(.data.patch.rse) - LOAD_OFFSET)
	{
	  __start___rse_patchlist = .;
	  *(.data.patch.rse)
	  __end___rse_patchlist = .;
	}

  .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
	{
	  __start___mckinley_e9_bundles = .;
	  *(.data.patch.mckinley_e9)
	  __end___mckinley_e9_bundles = .;
	}

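  /*
   * Paravirtualization patch lists: bundles, instructions, and branches
   * that may be rewritten when running on a hypervisor.
   */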
#if defined(CONFIG_PARAVIRT)
  . = ALIGN(16);
  .paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET)
	{
	  __start_paravirt_bundles = .;
	  *(.paravirt_bundles)
	  __stop_paravirt_bundles = .;
	}
  . = ALIGN(16);
  .paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET)
	{
	  __start_paravirt_insts = .;
	  *(.paravirt_insts)
	  __stop_paravirt_insts = .;
	}
  . = ALIGN(16);
  .paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET)
	{
	  __start_paravirt_branches = .;
	  *(.paravirt_branches)
	  __stop_paravirt_branches = .;
	}
#endif

#if defined(CONFIG_IA64_GENERIC)
  /* Machine vector: a generic kernel links every machvec and selects
     the matching one at boot. */
  . = ALIGN(16);
  .machvec : AT(ADDR(.machvec) - LOAD_OFFSET)
	{
	  machvec_start = .;
	  *(.machvec)
	  machvec_end = .;
	}
#endif

#ifdef CONFIG_SMP
  . = ALIGN(PERCPU_PAGE_SIZE);
  __cpu0_per_cpu = .;
  . = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
#endif

  . = ALIGN(PAGE_SIZE);
  __init_end = .;

  .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET)
	{
	  PAGE_ALIGNED_DATA(PAGE_SIZE)
	  . = ALIGN(PAGE_SIZE);
	  /* The gate page is mapped into user space (syscall and signal
	     trampolines), so it gets a page of its own. */
	  __start_gate_section = .;
	  *(.data.gate)
	  __stop_gate_section = .;
#ifdef CONFIG_XEN
	  . = ALIGN(PAGE_SIZE);
	  __xen_start_gate_section = .;
	  *(.data.gate.xen)
	  __xen_stop_gate_section = .;
#endif
	}
  . = ALIGN(PAGE_SIZE);		/* make sure the gate page doesn't expose
				 * kernel data
				 */

  /* Per-cpu data: */
  . = ALIGN(PERCPU_PAGE_SIZE);
  PERCPU_VADDR(PERCPU_ADDR, :percpu)
  __phys_per_cpu_start = __per_cpu_load;
  . = __phys_per_cpu_start + PERCPU_PAGE_SIZE;	/* ensure percpu data fits
						 * into percpu page size
						 */

  data : { } :data
  .data : AT(ADDR(.data) - LOAD_OFFSET)
	{
		INIT_TASK_DATA(PAGE_SIZE)
		CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
		READ_MOSTLY_DATA(SMP_CACHE_BYTES)
		DATA_DATA
		*(.data1)
		*(.gnu.linkonce.d*)
		CONSTRUCTORS
	}

  . = ALIGN(16);	/* gp must be 16-byte aligned for exc. table */
  .got : AT(ADDR(.got) - LOAD_OFFSET)
	{ *(.got.plt) *(.got) }
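  /* Bias gp into the middle of the 4MB window reachable with a 22-bit
     signed offset, so .got and the small-data sections below all stay
     addressable off gp. */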
  __gp = ADDR(.got) + 0x200000;
  /* We want the small data sections together, so single-instruction offsets
     can access them all, and initialized data all before uninitialized, so
     we can shorten the on-disk segment size.  */
  .sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
	{ *(.sdata) *(.sdata1) *(.srdata) }
  _edata = .;

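  /* Zero-initialized data; the (0, 0, 0) arguments request no extra
     .sbss/.bss/stop alignment. */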
  BSS_SECTION(0, 0, 0)

  _end = .;

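  /* switch any remaining output back to the code segment */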
  code : { } :code

  STABS_DEBUG
  DWARF_DEBUG

  /* Default discards */
  DISCARDS
}