/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Preprocessor prologue for the powerpc kernel linker script.  These
 * macros are consumed by <asm-generic/vmlinux.lds.h> below, so they
 * must be defined before that include.
 */
#ifdef CONFIG_PPC64
/*
 * On 64-bit, 32-bit-only legacy symbols (etext/edata/end) must not be
 * emitted; PROVIDE32() renames them to a harmless __unused__ alias.
 * On 32-bit it provides the real symbol.
 */
#define PROVIDE32(x)	PROVIDE(__unused__##x)
#else
#define PROVIDE32(x)	PROVIDE(x)
#endif

/*
 * Place .bss.prominit at the very start of BSS -- presumably so
 * prom_init's early BSS sits at a known location that can be freed or
 * handled specially; see BSS_SECTION() in asm-generic/vmlinux.lds.h.
 * TODO(review): confirm the prominit placement rationale.
 */
#define BSS_FIRST_SECTIONS *(.bss.prominit)
/* Ask vmlinux.lds.h to route note sections into the PT_NOTE segment. */
#define EMITS_PT_NOTE
/* Alignment for the exception table inside RO_DATA (0 = no extra align). */
#define RO_EXCEPTION_TABLE_ALIGN	0

#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>

/* Alignment of __init_begin and _etext, driven by Kconfig shift values. */
#define STRICT_ALIGN_SIZE	(1 << CONFIG_DATA_SHIFT)
#define ETEXT_ALIGN_SIZE	(1 << CONFIG_ETEXT_SHIFT)
19
/* Kernel entry point symbol. */
ENTRY(_stext)

/*
 * Program headers: one loadable RWX segment for the whole image plus a
 * PT_NOTE segment (populated via EMITS_PT_NOTE above).
 */
PHDRS {
	text PT_LOAD FLAGS(7); /* RWX */
	note PT_NOTE FLAGS(0);
}

#ifdef CONFIG_PPC64
OUTPUT_ARCH(powerpc:common64)
/* 64-bit: jiffies is simply an alias for jiffies_64. */
jiffies = jiffies_64;
#else
OUTPUT_ARCH(powerpc:common)
/*
 * 32-bit big-endian: alias jiffies to the low 32-bit word of
 * jiffies_64, which lives 4 bytes into the 64-bit value.
 */
jiffies = jiffies_64 + 4;
#endif
SECTIONS
{
	/* Link the kernel at its virtual base address. */
	. = KERNELBASE;

/*
 * Text, read only data and other permanent read-only sections
 */

	_text = .;
	_stext = .;

	/*
	 * Head text.
	 * This needs to be in its own output section to avoid ld placing
	 * branch trampoline stubs randomly throughout the fixed sections,
	 * which it will do (even if the branch comes from another section)
	 * in order to optimize stub generation.
	 */
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {
#ifdef CONFIG_PPC64
		KEEP(*(.head.text.first_256B));
#ifdef CONFIG_PPC_BOOK3E
		/* Book3E: no fixed-location exception vectors here. */
#else
		/* Book3S: fixed real/virt exception vectors + trampolines. */
		KEEP(*(.head.text.real_vectors));
		*(.head.text.real_trampolines);
		KEEP(*(.head.text.virt_vectors));
		*(.head.text.virt_trampolines);
# if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_POWERNV)
		KEEP(*(.head.data.fwnmi_page));
# endif
#endif
#else /* !CONFIG_PPC64 */
		HEAD_TEXT
#endif
	} :text

	__head_end = .;

#ifdef CONFIG_PPC64
	/*
	 * ALIGN(0) overrides the default output section alignment because
	 * this needs to start right after .head.text in order for fixed
	 * section placement to work.
	 */
	.text ALIGN(0) : AT(ADDR(.text) - LOAD_OFFSET) {
#ifdef CONFIG_LD_HEAD_STUB_CATCH
		KEEP(*(.linker_stub_catch));
		. = . ;
#endif

#else
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		ALIGN_FUNCTION();
#endif
		/* careful! __ftr_alt_* sections need to be close to .text */
		*(.text.hot TEXT_MAIN .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text);
#ifdef CONFIG_PPC64
		*(.tramp.ftrace.text);
#endif
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		/*
		 * -Os builds call FP save/restore functions. The powerpc64
		 * linker generates those on demand in the .sfpr section.
		 * .sfpr gets placed at the beginning of a group of input
		 * sections, which can break start-of-text offset if it is
		 * included with the main text sections, so put it by itself.
		 */
		*(.sfpr);
		MEM_KEEP(init.text)
		MEM_KEEP(exit.text)

#ifdef CONFIG_PPC32
		/* 32-bit GOT sections; __got2_start/end bracket .got2. */
		*(.got1)
		__got2_start = .;
		*(.got2)
		__got2_end = .;
#endif /* CONFIG_PPC32 */

	} :text

	. = ALIGN(ETEXT_ALIGN_SIZE);
	_etext = .;
	PROVIDE32 (etext = .);

	/* Read-only data */
	RO_DATA(PAGE_SIZE)

#ifdef CONFIG_PPC64
	/*
	 * Speculation-mitigation patch-site tables.  Each 8-byte-aligned
	 * table is bracketed by __start/__stop symbols so the fixup code
	 * can iterate and patch the recorded sites at boot/runtime.
	 */
	. = ALIGN(8);
	__stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
		__start___stf_entry_barrier_fixup = .;
		*(__stf_entry_barrier_fixup)
		__stop___stf_entry_barrier_fixup = .;
	}

	. = ALIGN(8);
	__stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
		__start___stf_exit_barrier_fixup = .;
		*(__stf_exit_barrier_fixup)
		__stop___stf_exit_barrier_fixup = .;
	}

	. = ALIGN(8);
	__rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
		__start___rfi_flush_fixup = .;
		*(__rfi_flush_fixup)
		__stop___rfi_flush_fixup = .;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BARRIER_NOSPEC
	. = ALIGN(8);
	__spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
		__start___barrier_nospec_fixup = .;
		*(__barrier_nospec_fixup)
		__stop___barrier_nospec_fixup = .;
	}
#endif /* CONFIG_PPC_BARRIER_NOSPEC */

#ifdef CONFIG_PPC_FSL_BOOK3E
	. = ALIGN(8);
	__spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
		__start__btb_flush_fixup = .;
		*(__btb_flush_fixup)
		__stop__btb_flush_fixup = .;
	}
#endif

/*
 * Init sections discarded at runtime
 */
	. = ALIGN(STRICT_ALIGN_SIZE);
	__init_begin = .;
	. = ALIGN(PAGE_SIZE);
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
		_sinittext = .;
		INIT_TEXT
		_einittext = .;
#ifdef CONFIG_PPC64
		*(.tramp.ftrace.init);
#endif
	} :text

	/* .exit.text is discarded at runtime, not link time,
	 * to deal with references from __bug_table
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
		INIT_DATA
	}

	.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
		INIT_SETUP(16)
	}

	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
		INIT_CALLS
	}

	.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
		CON_INITCALL
	}

	/*
	 * CPU / MMU / firmware feature fixup tables.  KEEP() prevents the
	 * linker from discarding them; __start/__stop symbols delimit each
	 * table for the boot-time feature patching code.
	 */
	. = ALIGN(8);
	__ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
		__start___ftr_fixup = .;
		KEEP(*(__ftr_fixup))
		__stop___ftr_fixup = .;
	}
	. = ALIGN(8);
	__mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
		__start___mmu_ftr_fixup = .;
		KEEP(*(__mmu_ftr_fixup))
		__stop___mmu_ftr_fixup = .;
	}
	. = ALIGN(8);
	__lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
		__start___lwsync_fixup = .;
		KEEP(*(__lwsync_fixup))
		__stop___lwsync_fixup = .;
	}
#ifdef CONFIG_PPC64
	. = ALIGN(8);
	__fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
		__start___fw_ftr_fixup = .;
		KEEP(*(__fw_ftr_fixup))
		__stop___fw_ftr_fixup = .;
	}
#endif
	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
		INIT_RAM_FS
	}

	PERCPU_SECTION(L1_CACHE_BYTES)

	/* Table of machine descriptors, walked during platform probe. */
	. = ALIGN(8);
	.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
		__machine_desc_start = . ;
		KEEP(*(.machine.desc))
		__machine_desc_end = . ;
	}
#ifdef CONFIG_RELOCATABLE
	/*
	 * Dynamic-linking sections kept for the relocatable kernel; the
	 * relocations in .rela.dyn are presumably applied by early boot
	 * code -- TODO(review): confirm against the reloc_*.S consumers.
	 */
	. = ALIGN(8);
	.dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
	{
#ifdef CONFIG_PPC32
		__dynamic_symtab = .;
#endif
		*(.dynsym)
	}
	.dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
	.dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)
	{
		__dynamic_start = .;
		*(.dynamic)
	}
	.hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
	.interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
	.rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
	{
		__rela_dyn_start = .;
		*(.rela*)
	}
#endif
	/* .exit.data is discarded at runtime, not link time,
	 * to deal with references from .exit.text
	 */
	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

	/* freed after init ends here */
	. = ALIGN(PAGE_SIZE);
	__init_end = .;

/*
 * And now the various read/write data
 */

	. = ALIGN(PAGE_SIZE);
	_sdata = .;

#ifdef CONFIG_PPC32
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA
#ifdef CONFIG_UBSAN
		*(.data..Lubsan_data*)
		*(.data..Lubsan_type*)
#endif
		*(.data.rel*)
		*(SDATA_MAIN)
		*(.sdata2)
		*(.got.plt) *(.got)
		*(.plt)
		*(.branch_lt)
	}
#else
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA
		*(.data.rel*)
		*(.toc1)
		*(.branch_lt)
	}

	/*
	 * .opd holds 64-bit function descriptors -- presumably the ELFv1
	 * ABI {entry, TOC} pairs; bracketed so boot code can find them.
	 */
	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
		__start_opd = .;
		KEEP(*(.opd))
		__end_opd = .;
	}

	/*
	 * The TOC/GOT.  prom_init's TOC entries are grouped separately
	 * (non-relocatable builds only) and bracketed with their own
	 * symbols -- NOTE(review): prom_init runs before relocation, which
	 * looks like the reason for the special grouping; confirm.
	 */
	. = ALIGN(256);
	.got : AT(ADDR(.got) - LOAD_OFFSET) {
		__toc_start = .;
#ifndef CONFIG_RELOCATABLE
		__prom_init_toc_start = .;
		arch/powerpc/kernel/prom_init.o*(.toc .got)
		__prom_init_toc_end = .;
#endif
		*(.got)
		*(.toc)
	}
#endif

	/* The initial task and kernel stack */
	INIT_TASK_DATA_SECTION(THREAD_SIZE)

	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
		PAGE_ALIGNED_DATA(PAGE_SIZE)
	}

	.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
	}

	.data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) {
		READ_MOSTLY_DATA(L1_CACHE_BYTES)
	}

	. = ALIGN(PAGE_SIZE);
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}

	BUG_TABLE

	. = ALIGN(PAGE_SIZE);
	_edata  =  .;
	PROVIDE32 (edata = .);

/*
 * And finally the bss
 */

	/* BSS_FIRST_SECTIONS (defined above) puts .bss.prominit first. */
	BSS_SECTION(0, 0, 0)

	. = ALIGN(PAGE_SIZE);
	_end = . ;
	PROVIDE32 (end = .);

	STABS_DEBUG

	DWARF_DEBUG

	/* Sections deliberately dropped from the final image. */
	DISCARDS
	/DISCARD/ : {
		*(*.EMB.apuinfo)
		*(.glink .iplt .plt .rela* .comment)
		*(.gnu.version*)
		*(.gnu.attributes)
		*(.eh_frame)
	}
}
374