/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Kernel linker script for powerpc (32- and 64-bit variants selected
 * via CONFIG_PPC64). Preprocessed before being handed to ld, so CPP
 * conditionals choose between the two layouts.
 */

/*
 * PROVIDE32(x): provide the symbol only on 32-bit builds. On 64-bit the
 * token-pasted __unused__ prefix renames it so the real name is never
 * defined.
 */
#ifdef CONFIG_PPC64
#define PROVIDE32(x)	PROVIDE(__unused__##x)
#else
#define PROVIDE32(x)	PROVIDE(x)
#endif

/* Knobs consumed by asm-generic/vmlinux.lds.h (included below):
 * keep .bss.prominit first in .bss, emit a PT_NOTE program header,
 * and place the exception table in RO_DATA with no extra alignment. */
#define BSS_FIRST_SECTIONS *(.bss.prominit)
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN	0

#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>

/* Alignment boundaries derived from Kconfig: data/init boundary and
 * end-of-text boundary, each a power of two. */
#define STRICT_ALIGN_SIZE	(1 << CONFIG_DATA_SHIFT)
#define ETEXT_ALIGN_SIZE	(1 << CONFIG_ETEXT_SHIFT)

ENTRY(_stext)

PHDRS {
	text PT_LOAD FLAGS(7); /* RWX */
	note PT_NOTE FLAGS(0);
}

#ifdef CONFIG_PPC64
OUTPUT_ARCH(powerpc:common64)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(powerpc:common)
/* 32-bit jiffies aliases part of jiffies_64 at offset 4 — presumably the
 * low-order word on big-endian; confirm against the generic jiffies code. */
jiffies = jiffies_64 + 4;
#endif
SECTIONS
{
	. = KERNELBASE;

/*
 * Text, read only data and other permanent read-only sections
 */

	_text = .;
	_stext = .;

	/*
	 * Head text.
	 * This needs to be in its own output section to avoid ld placing
	 * branch trampoline stubs randomly throughout the fixed sections,
	 * which it will do (even if the branch comes from another section)
	 * in order to optimize stub generation.
	 */
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {
#ifdef CONFIG_PPC64
		KEEP(*(.head.text.first_256B));
		/* Book3E has no fixed exception vectors; everything below is
		 * Book3S-only. */
#ifdef CONFIG_PPC_BOOK3E
#else
		KEEP(*(.head.text.real_vectors));
		*(.head.text.real_trampolines);
		KEEP(*(.head.text.virt_vectors));
		*(.head.text.virt_trampolines);
# if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
		KEEP(*(.head.data.fwnmi_page));
# endif
#endif
#else /* !CONFIG_PPC64 */
		HEAD_TEXT
#endif
	} :text

	__head_end = .;

#ifdef CONFIG_PPC64
	/*
	 * ALIGN(0) overrides the default output section alignment because
	 * this needs to start right after .head.text in order for fixed
	 * section placement to work.
	 */
	.text ALIGN(0) : AT(ADDR(.text) - LOAD_OFFSET) {
#ifdef CONFIG_LD_HEAD_STUB_CATCH
		KEEP(*(.linker_stub_catch));
		. = . ;
#endif

#else
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		ALIGN_FUNCTION();
#endif
		/* careful! __ftr_alt_* sections need to be close to .text */
		*(.text.hot TEXT_MAIN .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text);
#ifdef CONFIG_PPC64
		*(.tramp.ftrace.text);
#endif
		NOINSTR_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		/*
		 * -Os builds call FP save/restore functions. The powerpc64
		 * linker generates those on demand in the .sfpr section.
		 * .sfpr gets placed at the beginning of a group of input
		 * sections, which can break start-of-text offset if it is
		 * included with the main text sections, so put it by itself.
		 */
		*(.sfpr);
		MEM_KEEP(init.text)
		MEM_KEEP(exit.text)

#ifdef CONFIG_PPC32
		*(.got1)
		__got2_start = .;
		*(.got2)
		__got2_end = .;
#endif /* CONFIG_PPC32 */

	} :text

	. = ALIGN(ETEXT_ALIGN_SIZE);
	_etext = .;
	PROVIDE32 (etext = .);

	/* Read-only data */
	RO_DATA(PAGE_SIZE)

	/*
	 * The __start___*/__stop___* pairs below bracket fixup entry tables;
	 * presumably consumed by the feature/security-fixup patching code at
	 * boot — confirm against arch/powerpc/lib/feature-fixups.c.
	 */
#ifdef CONFIG_PPC64
	. = ALIGN(8);
	__stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
		__start___stf_entry_barrier_fixup = .;
		*(__stf_entry_barrier_fixup)
		__stop___stf_entry_barrier_fixup = .;
	}

	. = ALIGN(8);
	__stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
		__start___stf_exit_barrier_fixup = .;
		*(__stf_exit_barrier_fixup)
		__stop___stf_exit_barrier_fixup = .;
	}

	. = ALIGN(8);
	__rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
		__start___rfi_flush_fixup = .;
		*(__rfi_flush_fixup)
		__stop___rfi_flush_fixup = .;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BARRIER_NOSPEC
	. = ALIGN(8);
	__spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
		__start___barrier_nospec_fixup = .;
		*(__barrier_nospec_fixup)
		__stop___barrier_nospec_fixup = .;
	}
#endif /* CONFIG_PPC_BARRIER_NOSPEC */

#ifdef CONFIG_PPC_FSL_BOOK3E
	. = ALIGN(8);
	__spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
		__start__btb_flush_fixup = .;
		*(__btb_flush_fixup)
		__stop__btb_flush_fixup = .;
	}
#endif

/*
 * Init sections discarded at runtime
 */
	. = ALIGN(STRICT_ALIGN_SIZE);
	__init_begin = .;
	. = ALIGN(PAGE_SIZE);
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
		_sinittext = .;
		INIT_TEXT
		_einittext = .;
#ifdef CONFIG_PPC64
		*(.tramp.ftrace.init);
#endif
	} :text

	/* .exit.text is discarded at runtime, not link time,
	 * to deal with references from __bug_table
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
		INIT_DATA
	}

	.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
		INIT_SETUP(16)
	}

	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
		INIT_CALLS
	}

	.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
		CON_INITCALL
	}

	/* CPU/MMU/firmware feature and lwsync fixup tables, each bracketed
	 * by start/stop symbols like the barrier fixups above. */
	. = ALIGN(8);
	__ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
		__start___ftr_fixup = .;
		KEEP(*(__ftr_fixup))
		__stop___ftr_fixup = .;
	}
	. = ALIGN(8);
	__mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
		__start___mmu_ftr_fixup = .;
		KEEP(*(__mmu_ftr_fixup))
		__stop___mmu_ftr_fixup = .;
	}
	. = ALIGN(8);
	__lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
		__start___lwsync_fixup = .;
		KEEP(*(__lwsync_fixup))
		__stop___lwsync_fixup = .;
	}
#ifdef CONFIG_PPC64
	. = ALIGN(8);
	__fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
		__start___fw_ftr_fixup = .;
		KEEP(*(__fw_ftr_fixup))
		__stop___fw_ftr_fixup = .;
	}
#endif
	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
		INIT_RAM_FS
	}

	PERCPU_SECTION(L1_CACHE_BYTES)

	. = ALIGN(8);
	.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
		__machine_desc_start = . ;
		KEEP(*(.machine.desc))
		__machine_desc_end = . ;
	}
	/* Relocatable kernels keep the dynamic-linking sections so the
	 * relocations in .rela.dyn can be applied at boot. */
#ifdef CONFIG_RELOCATABLE
	. = ALIGN(8);
	.dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
	{
#ifdef CONFIG_PPC32
		__dynamic_symtab = .;
#endif
		*(.dynsym)
	}
	.dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
	.dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)
	{
		__dynamic_start = .;
		*(.dynamic)
	}
	.hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
	.gnu.hash : AT(ADDR(.gnu.hash) - LOAD_OFFSET) { *(.gnu.hash) }
	.interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
	.rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
	{
		__rela_dyn_start = .;
		*(.rela*)
	}
#endif
	/* .exit.data is discarded at runtime, not link time,
	 * to deal with references from .exit.text
	 */
	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

	/* freed after init ends here */
	. = ALIGN(PAGE_SIZE);
	__init_end = .;

/*
 * And now the various read/write data
 */

	. = ALIGN(PAGE_SIZE);
	_sdata = .;

#ifdef CONFIG_PPC32
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA
#ifdef CONFIG_UBSAN
		*(.data..Lubsan_data*)
		*(.data..Lubsan_type*)
#endif
		*(.data.rel*)
		*(SDATA_MAIN)
		*(.sdata2)
		*(.got.plt) *(.got)
		*(.plt)
		*(.branch_lt)
	}
#else
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA
		*(.data.rel*)
		*(.toc1)
		*(.branch_lt)
	}

	/* 64-bit function descriptors (ELFv1 ABI opd entries). */
	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
		__start_opd = .;
		KEEP(*(.opd))
		__end_opd = .;
	}

	. = ALIGN(256);
	.got : AT(ADDR(.got) - LOAD_OFFSET) {
		__toc_start = .;
		/* Non-relocatable builds isolate prom_init.o's TOC/GOT so it
		 * can be bracketed by its own start/end symbols. */
#ifndef CONFIG_RELOCATABLE
		__prom_init_toc_start = .;
		arch/powerpc/kernel/prom_init.o*(.toc .got)
		__prom_init_toc_end = .;
#endif
		*(.got)
		*(.toc)
	}
#endif

	/* The initial task and kernel stack */
	INIT_TASK_DATA_SECTION(THREAD_ALIGN)

	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
		PAGE_ALIGNED_DATA(PAGE_SIZE)
	}

	.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
	}

	.data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) {
		READ_MOSTLY_DATA(L1_CACHE_BYTES)
	}

	. = ALIGN(PAGE_SIZE);
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}

	BUG_TABLE

	. = ALIGN(PAGE_SIZE);
	_edata = .;
	PROVIDE32 (edata = .);

/*
 * And finally the bss
 */

	BSS_SECTION(0, 0, 0)

	. = ALIGN(PAGE_SIZE);
	_end = . ;
	PROVIDE32 (end = .);

	STABS_DEBUG

	DWARF_DEBUG

	DISCARDS
	/* Sections deliberately dropped from the final image. */
	/DISCARD/ : {
		*(*.EMB.apuinfo)
		*(.glink .iplt .plt .rela* .comment)
		*(.gnu.version*)
		*(.gnu.attributes)
		*(.eh_frame)
	}
}