/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#include <asm/hyp_image.h>
#ifdef CONFIG_KVM
#define HYPERVISOR_EXTABLE					\
	. = ALIGN(SZ_8);					\
	__start___kvm_ex_table = .;				\
	*(__kvm_ex_table)					\
	__stop___kvm_ex_table = .;

#define HYPERVISOR_DATA_SECTIONS				\
	HYP_SECTION_NAME(.rodata) : {				\
		. = ALIGN(PAGE_SIZE);				\
		__hyp_rodata_start = .;				\
		*(HYP_SECTION_NAME(.data..ro_after_init))	\
		*(HYP_SECTION_NAME(.rodata))			\
		. = ALIGN(PAGE_SIZE);				\
		__hyp_rodata_end = .;				\
	}

#define HYPERVISOR_PERCPU_SECTION				\
	. = ALIGN(PAGE_SIZE);					\
	HYP_SECTION_NAME(.data..percpu) : {			\
		*(HYP_SECTION_NAME(.data..percpu))		\
	}

#define HYPERVISOR_RELOC_SECTION				\
	.hyp.reloc : ALIGN(4) {					\
		__hyp_reloc_begin = .;				\
		*(.hyp.reloc)					\
		__hyp_reloc_end = .;				\
	}

#define BSS_FIRST_SECTIONS					\
	__hyp_bss_start = .;					\
	*(HYP_SECTION_NAME(.bss))				\
	. = ALIGN(PAGE_SIZE);					\
	__hyp_bss_end = .;

/*
 * We require that __hyp_bss_start and __bss_start are aligned, and enforce it
 * with an assertion. But the BSS_SECTION macro places an empty .sbss section
 * between them, which can in some cases cause the linker to misalign them. To
 * work around the issue, force a page alignment for __bss_start.
 */
#define SBSS_ALIGN			PAGE_SIZE
#else /* CONFIG_KVM */
#define HYPERVISOR_EXTABLE
#define HYPERVISOR_DATA_SECTIONS
#define HYPERVISOR_PERCPU_SECTION
#define HYPERVISOR_RELOC_SECTION
#define SBSS_ALIGN			0
#endif

#define RO_EXCEPTION_TABLE_ALIGN	4
#define RUNTIME_DISCARD_EXIT

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/kernel-pgtable.h>
#include <asm/kexec.h>
#include <asm/memory.h>
#include <asm/page.h>

#include "image.h"

OUTPUT_ARCH(aarch64)
ENTRY(_text)

jiffies = jiffies_64;

#define HYPERVISOR_TEXT					\
	. = ALIGN(PAGE_SIZE);				\
	__hyp_idmap_text_start = .;			\
	*(.hyp.idmap.text)				\
	__hyp_idmap_text_end = .;			\
	__hyp_text_start = .;				\
	*(.hyp.text)					\
	HYPERVISOR_EXTABLE				\
	. = ALIGN(PAGE_SIZE);				\
	__hyp_text_end = .;

#define IDMAP_TEXT					\
	. = ALIGN(SZ_4K);				\
	__idmap_text_start = .;				\
	*(.idmap.text)					\
	__idmap_text_end = .;

#ifdef CONFIG_HIBERNATION
#define HIBERNATE_TEXT					\
	__hibernate_exit_text_start = .;		\
	*(.hibernate_exit.text)				\
	__hibernate_exit_text_end = .;
#else
#define HIBERNATE_TEXT
#endif

#ifdef CONFIG_KEXEC_CORE
#define KEXEC_TEXT					\
	__relocate_new_kernel_start = .;		\
	*(.kexec_relocate.text)				\
	__relocate_new_kernel_end = .;
#else
#define KEXEC_TEXT
#endif

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define TRAMP_TEXT					\
	. = ALIGN(PAGE_SIZE);				\
	__entry_tramp_text_start = .;			\
	*(.entry.tramp.text)				\
	. = ALIGN(PAGE_SIZE);				\
	__entry_tramp_text_end = .;
#else
#define TRAMP_TEXT
#endif

/*
 * The size of the PE/COFF section that covers the kernel image, which
 * runs from _stext to _edata, must be a round multiple of the PE/COFF
 * FileAlignment, which we set to its minimum value of 0x200. '_stext'
 * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned
 * boundary should be sufficient.
 */
PECOFF_FILE_ALIGNMENT = 0x200;

#ifdef CONFIG_EFI
#define PECOFF_EDATA_PADDING	\
	.pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); }
#else
#define PECOFF_EDATA_PADDING
#endif
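
/*
 * Illustrative example of the padding above: if _edata would otherwise land
 * at, say, image offset 0x123456, the BYTE(0) bumps the location counter to
 * 0x123457 and ALIGN(PECOFF_FILE_ALIGNMENT) then rounds it up to 0x123600.
 * Since _stext itself sits on a 0x200-aligned boundary, the PE/COFF section
 * running from _stext to _edata stays a whole multiple of FileAlignment.
 */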

SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name. There is no documented
	 * order of matching.
	 */
	DISCARDS
	/DISCARD/ : {
		*(.interp .dynamic)
		*(.dynsym .dynstr .hash .gnu.hash)
	}

	. = KIMAGE_VADDR;

	.head.text : {
		_text = .;
		HEAD_TEXT
	}
	.text : ALIGN(SEGMENT_ALIGN) {	/* Real text segment		*/
		_stext = .;		/* Text and read-only data	*/
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		ENTRY_TEXT
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		HYPERVISOR_TEXT
		IDMAP_TEXT
		*(.gnu.warning)
		. = ALIGN(16);
		*(.got)			/* Global offset table		*/
	}

	/*
	 * Make sure that the .got.plt is either completely empty or it
	 * contains only the lazy dispatch entries.
	 */
	.got.plt : { *(.got.plt) }
	ASSERT(SIZEOF(.got.plt) == 0 || SIZEOF(.got.plt) == 0x18,
	       "Unexpected GOT/PLT entries detected!")

	. = ALIGN(SEGMENT_ALIGN);
	_etext = .;			/* End of text section */

	/* everything from this point to __init_begin will be marked RO NX */
	RO_DATA(PAGE_SIZE)

	HYPERVISOR_DATA_SECTIONS

	/* code sections that are never executed via the kernel mapping */
	.rodata.text : {
		TRAMP_TEXT
		HIBERNATE_TEXT
		KEXEC_TEXT
		. = ALIGN(PAGE_SIZE);
	}

	idmap_pg_dir = .;
	. += PAGE_SIZE;

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_pg_dir = .;
	. += PAGE_SIZE;
#endif

	reserved_pg_dir = .;
	. += PAGE_SIZE;

	swapper_pg_dir = .;
	. += PAGE_SIZE;
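
	/*
	 * Illustrative note: with the one-page spacing above, the asserts at
	 * the bottom of this file work out to RESERVED_SWAPPER_OFFSET ==
	 * PAGE_SIZE and, when CONFIG_UNMAP_KERNEL_AT_EL0 is set,
	 * TRAMP_SWAPPER_OFFSET == 2 * PAGE_SIZE; both constants are
	 * presumably defined to match in <asm/kernel-pgtable.h>.
	 */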

	. = ALIGN(SEGMENT_ALIGN);
	__init_begin = .;
	__inittext_begin = .;

	INIT_TEXT_SECTION(8)

	__exittext_begin = .;
	.exit.text : {
		EXIT_TEXT
	}
	__exittext_end = .;

	. = ALIGN(4);
	.altinstructions : {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	. = ALIGN(SEGMENT_ALIGN);
	__inittext_end = .;
	__initdata_begin = .;

	init_idmap_pg_dir = .;
	. += INIT_IDMAP_DIR_SIZE;
	init_idmap_pg_end = .;

	.init.data : {
		INIT_DATA
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		INIT_RAM_FS
		*(.init.altinstructions .init.bss)	/* from the EFI stub */
	}
	.exit.data : {
		EXIT_DATA
	}

	PERCPU_SECTION(L1_CACHE_BYTES)
	HYPERVISOR_PERCPU_SECTION

	HYPERVISOR_RELOC_SECTION

	.rela.dyn : ALIGN(8) {
		*(.rela .rela*)
	}

	__rela_offset	= ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
	__rela_size	= SIZEOF(.rela.dyn);

#ifdef CONFIG_RELR
	.relr.dyn : ALIGN(8) {
		*(.relr.dyn)
	}

	__relr_offset	= ABSOLUTE(ADDR(.relr.dyn) - KIMAGE_VADDR);
	__relr_size	= SIZEOF(.relr.dyn);
#endif

	. = ALIGN(SEGMENT_ALIGN);
	__initdata_end = .;
	__init_end = .;

	_data = .;
	_sdata = .;
	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)

	/*
	 * Data written with the MMU off but read with the MMU on requires
	 * cache lines to be invalidated, discarding up to a Cache Writeback
	 * Granule (CWG) of data from the cache. Keep the section that
	 * requires this type of maintenance to be in its own Cache Writeback
	 * Granule (CWG) area so the cache maintenance operations don't
	 * interfere with adjacent data.
	 */
	.mmuoff.data.write : ALIGN(SZ_2K) {
		__mmuoff_data_start = .;
		*(.mmuoff.data.write)
	}
	. = ALIGN(SZ_2K);
	.mmuoff.data.read : {
		*(.mmuoff.data.read)
		__mmuoff_data_end = .;
	}

	PECOFF_EDATA_PADDING
	__pecoff_data_rawsize = ABSOLUTE(. - __initdata_begin);
	_edata = .;

	BSS_SECTION(SBSS_ALIGN, 0, 0)

	. = ALIGN(PAGE_SIZE);
	init_pg_dir = .;
	. += INIT_DIR_SIZE;
	init_pg_end = .;

	. = ALIGN(SEGMENT_ALIGN);
	__pecoff_data_size = ABSOLUTE(. - __initdata_begin);
	_end = .;

	STABS_DEBUG
	DWARF_DEBUG
	ELF_DETAILS

	HEAD_SYMBOLS

	/*
	 * Sections that should stay zero sized, which is safer to
	 * explicitly check instead of blindly discarding.
	 */
	.plt : {
		*(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt)
	}
	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")

	.data.rel.ro : { *(.data.rel.ro) }
	ASSERT(SIZEOF(.data.rel.ro) == 0, "Unexpected RELRO detected!")
}

#include "image-vars.h"

/*
 * The HYP init code and ID map text can't be longer than a page each. The
 * former is page-aligned, but the latter may not be with 16K or 64K pages, so
 * it should also not cross a page boundary.
 */
ASSERT(__hyp_idmap_text_end - __hyp_idmap_text_start <= PAGE_SIZE,
	"HYP init code too big")
ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
	"ID map text too big or misaligned")
#ifdef CONFIG_HIBERNATION
ASSERT(__hibernate_exit_text_end - __hibernate_exit_text_start <= SZ_4K,
       "Hibernate exit text is bigger than 4 KiB")
#endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE,
	"Entry trampoline text too big")
#endif
#ifdef CONFIG_KVM
ASSERT(__hyp_bss_start == __bss_start, "HYP and Host BSS are misaligned")
#endif
/*
 * If padding is applied before .head.text, virt<->phys conversions will fail.
 */
ASSERT(_text == KIMAGE_VADDR, "HEAD is misaligned")

ASSERT(swapper_pg_dir - reserved_pg_dir == RESERVED_SWAPPER_OFFSET,
       "RESERVED_SWAPPER_OFFSET is wrong!")

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
ASSERT(swapper_pg_dir - tramp_pg_dir == TRAMP_SWAPPER_OFFSET,
       "TRAMP_SWAPPER_OFFSET is wrong!")
#endif

#ifdef CONFIG_KEXEC_CORE
/* kexec relocation code should fit into one KEXEC_CONTROL_PAGE_SIZE */
ASSERT(__relocate_new_kernel_end - __relocate_new_kernel_start <= SZ_4K,
       "kexec relocation code is bigger than 4 KiB")
ASSERT(KEXEC_CONTROL_PAGE_SIZE >= SZ_4K, "KEXEC_CONTROL_PAGE_SIZE is broken")
#endif
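
/*
 * A quick way to sanity-check the resulting layout is to inspect the linked
 * image, e.g. "readelf -S vmlinux" for the section placement or
 * "nm vmlinux | grep pg_dir" for the page-table reservations above; any ELF
 * inspection tool works equally well.
 */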