/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MODULE_H
#define __ASM_MODULE_H

#include <asm-generic/module.h>

/*
 * Bookkeeping for one module PLT section: its ELF section index, the
 * number of veneer slots already emitted, and the section's capacity.
 */
struct mod_plt_sec {
	int			plt_shndx;
	int			plt_num_entries;
	int			plt_max_entries;
};

struct mod_arch_specific {
	struct mod_plt_sec	core;
	struct mod_plt_sec	init;

	/* for CONFIG_DYNAMIC_FTRACE */
	struct plt_entry	*ftrace_trampolines;
};

u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
			  void *loc, const Elf64_Rela *rela,
			  Elf64_Sym *sym);

u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
				void *loc, u64 val);

struct plt_entry {
	/*
	 * A program that conforms to the AArch64 Procedure Call Standard
	 * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
	 * IP1 (x17) may be inserted at any branch instruction that is
	 * exposed to a relocation that supports long branches. Since that
	 * is exactly what we are dealing with here, we are free to use x16
	 * as a scratch register in the PLT veneers.
	 */
	__le32	adrp;	/* adrp	x16, ....		*/
	__le32	add;	/* add	x16, x16, #0x....	*/
	__le32	br;	/* br	x16			*/
};

/*
 * Cortex-A53 erratum 843419: an adrp in either of the last two words of
 * a 4 KiB page (offset 0xff8 or 0xffc) may produce a wrong result, so
 * such locations must be avoided when the workaround is in effect.
 */
static inline bool is_forbidden_offset_for_adrp(void *place)
{
	return IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
	       cpus_have_const_cap(ARM64_WORKAROUND_843419) &&
	       ((u64)place & 0xfff) >= 0xff8;
}

struct plt_entry get_plt_entry(u64 dst, void *pc);

/*
 * Return the section header named @name, or NULL if the ELF image has
 * no such section.
 */
static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
					   const Elf_Shdr *sechdrs,
					   const char *name)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(name, secstrs + s->sh_name) == 0)
			return s;
	}

	return NULL;
}

#endif /* __ASM_MODULE_H */
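
/*
 * Illustration (an editorial addition, not part of the upstream header):
 * for a branch target @dst and a PLT slot at @pc, get_plt_entry() fills
 * a struct plt_entry with a veneer of the form
 *
 *	adrp	x16, <page of dst>		// x16 = dst & ~0xfff (PC-relative)
 *	add	x16, x16, #(dst & 0xfff)	// x16 = dst
 *	br	x16				// jump, clobbering only x16 (IP0)
 *
 * adrp has a +/-4 GiB reach, so the veneer can redirect a branch to any
 * target within 4 GiB of the slot. The three instruction words are
 * stored little-endian (hence __le32), matching the in-memory
 * instruction encoding on arm64.
 */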