/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MODULE_H
#define __ASM_MODULE_H

#include <asm-generic/module.h>

struct mod_plt_sec {
	int			plt_shndx;
	int			plt_num_entries;
	int			plt_max_entries;
};

struct mod_arch_specific {
	struct mod_plt_sec	core;
	struct mod_plt_sec	init;

	/* for CONFIG_DYNAMIC_FTRACE */
	struct plt_entry	*ftrace_trampolines;
};

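/*
 * Each mod_plt_sec describes one PLT section of the module: plt_shndx is
 * its section header index, plt_max_entries the number of slots reserved
 * at module layout time, and plt_num_entries how many have been handed
 * out so far ('core' covers the module's permanent sections, 'init' the
 * ones freed after init). A minimal sketch of how a slot might be
 * claimed, assuming the layout used by module_emit_plt_entry(); 'dst' is
 * a hypothetical branch target:
 *
 *	struct mod_plt_sec *pltsec = &mod->arch.core;
 *	struct plt_entry *plt = (void *)sechdrs[pltsec->plt_shndx].sh_addr;
 *	struct plt_entry *slot = &plt[pltsec->plt_num_entries];
 *
 *	if (pltsec->plt_num_entries < pltsec->plt_max_entries) {
 *		*slot = get_plt_entry(dst, slot);
 *		pltsec->plt_num_entries++;
 *	}
 */
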
u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
			  void *loc, const Elf64_Rela *rela,
			  Elf64_Sym *sym);

u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
				void *loc, u64 val);

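/*
 * Both helpers are meant to be called from the module relocation code.
 * A hedged sketch, not the exact logic of apply_relocate_add(): when a
 * R_AARCH64_CALL26/JUMP26 branch displacement does not fit in the
 * instruction, the branch can be redirected through a freshly emitted
 * PLT entry instead:
 *
 *	if (branch_out_of_range) {
 *		val = module_emit_plt_entry(mod, sechdrs, loc, rela, sym);
 *		if (!val)
 *			return -ENOEXEC;
 *		// re-encode the branch at 'loc' to target 'val'
 *	}
 *
 * 'branch_out_of_range' is a placeholder for the range check, not a
 * variable in the kernel sources.
 */
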
struct plt_entry {
	/*
	 * A program that conforms to the AArch64 Procedure Call Standard
	 * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
	 * IP1 (x17) may be inserted at any branch instruction that is
	 * exposed to a relocation that supports long branches. Since that
	 * is exactly what we are dealing with here, we are free to use x16
	 * as a scratch register in the PLT veneers.
	 */
	__le32	adrp;	/* adrp	x16, ....			*/
	__le32	add;	/* add	x16, x16, #0x....		*/
	__le32	br;	/* br	x16				*/
};

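/*
 * Worked example of how the three instructions reach a target 'dst' from
 * a slot at address 'pc', assuming dst is within the +/-4 GiB reach of
 * adrp:
 *
 *	adrp	x16, dst		// x16 = dst & ~0xfff (page of dst,
 *					//       computed PC-relative)
 *	add	x16, x16, #(dst & 0xfff)	// add the low 12 bits
 *	br	x16			// jump to the reassembled address
 */
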
static inline bool is_forbidden_offset_for_adrp(void *place)
{
	return IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
	       cpus_have_const_cap(ARM64_WORKAROUND_843419) &&
	       ((u64)place & 0xfff) >= 0xff8;
}

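/*
 * The helper above flags adrp instructions placed in the last 8 bytes of
 * a 4 KiB page, the pattern affected by Cortex-A53 erratum 843419. A
 * simplified sketch of how the relocation code can use it when applying
 * a R_AARCH64_ADR_PREL_PG_HI21 relocation at 'place' (the kernel first
 * tries cheaper fixups before falling back to a veneer):
 *
 *	if (is_forbidden_offset_for_adrp(place))
 *		val = module_emit_veneer_for_adrp(mod, sechdrs, place, val);
 *
 * i.e. the affected adrp is moved into a veneer at a safe offset and the
 * original instruction is replaced with a branch to that veneer.
 */
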
struct plt_entry get_plt_entry(u64 dst, void *pc);

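/*
 * get_plt_entry() assembles the adrp/add/br sequence for a veneer that
 * will live at 'pc' and branch to 'dst'. A minimal sketch of filling the
 * ftrace trampoline slot tracked in mod_arch_specific, ignoring the
 * instruction patching and cache maintenance a real implementation needs:
 *
 *	struct plt_entry *plt = mod->arch.ftrace_trampolines;
 *
 *	*plt = get_plt_entry((u64)ftrace_caller, plt);
 *
 * 'ftrace_caller' stands in for whichever trampoline target is wanted.
 */
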
static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
				    const Elf_Shdr *sechdrs,
				    const char *name)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(name, secstrs + s->sh_name) == 0)
			return s;
	}

	return NULL;
}

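/*
 * Example (sketch) of looking up a module section by name at load time,
 * e.g. to locate the ftrace trampoline PLT; the section name below is
 * shown only for illustration:
 *
 *	const Elf_Shdr *s;
 *
 *	s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
 *	if (!s)
 *		return -ENOEXEC;
 *	mod->arch.ftrace_trampolines = (void *)s->sh_addr;
 */
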
#endif /* __ASM_MODULE_H */