xref: /openbmc/linux/arch/arm64/kernel/module-plts.c (revision ca79acca)
1fd045f6cSArd Biesheuvel /*
224af6c4eSArd Biesheuvel  * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
3fd045f6cSArd Biesheuvel  *
4fd045f6cSArd Biesheuvel  * This program is free software; you can redistribute it and/or modify
5fd045f6cSArd Biesheuvel  * it under the terms of the GNU General Public License version 2 as
6fd045f6cSArd Biesheuvel  * published by the Free Software Foundation.
7fd045f6cSArd Biesheuvel  */
8fd045f6cSArd Biesheuvel 
9fd045f6cSArd Biesheuvel #include <linux/elf.h>
10fd045f6cSArd Biesheuvel #include <linux/kernel.h>
11fd045f6cSArd Biesheuvel #include <linux/module.h>
12fd045f6cSArd Biesheuvel #include <linux/sort.h>
13fd045f6cSArd Biesheuvel 
1424af6c4eSArd Biesheuvel static bool in_init(const struct module *mod, void *loc)
15fd045f6cSArd Biesheuvel {
1624af6c4eSArd Biesheuvel 	return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
17fd045f6cSArd Biesheuvel }
18fd045f6cSArd Biesheuvel 
1924af6c4eSArd Biesheuvel u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
2024af6c4eSArd Biesheuvel 			  Elf64_Sym *sym)
2124af6c4eSArd Biesheuvel {
2224af6c4eSArd Biesheuvel 	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
2324af6c4eSArd Biesheuvel 							  &mod->arch.init;
2424af6c4eSArd Biesheuvel 	struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr;
2524af6c4eSArd Biesheuvel 	int i = pltsec->plt_num_entries;
2624af6c4eSArd Biesheuvel 	u64 val = sym->st_value + rela->r_addend;
27fd045f6cSArd Biesheuvel 
287e8b9c1dSArd Biesheuvel 	plt[i] = get_plt_entry(val);
29fd045f6cSArd Biesheuvel 
3024af6c4eSArd Biesheuvel 	/*
3124af6c4eSArd Biesheuvel 	 * Check if the entry we just created is a duplicate. Given that the
3224af6c4eSArd Biesheuvel 	 * relocations are sorted, this will be the last entry we allocated.
3324af6c4eSArd Biesheuvel 	 * (if one exists).
3424af6c4eSArd Biesheuvel 	 */
357e8b9c1dSArd Biesheuvel 	if (i > 0 && plt_entries_equal(plt + i, plt + i - 1))
3624af6c4eSArd Biesheuvel 		return (u64)&plt[i - 1];
3724af6c4eSArd Biesheuvel 
3824af6c4eSArd Biesheuvel 	pltsec->plt_num_entries++;
395e8307b9SArd Biesheuvel 	if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
405e8307b9SArd Biesheuvel 		return 0;
41fd045f6cSArd Biesheuvel 
42fd045f6cSArd Biesheuvel 	return (u64)&plt[i];
43fd045f6cSArd Biesheuvel }
44fd045f6cSArd Biesheuvel 
45a257e025SArd Biesheuvel #ifdef CONFIG_ARM64_ERRATUM_843419
/*
 * Emit a veneer used to work around Cortex-A53 erratum 843419: it loads
 * @val into the destination register of the ADRP instruction found at
 * @loc (using a MOVN/MOVK/MOVK sequence) and then branches back to the
 * instruction following that ADRP. Returns the address of the veneer,
 * or 0 if the reserved PLT space is exhausted.
 */
u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val)
{
	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
							  &mod->arch.init;
	struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr;
	/*
	 * Claim the next slot up front. NOTE(review): the counter stays
	 * bumped even when the WARN_ON below fires and we bail out.
	 */
	int i = pltsec->plt_num_entries++;
	u32 mov0, mov1, mov2, br;
	int rd;

	if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
		return 0;

	/* get the destination register of the ADRP instruction */
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD,
					  le32_to_cpup((__le32 *)loc));

	/*
	 * Generate the veneer instructions: MOVN (inverse) + 2x MOVK cover
	 * bits [47:0] of val; bits [63:48] of rd end up all ones
	 * (NOTE(review): presumably fine since val is a kernel VA with the
	 * top bits set — confirm).
	 */
	mov0 = aarch64_insn_gen_movewide(rd, (u16)~val, 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_INVERSE);
	mov1 = aarch64_insn_gen_movewide(rd, (u16)(val >> 16), 16,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	mov2 = aarch64_insn_gen_movewide(rd, (u16)(val >> 32), 32,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	/* unconditional branch back to the instruction after the ADRP */
	br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4,
					 AARCH64_INSN_BRANCH_NOLINK);

	/* veneers are little-endian code regardless of kernel endianness */
	plt[i] = (struct plt_entry){
			cpu_to_le32(mov0),
			cpu_to_le32(mov1),
			cpu_to_le32(mov2),
			cpu_to_le32(br)
		};

	return (u64)&plt[i];
}
84a257e025SArd Biesheuvel #endif
85a257e025SArd Biesheuvel 
#define cmp_3way(a,b)	((a) < (b) ? -1 : (a) > (b))

/*
 * sort()/bsearch()-style comparator for Elf64_Rela entries, ordering by
 * relocation type, then symbol index, then addend.
 */
static int cmp_rela(const void *a, const void *b)
{
	const Elf64_Rela *x = a, *y = b;
	int res;

	/* primary key: relocation type */
	res = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
	if (res)
		return res;

	/* secondary key: symbol index */
	res = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
	if (res)
		return res;

	/* tertiary key: addend */
	return cmp_3way(x->r_addend, y->r_addend);
}
101fd045f6cSArd Biesheuvel 
/*
 * Tell whether rela[num] duplicates an earlier entry. The array is sorted
 * by type, symbol index and addend, so a duplicate (if one exists) must
 * sit in the slot directly preceding rela[num].
 */
static bool duplicate_rel(const Elf64_Rela *rela, int num)
{
	if (num == 0)
		return false;

	return cmp_rela(&rela[num], &rela[num - 1]) == 0;
}
111fd045f6cSArd Biesheuvel 
/*
 * Count how many PLT/veneer slots must be reserved for one relocation
 * section. @syms is the module's symbol table, @rela/@num the relocation
 * entries (already sorted with cmp_rela() by the caller), @dstidx the
 * index of the section the relocations apply to and @dstsec its header.
 * As a side effect, @dstsec's sh_addralign may be raised to keep ADRP
 * instructions away from erratum-843419-vulnerable offsets.
 */
static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
			       Elf64_Word dstidx, Elf_Shdr *dstsec)
{
	unsigned int ret = 0;
	Elf64_Sym *s;
	int i;

	for (i = 0; i < num; i++) {
		u64 min_align;

		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			/*
			 * Without KASLR, branch targets are presumably
			 * always in direct-branch range, so no PLT slots
			 * are reserved (TODO confirm against module_alloc).
			 */
			if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
				break;

			/*
			 * We only have to consider branch targets that resolve
			 * to symbols that are defined in a different section.
			 * This is not simply a heuristic, it is a fundamental
			 * limitation, since there is no guaranteed way to emit
			 * PLT entries sufficiently close to the branch if the
			 * section size exceeds the range of a branch
			 * instruction. So ignore relocations against defined
			 * symbols if they live in the same section as the
			 * relocation target.
			 */
			s = syms + ELF64_R_SYM(rela[i].r_info);
			if (s->st_shndx == dstidx)
				break;

			/*
			 * Jump relocations with non-zero addends against
			 * undefined symbols are supported by the ELF spec, but
			 * do not occur in practice (e.g., 'jump n bytes past
			 * the entry point of undefined function symbol f').
			 * So we need to support them, but there is no need to
			 * take them into consideration when trying to optimize
			 * this code. So let's only check for duplicates when
			 * the addend is zero: this allows us to record the PLT
			 * entry address in the symbol table itself, rather than
			 * having to search the list for duplicates each time we
			 * emit one.
			 */
			if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
				ret++;
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
		case R_AARCH64_ADR_PREL_PG_HI21:
			/* only relevant on CPUs affected by erratum 843419 */
			if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
			    !cpus_have_const_cap(ARM64_WORKAROUND_843419))
				break;

			/*
			 * Determine the minimal safe alignment for this ADRP
			 * instruction: the section alignment at which it is
			 * guaranteed not to appear at a vulnerable offset.
			 *
			 * This comes down to finding the least significant zero
			 * bit in bits [11:3] of the section offset, and
			 * increasing the section's alignment so that the
			 * resulting address of this instruction is guaranteed
			 * to equal the offset in that particular bit (as well
			 * as all less signficant bits). This ensures that the
			 * address modulo 4 KB != 0xfff8 or 0xfffc (which would
			 * have all ones in bits [11:3])
			 */
			min_align = 2ULL << ffz(rela[i].r_offset | 0x7);

			/*
			 * Allocate veneer space for each ADRP that may appear
			 * at a vulnerable offset nonetheless. At relocation
			 * time, some of these will remain unused since some
			 * ADRP instructions can be patched to ADR instructions
			 * instead.
			 */
			if (min_align > SZ_4K)
				ret++;
			else
				dstsec->sh_addralign = max(dstsec->sh_addralign,
							   min_align);
			break;
		}
	}
	return ret;
}
198fd045f6cSArd Biesheuvel 
/*
 * Arch hook invoked while the module loader lays out sections: locate the
 * (initially empty) .plt and .init.plt sections, count the worst-case
 * number of PLT/veneer entries each may need by scanning all RELA
 * sections, and size the PLT sections accordingly. Returns 0 on success
 * or -ENOEXEC when a required section is missing.
 */
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	unsigned long core_plts = 0;
	unsigned long init_plts = 0;
	Elf64_Sym *syms = NULL;
	Elf_Shdr *tramp = NULL;
	int i;

	/*
	 * Find the empty .plt section so we can expand it to store the PLT
	 * entries. Record the symtab address as well.
	 */
	for (i = 0; i < ehdr->e_shnum; i++) {
		if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
			mod->arch.core.plt = sechdrs + i;
		else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
			mod->arch.init.plt = sechdrs + i;
		else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
			 !strcmp(secstrings + sechdrs[i].sh_name,
				 ".text.ftrace_trampoline"))
			tramp = sechdrs + i;
		else if (sechdrs[i].sh_type == SHT_SYMTAB)
			syms = (Elf64_Sym *)sechdrs[i].sh_addr;
	}

	if (!mod->arch.core.plt || !mod->arch.init.plt) {
		pr_err("%s: module PLT section(s) missing\n", mod->name);
		return -ENOEXEC;
	}
	if (!syms) {
		pr_err("%s: module symtab section missing\n", mod->name);
		return -ENOEXEC;
	}

	/* count the worst-case number of PLT slots per RELA section */
	for (i = 0; i < ehdr->e_shnum; i++) {
		Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
		int numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela);
		Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info;

		if (sechdrs[i].sh_type != SHT_RELA)
			continue;

		/* ignore relocations that operate on non-exec sections */
		if (!(dstsec->sh_flags & SHF_EXECINSTR))
			continue;

		/* sort by type, symbol index and addend */
		sort(rels, numrels, sizeof(Elf64_Rela), cmp_rela, NULL);

		/*
		 * Relocations against ".init*" sections are tallied
		 * separately, since their PLT entries go into .init.plt.
		 */
		if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
			core_plts += count_plts(syms, rels, numrels,
						sechdrs[i].sh_info, dstsec);
		else
			init_plts += count_plts(syms, rels, numrels,
						sechdrs[i].sh_info, dstsec);
	}

	/*
	 * SHT_NOBITS: the loader allocates the space at load time but no
	 * bytes are taken from the module file. One extra entry is sized
	 * beyond the counted maximum (NOTE(review): the purpose of the
	 * spare slot is not evident from this file — confirm).
	 */
	mod->arch.core.plt->sh_type = SHT_NOBITS;
	mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.core.plt->sh_size = (core_plts  + 1) * sizeof(struct plt_entry);
	mod->arch.core.plt_num_entries = 0;
	mod->arch.core.plt_max_entries = core_plts;

	mod->arch.init.plt->sh_type = SHT_NOBITS;
	mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.init.plt->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
	mod->arch.init.plt_num_entries = 0;
	mod->arch.init.plt_max_entries = init_plts;

	/* reserve room for a single ftrace trampoline PLT entry */
	if (tramp) {
		tramp->sh_type = SHT_NOBITS;
		tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
		tramp->sh_addralign = __alignof__(struct plt_entry);
		tramp->sh_size = sizeof(struct plt_entry);
	}

	return 0;
}
280