xref: /openbmc/linux/arch/arm64/kernel/module-plts.c (revision ed231ae3)
/*
 * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>

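/*
 * True if @loc lies within the module's init region. The unsigned
 * subtraction doubles as a lower-bound check: addresses below
 * init_layout.base wrap around to huge values and fail the comparison.
 */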
static bool in_init(const struct module *mod, void *loc)
{
	return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
}

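/*
 * Emit a PLT entry for @val (the symbol address plus addend) into the
 * core or init PLT section, and return its address. Per the struct
 * plt_entry definition in <asm/module.h>, each entry is a
 * movn/movk/movk/br sequence that loads the target into a scratch
 * register (x16/IP0) and branches to it.
 */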
u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
			  Elf64_Sym *sym)
{
	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
							  &mod->arch.init;
	struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr;
	int i = pltsec->plt_num_entries;
	u64 val = sym->st_value + rela->r_addend;

	plt[i] = get_plt_entry(val);

	/*
	 * Check if the entry we just created is a duplicate. Given that the
	 * relocations are sorted, a duplicate can only be the last entry we
	 * allocated (if one exists).
	 */
	if (i > 0 && plt_entries_equal(plt + i, plt + i - 1))
		return (u64)&plt[i - 1];

	pltsec->plt_num_entries++;
	if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
		return 0;

	return (u64)&plt[i];
}

#ifdef CONFIG_ARM64_ERRATUM_843419
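/*
 * Cortex-A53 erratum #843419 may cause an ADRP instruction that appears
 * in one of the last two instruction slots of a 4 KB page (page offset
 * 0xff8 or 0xffc) to produce an incorrect result. The relocation code
 * in module.c patches such an ADRP into a branch to a veneer emitted
 * here, which computes the same value using a movn/movk sequence.
 */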
u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val)
{
	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
							  &mod->arch.init;
	struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr;
	int i = pltsec->plt_num_entries++;
	u32 mov0, mov1, mov2, br;
	int rd;

	if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
		return 0;

	/* get the destination register of the ADRP instruction */
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD,
					  le32_to_cpup((__le32 *)loc));

	/* generate the veneer instructions */
	mov0 = aarch64_insn_gen_movewide(rd, (u16)~val, 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_INVERSE);
	mov1 = aarch64_insn_gen_movewide(rd, (u16)(val >> 16), 16,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	mov2 = aarch64_insn_gen_movewide(rd, (u16)(val >> 32), 32,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4,
					 AARCH64_INSN_BRANCH_NOLINK);

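	/*
	 * The resulting veneer (illustrative, assuming rd == x1 and a
	 * kernel VA whose top 16 bits are all ones):
	 *
	 *	movn	x1, #(~val & 0xffff)
	 *	movk	x1, #((val >> 16) & 0xffff), lsl #16
	 *	movk	x1, #((val >> 32) & 0xffff), lsl #32
	 *	b	<loc + 4>
	 *
	 * It materializes the value the patched-out ADRP would have
	 * produced and branches back to the instruction following it.
	 */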
	plt[i] = (struct plt_entry){
			cpu_to_le32(mov0),
			cpu_to_le32(mov1),
			cpu_to_le32(mov2),
			cpu_to_le32(br)
		};

	return (u64)&plt[i];
}
#endif

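/* Three-way comparison: evaluates to -1, 0 or 1, like a sort() comparator. */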
#define cmp_3way(a, b)	((a) < (b) ? -1 : (a) > (b))

static int cmp_rela(const void *a, const void *b)
{
	const Elf64_Rela *x = a, *y = b;
	int i;

	/* sort by type, symbol index and addend */
	i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
	if (i == 0)
		i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
	if (i == 0)
		i = cmp_3way(x->r_addend, y->r_addend);
	return i;
}

static bool duplicate_rel(const Elf64_Rela *rela, int num)
{
	/*
	 * Entries are sorted by type, symbol index and addend. That means
	 * that, if a duplicate entry exists, it must be in the preceding
	 * slot.
	 */
	return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
}

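/*
 * Count the PLT slots (and, with CONFIG_ARM64_ERRATUM_843419, ADRP
 * veneer slots) that the relocation entries in @rela may require
 * against their target section @dstsec, whose header index is @dstidx.
 */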
static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
			       Elf64_Word dstidx, Elf_Shdr *dstsec)
{
	unsigned int ret = 0;
	Elf64_Sym *s;
	int i;

	for (i = 0; i < num; i++) {
		u64 min_align;

		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
				break;

			/*
			 * We only have to consider branch targets that resolve
			 * to symbols that are defined in a different section.
			 * This is not simply a heuristic, it is a fundamental
			 * limitation, since there is no guaranteed way to emit
			 * PLT entries sufficiently close to the branch if the
			 * section size exceeds the range of a branch
			 * instruction. So ignore relocations against defined
			 * symbols if they live in the same section as the
			 * relocation target.
			 */
			s = syms + ELF64_R_SYM(rela[i].r_info);
			if (s->st_shndx == dstidx)
				break;

			/*
			 * Jump relocations with non-zero addends against
			 * undefined symbols are supported by the ELF spec, but
			 * do not occur in practice (e.g., 'jump n bytes past
			 * the entry point of undefined function symbol f').
			 * So we need to support them, but there is no need to
			 * take them into consideration when trying to optimize
			 * this code. So let's only check for duplicates when
			 * the addend is zero: this allows us to record the PLT
			 * entry address in the symbol table itself, rather than
			 * having to search the list for duplicates each time we
			 * emit one.
			 */
			if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
				ret++;
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
		case R_AARCH64_ADR_PREL_PG_HI21:
			if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
			    !cpus_have_const_cap(ARM64_WORKAROUND_843419))
				break;

			/*
			 * Determine the minimal safe alignment for this ADRP
			 * instruction: the section alignment at which it is
			 * guaranteed not to appear at a vulnerable offset.
			 *
			 * This comes down to finding the least significant zero
			 * bit in bits [11:3] of the section offset, and
			 * increasing the section's alignment so that the
			 * resulting address of this instruction is guaranteed
			 * to equal the offset in that particular bit (as well
			 * as all less significant bits). This ensures that the
			 * address modulo 4 KB is never 0xff8 or 0xffc (the
			 * offsets at which bits [11:3] are all ones).
			 */
			min_align = 2ULL << ffz(rela[i].r_offset | 0x7);
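			/*
			 * Worked example (illustrative): r_offset == 0xff8
			 * gives 0xff8 | 0x7 == 0xfff, ffz() == 12 and
			 * min_align == 2 << 12 == 8 KB, so no achievable
			 * section alignment can keep this ADRP clear of a
			 * vulnerable offset and a veneer slot is reserved
			 * below. For r_offset == 0x18, ffz(0x1f) == 5 and
			 * min_align == 0x40, which pins bit 5 of the final
			 * address to zero.
			 */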

			/*
			 * Allocate veneer space for each ADRP that may appear
			 * at a vulnerable offset nonetheless. At relocation
			 * time, some of these will remain unused since some
			 * ADRP instructions can be patched to ADR instructions
			 * instead.
			 */
			if (min_align > SZ_4K)
				ret++;
			else
				dstsec->sh_addralign = max(dstsec->sh_addralign,
							   min_align);
			break;
		}
	}
	return ret;
}

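/*
 * Called early from the generic module loader, before the sections are
 * laid out and allocated, so resizing the PLT section headers here
 * still affects how much memory is set aside for them.
 */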
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	unsigned long core_plts = 0;
	unsigned long init_plts = 0;
	Elf64_Sym *syms = NULL;
	Elf_Shdr *tramp = NULL;
	int i;

	/*
	 * Find the empty .plt section so we can expand it to store the PLT
	 * entries. Record the symtab address as well.
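	 * The empty .plt and .init.plt input sections are expected to come
	 * from the arch/arm64/kernel/module.lds linker script.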
	 */
	for (i = 0; i < ehdr->e_shnum; i++) {
		if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
			mod->arch.core.plt = sechdrs + i;
		else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
			mod->arch.init.plt = sechdrs + i;
		else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
			 !strcmp(secstrings + sechdrs[i].sh_name,
				 ".text.ftrace_trampoline"))
			tramp = sechdrs + i;
		else if (sechdrs[i].sh_type == SHT_SYMTAB)
			syms = (Elf64_Sym *)sechdrs[i].sh_addr;
	}

	if (!mod->arch.core.plt || !mod->arch.init.plt) {
		pr_err("%s: module PLT section(s) missing\n", mod->name);
		return -ENOEXEC;
	}
	if (!syms) {
		pr_err("%s: module symtab section missing\n", mod->name);
		return -ENOEXEC;
	}

	for (i = 0; i < ehdr->e_shnum; i++) {
		Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
		int numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela);
		Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info;

		if (sechdrs[i].sh_type != SHT_RELA)
			continue;

		/* ignore relocations that operate on non-exec sections */
		if (!(dstsec->sh_flags & SHF_EXECINSTR))
			continue;

		/* sort by type, symbol index and addend */
		sort(rels, numrels, sizeof(Elf64_Rela), cmp_rela, NULL);

		if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
			core_plts += count_plts(syms, rels, numrels,
						sechdrs[i].sh_info, dstsec);
		else
			init_plts += count_plts(syms, rels, numrels,
						sechdrs[i].sh_info, dstsec);
	}

	mod->arch.core.plt->sh_type = SHT_NOBITS;
	mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.core.plt->sh_size = (core_plts + 1) * sizeof(struct plt_entry);
	mod->arch.core.plt_num_entries = 0;
	mod->arch.core.plt_max_entries = core_plts;

	mod->arch.init.plt->sh_type = SHT_NOBITS;
	mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.init.plt->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
	mod->arch.init.plt_num_entries = 0;
	mod->arch.init.plt_max_entries = init_plts;

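	/*
	 * Reserve a single trampoline slot; it is populated at runtime by
	 * the arm64 ftrace code when a module call site turns out to be
	 * out of branch range of the ftrace entry point.
	 */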
	if (tramp) {
		tramp->sh_type = SHT_NOBITS;
		tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
		tramp->sh_addralign = __alignof__(struct plt_entry);
		tramp->sh_size = sizeof(struct plt_entry);
	}

	return 0;
}