xref: /openbmc/linux/arch/powerpc/kernel/module_32.c (revision 2cf1c348)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*  Kernel module help for PPC.
3     Copyright (C) 2001 Rusty Russell.
4 
5 */
6 
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 
9 #include <linux/module.h>
10 #include <linux/moduleloader.h>
11 #include <linux/elf.h>
12 #include <linux/vmalloc.h>
13 #include <linux/fs.h>
14 #include <linux/string.h>
15 #include <linux/kernel.h>
16 #include <linux/ftrace.h>
17 #include <linux/cache.h>
18 #include <linux/bug.h>
19 #include <linux/sort.h>
20 #include <asm/setup.h>
21 
22 /* Count how many different relocations (different symbol, different
23    addend) there are; each distinct one may need its own PLT stub. */
24 static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
25 {
26 	unsigned int i, r_info, r_addend, _count_relocs;
27 
28 	_count_relocs = 0;
29 	r_info = 0;
30 	r_addend = 0;
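	/* get_plt_size() sorts the relocations before calling us, so entries
	 * with the same (symbol, addend) pair are adjacent and one linear
	 * pass that counts key changes is enough.
	 */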
31 	for (i = 0; i < num; i++)
32 		/* Only count 24-bit relocs, others don't need stubs */
33 		if (ELF32_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
34 		    (r_info != ELF32_R_SYM(rela[i].r_info) ||
35 		     r_addend != rela[i].r_addend)) {
36 			_count_relocs++;
37 			r_info = ELF32_R_SYM(rela[i].r_info);
38 			r_addend = rela[i].r_addend;
39 		}
40 
41 #ifdef CONFIG_DYNAMIC_FTRACE
42 	_count_relocs++;	/* add one for ftrace_caller */
43 #endif
44 	return _count_relocs;
45 }
46 
47 static int relacmp(const void *_x, const void *_y)
48 {
49 	const Elf32_Rela *x, *y;
50 
51 	y = (Elf32_Rela *)_x;
52 	x = (Elf32_Rela *)_y;
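	/* Note that _x lands in y and _y in x, so the comparison below is
	 * effectively reversed and the array ends up sorted in descending
	 * order. count_relocs() only needs equal (symbol, addend) keys to
	 * be adjacent, so the direction of the sort is irrelevant.
	 */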
53 
54 	/* Compare the entire r_info (as opposed to ELF32_R_SYM(r_info) only) to
55 	 * make the comparison cheaper/faster. It won't affect the sorting or
56 	 * the counting algorithms' performance
57 	 */
58 	if (x->r_info < y->r_info)
59 		return -1;
60 	else if (x->r_info > y->r_info)
61 		return 1;
62 	else if (x->r_addend < y->r_addend)
63 		return -1;
64 	else if (x->r_addend > y->r_addend)
65 		return 1;
66 	else
67 		return 0;
68 }
69 
70 /* Get the potential trampoline size required by the init and
71    non-init sections */
72 static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
73 				  const Elf32_Shdr *sechdrs,
74 				  const char *secstrings,
75 				  int is_init)
76 {
77 	unsigned long ret = 0;
78 	unsigned i;
79 
80 	/* Everything marked ALLOC (this includes the exported
81            symbols) */
82 	for (i = 1; i < hdr->e_shnum; i++) {
83 		/* If it's called *.init*, and we're not init, we're
84                    not interested */
85 		if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != NULL)
86 		    != is_init)
87 			continue;
88 
89 		/* We don't want to look at debug sections. */
90 		if (strstr(secstrings + sechdrs[i].sh_name, ".debug"))
91 			continue;
92 
93 		if (sechdrs[i].sh_type == SHT_RELA) {
94 			pr_debug("Found relocations in section %u\n", i);
95 			pr_debug("Ptr: %p.  Number: %u\n",
96 			       (void *)hdr + sechdrs[i].sh_offset,
97 			       sechdrs[i].sh_size / sizeof(Elf32_Rela));
98 
99 			/* Sort the relocation information based on a symbol and
100 			 * addend key. The sort itself is O(n*log n), but it
101 			 * allows count_relocs() to run in a single linear
102 			 * pass, O(n).
103 			 */
104 			sort((void *)hdr + sechdrs[i].sh_offset,
105 			     sechdrs[i].sh_size / sizeof(Elf32_Rela),
106 			     sizeof(Elf32_Rela), relacmp, NULL);
107 
108 			ret += count_relocs((void *)hdr
109 					     + sechdrs[i].sh_offset,
110 					     sechdrs[i].sh_size
111 					     / sizeof(Elf32_Rela))
112 				* sizeof(struct ppc_plt_entry);
113 		}
114 	}
115 
116 	return ret;
117 }
118 
119 int module_frob_arch_sections(Elf32_Ehdr *hdr,
120 			      Elf32_Shdr *sechdrs,
121 			      char *secstrings,
122 			      struct module *me)
123 {
124 	unsigned int i;
125 
126 	/* Find .plt and .init.plt sections */
127 	for (i = 0; i < hdr->e_shnum; i++) {
128 		if (strcmp(secstrings + sechdrs[i].sh_name, ".init.plt") == 0)
129 			me->arch.init_plt_section = i;
130 		else if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0)
131 			me->arch.core_plt_section = i;
132 	}
133 	if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
134 		pr_err("Module doesn't contain .plt or .init.plt sections.\n");
135 		return -ENOEXEC;
136 	}
137 
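	/* Size the PLT sections for the worst case: one ppc_plt_entry per
	 * distinct 24-bit relocation, even though only the out-of-range
	 * calls will actually consume an entry at relocation time.
	 */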
138 	/* Override their sizes */
139 	sechdrs[me->arch.core_plt_section].sh_size
140 		= get_plt_size(hdr, sechdrs, secstrings, 0);
141 	sechdrs[me->arch.init_plt_section].sh_size
142 		= get_plt_size(hdr, sechdrs, secstrings, 1);
143 	return 0;
144 }
145 
146 static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
147 {
148 	if (entry->jump[0] != PPC_RAW_LIS(_R12, PPC_HA(val)))
149 		return 0;
150 	if (entry->jump[1] != PPC_RAW_ADDI(_R12, _R12, PPC_LO(val)))
151 		return 0;
152 	return 1;
153 }
154 
155 /* Set up a trampoline in the PLT to bounce us to the distant function */
156 static uint32_t do_plt_call(void *location,
157 			    Elf32_Addr val,
158 			    const Elf32_Shdr *sechdrs,
159 			    struct module *mod)
160 {
161 	struct ppc_plt_entry *entry;
162 
163 	pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
164 	/* Init, or core PLT? */
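	/* Call sites in the module core get a trampoline in .plt; call
	 * sites in init code use .init.plt, which is discarded along with
	 * the rest of the init region once the module has finished loading.
	 */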
165 	if (location >= mod->core_layout.base
166 	    && location < mod->core_layout.base + mod->core_layout.size)
167 		entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
168 	else
169 		entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
170 
171 	/* Find this entry, or if that fails, the next avail. entry */
172 	while (entry->jump[0]) {
173 		if (entry_matches(entry, val)) return (uint32_t)entry;
174 		entry++;
175 	}
176 
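	/* Emit a new trampoline:
	 *   lis   r12, val@ha
	 *   addi  r12, r12, val@l
	 *   mtctr r12
	 *   bctr
	 * i.e. load the full 32-bit target into r12, then branch via CTR.
	 */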
177 	entry->jump[0] = PPC_RAW_LIS(_R12, PPC_HA(val));
178 	entry->jump[1] = PPC_RAW_ADDI(_R12, _R12, PPC_LO(val));
179 	entry->jump[2] = PPC_RAW_MTCTR(_R12);
180 	entry->jump[3] = PPC_RAW_BCTR();
181 
182 	pr_debug("Initialized plt for 0x%x at %p\n", val, entry);
183 	return (uint32_t)entry;
184 }
185 
186 int apply_relocate_add(Elf32_Shdr *sechdrs,
187 		       const char *strtab,
188 		       unsigned int symindex,
189 		       unsigned int relsec,
190 		       struct module *module)
191 {
192 	unsigned int i;
193 	Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
194 	Elf32_Sym *sym;
195 	uint32_t *location;
196 	uint32_t value;
197 
198 	pr_debug("Applying ADD relocate section %u to %u\n", relsec,
199 	       sechdrs[relsec].sh_info);
200 	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
201 		/* This is where to make the change */
202 		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
203 			+ rela[i].r_offset;
204 		/* This is the symbol it is referring to.  Note that all
205 		   undefined symbols have been resolved.  */
206 		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
207 			+ ELF32_R_SYM(rela[i].r_info);
208 		/* `Everything is relative'. */
209 		value = sym->st_value + rela[i].r_addend;
210 
211 		switch (ELF32_R_TYPE(rela[i].r_info)) {
212 		case R_PPC_ADDR32:
213 			/* Simply set it */
214 			*(uint32_t *)location = value;
215 			break;
216 
217 		case R_PPC_ADDR16_LO:
218 			/* Low half of the symbol */
219 			*(uint16_t *)location = value;
220 			break;
221 
222 		case R_PPC_ADDR16_HI:
223 			/* Higher half of the symbol */
224 			*(uint16_t *)location = (value >> 16);
225 			break;
226 
227 		case R_PPC_ADDR16_HA:
228 			/* High 16 bits, adjusted for the sign of the low half.
229 			   PPC ELF ABI: (((x >> 16) + ((x & 0x8000) ? 1 : 0))) & 0xFFFF.
230 			   This is the same, only sane.
231 			 */
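			/* For example, value 0x1234ffff gives
			 * (0x1234ffff + 0x8000) >> 16 = 0x1235; adding the
			 * sign-extended low half 0xffff (-1) to 0x12350000
			 * reconstructs 0x1234ffff.
			 */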
232 			*(uint16_t *)location = (value + 0x8000) >> 16;
233 			break;
234 
235 		case R_PPC_REL24:
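			/* A relative branch encodes a 24-bit word offset
			 * (shifted left by 2 and sign-extended), so it can
			 * only reach targets within +/- 32 MB. Anything
			 * further away must bounce through a PLT trampoline.
			 */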
236 			if ((int)(value - (uint32_t)location) < -0x02000000
237 			    || (int)(value - (uint32_t)location) >= 0x02000000)
238 				value = do_plt_call(location, value,
239 						    sechdrs, module);
240 
241 			/* Only replace the 24-bit branch offset field (bits 2 through 25) */
242 			pr_debug("REL24 value = %08X. location = %08X\n",
243 			       value, (uint32_t)location);
244 			pr_debug("Location before: %08X.\n",
245 			       *(uint32_t *)location);
246 			*(uint32_t *)location
247 				= (*(uint32_t *)location & ~0x03fffffc)
248 				| ((value - (uint32_t)location)
249 				   & 0x03fffffc);
250 			pr_debug("Location after: %08X.\n",
251 			       *(uint32_t *)location);
252 			pr_debug("ie. jump to %08X+%08X = %08X\n",
253 			       *(uint32_t *)location & 0x03fffffc,
254 			       (uint32_t)location,
255 			       (*(uint32_t *)location & 0x03fffffc)
256 			       + (uint32_t)location);
257 			break;
258 
259 		case R_PPC_REL32:
260 			/* 32-bit relative jump. */
261 			*(uint32_t *)location = value - (uint32_t)location;
262 			break;
263 
264 		default:
265 			pr_err("%s: unknown ADD relocation: %u\n",
266 			       module->name,
267 			       ELF32_R_TYPE(rela[i].r_info));
268 			return -ENOEXEC;
269 		}
270 	}
271 
272 	return 0;
273 }
274 
275 #ifdef CONFIG_DYNAMIC_FTRACE
276 int module_trampoline_target(struct module *mod, unsigned long addr,
277 			     unsigned long *target)
278 {
279 	unsigned int jmp[4];
280 
281 	/* Find where the trampoline jumps to */
282 	if (copy_from_kernel_nofault(jmp, (void *)addr, sizeof(jmp)))
283 		return -EFAULT;
284 
285 	/* verify that this is what we expect it to be */
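	/* Masking with 0xffff0000 drops the 16-bit immediates so only the
	 * opcode and register fields of the lis/addi pair are checked.
	 */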
286 	if ((jmp[0] & 0xffff0000) != PPC_RAW_LIS(_R12, 0) ||
287 	    (jmp[1] & 0xffff0000) != PPC_RAW_ADDI(_R12, _R12, 0) ||
288 	    jmp[2] != PPC_RAW_MTCTR(_R12) ||
289 	    jmp[3] != PPC_RAW_BCTR())
290 		return -EINVAL;
291 
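	/* The lis immediate provides the high 16 bits and the addi
	 * immediate the low 16 bits of the target. addi sign-extends its
	 * operand at run time, so subtract 0x10000 here when bit 15 of the
	 * low half is set to get the same result.
	 */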
292 	addr = (jmp[1] & 0xffff) | ((jmp[0] & 0xffff) << 16);
293 	if (addr & 0x8000)
294 		addr -= 0x10000;
295 
296 	*target = addr;
297 
298 	return 0;
299 }
300 
301 int module_finalize_ftrace(struct module *module, const Elf_Shdr *sechdrs)
302 {
303 	module->arch.tramp = do_plt_call(module->core_layout.base,
304 					 (unsigned long)ftrace_caller,
305 					 sechdrs, module);
306 	if (!module->arch.tramp)
307 		return -ENOENT;
308 
309 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
310 	module->arch.tramp_regs = do_plt_call(module->core_layout.base,
311 					      (unsigned long)ftrace_regs_caller,
312 					      sechdrs, module);
313 	if (!module->arch.tramp_regs)
314 		return -ENOENT;
315 #endif
316 
317 	return 0;
318 }
319 #endif
320