xref: /openbmc/linux/arch/x86/kernel/module.c (revision 9221b289)
// SPDX-License-Identifier: GPL-2.0-or-later
/*  Kernel module help for x86.
    Copyright (C) 2001 Rusty Russell.

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>
#include <linux/random.h>
#include <linux/memory.h>

#include <asm/text-patching.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/unwind.h>

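/*
 * DEBUGP() is compiled out by default.  The "if (0)" form in the disabled
 * branch (rather than an empty macro) keeps the printk() arguments visible
 * to the compiler, so format strings are still type-checked even when no
 * message is ever emitted.
 */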
#if 0
#define DEBUGP(fmt, ...)				\
	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define DEBUGP(fmt, ...)				\
do {							\
	if (0)						\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
} while (0)
#endif

#ifdef CONFIG_RANDOMIZE_BASE
static unsigned long module_load_offset;

/* Mutex protects the module_load_offset. */
static DEFINE_MUTEX(module_kaslr_mutex);

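/*
 * With CONFIG_RANDOMIZE_BASE, the first caller picks a random, page-aligned
 * offset of 1 to 1024 pages (4 KiB .. 4 MiB with 4 KiB pages) that is added
 * to MODULES_VADDR for every subsequent module_alloc() call, so all modules
 * share one randomized base for the lifetime of the boot.
 */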
static unsigned long int get_module_load_offset(void)
{
	if (kaslr_enabled()) {
		mutex_lock(&module_kaslr_mutex);
		/*
		 * Calculate the module_load_offset the first time this
		 * code is called. Once calculated it stays the same until
		 * reboot.
		 */
		if (module_load_offset == 0)
			module_load_offset =
				(get_random_int() % 1024 + 1) * PAGE_SIZE;
		mutex_unlock(&module_kaslr_mutex);
	}
	return module_load_offset;
}
#else
static unsigned long int get_module_load_offset(void)
{
	return 0;
}
#endif

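/*
 * Allocate module memory from the dedicated [MODULES_VADDR, MODULES_END)
 * mapping area, shifted by the KASLR offset above.  Keeping modules inside
 * this window keeps them within RIP-relative/32-bit-displacement reach of
 * the core kernel text, which the PC-relative relocations below rely on.
 * When KASAN is enabled a shadow mapping is set up as well; if that fails
 * the allocation is undone and NULL is returned.
 */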
void *module_alloc(unsigned long size)
{
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;

	p = __vmalloc_node_range(size, MODULE_ALIGN,
				    MODULES_VADDR + get_module_load_offset(),
				    MODULES_END, gfp_mask,
				    PAGE_KERNEL, VM_DEFER_KMEMLEAK, NUMA_NO_NODE,
				    __builtin_return_address(0));
	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

#ifdef CONFIG_X86_32
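/*
 * 32-bit (REL) relocations carry their addend in the bytes being relocated,
 * so the handlers below add to *location rather than overwrite it:
 *
 *   R_386_32:                *location = S + A      (absolute address)
 *   R_386_PC32/R_386_PLT32:  *location = S + A - P  (PC-relative)
 *
 * where S is the symbol value, A the implicit addend already stored at the
 * location, and P the address of the location itself.
 */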
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_386_32:
			/* We add the value into the location given */
			*location += sym->st_value;
			break;
		case R_386_PC32:
		case R_386_PLT32:
			/* Add the value, subtract its position */
			*location += sym->st_value - (uint32_t)location;
			break;
		default:
			pr_err("%s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
#else /*X86_64*/
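/*
 * x86-64 uses RELA relocations: the addend comes from the relocation entry
 * itself, so the target bytes are expected to still be zero.  A non-zero
 * target is treated as an error (presumably the relocation was already
 * applied once), hence the goto invalid_relocation checks below.  The
 * write() callback abstracts how the bytes land in memory: plain memcpy()
 * during the initial module load, text_poke() when patching text that is
 * already live and mapped read-only.
 *
 * For the PC-relative cases, e.g. a "call foo" whose rel32 field sits at P
 * with addend A = -4, the stored value S + A - P equals foo - (P + 4), i.e.
 * the displacement from the end of the instruction.
 */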
static int __apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me,
		   void *(*write)(void *dest, const void *src, size_t len))
{
	unsigned int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	void *loc;
	u64 val;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
		       (int)ELF64_R_TYPE(rel[i].r_info),
		       sym->st_value, rel[i].r_addend, (u64)loc);

		val = sym->st_value + rel[i].r_addend;

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 8);
			break;
		case R_X86_64_32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 4);
			if (val != *(u32 *)loc)
				goto overflow;
			break;
		case R_X86_64_32S:
			if (*(s32 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 4);
			if ((s64)val != *(s32 *)loc)
				goto overflow;
			break;
		case R_X86_64_PC32:
		case R_X86_64_PLT32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			write(loc, &val, 4);
#if 0
			if ((s64)val != *(s32 *)loc)
				goto overflow;
#endif
			break;
		case R_X86_64_PC64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			write(loc, &val, 8);
			break;
		default:
			pr_err("%s: Unknown rela relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

invalid_relocation:
	pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
	return -ENOEXEC;

overflow:
	pr_err("overflow in relocation type %d val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), val);
	pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
	       me->name);
	return -ENOEXEC;
}

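/*
 * During the initial load (MODULE_STATE_UNFORMED) the module text is still
 * writable and not yet executed by anyone, so relocations can be written
 * with a plain memcpy().  Late relocations (e.g. applied by livepatch after
 * the module is live) must instead go through text_poke() under text_mutex,
 * followed by text_poke_sync() so every CPU sees the new instruction bytes.
 */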
int apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	int ret;
	bool early = me->state == MODULE_STATE_UNFORMED;
	void *(*write)(void *, const void *, size_t) = memcpy;

	if (!early) {
		write = text_poke;
		mutex_lock(&text_mutex);
	}

	ret = __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
				   write);

	if (!early) {
		text_poke_sync();
		mutex_unlock(&text_mutex);
	}

	return ret;
}

#endif

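/*
 * module_finalize() runs once the module's sections are at their final
 * addresses.  It looks up the x86-specific sections by name and applies the
 * same patching passes that alternative_instructions() performs for the
 * core kernel: paravirt patching, retpoline and return-thunk rewriting,
 * alternatives, IBT ENDBR sealing and SMP lock-prefix fixups, in that
 * order.  Finally the ORC unwind tables are registered so the unwinder can
 * walk through module code.
 */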
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
		*para = NULL, *orc = NULL, *orc_ip = NULL,
		*retpolines = NULL, *returns = NULL, *ibt_endbr = NULL;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		if (!strcmp(".text", secstrings + s->sh_name))
			text = s;
		if (!strcmp(".altinstructions", secstrings + s->sh_name))
			alt = s;
		if (!strcmp(".smp_locks", secstrings + s->sh_name))
			locks = s;
		if (!strcmp(".parainstructions", secstrings + s->sh_name))
			para = s;
		if (!strcmp(".orc_unwind", secstrings + s->sh_name))
			orc = s;
		if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
			orc_ip = s;
		if (!strcmp(".retpoline_sites", secstrings + s->sh_name))
			retpolines = s;
		if (!strcmp(".return_sites", secstrings + s->sh_name))
			returns = s;
		if (!strcmp(".ibt_endbr_seal", secstrings + s->sh_name))
			ibt_endbr = s;
	}

	/*
	 * See alternative_instructions() for the ordering rules between the
	 * various patching types.
	 */
	if (para) {
		void *pseg = (void *)para->sh_addr;
		apply_paravirt(pseg, pseg + para->sh_size);
	}
	if (retpolines) {
		void *rseg = (void *)retpolines->sh_addr;
		apply_retpolines(rseg, rseg + retpolines->sh_size);
	}
	if (returns) {
		void *rseg = (void *)returns->sh_addr;
		apply_returns(rseg, rseg + returns->sh_size);
	}
	if (alt) {
		/* patch .altinstructions */
		void *aseg = (void *)alt->sh_addr;
		apply_alternatives(aseg, aseg + alt->sh_size);
	}
	if (ibt_endbr) {
		void *iseg = (void *)ibt_endbr->sh_addr;
		apply_ibt_endbr(iseg, iseg + ibt_endbr->sh_size);
	}
	if (locks && text) {
		void *lseg = (void *)locks->sh_addr;
		void *tseg = (void *)text->sh_addr;
		alternatives_smp_module_add(me, me->name,
					    lseg, lseg + locks->sh_size,
					    tseg, tseg + text->sh_size);
	}

	if (orc && orc_ip)
		unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
				   (void *)orc->sh_addr, orc->sh_size);

	return 0;
}

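/*
 * Undo the SMP-alternatives registration done in module_finalize() when the
 * module is unloaded.
 */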
void module_arch_cleanup(struct module *mod)
{
	alternatives_smp_module_del(mod);
}