// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel module help for x86.
 * Copyright (C) 2001 Rusty Russell.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>
#include <linux/random.h>

#include <asm/text-patching.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/unwind.h>

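/* Flip "#if 0" to "#if 1" to enable DEBUGP() relocation tracing. */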
#if 0
#define DEBUGP(fmt, ...)				\
	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define DEBUGP(fmt, ...)				\
do {							\
	if (0)						\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
} while (0)
#endif

#ifdef CONFIG_RANDOMIZE_BASE
static unsigned long module_load_offset;

/* Mutex protects the module_load_offset. */
static DEFINE_MUTEX(module_kaslr_mutex);

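/*
 * With KASLR enabled, pick a random offset of 1..1024 whole pages for
 * module allocations. The offset is chosen once and reused until reboot;
 * without KASLR it stays 0.
 */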
static unsigned long get_module_load_offset(void)
{
	if (kaslr_enabled()) {
		mutex_lock(&module_kaslr_mutex);
		/*
		 * Calculate the module_load_offset the first time this
		 * code is called. Once calculated it stays the same until
		 * reboot.
		 */
		if (module_load_offset == 0)
			module_load_offset =
				(get_random_int() % 1024 + 1) * PAGE_SIZE;
		mutex_unlock(&module_kaslr_mutex);
	}
	return module_load_offset;
}
#else
static unsigned long get_module_load_offset(void)
{
	return 0;
}
#endif

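/*
 * Allocate @size bytes in the module mapping area, starting at a
 * KASLR-randomized offset into [MODULES_VADDR, MODULES_END). Returns
 * NULL if the page-aligned size exceeds MODULES_LEN or if the matching
 * KASAN shadow cannot be set up.
 */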
void *module_alloc(unsigned long size)
{
	void *p;

	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;

	p = __vmalloc_node_range(size, MODULE_ALIGN,
				 MODULES_VADDR + get_module_load_offset(),
				 MODULES_END, GFP_KERNEL,
				 PAGE_KERNEL, 0, NUMA_NO_NODE,
				 __builtin_return_address(0));
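	/* Set up the KASAN shadow for the new region; back out on failure. */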
	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

#ifdef CONFIG_X86_32
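/*
 * Apply REL relocations (implicit addend read from the target location)
 * for a 32-bit module: R_386_32 adds the absolute symbol value,
 * R_386_PC32 the PC-relative one.
 */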
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/*
		 * This is the symbol it is referring to. Note that all
		 * undefined symbols have been resolved.
		 */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_386_32:
			/* We add the value into the location given */
			*location += sym->st_value;
			break;
		case R_386_PC32:
			/* Add the value, subtract its position */
			*location += sym->st_value - (uint32_t)location;
			break;
		default:
			pr_err("%s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
#else /* X86_64 */
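/*
 * Apply RELA relocations (explicit addend in the entry) for a 64-bit
 * module. Each target location must still be zero: a nonzero value
 * means the relocation was already applied (or the section is corrupt)
 * and is rejected as invalid.
 */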
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	void *loc;
	u64 val;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/*
		 * This is the symbol it is referring to. Note that all
		 * undefined symbols have been resolved.
		 */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
		       (int)ELF64_R_TYPE(rel[i].r_info),
		       sym->st_value, rel[i].r_addend, (u64)loc);

		val = sym->st_value + rel[i].r_addend;

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			*(u64 *)loc = val;
			break;
		case R_X86_64_32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			*(u32 *)loc = val;
			if (val != *(u32 *)loc)
				goto overflow;
			break;
		case R_X86_64_32S:
			if (*(s32 *)loc != 0)
				goto invalid_relocation;
			*(s32 *)loc = val;
			if ((s64)val != *(s32 *)loc)
				goto overflow;
			break;
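		/*
		 * Modules are linked without a PLT, so a PLT32 reference
		 * resolves straight to its target and can be treated
		 * exactly like PC32.
		 */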
		case R_X86_64_PC32:
		case R_X86_64_PLT32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			*(u32 *)loc = val;
#if 0
			if ((s64)val != *(s32 *)loc)
				goto overflow;
#endif
			break;
		case R_X86_64_PC64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			*(u64 *)loc = val;
			break;
		default:
			pr_err("%s: Unknown rela relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

invalid_relocation:
	pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
	return -ENOEXEC;

overflow:
	pr_err("overflow in relocation type %d val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), val);
	pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
	       me->name);
	return -ENOEXEC;
}
#endif

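/*
 * Post-relocation fixups: apply instruction alternatives, register the
 * SMP lock-prefix sites, patch paravirt call sites, turn jump-label
 * entries into NOPs and hand the ORC tables to the unwinder.
 */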
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
		*para = NULL, *orc = NULL, *orc_ip = NULL;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

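	/* Find the sections we may have to patch. */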
	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		if (!strcmp(".text", secstrings + s->sh_name))
			text = s;
		if (!strcmp(".altinstructions", secstrings + s->sh_name))
			alt = s;
		if (!strcmp(".smp_locks", secstrings + s->sh_name))
			locks = s;
		if (!strcmp(".parainstructions", secstrings + s->sh_name))
			para = s;
		if (!strcmp(".orc_unwind", secstrings + s->sh_name))
			orc = s;
		if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
			orc_ip = s;
	}

	if (alt) {
		/* patch .altinstructions */
		void *aseg = (void *)alt->sh_addr;
		apply_alternatives(aseg, aseg + alt->sh_size);
	}
	if (locks && text) {
		void *lseg = (void *)locks->sh_addr;
		void *tseg = (void *)text->sh_addr;
		alternatives_smp_module_add(me, me->name,
					    lseg, lseg + locks->sh_size,
					    tseg, tseg + text->sh_size);
	}

	if (para) {
		void *pseg = (void *)para->sh_addr;
		apply_paravirt(pseg, pseg + para->sh_size);
	}

	/* make jump label nops */
	jump_label_apply_nops(me);

	if (orc && orc_ip)
		unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
				   (void *)orc->sh_addr, orc->sh_size);

	return 0;
}

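/* Undo what module_finalize() registered: drop the SMP lock-prefix sites. */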
void module_arch_cleanup(struct module *mod)
{
	alternatives_smp_module_del(mod);
}