xref: /openbmc/linux/arch/x86/kernel/module.c (revision 5a244f48)
/*  Kernel module help for x86.
    Copyright (C) 2001 Rusty Russell.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>
#include <linux/random.h>

#include <asm/text-patching.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/unwind.h>

#if 0
#define DEBUGP(fmt, ...)				\
	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define DEBUGP(fmt, ...)				\
do {							\
	if (0)						\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
} while (0)
#endif

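/*
 * Note on the helpers below: with CONFIG_RANDOMIZE_BASE, every module
 * loaded during this boot shares a single random offset into the module
 * mapping area.  The offset is a multiple of PAGE_SIZE between 1 and
 * 1024 pages (4 KiB to 4 MiB with 4 KiB pages), computed once on first
 * use under module_kaslr_mutex and then reused until reboot.  Without
 * KASLR the helper simply returns 0.
 */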
#ifdef CONFIG_RANDOMIZE_BASE
static unsigned long module_load_offset;

/* Mutex protects the module_load_offset. */
static DEFINE_MUTEX(module_kaslr_mutex);

static unsigned long int get_module_load_offset(void)
{
	if (kaslr_enabled()) {
		mutex_lock(&module_kaslr_mutex);
		/*
		 * Calculate the module_load_offset the first time this
		 * code is called. Once calculated it stays the same until
		 * reboot.
		 */
		if (module_load_offset == 0)
			module_load_offset =
				(get_random_int() % 1024 + 1) * PAGE_SIZE;
		mutex_unlock(&module_kaslr_mutex);
	}
	return module_load_offset;
}
#else
static unsigned long int get_module_load_offset(void)
{
	return 0;
}
#endif

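/*
 * Allocate module memory from the [MODULES_VADDR + KASLR offset,
 * MODULES_END) range with executable page protections.  Requests whose
 * page-aligned size exceeds MODULES_LEN are rejected outright.  When
 * KASAN is built in, kasan_module_alloc() sets up shadow memory for the
 * region; if that fails the allocation is released and NULL is returned.
 */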
void *module_alloc(unsigned long size)
{
	void *p;

	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;

	p = __vmalloc_node_range(size, MODULE_ALIGN,
				 MODULES_VADDR + get_module_load_offset(),
				 MODULES_END, GFP_KERNEL,
				 PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
				 __builtin_return_address(0));
	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

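/*
 * Relocation handling.  On 32-bit x86 the sections use REL entries: the
 * addend A lives in the instruction stream at the patch site, so the
 * handler adds into the existing value (A + S for R_386_32, A + S - P
 * for R_386_PC32).  Worked example with hypothetical addresses: for an
 * R_386_PC32 reloc at P = 0xf8001000 against a symbol with
 * S = 0xf8002000 and an in-place addend of -4 (the usual bias for a
 * call/jmp rel32), the patched word becomes
 * 0xf8002000 - 0xf8001000 - 4 = 0xffc, which the CPU adds to the
 * address of the next instruction (0xf8001004) to reach the symbol.
 */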
#ifdef CONFIG_X86_32
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_386_32:
			/* We add the value into the location given */
			*location += sym->st_value;
			break;
		case R_386_PC32:
			/* Add the value, subtract its position */
			*location += sym->st_value - (uint32_t)location;
			break;
		default:
			pr_err("%s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
#else /*X86_64*/
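/*
 * On x86-64 the sections use RELA entries, so the addend is explicit in
 * the relocation record and val = S + A is computed up front.  The
 * truncation checks on R_X86_64_32 and R_X86_64_32S are what make
 * -mcmodel=kernel mandatory for modules: that model keeps kernel and
 * module addresses in the top 2 GiB of the address space, where a
 * sign-extended 32-bit value can still reach them.
 */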
int apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	void *loc;
	u64 val;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
		       (int)ELF64_R_TYPE(rel[i].r_info),
		       sym->st_value, rel[i].r_addend, (u64)loc);

		val = sym->st_value + rel[i].r_addend;

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			*(u64 *)loc = val;
			break;
		case R_X86_64_32:
			*(u32 *)loc = val;
			if (val != *(u32 *)loc)
				goto overflow;
			break;
		case R_X86_64_32S:
			*(s32 *)loc = val;
			if ((s64)val != *(s32 *)loc)
				goto overflow;
			break;
		case R_X86_64_PC32:
			val -= (u64)loc;
			*(u32 *)loc = val;
#if 0
			if ((s64)val != *(s32 *)loc)
				goto overflow;
#endif
			break;
		default:
			pr_err("%s: Unknown rela relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

overflow:
	pr_err("overflow in relocation type %d val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), val);
	pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
	       me->name);
	return -ENOEXEC;
}
#endif

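/*
 * Run the x86-specific fixups once the generic loader has applied all
 * relocations: patch alternatives from .altinstructions, register
 * .smp_locks so LOCK prefixes can be adjusted for SMP/UP, apply paravirt
 * patching from .parainstructions, turn jump-label entries into NOPs,
 * and hand the .orc_unwind/.orc_unwind_ip tables to the ORC unwinder.
 */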
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
		*para = NULL, *orc = NULL, *orc_ip = NULL;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		if (!strcmp(".text", secstrings + s->sh_name))
			text = s;
		if (!strcmp(".altinstructions", secstrings + s->sh_name))
			alt = s;
		if (!strcmp(".smp_locks", secstrings + s->sh_name))
			locks = s;
		if (!strcmp(".parainstructions", secstrings + s->sh_name))
			para = s;
		if (!strcmp(".orc_unwind", secstrings + s->sh_name))
			orc = s;
		if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
			orc_ip = s;
	}

	if (alt) {
		/* patch .altinstructions */
		void *aseg = (void *)alt->sh_addr;
		apply_alternatives(aseg, aseg + alt->sh_size);
	}
	if (locks && text) {
		void *lseg = (void *)locks->sh_addr;
		void *tseg = (void *)text->sh_addr;
		alternatives_smp_module_add(me, me->name,
					    lseg, lseg + locks->sh_size,
					    tseg, tseg + text->sh_size);
	}

	if (para) {
		void *pseg = (void *)para->sh_addr;
		apply_paravirt(pseg, pseg + para->sh_size);
	}

	/* make jump label nops */
	jump_label_apply_nops(me);

	if (orc && orc_ip)
		unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
				   (void *)orc->sh_addr, orc->sh_size);

	return 0;
}

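/* Unregister the .smp_locks ranges added above when the module is unloaded. */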
void module_arch_cleanup(struct module *mod)
{
	alternatives_smp_module_del(mod);
}