// SPDX-License-Identifier: GPL-2.0-or-later
/*  Kernel module help for x86.
    Copyright (C) 2001 Rusty Russell.

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>
#include <linux/random.h>
#include <linux/memory.h>

#include <asm/text-patching.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/unwind.h>

#if 0
#define DEBUGP(fmt, ...)				\
	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define DEBUGP(fmt, ...)				\
do {							\
	if (0)						\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
} while (0)
#endif

#ifdef CONFIG_RANDOMIZE_BASE
static unsigned long module_load_offset;

/* Mutex protects the module_load_offset. */
static DEFINE_MUTEX(module_kaslr_mutex);

static unsigned long int get_module_load_offset(void)
{
	if (kaslr_enabled()) {
		mutex_lock(&module_kaslr_mutex);
		/*
		 * Calculate the module_load_offset the first time this
		 * code is called. Once calculated it stays the same until
		 * reboot.
		 */
		if (module_load_offset == 0)
			module_load_offset =
				(get_random_int() % 1024 + 1) * PAGE_SIZE;
		mutex_unlock(&module_kaslr_mutex);
	}
	return module_load_offset;
}
#else
static unsigned long int get_module_load_offset(void)
{
	return 0;
}
#endif

void *module_alloc(unsigned long size)
{
	void *p;

	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;

	p = __vmalloc_node_range(size, MODULE_ALIGN,
				 MODULES_VADDR + get_module_load_offset(),
				 MODULES_END, GFP_KERNEL,
				 PAGE_KERNEL, 0, NUMA_NO_NODE,
				 __builtin_return_address(0));
	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

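/*
 * Relocation handling.  32-bit x86 modules carry REL entries, where the
 * addend is the value already stored at the target location (hence the
 * "*location +=" updates below); 64-bit modules carry RELA entries with
 * an explicit addend.  In both cases R_*_PLT32 is resolved exactly like
 * the corresponding PC32 type: kernel modules have no procedure linkage
 * table, so the branch is bound directly to the symbol address.
 */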
#ifdef CONFIG_X86_32
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_386_32:
			/* We add the value into the location given */
			*location += sym->st_value;
			break;
		case R_386_PC32:
		case R_386_PLT32:
			/* Add the value, subtract its position */
			*location += sym->st_value - (uint32_t)location;
			break;
		default:
			pr_err("%s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
#else /*X86_64*/
static int __apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me,
		   void *(*write)(void *dest, const void *src, size_t len))
{
	unsigned int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	void *loc;
	u64 val;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
		       (int)ELF64_R_TYPE(rel[i].r_info),
		       sym->st_value, rel[i].r_addend, (u64)loc);

		val = sym->st_value + rel[i].r_addend;

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 8);
			break;
		case R_X86_64_32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 4);
			if (val != *(u32 *)loc)
				goto overflow;
			break;
		case R_X86_64_32S:
			if (*(s32 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 4);
			if ((s64)val != *(s32 *)loc)
				goto overflow;
			break;
		case R_X86_64_PC32:
		case R_X86_64_PLT32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			write(loc, &val, 4);
#if 0
			if ((s64)val != *(s32 *)loc)
				goto overflow;
#endif
			break;
		case R_X86_64_PC64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			write(loc, &val, 8);
			break;
		default:
			pr_err("%s: Unknown rela relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

invalid_relocation:
	pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
	return -ENOEXEC;

overflow:
	pr_err("overflow in relocation type %d val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), val);
	pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
	       me->name);
	return -ENOEXEC;
}

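/*
 * Apply RELA relocations, choosing the write primitive by module state.
 * While the module is still MODULE_STATE_UNFORMED nothing can execute
 * it yet, so a plain memcpy() is safe.  Relocations applied later (e.g.
 * by livepatch) target text that is already mapped read-only and may be
 * executing, so those writes must go through text_poke() under
 * text_mutex, followed by text_poke_sync() to serialize all CPUs.  For
 * the PC-relative types the stored value is S + A - P (symbol value
 * plus addend minus the address being patched), e.g. the rel32 field
 * of a CALL instruction.
 */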
int apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	int ret;
	bool early = me->state == MODULE_STATE_UNFORMED;
	void *(*write)(void *, const void *, size_t) = memcpy;

	if (!early) {
		write = text_poke;
		mutex_lock(&text_mutex);
	}

	ret = __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
				   write);

	if (!early) {
		text_poke_sync();
		mutex_unlock(&text_mutex);
	}

	return ret;
}

#endif

/*
 * Finish the arch-specific parts of module loading: apply alternatives
 * and paravirt patching, register the module's SMP lock prefixes, turn
 * jump label entries into nops, and hand the ORC tables to the unwinder.
 */
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
		*para = NULL, *orc = NULL, *orc_ip = NULL;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		if (!strcmp(".text", secstrings + s->sh_name))
			text = s;
		if (!strcmp(".altinstructions", secstrings + s->sh_name))
			alt = s;
		if (!strcmp(".smp_locks", secstrings + s->sh_name))
			locks = s;
		if (!strcmp(".parainstructions", secstrings + s->sh_name))
			para = s;
		if (!strcmp(".orc_unwind", secstrings + s->sh_name))
			orc = s;
		if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
			orc_ip = s;
	}

	if (alt) {
		/* patch .altinstructions */
		void *aseg = (void *)alt->sh_addr;
		apply_alternatives(aseg, aseg + alt->sh_size);
	}
	if (locks && text) {
		void *lseg = (void *)locks->sh_addr;
		void *tseg = (void *)text->sh_addr;
		alternatives_smp_module_add(me, me->name,
					    lseg, lseg + locks->sh_size,
					    tseg, tseg + text->sh_size);
	}

	if (para) {
		void *pseg = (void *)para->sh_addr;
		apply_paravirt(pseg, pseg + para->sh_size);
	}

	/* make jump label nops */
	jump_label_apply_nops(me);

	if (orc && orc_ip)
		unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
				   (void *)orc->sh_addr, orc->sh_size);

	return 0;
}

void module_arch_cleanup(struct module *mod)
{
	alternatives_smp_module_del(mod);
}