// SPDX-License-Identifier: GPL-2.0-or-later
/*  Kernel module help for x86.
    Copyright (C) 2001 Rusty Russell.

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>
#include <linux/random.h>
#include <linux/memory.h>

#include <asm/text-patching.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/unwind.h>

#if 0
#define DEBUGP(fmt, ...)				\
	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
/*
 * The dead "if (0)" branch keeps printk()'s format/argument checking
 * while letting the compiler discard the call entirely.
 */
#define DEBUGP(fmt, ...)				\
do {							\
	if (0)						\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
} while (0)
#endif

#ifdef CONFIG_RANDOMIZE_BASE
static unsigned long module_load_offset;

/* Mutex protects the module_load_offset. */
static DEFINE_MUTEX(module_kaslr_mutex);

static unsigned long int get_module_load_offset(void)
{
	if (kaslr_enabled()) {
		mutex_lock(&module_kaslr_mutex);
		/*
		 * Calculate the module_load_offset the first time this
		 * code is called. Once calculated it stays the same until
		 * reboot.
		 */
		if (module_load_offset == 0)
			module_load_offset =
				(get_random_int() % 1024 + 1) * PAGE_SIZE;
		mutex_unlock(&module_kaslr_mutex);
	}
	return module_load_offset;
}
#else
static unsigned long int get_module_load_offset(void)
{
	return 0;
}
#endif

void *module_alloc(unsigned long size)
{
	void *p;

	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;

	p = __vmalloc_node_range(size, MODULE_ALIGN,
				 MODULES_VADDR + get_module_load_offset(),
				 MODULES_END, GFP_KERNEL,
				 PAGE_KERNEL, 0, NUMA_NO_NODE,
				 __builtin_return_address(0));
	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

#ifdef CONFIG_X86_32
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_386_32:
			/* We add the value into the location given */
			*location += sym->st_value;
			break;
		case R_386_PC32:
			/* Add the value, subtract its position */
			*location += sym->st_value - (uint32_t)location;
			break;
		default:
			pr_err("%s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
#else /*X86_64*/
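/*
 * Note: module_alloc() above places modules inside
 * [MODULES_VADDR, MODULES_END), i.e. within the same +/-2GB window as
 * the kernel image. The sign-extended 32-bit and PC-relative
 * relocations handled below depend on that, which is also why the
 * overflow message points at -mcmodel=kernel.
 */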
static int __apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me,
		   void *(*write)(void *dest, const void *src, size_t len))
{
	unsigned int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	void *loc;
	u64 val;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
		       (int)ELF64_R_TYPE(rel[i].r_info),
		       sym->st_value, rel[i].r_addend, (u64)loc);

		val = sym->st_value + rel[i].r_addend;

		/*
		 * The target location must still hold zero; a nonzero
		 * value means the relocation has already been applied,
		 * see the invalid_relocation message below.
		 */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 8);
			break;
		case R_X86_64_32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 4);
			if (val != *(u32 *)loc)
				goto overflow;
			break;
		case R_X86_64_32S:
			if (*(s32 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 4);
			if ((s64)val != *(s32 *)loc)
				goto overflow;
			break;
		case R_X86_64_PC32:
		case R_X86_64_PLT32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			write(loc, &val, 4);
#if 0
			if ((s64)val != *(s32 *)loc)
				goto overflow;
#endif
			break;
		case R_X86_64_PC64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			write(loc, &val, 8);
			break;
		default:
			pr_err("%s: Unknown rela relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

invalid_relocation:
	pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
	return -ENOEXEC;

overflow:
	pr_err("overflow in relocation type %d val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), val);
	pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
	       me->name);
	return -ENOEXEC;
}
int apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	int ret;
	bool early = me->state == MODULE_STATE_UNFORMED;
	void *(*write)(void *, const void *, size_t) = memcpy;

	if (!early) {
		write = text_poke;
		mutex_lock(&text_mutex);
	}

	ret = __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
				   write);

	if (!early) {
		text_poke_sync();
		mutex_unlock(&text_mutex);
	}

	return ret;
}

#endif

/*
 * Run the arch-specific post-relocation fixups: apply instruction
 * alternatives, register .smp_locks for SMP lock prefix patching, patch
 * paravirt call sites, NOP out jump labels and register the module's
 * ORC unwind tables.
 */
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
		*para = NULL, *orc = NULL, *orc_ip = NULL;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		if (!strcmp(".text", secstrings + s->sh_name))
			text = s;
		if (!strcmp(".altinstructions", secstrings + s->sh_name))
			alt = s;
		if (!strcmp(".smp_locks", secstrings + s->sh_name))
			locks = s;
		if (!strcmp(".parainstructions", secstrings + s->sh_name))
			para = s;
		if (!strcmp(".orc_unwind", secstrings + s->sh_name))
			orc = s;
		if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
			orc_ip = s;
	}

	if (alt) {
		/* patch .altinstructions */
		void *aseg = (void *)alt->sh_addr;
		apply_alternatives(aseg, aseg + alt->sh_size);
	}
	if (locks && text) {
		void *lseg = (void *)locks->sh_addr;
		void *tseg = (void *)text->sh_addr;
		alternatives_smp_module_add(me, me->name,
					    lseg, lseg + locks->sh_size,
					    tseg, tseg + text->sh_size);
	}

	if (para) {
		void *pseg = (void *)para->sh_addr;
		apply_paravirt(pseg, pseg + para->sh_size);
	}

	/* make jump label nops */
	jump_label_apply_nops(me);

	if (orc && orc_ip)
		unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
				   (void *)orc->sh_addr, orc->sh_size);

	return 0;
}

/* Undo alternatives_smp_module_add() when the module is unloaded. */
void module_arch_cleanup(struct module *mod)
{
	alternatives_smp_module_del(mod);
}