// SPDX-License-Identifier: GPL-2.0-or-later
/*  Kernel module help for x86.
    Copyright (C) 2001 Rusty Russell.

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>
#include <linux/random.h>
#include <linux/memory.h>

#include <asm/text-patching.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/unwind.h>

#if 0
#define DEBUGP(fmt, ...)				\
	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define DEBUGP(fmt, ...)				\
do {							\
	if (0)						\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
} while (0)
#endif

#ifdef CONFIG_RANDOMIZE_BASE
static unsigned long module_load_offset;

/* Mutex protects the module_load_offset. */
static DEFINE_MUTEX(module_kaslr_mutex);

static unsigned long int get_module_load_offset(void)
{
	if (kaslr_enabled()) {
		mutex_lock(&module_kaslr_mutex);
		/*
		 * Calculate the module_load_offset the first time this
		 * code is called. Once calculated it stays the same until
		 * reboot.
		 */
		if (module_load_offset == 0)
			module_load_offset =
				get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
		mutex_unlock(&module_kaslr_mutex);
	}
	return module_load_offset;
}
#else
static unsigned long int get_module_load_offset(void)
{
	return 0;
}
#endif

void *module_alloc(unsigned long size)
{
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;

	p = __vmalloc_node_range(size, MODULE_ALIGN,
				 MODULES_VADDR + get_module_load_offset(),
				 MODULES_END, gfp_mask, PAGE_KERNEL,
				 VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK,
				 NUMA_NO_NODE, __builtin_return_address(0));

	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

#ifdef CONFIG_X86_32
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.
		 */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_386_32:
			/* We add the value into the location given */
			*location += sym->st_value;
			break;
		case R_386_PC32:
		case R_386_PLT32:
			/* Add the value, subtract its position */
			*location += sym->st_value - (uint32_t)location;
			break;
		default:
			pr_err("%s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
#else /*X86_64*/
static int __apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me,
		   void *(*write)(void *dest, const void *src, size_t len))
{
	unsigned int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	void *loc;
	u64 val;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
		       (int)ELF64_R_TYPE(rel[i].r_info),
		       sym->st_value, rel[i].r_addend, (u64)loc);

		val = sym->st_value + rel[i].r_addend;

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 8);
			break;
		case R_X86_64_32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 4);
			if (val != *(u32 *)loc)
				goto overflow;
			break;
		case R_X86_64_32S:
			if (*(s32 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 4);
			if ((s64)val != *(s32 *)loc)
				goto overflow;
			break;
		case R_X86_64_PC32:
		case R_X86_64_PLT32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			write(loc, &val, 4);
#if 0
			if ((s64)val != *(s32 *)loc)
				goto overflow;
#endif
			break;
		case R_X86_64_PC64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			write(loc, &val, 8);
			break;
		default:
			pr_err("%s: Unknown rela relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

invalid_relocation:
	pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
	return -ENOEXEC;

overflow:
	pr_err("overflow in relocation type %d val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), val);
	pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
	       me->name);
	return -ENOEXEC;
}

int apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	int ret;
	bool early = me->state == MODULE_STATE_UNFORMED;
	void *(*write)(void *, const void *, size_t) = memcpy;

	if (!early) {
		write = text_poke;
		mutex_lock(&text_mutex);
	}

	ret = __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
				   write);

	if (!early) {
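		/*
		 * The module text is already live, so the relocations above
		 * were written with text_poke(); sync all CPUs before the
		 * patched code can execute and then drop text_mutex.
		 */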
		text_poke_sync();
		mutex_unlock(&text_mutex);
	}

	return ret;
}

#endif

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *alt = NULL, *locks = NULL,
		*para = NULL, *orc = NULL, *orc_ip = NULL,
		*retpolines = NULL, *returns = NULL, *ibt_endbr = NULL,
		*calls = NULL, *cfi = NULL;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		if (!strcmp(".altinstructions", secstrings + s->sh_name))
			alt = s;
		if (!strcmp(".smp_locks", secstrings + s->sh_name))
			locks = s;
		if (!strcmp(".parainstructions", secstrings + s->sh_name))
			para = s;
		if (!strcmp(".orc_unwind", secstrings + s->sh_name))
			orc = s;
		if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
			orc_ip = s;
		if (!strcmp(".retpoline_sites", secstrings + s->sh_name))
			retpolines = s;
		if (!strcmp(".return_sites", secstrings + s->sh_name))
			returns = s;
		if (!strcmp(".call_sites", secstrings + s->sh_name))
			calls = s;
		if (!strcmp(".cfi_sites", secstrings + s->sh_name))
			cfi = s;
		if (!strcmp(".ibt_endbr_seal", secstrings + s->sh_name))
			ibt_endbr = s;
	}

	/*
	 * See alternative_instructions() for the ordering rules between the
	 * various patching types.
	 */
	if (para) {
		void *pseg = (void *)para->sh_addr;
		apply_paravirt(pseg, pseg + para->sh_size);
	}
	if (retpolines || cfi) {
		void *rseg = NULL, *cseg = NULL;
		unsigned int rsize = 0, csize = 0;

		if (retpolines) {
			rseg = (void *)retpolines->sh_addr;
			rsize = retpolines->sh_size;
		}

		if (cfi) {
			cseg = (void *)cfi->sh_addr;
			csize = cfi->sh_size;
		}

		apply_fineibt(rseg, rseg + rsize, cseg, cseg + csize);
	}
	if (retpolines) {
		void *rseg = (void *)retpolines->sh_addr;
		apply_retpolines(rseg, rseg + retpolines->sh_size);
	}
	if (returns) {
		void *rseg = (void *)returns->sh_addr;
		apply_returns(rseg, rseg + returns->sh_size);
	}
	if (alt) {
		/* patch .altinstructions */
		void *aseg = (void *)alt->sh_addr;
		apply_alternatives(aseg, aseg + alt->sh_size);
	}
	if (calls || para) {
		struct callthunk_sites cs = {};

		if (calls) {
			cs.call_start = (void *)calls->sh_addr;
			cs.call_end = (void *)calls->sh_addr + calls->sh_size;
		}

		if (para) {
			cs.pv_start = (void *)para->sh_addr;
			cs.pv_end = (void *)para->sh_addr + para->sh_size;
		}

		callthunks_patch_module_calls(&cs, me);
	}
	if (ibt_endbr) {
		void *iseg = (void *)ibt_endbr->sh_addr;
		apply_ibt_endbr(iseg, iseg + ibt_endbr->sh_size);
	}
	if (locks) {
		void *lseg = (void *)locks->sh_addr;
		void *text = me->core_layout.base;
		void *text_end = text + me->core_layout.text_size;
		alternatives_smp_module_add(me, me->name,
					    lseg, lseg + locks->sh_size,
					    text, text_end);
	}

	if (orc && orc_ip)
		unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
				   (void *)orc->sh_addr, orc->sh_size);

	return 0;
}

void module_arch_cleanup(struct module *mod)
{
	alternatives_smp_module_del(mod);
}