/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ftrace.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>

/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr.  This allows the switch
 * between function versions to happen instantaneously by updating the klp_ops
 * struct's func_stack list.  The winner is the klp_func at the top of the
 * func_stack (front of the list).
 *
 * @node:	node for the global klp_ops list
 * @func_stack:	list head for the stack of klp_func's (active func is on top)
 * @fops:	registered ftrace ops struct
 */
struct klp_ops {
	struct list_head node;
	struct list_head func_stack;
	struct ftrace_ops fops;
};
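/*
 * Illustrative example of the stacking described above: if patch A and
 * later patch B both replace function foo(), the klp_ops tied to foo's
 * old_addr ends up with func_stack = [B's klp_func, A's klp_func], and
 * the ftrace handler always redirects to the first (most recently
 * enabled) entry.
 */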
/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them.  References to any structure must be
 * obtained under mutex protection (except in klp_ftrace_handler(), which
 * uses RCU to ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);
static LIST_HEAD(klp_ops);

static struct kobject *klp_root_kobj;

static struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

static bool klp_is_object_loaded(struct klp_object *obj)
{
	return !obj->name || obj->mod;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here.  The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of klp_module_coming() and
	 * klp_module_going().  Note that the patch might still be needed
	 * before klp_module_going() is called.  Module functions can be
	 * called even in the GOING state until mod->exit() finishes.  This
	 * is especially important for patches that change the semantics of
	 * the patched functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

/* klp_mutex must be held by caller */
static bool klp_is_patch_registered(struct klp_patch *patch)
{
	struct klp_patch *mypatch;

	list_for_each_entry(mypatch, &klp_patches, list)
		if (mypatch == patch)
			return true;

	return false;
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found.  If sympos is 0, ensure symbol is
	 * unique; otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}

static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
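	/*
	 * Illustrative example of the symbol-name format parsed below:
	 * ".klp.sym.vmlinux.printk,0" -> object "vmlinux", symbol "printk",
	 * sympos 0 (i.e. the symbol is expected to be unique).
	 */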
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.objname.symname,sympos */
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     objname, symname, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}

static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
		 */
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name\n",
			       secname);
			ret = -EINVAL;
			break;
		}

		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			break;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			break;
	}

	return ret;
}

static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	if (WARN_ON_ONCE(!func))
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}
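/*
 * Note on the handler above: it runs with the full pt_regs saved by
 * FTRACE_OPS_FL_SAVE_REGS, and klp_arch_set_pc() rewrites the saved
 * instruction pointer so that execution resumes in new_func rather than
 * in the patched function.
 */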
/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif

static void klp_disable_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(func->state != KLP_ENABLED))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->state = KLP_DISABLED;
}

static int klp_enable_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->state != KLP_DISABLED))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}
	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->state = KLP_ENABLED;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

static void klp_disable_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->state == KLP_ENABLED)
			klp_disable_func(func);

	obj->state = KLP_DISABLED;
}

static int klp_enable_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->state != KLP_DISABLED))
		return -EINVAL;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_enable_func(func);
		if (ret) {
			klp_disable_object(obj);
			return ret;
		}
	}
	obj->state = KLP_ENABLED;

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->state == KLP_ENABLED)
		return -EBUSY;

	pr_notice("disabling patch '%s'\n", patch->mod->name);

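	/*
	 * Disable each enabled object: its funcs are popped from their
	 * klp_ops stacks, and the ftrace handler is unregistered for any
	 * old_addr no longer used by another patch.
	 */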
	klp_for_each_object(patch, obj) {
		if (obj->state == KLP_ENABLED)
			klp_disable_object(obj);
	}

	patch->state = KLP_DISABLED;

	return 0;
}

/**
 * klp_disable_patch() - disables a registered patch
 * @patch:	The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (patch->state == KLP_DISABLED) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_disable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (WARN_ON(patch->state != KLP_DISABLED))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    list_prev_entry(patch, list)->state == KLP_DISABLED)
		return -EBUSY;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_enable_object(obj);
		if (ret)
			goto unregister;
	}

	patch->state = KLP_ENABLED;

	return 0;

unregister:
	WARN_ON(__klp_disable_patch(patch));
	return ret;
}
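/*
 * Minimal usage sketch from a patch module (illustrative; the function
 * and variable names below are hypothetical, modeled on the livepatch
 * sample module):
 *
 *	static struct klp_func funcs[] = {
 *		{ .old_name = "meminfo_proc_show",
 *		  .new_func = livepatch_meminfo_proc_show, },
 *		{ }
 *	};
 *	static struct klp_object objs[] = {
 *		{ .name = NULL, .funcs = funcs, },	(NULL means vmlinux)
 *		{ }
 *	};
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	klp_register_patch(&patch);
 *	klp_enable_patch(&patch);
 */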
/**
 * klp_enable_patch() - enables a registered patch
 * @patch:	The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_enable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return -EINVAL;

	if (val != KLP_DISABLED && val != KLP_ENABLED)
		return -EINVAL;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (val == patch->state) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (val == KLP_ENABLED) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE - 1, "%d\n", patch->state);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
	/*
	 * Once we have a consistency model we'll need to module_put() the
	 * patch module here.  See klp_register_patch() for more details.
	 */
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};
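/*
 * Illustrative example of driving the sysfs interface above (the patch
 * directory is named after the patch module, here hypothetically
 * "livepatch_sample"):
 *
 *	echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 */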
/*
 * Free all functions' kobjects in the array up to some limit.  When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
				   struct klp_func *limit)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name && func != limit; func++)
		kobject_put(&func->kobj);
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func)
		func->old_addr = 0;
}

/*
 * Free all objects' kobjects in the array up to some limit.  When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
	kobject_put(&patch->kobj);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name || !func->new_func)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->state = KLP_DISABLED;

	/*
	 * The format for the sysfs directory is <function,sympos> where
	 * sympos is the nth occurrence of this symbol in kallsyms for the
	 * patched object.  If the user selects 0 for old_sympos, then 1 will
	 * be used since a unique symbol will be the first occurrence.
	 */
	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    &obj->kobj, "%s,%lu", func->old_name,
				    func->old_sympos ? func->old_sympos : 1);
}

/* Arches may override this to finish any remaining arch-specific tasks */
void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
					struct klp_object *obj)
{
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	module_disable_ro(patch->mod);
	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret) {
		module_enable_ro(patch->mod, true);
		return ret;
	}

	arch_klp_init_object_loaded(patch, obj);
	module_enable_ro(patch->mod, true);

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     &func->old_addr);
		if (ret)
			return ret;
	}

	return 0;
}
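/*
 * Initialize one patched object: create its kobject and the kobjects of
 * its funcs; if the object (vmlinux or an already loaded module) is
 * present, also resolve symbols and apply relocations via
 * klp_init_object_loaded().
 */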
static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->state = KLP_DISABLED;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	klp_free_funcs_limited(obj, func);
	kobject_put(&obj->kobj);
	return ret;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->state = KLP_DISABLED;

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		goto unlock;

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_objects_limited(patch, obj);
	kobject_put(&patch->kobj);
unlock:
	mutex_unlock(&klp_mutex);
	return ret;
}

/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:	Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret = 0;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto out;
	}

	if (patch->state == KLP_ENABLED) {
		ret = -EBUSY;
		goto out;
	}

	klp_free_patch(patch);

out:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);

/**
 * klp_register_patch() - registers a patch
 * @patch:	Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	int ret;

	if (!patch || !patch->mod)
		return -EINVAL;

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded.  Right now, we don't allow patch modules to unload since
	 * there is currently no method to determine if a thread is still
	 * running in the patched code contained in the patch module once
	 * the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	ret = klp_init_patch(patch);
	if (ret)
		module_put(patch->mod);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_register_patch);
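/*
 * The hooks below are called from the module loader as modules enter the
 * COMING and GOING states, so that loading modules get patched and
 * unloading modules get unpatched by every registered patch that targets
 * them.
 */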
int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called.  We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			if (patch->state == KLP_DISABLED)
				break;

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_enable_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	klp_free_object_loaded(obj);
	mutex_unlock(&klp_mutex);

	return ret;
}

void klp_module_going(struct module *mod)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called.  We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch->state != KLP_DISABLED) {
				pr_notice("reverting patch '%s' on unloading module '%s'\n",
					  patch->mod->name, obj->mod->name);
				klp_disable_object(obj);
			}

			klp_free_object_loaded(obj);
			break;
		}
	}

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);