xref: /openbmc/linux/kernel/livepatch/core.c (revision b85d4594)
1 /*
2  * core.c - Kernel Live Patching Core
3  *
4  * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
5  * Copyright (C) 2014 SUSE
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License
9  * as published by the Free Software Foundation; either version 2
10  * of the License, or (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/mutex.h>
26 #include <linux/slab.h>
27 #include <linux/ftrace.h>
28 #include <linux/list.h>
29 #include <linux/kallsyms.h>
30 #include <linux/livepatch.h>
31 
32 /**
33  * struct klp_ops - structure for tracking registered ftrace ops structs
34  *
35  * A single ftrace_ops is shared between all enabled replacement functions
36  * (klp_func structs) which have the same old_addr.  This allows the switch
37  * between function versions to happen instantaneously by updating the klp_ops
38  * struct's func_stack list.  The winner is the klp_func at the top of the
39  * func_stack (front of the list).
40  *
41  * @node:	node for the global klp_ops list
42  * @func_stack:	list head for the stack of klp_func's (active func is on top)
43  * @fops:	registered ftrace ops struct
44  */
45 struct klp_ops {
46 	struct list_head node;
47 	struct list_head func_stack;
48 	struct ftrace_ops fops;
49 };
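/*
 * For illustration only: if two patch modules each provide a new version
 * of the same function, both klp_funcs share one klp_ops keyed by that
 * old_addr.  Enabling the second patch simply pushes its klp_func onto
 * func_stack, so klp_ftrace_handler() starts redirecting to it at once;
 * disabling it pops the entry and the first patch's version takes over
 * again.
 */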
50 
51 /*
52  * The klp_mutex protects the global lists and state transitions of any
53  * structure reachable from them.  References to any structure must be obtained
54  * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
55  * ensure it gets consistent data).
56  */
57 static DEFINE_MUTEX(klp_mutex);
58 
59 static LIST_HEAD(klp_patches);
60 static LIST_HEAD(klp_ops);
61 
62 static struct kobject *klp_root_kobj;
63 
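/*
 * Find the klp_ops that manages the given old_addr.  It is enough to
 * compare against the first (active) klp_func on each func_stack since
 * all klp_funcs on a given stack patch the same old_addr.
 */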
64 static struct klp_ops *klp_find_ops(unsigned long old_addr)
65 {
66 	struct klp_ops *ops;
67 	struct klp_func *func;
68 
69 	list_for_each_entry(ops, &klp_ops, node) {
70 		func = list_first_entry(&ops->func_stack, struct klp_func,
71 					stack_node);
72 		if (func->old_addr == old_addr)
73 			return ops;
74 	}
75 
76 	return NULL;
77 }
78 
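/* obj->name is NULL for vmlinux and holds the module name otherwise */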
79 static bool klp_is_module(struct klp_object *obj)
80 {
81 	return obj->name;
82 }
83 
84 static bool klp_is_object_loaded(struct klp_object *obj)
85 {
86 	return !obj->name || obj->mod;
87 }
88 
89 /* sets obj->mod if object is not vmlinux and module is found */
90 static void klp_find_object_module(struct klp_object *obj)
91 {
92 	struct module *mod;
93 
94 	if (!klp_is_module(obj))
95 		return;
96 
97 	mutex_lock(&module_mutex);
98 	/*
99 	 * We do not want to block removal of patched modules and therefore
100 	 * we do not take a reference here. The patches are removed by
101 	 * the module GOING notifier instead.
102 	 */
103 	mod = find_module(obj->name);
104 	/*
105 	 * Do not interfere with the module COMING and GOING notifiers.
106 	 * Note that the patch might still be needed before the GOING
107 	 * handler is called. Module functions can be called even in the
108 	 * GOING state until mod->exit() finishes. This is especially
109 	 * important for patches that change the semantics of the functions.
110 	 */
111 	if (mod && mod->klp_alive)
112 		obj->mod = mod;
113 
114 	mutex_unlock(&module_mutex);
115 }
116 
117 /* klp_mutex must be held by caller */
118 static bool klp_is_patch_registered(struct klp_patch *patch)
119 {
120 	struct klp_patch *mypatch;
121 
122 	list_for_each_entry(mypatch, &klp_patches, list)
123 		if (mypatch == patch)
124 			return true;
125 
126 	return false;
127 }
128 
129 static bool klp_initialized(void)
130 {
131 	return !!klp_root_kobj;
132 }
133 
134 struct klp_find_arg {
135 	const char *objname;
136 	const char *name;
137 	unsigned long addr;
138 	/*
139 	 * If count == 0, the symbol was not found. If count == 1, a unique
140 	 * match was found and addr is set.  If count > 1, there is
141 	 * unresolvable ambiguity among "count" symbols with the same name
142 	 * in the same object.
143 	 */
144 	unsigned long count;
145 };
146 
147 static int klp_find_callback(void *data, const char *name,
148 			     struct module *mod, unsigned long addr)
149 {
150 	struct klp_find_arg *args = data;
151 
152 	if ((mod && !args->objname) || (!mod && args->objname))
153 		return 0;
154 
155 	if (strcmp(args->name, name))
156 		return 0;
157 
158 	if (args->objname && strcmp(args->objname, mod->name))
159 		return 0;
160 
161 	/*
162 	 * args->addr might be overwritten if another match is found
163 	 * but klp_find_object_symbol() handles this and only returns the
164 	 * addr if count == 1.
165 	 */
166 	args->addr = addr;
167 	args->count++;
168 
169 	return 0;
170 }
171 
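/*
 * Look up a symbol via kallsyms, restricted to one object (a NULL
 * objname means vmlinux).  The lookup succeeds only for a unique match;
 * a missing or duplicated symbol name is reported as an error.
 */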
172 static int klp_find_object_symbol(const char *objname, const char *name,
173 				  unsigned long *addr)
174 {
175 	struct klp_find_arg args = {
176 		.objname = objname,
177 		.name = name,
178 		.addr = 0,
179 		.count = 0
180 	};
181 
182 	mutex_lock(&module_mutex);
183 	kallsyms_on_each_symbol(klp_find_callback, &args);
184 	mutex_unlock(&module_mutex);
185 
186 	if (args.count == 0)
187 		pr_err("symbol '%s' not found in symbol table\n", name);
188 	else if (args.count > 1)
189 		pr_err("unresolvable ambiguity (%lu matches) on symbol '%s' in object '%s'\n",
190 		       args.count, name, objname);
191 	else {
192 		*addr = args.addr;
193 		return 0;
194 	}
195 
196 	*addr = 0;
197 	return -EINVAL;
198 }
199 
200 struct klp_verify_args {
201 	const char *name;
202 	const unsigned long addr;
203 };
204 
205 static int klp_verify_callback(void *data, const char *name,
206 			       struct module *mod, unsigned long addr)
207 {
208 	struct klp_verify_args *args = data;
209 
210 	if (!mod &&
211 	    !strcmp(args->name, name) &&
212 	    args->addr == addr)
213 		return 1;
214 
215 	return 0;
216 }
217 
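/*
 * Verify that the given vmlinux symbol really is located at the given
 * address; a mismatch typically means the patch was built against a
 * different kernel.
 */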
218 static int klp_verify_vmlinux_symbol(const char *name, unsigned long addr)
219 {
220 	struct klp_verify_args args = {
221 		.name = name,
222 		.addr = addr,
223 	};
224 	int ret;
225 
226 	mutex_lock(&module_mutex);
227 	ret = kallsyms_on_each_symbol(klp_verify_callback, &args);
228 	mutex_unlock(&module_mutex);
229 
230 	if (!ret) {
231 		pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
232 			name, addr);
233 		return -EINVAL;
234 	}
235 
236 	return 0;
237 }
238 
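/*
 * Resolve func->old_addr via kallsyms when it was not pre-set, or when
 * the object is a module whose load address cannot be known at patch
 * build time; otherwise just verify the pre-set vmlinux address.
 */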
239 static int klp_find_verify_func_addr(struct klp_object *obj,
240 				     struct klp_func *func)
241 {
242 	int ret;
243 
244 #if defined(CONFIG_RANDOMIZE_BASE)
245 	/* If KASLR has been enabled, adjust old_addr accordingly */
246 	if (kaslr_enabled() && func->old_addr)
247 		func->old_addr += kaslr_offset();
248 #endif
249 
250 	if (!func->old_addr || klp_is_module(obj))
251 		ret = klp_find_object_symbol(obj->name, func->old_name,
252 					     &func->old_addr);
253 	else
254 		ret = klp_verify_vmlinux_symbol(func->old_name,
255 						func->old_addr);
256 
257 	return ret;
258 }
259 
260 /*
261  * External symbols are located outside the parent object (where the parent
262  * object is either vmlinux or the kernel module being patched).
263  */
264 static int klp_find_external_symbol(struct module *pmod, const char *name,
265 				    unsigned long *addr)
266 {
267 	const struct kernel_symbol *sym;
268 
269 	/* first, check if it's an exported symbol */
270 	preempt_disable();
271 	sym = find_symbol(name, NULL, NULL, true, true);
272 	if (sym) {
273 		*addr = sym->value;
274 		preempt_enable();
275 		return 0;
276 	}
277 	preempt_enable();
278 
279 	/* otherwise check if it's in another .o within the patch module */
280 	return klp_find_object_symbol(pmod->name, name, addr);
281 }
282 
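/*
 * Resolve and apply all relocations that the patch module recorded for
 * this object.  vmlinux symbol values are verified against the pre-set
 * reloc->val, while module symbol values must be discovered at run time
 * because module load addresses vary.
 */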
283 static int klp_write_object_relocations(struct module *pmod,
284 					struct klp_object *obj)
285 {
286 	int ret;
287 	struct klp_reloc *reloc;
288 
289 	if (WARN_ON(!klp_is_object_loaded(obj)))
290 		return -EINVAL;
291 
292 	if (WARN_ON(!obj->relocs))
293 		return -EINVAL;
294 
295 	for (reloc = obj->relocs; reloc->name; reloc++) {
296 		if (!klp_is_module(obj)) {
297 			ret = klp_verify_vmlinux_symbol(reloc->name,
298 							reloc->val);
299 			if (ret)
300 				return ret;
301 		} else {
302 			/* module, reloc->val needs to be discovered */
303 			if (reloc->external)
304 				ret = klp_find_external_symbol(pmod,
305 							       reloc->name,
306 							       &reloc->val);
307 			else
308 				ret = klp_find_object_symbol(obj->mod->name,
309 							     reloc->name,
310 							     &reloc->val);
311 			if (ret)
312 				return ret;
313 		}
314 		ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
315 					     reloc->val + reloc->addend);
316 		if (ret) {
317 			pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
318 			       reloc->name, reloc->val, ret);
319 			return ret;
320 		}
321 	}
322 
323 	return 0;
324 }
325 
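/*
 * The ftrace handler shared by every klp_func patching this old_addr:
 * it redirects execution by pointing the saved instruction pointer at
 * the new_func of the first (most recently enabled) entry on
 * func_stack.  The RCU read lock protects the list walk against a
 * concurrent enable or disable.
 */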
326 static void notrace klp_ftrace_handler(unsigned long ip,
327 				       unsigned long parent_ip,
328 				       struct ftrace_ops *fops,
329 				       struct pt_regs *regs)
330 {
331 	struct klp_ops *ops;
332 	struct klp_func *func;
333 
334 	ops = container_of(fops, struct klp_ops, fops);
335 
336 	rcu_read_lock();
337 	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
338 				      stack_node);
339 	if (WARN_ON_ONCE(!func))
340 		goto unlock;
341 
342 	klp_arch_set_pc(regs, (unsigned long)func->new_func);
343 unlock:
344 	rcu_read_unlock();
345 }
346 
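/*
 * Pop func off its func_stack.  The last klp_func on a stack also tears
 * down the shared ftrace_ops and its filter.
 */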
347 static void klp_disable_func(struct klp_func *func)
348 {
349 	struct klp_ops *ops;
350 
351 	if (WARN_ON(func->state != KLP_ENABLED))
352 		return;
353 	if (WARN_ON(!func->old_addr))
354 		return;
355 
356 	ops = klp_find_ops(func->old_addr);
357 	if (WARN_ON(!ops))
358 		return;
359 
360 	if (list_is_singular(&ops->func_stack)) {
361 		WARN_ON(unregister_ftrace_function(&ops->fops));
362 		WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));
363 
364 		list_del_rcu(&func->stack_node);
365 		list_del(&ops->node);
366 		kfree(ops);
367 	} else {
368 		list_del_rcu(&func->stack_node);
369 	}
370 
371 	func->state = KLP_DISABLED;
372 }
373 
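/*
 * Push func onto the func_stack for its old_addr, creating and
 * registering the shared ftrace_ops on first use.
 */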
374 static int klp_enable_func(struct klp_func *func)
375 {
376 	struct klp_ops *ops;
377 	int ret;
378 
379 	if (WARN_ON(!func->old_addr))
380 		return -EINVAL;
381 
382 	if (WARN_ON(func->state != KLP_DISABLED))
383 		return -EINVAL;
384 
385 	ops = klp_find_ops(func->old_addr);
386 	if (!ops) {
387 		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
388 		if (!ops)
389 			return -ENOMEM;
390 
391 		ops->fops.func = klp_ftrace_handler;
392 		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
393 				  FTRACE_OPS_FL_DYNAMIC |
394 				  FTRACE_OPS_FL_IPMODIFY;
395 
396 		list_add(&ops->node, &klp_ops);
397 
398 		INIT_LIST_HEAD(&ops->func_stack);
399 		list_add_rcu(&func->stack_node, &ops->func_stack);
400 
401 		ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
402 		if (ret) {
403 			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
404 			       func->old_name, ret);
405 			goto err;
406 		}
407 
408 		ret = register_ftrace_function(&ops->fops);
409 		if (ret) {
410 			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
411 			       func->old_name, ret);
412 			ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
413 			goto err;
414 		}
415 
417 	} else {
418 		list_add_rcu(&func->stack_node, &ops->func_stack);
419 	}
420 
421 	func->state = KLP_ENABLED;
422 
423 	return 0;
424 
425 err:
426 	list_del_rcu(&func->stack_node);
427 	list_del(&ops->node);
428 	kfree(ops);
429 	return ret;
430 }
431 
432 static void klp_disable_object(struct klp_object *obj)
433 {
434 	struct klp_func *func;
435 
436 	klp_for_each_func(obj, func)
437 		if (func->state == KLP_ENABLED)
438 			klp_disable_func(func);
439 
440 	obj->state = KLP_DISABLED;
441 }
442 
443 static int klp_enable_object(struct klp_object *obj)
444 {
445 	struct klp_func *func;
446 	int ret;
447 
448 	if (WARN_ON(obj->state != KLP_DISABLED))
449 		return -EINVAL;
450 
451 	if (WARN_ON(!klp_is_object_loaded(obj)))
452 		return -EINVAL;
453 
454 	klp_for_each_func(obj, func) {
455 		ret = klp_enable_func(func);
456 		if (ret) {
457 			klp_disable_object(obj);
458 			return ret;
459 		}
460 	}
461 	obj->state = KLP_ENABLED;
462 
463 	return 0;
464 }
465 
466 static int __klp_disable_patch(struct klp_patch *patch)
467 {
468 	struct klp_object *obj;
469 
470 	/* enforce stacking: only the last enabled patch can be disabled */
471 	if (!list_is_last(&patch->list, &klp_patches) &&
472 	    list_next_entry(patch, list)->state == KLP_ENABLED)
473 		return -EBUSY;
474 
475 	pr_notice("disabling patch '%s'\n", patch->mod->name);
476 
477 	klp_for_each_object(patch, obj) {
478 		if (obj->state == KLP_ENABLED)
479 			klp_disable_object(obj);
480 	}
481 
482 	patch->state = KLP_DISABLED;
483 
484 	return 0;
485 }
486 
487 /**
488  * klp_disable_patch() - disables a registered patch
489  * @patch:	The registered, enabled patch to be disabled
490  *
491  * Unregisters the patched functions from ftrace.
492  *
493  * Return: 0 on success, otherwise error
494  */
495 int klp_disable_patch(struct klp_patch *patch)
496 {
497 	int ret;
498 
499 	mutex_lock(&klp_mutex);
500 
501 	if (!klp_is_patch_registered(patch)) {
502 		ret = -EINVAL;
503 		goto err;
504 	}
505 
506 	if (patch->state == KLP_DISABLED) {
507 		ret = -EINVAL;
508 		goto err;
509 	}
510 
511 	ret = __klp_disable_patch(patch);
512 
513 err:
514 	mutex_unlock(&klp_mutex);
515 	return ret;
516 }
517 EXPORT_SYMBOL_GPL(klp_disable_patch);
518 
519 static int __klp_enable_patch(struct klp_patch *patch)
520 {
521 	struct klp_object *obj;
522 	int ret;
523 
524 	if (WARN_ON(patch->state != KLP_DISABLED))
525 		return -EINVAL;
526 
527 	/* enforce stacking: only the first disabled patch can be enabled */
528 	if (patch->list.prev != &klp_patches &&
529 	    list_prev_entry(patch, list)->state == KLP_DISABLED)
530 		return -EBUSY;
531 
532 	pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
533 	add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
534 
535 	pr_notice("enabling patch '%s'\n", patch->mod->name);
536 
537 	klp_for_each_object(patch, obj) {
538 		if (!klp_is_object_loaded(obj))
539 			continue;
540 
541 		ret = klp_enable_object(obj);
542 		if (ret)
543 			goto unregister;
544 	}
545 
546 	patch->state = KLP_ENABLED;
547 
548 	return 0;
549 
550 unregister:
551 	WARN_ON(__klp_disable_patch(patch));
552 	return ret;
553 }
554 
555 /**
556  * klp_enable_patch() - enables a registered patch
557  * @patch:	The registered, disabled patch to be enabled
558  *
559  * Performs the needed symbol lookups and code relocations,
560  * then registers the patched functions with ftrace.
561  *
562  * Return: 0 on success, otherwise error
563  */
564 int klp_enable_patch(struct klp_patch *patch)
565 {
566 	int ret;
567 
568 	mutex_lock(&klp_mutex);
569 
570 	if (!klp_is_patch_registered(patch)) {
571 		ret = -EINVAL;
572 		goto err;
573 	}
574 
575 	ret = __klp_enable_patch(patch);
576 
577 err:
578 	mutex_unlock(&klp_mutex);
579 	return ret;
580 }
581 EXPORT_SYMBOL_GPL(klp_enable_patch);
582 
583 /*
584  * Sysfs Interface
585  *
586  * /sys/kernel/livepatch
587  * /sys/kernel/livepatch/<patch>
588  * /sys/kernel/livepatch/<patch>/enabled
589  * /sys/kernel/livepatch/<patch>/<object>
590  * /sys/kernel/livepatch/<patch>/<object>/<func>
591  */
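/*
 * For example (the patch name below is just a placeholder for the
 * loaded patch module's name):
 *
 *   # cat /sys/kernel/livepatch/livepatch_sample/enabled
 *   1
 *   # echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 *
 * Writing 0 or 1 toggles the patch via __klp_disable_patch() or
 * __klp_enable_patch() below.
 */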
592 
593 static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
594 			     const char *buf, size_t count)
595 {
596 	struct klp_patch *patch;
597 	int ret;
598 	unsigned long val;
599 
600 	ret = kstrtoul(buf, 10, &val);
601 	if (ret)
602 		return -EINVAL;
603 
604 	if (val != KLP_DISABLED && val != KLP_ENABLED)
605 		return -EINVAL;
606 
607 	patch = container_of(kobj, struct klp_patch, kobj);
608 
609 	mutex_lock(&klp_mutex);
610 
611 	if (val == patch->state) {
612 		/* already in requested state */
613 		ret = -EINVAL;
614 		goto err;
615 	}
616 
617 	if (val == KLP_ENABLED) {
618 		ret = __klp_enable_patch(patch);
619 		if (ret)
620 			goto err;
621 	} else {
622 		ret = __klp_disable_patch(patch);
623 		if (ret)
624 			goto err;
625 	}
626 
627 	mutex_unlock(&klp_mutex);
628 
629 	return count;
630 
631 err:
632 	mutex_unlock(&klp_mutex);
633 	return ret;
634 }
635 
636 static ssize_t enabled_show(struct kobject *kobj,
637 			    struct kobj_attribute *attr, char *buf)
638 {
639 	struct klp_patch *patch;
640 
641 	patch = container_of(kobj, struct klp_patch, kobj);
642 	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
643 }
644 
645 static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
646 static struct attribute *klp_patch_attrs[] = {
647 	&enabled_kobj_attr.attr,
648 	NULL
649 };
650 
651 static void klp_kobj_release_patch(struct kobject *kobj)
652 {
653 	/*
654 	 * Once we have a consistency model we'll need to module_put() the
655 	 * patch module here.  See klp_register_patch() for more details.
656 	 */
657 }
658 
659 static struct kobj_type klp_ktype_patch = {
660 	.release = klp_kobj_release_patch,
661 	.sysfs_ops = &kobj_sysfs_ops,
662 	.default_attrs = klp_patch_attrs,
663 };
664 
665 static void klp_kobj_release_object(struct kobject *kobj)
666 {
667 }
668 
669 static struct kobj_type klp_ktype_object = {
670 	.release = klp_kobj_release_object,
671 	.sysfs_ops = &kobj_sysfs_ops,
672 };
673 
674 static void klp_kobj_release_func(struct kobject *kobj)
675 {
676 }
677 
678 static struct kobj_type klp_ktype_func = {
679 	.release = klp_kobj_release_func,
680 	.sysfs_ops = &kobj_sysfs_ops,
681 };
682 
683 /*
684  * Free all functions' kobjects in the array up to some limit. When limit is
685  * NULL, all kobjects are freed.
686  */
687 static void klp_free_funcs_limited(struct klp_object *obj,
688 				   struct klp_func *limit)
689 {
690 	struct klp_func *func;
691 
692 	for (func = obj->funcs; func->old_name && func != limit; func++)
693 		kobject_put(&func->kobj);
694 }
695 
696 /* Clean up when a patched object is unloaded */
697 static void klp_free_object_loaded(struct klp_object *obj)
698 {
699 	struct klp_func *func;
700 
701 	obj->mod = NULL;
702 
703 	klp_for_each_func(obj, func)
704 		func->old_addr = 0;
705 }
706 
707 /*
708  * Free all objects' kobjects in the array up to some limit. When limit is
709  * NULL, all kobjects are freed.
710  */
711 static void klp_free_objects_limited(struct klp_patch *patch,
712 				     struct klp_object *limit)
713 {
714 	struct klp_object *obj;
715 
716 	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
717 		klp_free_funcs_limited(obj, NULL);
718 		kobject_put(&obj->kobj);
719 	}
720 }
721 
722 static void klp_free_patch(struct klp_patch *patch)
723 {
724 	klp_free_objects_limited(patch, NULL);
725 	if (!list_empty(&patch->list))
726 		list_del(&patch->list);
727 	kobject_put(&patch->kobj);
728 }
729 
730 static int klp_init_func(struct klp_object *obj, struct klp_func *func)
731 {
732 	INIT_LIST_HEAD(&func->stack_node);
733 	func->state = KLP_DISABLED;
734 
735 	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
736 				    &obj->kobj, "%s", func->old_name);
737 }
738 
739 /* parts of the initialization that are done only when the object is loaded */
740 static int klp_init_object_loaded(struct klp_patch *patch,
741 				  struct klp_object *obj)
742 {
743 	struct klp_func *func;
744 	int ret;
745 
746 	if (obj->relocs) {
747 		ret = klp_write_object_relocations(patch->mod, obj);
748 		if (ret)
749 			return ret;
750 	}
751 
752 	klp_for_each_func(obj, func) {
753 		ret = klp_find_verify_func_addr(obj, func);
754 		if (ret)
755 			return ret;
756 	}
757 
758 	return 0;
759 }
760 
761 static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
762 {
763 	struct klp_func *func;
764 	int ret;
765 	const char *name;
766 
767 	if (!obj->funcs)
768 		return -EINVAL;
769 
770 	obj->state = KLP_DISABLED;
771 	obj->mod = NULL;
772 
773 	klp_find_object_module(obj);
774 
775 	name = klp_is_module(obj) ? obj->name : "vmlinux";
776 	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
777 				   &patch->kobj, "%s", name);
778 	if (ret)
779 		return ret;
780 
781 	klp_for_each_func(obj, func) {
782 		ret = klp_init_func(obj, func);
783 		if (ret)
784 			goto free;
785 	}
786 
787 	if (klp_is_object_loaded(obj)) {
788 		ret = klp_init_object_loaded(patch, obj);
789 		if (ret)
790 			goto free;
791 	}
792 
793 	return 0;
794 
795 free:
796 	klp_free_funcs_limited(obj, func);
797 	kobject_put(&obj->kobj);
798 	return ret;
799 }
800 
801 static int klp_init_patch(struct klp_patch *patch)
802 {
803 	struct klp_object *obj;
804 	int ret;
805 
806 	if (!patch->objs)
807 		return -EINVAL;
808 
809 	mutex_lock(&klp_mutex);
810 
811 	patch->state = KLP_DISABLED;
812 
813 	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
814 				   klp_root_kobj, "%s", patch->mod->name);
815 	if (ret)
816 		goto unlock;
817 
818 	klp_for_each_object(patch, obj) {
819 		ret = klp_init_object(patch, obj);
820 		if (ret)
821 			goto free;
822 	}
823 
824 	list_add_tail(&patch->list, &klp_patches);
825 
826 	mutex_unlock(&klp_mutex);
827 
828 	return 0;
829 
830 free:
831 	klp_free_objects_limited(patch, obj);
832 	kobject_put(&patch->kobj);
833 unlock:
834 	mutex_unlock(&klp_mutex);
835 	return ret;
836 }
837 
838 /**
839  * klp_unregister_patch() - unregisters a patch
840  * @patch:	Disabled patch to be unregistered
841  *
842  * Frees the data structures and removes the sysfs interface.
843  *
844  * Return: 0 on success, otherwise error
845  */
846 int klp_unregister_patch(struct klp_patch *patch)
847 {
848 	int ret = 0;
849 
850 	mutex_lock(&klp_mutex);
851 
852 	if (!klp_is_patch_registered(patch)) {
853 		ret = -EINVAL;
854 		goto out;
855 	}
856 
857 	if (patch->state == KLP_ENABLED) {
858 		ret = -EBUSY;
859 		goto out;
860 	}
861 
862 	klp_free_patch(patch);
863 
864 out:
865 	mutex_unlock(&klp_mutex);
866 	return ret;
867 }
868 EXPORT_SYMBOL_GPL(klp_unregister_patch);
869 
870 /**
871  * klp_register_patch() - registers a patch
872  * @patch:	Patch to be registered
873  *
874  * Initializes the data structure associated with the patch and
875  * creates the sysfs interface.
876  *
877  * Return: 0 on success, otherwise error
878  */
879 int klp_register_patch(struct klp_patch *patch)
880 {
881 	int ret;
882 
883 	if (!klp_initialized())
884 		return -ENODEV;
885 
886 	if (!patch || !patch->mod)
887 		return -EINVAL;
888 
889 	/*
890 	 * A reference is taken on the patch module to prevent it from being
891 	 * unloaded.  Right now, we don't allow patch modules to unload since
892 	 * there is currently no method to determine if a thread is still
893 	 * running in the patched code contained in the patch module once
894 	 * the ftrace registration is successful.
895 	 */
896 	if (!try_module_get(patch->mod))
897 		return -ENODEV;
898 
899 	ret = klp_init_patch(patch);
900 	if (ret)
901 		module_put(patch->mod);
902 
903 	return ret;
904 }
905 EXPORT_SYMBOL_GPL(klp_register_patch);
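/*
 * A minimal sketch of a patch module built on this API, modelled on the
 * in-tree livepatch sample; the patched function is only an example,
 * and leaving klp_object.name NULL targets vmlinux:
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 * The module's init function would call klp_register_patch(&patch) and
 * then klp_enable_patch(&patch); its exit function would call
 * klp_disable_patch(&patch) followed by klp_unregister_patch(&patch).
 */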
906 
907 static int klp_module_notify_coming(struct klp_patch *patch,
908 				     struct klp_object *obj)
909 {
910 	struct module *pmod = patch->mod;
911 	struct module *mod = obj->mod;
912 	int ret;
913 
914 	ret = klp_init_object_loaded(patch, obj);
915 	if (ret) {
916 		pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
917 			pmod->name, mod->name, ret);
918 		return ret;
919 	}
920 
921 	if (patch->state == KLP_DISABLED)
922 		return 0;
923 
924 	pr_notice("applying patch '%s' to loading module '%s'\n",
925 		  pmod->name, mod->name);
926 
927 	ret = klp_enable_object(obj);
928 	if (ret)
929 		pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
930 			pmod->name, mod->name, ret);
931 	return ret;
932 }
933 
934 static void klp_module_notify_going(struct klp_patch *patch,
935 				    struct klp_object *obj)
936 {
937 	struct module *pmod = patch->mod;
938 	struct module *mod = obj->mod;
939 
940 	if (patch->state == KLP_DISABLED)
941 		goto disabled;
942 
943 	pr_notice("reverting patch '%s' on unloading module '%s'\n",
944 		  pmod->name, mod->name);
945 
946 	klp_disable_object(obj);
947 
948 disabled:
949 	klp_free_object_loaded(obj);
950 }
951 
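/*
 * Module notifier: apply any relevant registered patches when a patched
 * module loads (COMING) and revert them before it unloads (GOING).
 */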
952 static int klp_module_notify(struct notifier_block *nb, unsigned long action,
953 			     void *data)
954 {
955 	int ret;
956 	struct module *mod = data;
957 	struct klp_patch *patch;
958 	struct klp_object *obj;
959 
960 	if (action != MODULE_STATE_COMING && action != MODULE_STATE_GOING)
961 		return 0;
962 
963 	mutex_lock(&klp_mutex);
964 
965 	/*
966 	 * Each module has to record that the notifier has been called.
967 	 * We never know in advance which modules a new patch will target.
968 	 */
969 	if (action == MODULE_STATE_COMING)
970 		mod->klp_alive = true;
971 	else /* MODULE_STATE_GOING */
972 		mod->klp_alive = false;
973 
974 	list_for_each_entry(patch, &klp_patches, list) {
975 		klp_for_each_object(patch, obj) {
976 			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
977 				continue;
978 
979 			if (action == MODULE_STATE_COMING) {
980 				obj->mod = mod;
981 				ret = klp_module_notify_coming(patch, obj);
982 				if (ret) {
983 					obj->mod = NULL;
984 					pr_warn("patch '%s' is in an inconsistent state!\n",
985 						patch->mod->name);
986 				}
987 			} else /* MODULE_STATE_GOING */
988 				klp_module_notify_going(patch, obj);
989 
990 			break;
991 		}
992 	}
993 
994 	mutex_unlock(&klp_mutex);
995 
996 	return 0;
997 }
998 
999 static struct notifier_block klp_module_nb = {
1000 	.notifier_call = klp_module_notify,
1001 	.priority = INT_MIN+1, /* called late but before ftrace notifier */
1002 };
1003 
1004 static int __init klp_init(void)
1005 {
1006 	int ret;
1007 
1008 	ret = klp_check_compiler_support();
1009 	if (ret) {
1010 		pr_info("Your compiler is too old; turning off.\n");
1011 		return -EINVAL;
1012 	}
1013 
1014 	ret = register_module_notifier(&klp_module_nb);
1015 	if (ret)
1016 		return ret;
1017 
1018 	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
1019 	if (!klp_root_kobj) {
1020 		ret = -ENOMEM;
1021 		goto unregister;
1022 	}
1023 
1024 	return 0;
1025 
1026 unregister:
1027 	unregister_module_notifier(&klp_module_nb);
1028 	return ret;
1029 }
1030 
1031 module_init(klp_init);
1032