/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ftrace.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>

/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr.  This allows the switch
 * between function versions to happen instantaneously by updating the klp_ops
 * struct's func_stack list.  The winner is the klp_func at the top of the
 * func_stack (front of the list).
 *
 * @node:	node for the global klp_ops list
 * @func_stack:	list head for the stack of klp_func's (active func is on top)
 * @fops:	registered ftrace ops struct
 */
struct klp_ops {
	struct list_head node;
	struct list_head func_stack;
	struct ftrace_ops fops;
};
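
/*
 * Illustrative sketch (not compiled here): with two enabled patches that
 * replace the same old_addr, the shared klp_ops stacks both klp_funcs and
 * klp_ftrace_handler() redirects to whichever is at the front of the list:
 *
 *	ops->func_stack: patch2's klp_func -> patch1's klp_func
 *
 *	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
 *				      stack_node);
 *	klp_arch_set_pc(regs, (unsigned long)func->new_func);
 */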

/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them.  References to any structure must be obtained
 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
 * ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);
static LIST_HEAD(klp_ops);

static struct kobject *klp_root_kobj;

static struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

static bool klp_is_object_loaded(struct klp_object *obj)
{
	return !obj->name || obj->mod;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics
	 * of the patched functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

/* klp_mutex must be held by caller */
static bool klp_is_patch_registered(struct klp_patch *patch)
{
	struct klp_patch *mypatch;

	list_for_each_entry(mypatch, &klp_patches, list)
		if (mypatch == patch)
			return true;

	return false;
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
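	 * (A sympos of 0 means the symbol is expected to be unique; a nonzero
	 * sympos selects the nth occurrence of the symbol in kallsyms.)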
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0) {
		pr_err("symbol '%s' not found in symbol table\n", name);
	} else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname ? objname : "vmlinux");
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}

static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.objname.symname,sympos */
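		/* e.g. ".klp.sym.vmlinux.printk,0" (sympos 0: symbol is unique) */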
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     objname, symname, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}

static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	module_disable_ro(pmod);
	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
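		 * e.g. a section named ".klp.rela.ext4.text.unlikely" would
		 * carry relocations for this patch module's .text.unlikely
		 * section that reference unexported ext4 symbols.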
		 */
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name\n",
			       secname);
			ret = -EINVAL;
			break;
		}

		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			break;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			break;
	}

	module_enable_ro(pmod, true);
	return ret;
}

static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	if (WARN_ON_ONCE(!func))
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif
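
/*
 * As an illustration only (a sketch modeled on powerpc, where
 * -mprofile-kernel places the ftrace location within the first 16 bytes
 * of a function), an architecture could override the default like this:
 *
 *	#define klp_get_ftrace_location klp_get_ftrace_location
 *	static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
 *	{
 *		return ftrace_location_range(faddr, faddr + 16);
 *	}
 */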

static void klp_disable_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(func->state != KLP_ENABLED))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->state = KLP_DISABLED;
}

static int klp_enable_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->state != KLP_DISABLED))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
				func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}
	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->state = KLP_ENABLED;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

static void klp_disable_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->state == KLP_ENABLED)
			klp_disable_func(func);

	obj->state = KLP_DISABLED;
}

static int klp_enable_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->state != KLP_DISABLED))
		return -EINVAL;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_enable_func(func);
		if (ret) {
			klp_disable_object(obj);
			return ret;
		}
	}
	obj->state = KLP_ENABLED;

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->state == KLP_ENABLED)
		return -EBUSY;

	pr_notice("disabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (obj->state == KLP_ENABLED)
			klp_disable_object(obj);
	}

	patch->state = KLP_DISABLED;

	return 0;
}

/**
 * klp_disable_patch() - disables a registered patch
 * @patch:	The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (patch->state == KLP_DISABLED) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_disable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (WARN_ON(patch->state != KLP_DISABLED))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    list_prev_entry(patch, list)->state == KLP_DISABLED)
		return -EBUSY;

	pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
	add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_enable_object(obj);
		if (ret)
			goto unregister;
	}

	patch->state = KLP_ENABLED;

	return 0;

unregister:
	WARN_ON(__klp_disable_patch(patch));
	return ret;
}

/**
 * klp_enable_patch() - enables a registered patch
 * @patch:	The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_enable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);
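
/*
 * A patch module typically drives klp_register_patch()/klp_enable_patch()
 * from its init function. A minimal sketch, assuming a filled-in klp_patch
 * named "patch" (modeled on samples/livepatch/):
 *
 *	static int livepatch_init(void)
 *	{
 *		int ret;
 *
 *		ret = klp_register_patch(&patch);
 *		if (ret)
 *			return ret;
 *
 *		ret = klp_enable_patch(&patch);
 *		if (ret) {
 *			WARN_ON(klp_unregister_patch(&patch));
 *			return ret;
 *		}
 *
 *		return 0;
 *	}
 */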

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
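
/* e.g. "echo 0 > /sys/kernel/livepatch/<patch>/enabled" disables a patch */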

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return -EINVAL;

	if (val != KLP_DISABLED && val != KLP_ENABLED)
		return -EINVAL;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (val == patch->state) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (val == KLP_ENABLED) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE - 1, "%d\n", patch->state);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
	/*
	 * Once we have a consistency model we'll need to module_put() the
	 * patch module here.  See klp_register_patch() for more details.
	 */
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

/*
 * Free all functions' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
				   struct klp_func *limit)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name && func != limit; func++)
		kobject_put(&func->kobj);
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func)
		func->old_addr = 0;
}

/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
	kobject_put(&patch->kobj);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name || !func->new_func)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->state = KLP_DISABLED;

	/*
	 * The format for the sysfs directory is <function,sympos> where sympos
	 * is the nth occurrence of this symbol in kallsyms for the patched
	 * object. If the user selects 0 for old_sympos, then 1 will be used
	 * since a unique symbol will be the first occurrence.
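	 * e.g. /sys/kernel/livepatch/<patch>/vmlinux/meminfo_proc_show,1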
	 */
	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    &obj->kobj, "%s,%lu", func->old_name,
				    func->old_sympos ? func->old_sympos : 1);
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     &func->old_addr);
		if (ret)
			return ret;
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->state = KLP_DISABLED;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	klp_free_funcs_limited(obj, func);
	kobject_put(&obj->kobj);
	return ret;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->state = KLP_DISABLED;

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		goto unlock;

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_objects_limited(patch, obj);
	kobject_put(&patch->kobj);
unlock:
	mutex_unlock(&klp_mutex);
	return ret;
}

/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:	Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret = 0;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto out;
	}

	if (patch->state == KLP_ENABLED) {
		ret = -EBUSY;
		goto out;
	}

	klp_free_patch(patch);

out:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);

/**
 * klp_register_patch() - registers a patch
 * @patch:	Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	int ret;

	if (!patch || !patch->mod)
		return -EINVAL;

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded.  Right now, we don't allow patch modules to unload since
	 * there is currently no method to determine if a thread is still
	 * running in the patched code contained in the patch module once
	 * the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	ret = klp_init_patch(patch);
	if (ret)
		module_put(patch->mod);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_register_patch);
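
/*
 * For reference, the structures a patch module hands to klp_register_patch()
 * are nested patch -> objects -> funcs. A minimal sketch, with a hypothetical
 * replacement function livepatch_meminfo_proc_show (modeled on
 * samples/livepatch/):
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "meminfo_proc_show",
 *			.new_func = livepatch_meminfo_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			// a NULL name means the object is vmlinux
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 */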

int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			if (patch->state == KLP_DISABLED)
				break;

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_enable_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	klp_free_object_loaded(obj);
	mutex_unlock(&klp_mutex);

	return ret;
}

void klp_module_going(struct module *mod)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch->state != KLP_DISABLED) {
				pr_notice("reverting patch '%s' on unloading module '%s'\n",
					  patch->mod->name, obj->mod->name);
				klp_disable_object(obj);
			}

			klp_free_object_loaded(obj);
			break;
		}
	}

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);