/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/completion.h>
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

/*
 * klp_mutex is a coarse lock which serializes access to klp data.  All
 * accesses to klp-related variables and structures must have mutex protection,
 * except within the following functions which carefully avoid the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 */
DEFINE_MUTEX(klp_mutex);

/*
 * Actively used patches: enabled or in transition. Note that replaced
 * or disabled patches are not listed even though the related kernel
 * module can still be loaded.
 */
LIST_HEAD(klp_patches);

static struct kobject *klp_root_kobj;

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not mess with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics
	 * of the functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

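/*
 * Return the klp_func in @obj that matches @old_func by old_name and
 * old_sympos, or NULL when there is none.
 */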
static struct klp_func *klp_find_func(struct klp_object *obj,
				      struct klp_func *old_func)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if ((strcmp(old_func->old_name, func->old_name) == 0) &&
		    (old_func->old_sympos == func->old_sympos)) {
			return func;
		}
	}

	return NULL;
}

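/*
 * Return the klp_object in @patch that patches the same object as
 * @old_obj (vmlinux, or the module of the given name), or NULL when
 * there is none.
 */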
static struct klp_object *klp_find_object(struct klp_patch *patch,
					  struct klp_object *old_obj)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj) {
		if (klp_is_module(old_obj)) {
			if (klp_is_module(obj) &&
			    strcmp(old_obj->name, obj->name) == 0) {
				return obj;
			}
		} else if (!klp_is_module(obj)) {
			return obj;
		}
	}

	return NULL;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found at the desired position,
	 * or when no position was given and the symbol turns out not to be
	 * unique.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

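/*
 * Look up the address of symbol @name in @objname (NULL means vmlinux)
 * via kallsyms. @sympos disambiguates non-unique symbols: 0 requires
 * the symbol to be unique, n selects its nth occurrence.
 */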
static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	if (objname)
		module_kallsyms_on_each_symbol(klp_find_callback, &args);
	else
		kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}

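/*
 * Resolve the special .klp.sym.<objname>.<symname>,<sympos> symbols
 * referenced by a klp relocation section: look each one up in the
 * running kernel and store the resulting address in its st_value.
 */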
static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.objname.symname,sympos */
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     objname, symname, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}

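/*
 * Process all .klp.rela.<objname>.* sections of the patch module that
 * target @obj: resolve their livepatch symbols and apply the relocations.
 */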
static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
		 */
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name\n",
			       secname);
			ret = -EINVAL;
			break;
		}

		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			break;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			break;
	}

	return ret;
}

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/transition
 * /sys/kernel/livepatch/<patch>/force
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
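/*
 * For example (a sketch; <patch> stands for the name of a loaded patch
 * module):
 *
 *	echo 0 > /sys/kernel/livepatch/<patch>/enabled  # disable or reverse
 *	echo 1 > /sys/kernel/livepatch/<patch>/force    # force the transition
 *	cat /sys/kernel/livepatch/<patch>/transition    # 1 while in transition
 */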
static int __klp_disable_patch(struct klp_patch *patch);

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool enabled;

	ret = kstrtobool(buf, &enabled);
	if (ret)
		return ret;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (patch->enabled == enabled) {
		/* already in requested state */
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Allow a pending transition to be reversed in either direction.
	 * It might be necessary to complete the transition without forcing
	 * it and thus without breaking the system integrity.
	 *
	 * Re-enabling a disabled patch is not allowed.
	 */
	if (patch == klp_transition_patch)
		klp_reverse_transition();
	else if (!enabled)
		ret = __klp_disable_patch(patch);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&klp_mutex);

	if (ret)
		return ret;
	return count;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
}

static ssize_t transition_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n",
			patch == klp_transition_patch);
}

static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool val;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	if (!val)
		return count;

	mutex_lock(&klp_mutex);

	patch = container_of(kobj, struct klp_patch, kobj);
	if (patch != klp_transition_patch) {
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	klp_force_transition();

	mutex_unlock(&klp_mutex);

	return count;
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	&transition_kobj_attr.attr,
	&force_kobj_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(klp_patch);

static void klp_free_object_dynamic(struct klp_object *obj)
{
	kfree(obj->name);
	kfree(obj);
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func);
static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj);

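/*
 * Allocate a dynamic klp_object for @name (NULL means vmlinux) and link
 * it into @patch. Dynamic objects carry the 'nop' functions needed by
 * the 'replace' mode, see klp_add_nops().
 */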
static struct klp_object *klp_alloc_object_dynamic(const char *name,
						   struct klp_patch *patch)
{
	struct klp_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (name) {
		obj->name = kstrdup(name, GFP_KERNEL);
		if (!obj->name) {
			kfree(obj);
			return NULL;
		}
	}

	klp_init_object_early(patch, obj);
	obj->dynamic = true;

	return obj;
}

static void klp_free_func_nop(struct klp_func *func)
{
	kfree(func->old_name);
	kfree(func);
}

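/*
 * Allocate a 'nop' klp_func mirroring @old_func and link it into @obj.
 * A nop simply hands control back to the original function, see
 * klp_add_nops().
 */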
static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
					   struct klp_object *obj)
{
	struct klp_func *func;

	func = kzalloc(sizeof(*func), GFP_KERNEL);
	if (!func)
		return NULL;

	if (old_func->old_name) {
		func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
		if (!func->old_name) {
			kfree(func);
			return NULL;
		}
	}

	klp_init_func_early(obj, func);
	/*
	 * func->new_func is the same as func->old_func. These addresses are
	 * set when the object is loaded, see klp_init_object_loaded().
	 */
	func->old_sympos = old_func->old_sympos;
	func->nop = true;

	return func;
}

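/*
 * Make sure that @patch contains an object matching @old_obj and that
 * it provides a 'nop' counterpart for every function patched by
 * @old_obj.
 */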
static int klp_add_object_nops(struct klp_patch *patch,
			       struct klp_object *old_obj)
{
	struct klp_object *obj;
	struct klp_func *func, *old_func;

	obj = klp_find_object(patch, old_obj);

	if (!obj) {
		obj = klp_alloc_object_dynamic(old_obj->name, patch);
		if (!obj)
			return -ENOMEM;
	}

	klp_for_each_func(old_obj, old_func) {
		func = klp_find_func(obj, old_func);
		if (func)
			continue;

		func = klp_alloc_func_nop(old_func, obj);
		if (!func)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Add 'nop' functions which simply return to the caller to run
 * the original function. The 'nop' functions are added to a
 * patch to facilitate a 'replace' mode.
 */
static int klp_add_nops(struct klp_patch *patch)
{
	struct klp_patch *old_patch;
	struct klp_object *old_obj;

	klp_for_each_patch(old_patch) {
		klp_for_each_object(old_patch, old_obj) {
			int err;

			err = klp_add_object_nops(patch, old_obj);
			if (err)
				return err;
		}
	}

	return 0;
}

static void klp_kobj_release_patch(struct kobject *kobj)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	complete(&patch->finish);
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = klp_patch_groups,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
	struct klp_object *obj;

	obj = container_of(kobj, struct klp_object, kobj);

	if (obj->dynamic)
		klp_free_object_dynamic(obj);
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
	struct klp_func *func;

	func = container_of(kobj, struct klp_func, kobj);

	if (func->nop)
		klp_free_func_nop(func);
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

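/*
 * Unlink functions from @obj and drop their kobject references. With
 * @nops_only, only the dynamically allocated 'nop' functions are freed.
 */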
static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func, *tmp_func;

	klp_for_each_func_safe(obj, func, tmp_func) {
		if (nops_only && !func->nop)
			continue;

		list_del(&func->node);
		kobject_put(&func->kobj);
	}
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func) {
		func->old_func = NULL;

		if (func->nop)
			func->new_func = NULL;
	}
}

static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj, *tmp_obj;

	klp_for_each_object_safe(patch, obj, tmp_obj) {
		__klp_free_funcs(obj, nops_only);

		if (nops_only && !obj->dynamic)
			continue;

		list_del(&obj->node);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_objects(struct klp_patch *patch)
{
	__klp_free_objects(patch, false);
}

static void klp_free_objects_dynamic(struct klp_patch *patch)
{
	__klp_free_objects(patch, true);
}

/*
 * This function implements the free operations that can be called safely
 * under klp_mutex.
 *
 * The operation must be completed by calling klp_free_patch_finish()
 * outside klp_mutex.
 */
void klp_free_patch_start(struct klp_patch *patch)
{
	if (!list_empty(&patch->list))
		list_del(&patch->list);

	klp_free_objects(patch);
}

/*
 * This function implements the free part that must be called outside
 * klp_mutex.
 *
 * It must be called after klp_free_patch_start(), and it has to be the
 * last function accessing the livepatch structures when the patch gets
 * disabled.
 */
static void klp_free_patch_finish(struct klp_patch *patch)
{
	/*
	 * Avoid deadlock with enabled_store() sysfs callback by
	 * calling this outside klp_mutex. It is safe because
	 * this is called when the patch gets disabled and it
	 * cannot get enabled again.
	 */
	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	/* Put the module after the last access to struct klp_patch. */
	if (!patch->forced)
		module_put(patch->mod);
}

/*
 * The livepatch might be freed from the sysfs interface created by the
 * patch. This work item allows the wait for the interface destruction
 * to happen in a separate context.
 */
static void klp_free_patch_work_fn(struct work_struct *work)
{
	struct klp_patch *patch =
		container_of(work, struct klp_patch, free_work);

	klp_free_patch_finish(patch);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name)
		return -EINVAL;

	/*
	 * NOPs get the address later. The patched module must be loaded,
	 * see klp_init_object_loaded().
	 */
	if (!func->new_func && !func->nop)
		return -EINVAL;

	if (strlen(func->old_name) >= KSYM_NAME_LEN)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->patched = false;
	func->transition = false;

	/*
	 * The format for the sysfs directory is <function,sympos> where
	 * sympos is the nth occurrence of this symbol in kallsyms for the
	 * patched object. If the user selects 0 for old_sympos, then 1 will
	 * be used since a unique symbol will be the first occurrence.
	 */
	return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
			   func->old_name,
			   func->old_sympos ? func->old_sympos : 1);
}

/* Arches may override this to finish any remaining arch-specific tasks */
void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
					struct klp_object *obj)
{
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	module_disable_ro(patch->mod);
	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret) {
		module_enable_ro(patch->mod, true);
		return ret;
	}

	arch_klp_init_object_loaded(patch, obj);
	module_enable_ro(patch->mod, true);

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     (unsigned long *)&func->old_func);
		if (ret)
			return ret;

		ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
						  &func->old_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s'\n",
			       func->old_name);
			return -ENOENT;
		}

		if (func->nop)
			func->new_func = func->old_func;

		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
						  &func->new_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s' replacement\n",
			       func->old_name);
			return -ENOENT;
		}
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
		return -EINVAL;

	obj->patched = false;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			return ret;
	}

	if (klp_is_object_loaded(obj))
		ret = klp_init_object_loaded(patch, obj);

	return ret;
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func)
{
	kobject_init(&func->kobj, &klp_ktype_func);
	list_add_tail(&func->node, &obj->func_list);
}

static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj)
{
	INIT_LIST_HEAD(&obj->func_list);
	kobject_init(&obj->kobj, &klp_ktype_object);
	list_add_tail(&obj->node, &patch->obj_list);
}

static int klp_init_patch_early(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;

	if (!patch->objs)
		return -EINVAL;

	INIT_LIST_HEAD(&patch->list);
	INIT_LIST_HEAD(&patch->obj_list);
	kobject_init(&patch->kobj, &klp_ktype_patch);
	patch->enabled = false;
	patch->forced = false;
	INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
	init_completion(&patch->finish);

	klp_for_each_object_static(patch, obj) {
		if (!obj->funcs)
			return -EINVAL;

		klp_init_object_early(patch, obj);

		klp_for_each_func_static(obj, func) {
			klp_init_func_early(obj, func);
		}
	}

	if (!try_module_get(patch->mod))
		return -ENODEV;

	return 0;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		return ret;

	if (patch->replace) {
		ret = klp_add_nops(patch);
		if (ret)
			return ret;
	}

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			return ret;
	}

	list_add_tail(&patch->list, &klp_patches);

	return 0;
}

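/*
 * Start the transition to the unpatched state. Must be called under
 * klp_mutex; the transition may complete asynchronously, see
 * klp_try_complete_transition().
 */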
static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	if (WARN_ON(!patch->enabled))
		return -EINVAL;

	if (klp_transition_patch)
		return -EBUSY;

	klp_init_transition(patch, KLP_UNPATCHED);

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_pre_unpatch_callback(obj);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
	 * klp_start_transition().  In the rare case where klp_ftrace_handler()
	 * is called shortly after klp_update_patch_state() switches the task,
	 * this ensures the handler sees that func->transition is set.
	 */
	smp_wmb();

	klp_start_transition();
	patch->enabled = false;
	klp_try_complete_transition();

	return 0;
}

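/*
 * Patch all loaded objects and start the transition to the patched
 * state. Must be called under klp_mutex.
 */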
static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (klp_transition_patch)
		return -EBUSY;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_init_transition(patch, KLP_PATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the ops->func_stack writes in
	 * klp_patch_object(), so that klp_ftrace_handler() will see the
	 * func->transition updates before the handler is registered and the
	 * new funcs become visible to the handler.
	 */
	smp_wmb();

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_pre_patch_callback(obj);
		if (ret) {
			pr_warn("pre-patch callback failed for object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}

		ret = klp_patch_object(obj);
		if (ret) {
			pr_warn("failed to patch object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}
	}

	klp_start_transition();
	patch->enabled = true;
	klp_try_complete_transition();

	return 0;
err:
	pr_warn("failed to enable patch '%s'\n", patch->mod->name);

	klp_cancel_transition();
	return ret;
}

/**
 * klp_enable_patch() - enable the livepatch
 * @patch:	patch to be enabled
 *
 * Initializes the data structure associated with the patch, creates the sysfs
 * interface, performs the needed symbol lookups and code relocations,
 * registers the patched functions with ftrace.
 *
 * This function is supposed to be called from the livepatch module_init()
 * callback.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	if (!patch || !patch->mod)
		return -EINVAL;

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	if (!klp_have_reliable_stack()) {
		pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
		pr_warn("The livepatch transition may never complete.\n");
	}

	mutex_lock(&klp_mutex);

	ret = klp_init_patch_early(patch);
	if (ret) {
		mutex_unlock(&klp_mutex);
		return ret;
	}

	ret = klp_init_patch(patch);
	if (ret)
		goto err;

	ret = __klp_enable_patch(patch);
	if (ret)
		goto err;

	mutex_unlock(&klp_mutex);

	return 0;

err:
	klp_free_patch_start(patch);

	mutex_unlock(&klp_mutex);

	klp_free_patch_finish(patch);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);
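
/*
 * A minimal caller, modeled on samples/livepatch/livepatch-sample.c
 * (the new-function name below is illustrative; a NULL object name
 * means vmlinux):
 *
 *	static struct klp_func funcs[] = {
 *		{ .old_name = "cmdline_proc_show",
 *		  .new_func = livepatch_cmdline_proc_show, }, { }
 *	};
 *	static struct klp_object objs[] = {
 *		{ .name = NULL, .funcs = funcs, }, { }
 *	};
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE, .objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 *	module_init(livepatch_init);
 */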

/*
 * This function removes replaced patches.
 *
 * We could be pretty aggressive here. It is called in the situation where
 * these structures are no longer accessible. All functions are redirected
 * by the klp_transition_patch. They either use the new code or they run
 * the original code because of the special nop function patches.
 *
 * The only exception is when the transition was forced. In this case,
 * klp_ftrace_handler() might still see the replaced patch on the stack.
 * Fortunately, it is carefully designed to work with removed functions
 * thanks to RCU. We only have to keep the patches on the system. This
 * is handled transparently: klp_free_patch_finish() skips module_put()
 * when patch->forced is set.
 */
void klp_discard_replaced_patches(struct klp_patch *new_patch)
{
	struct klp_patch *old_patch, *tmp_patch;

	klp_for_each_patch_safe(old_patch, tmp_patch) {
		if (old_patch == new_patch)
			return;

		old_patch->enabled = false;
		klp_unpatch_objects(old_patch);
		klp_free_patch_start(old_patch);
		schedule_work(&old_patch->free_work);
	}
}

/*
 * This function removes the dynamically allocated 'nop' functions.
 *
 * We could be pretty aggressive. NOPs do not change the existing
 * behavior except for adding an unnecessary delay in the ftrace handler.
 *
 * It is safe even when the transition was forced. The ftrace handler
 * will see a valid ops->func_stack entry thanks to RCU.
 *
 * We can even free the NOP structures. They must be the last entry
 * in ops->func_stack, so unregister_ftrace_function() is called for
 * them. It provides the same guarantee as klp_synchronize_transition():
 * nobody is inside the ftrace handler once the operation finishes.
 *
 * IMPORTANT: It must be called right after removing the replaced patches!
 */
void klp_discard_nops(struct klp_patch *new_patch)
{
	klp_unpatch_objects_dynamic(klp_transition_patch);
	klp_free_objects_dynamic(klp_transition_patch);
}

/*
 * Remove parts of patches that touch a given kernel module. The list of
 * patches processed might be limited: patches are handled up to, but not
 * including, @limit. When @limit is NULL, all patches are handled.
 */
static void klp_cleanup_module_patches_limited(struct module *mod,
					       struct klp_patch *limit)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	klp_for_each_patch(patch) {
		if (patch == limit)
			break;

		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch != klp_transition_patch)
				klp_pre_unpatch_callback(obj);

			pr_notice("reverting patch '%s' on unloading module '%s'\n",
				  patch->mod->name, obj->mod->name);
			klp_unpatch_object(obj);

			klp_post_unpatch_callback(obj);

			klp_free_object_loaded(obj);
			break;
		}
	}
}

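/*
 * Module loader notification: a module has reached the COMING state.
 * Apply all patches that target it; a failure here makes the module
 * load fail.
 */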
int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to remember that klp_module_coming() has been
	 * called, because we never know which module a future patch will
	 * want to patch.
	 */
	mod->klp_alive = true;

	klp_for_each_patch(patch) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_pre_patch_callback(obj);
			if (ret) {
				pr_warn("pre-patch callback failed for object '%s'\n",
					obj->name);
				goto err;
			}

			ret = klp_patch_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);

				klp_post_unpatch_callback(obj);
				goto err;
			}

			if (patch != klp_transition_patch)
				klp_post_patch_callback(obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	klp_cleanup_module_patches_limited(mod, patch);
	mutex_unlock(&klp_mutex);

	return ret;
}

void klp_module_going(struct module *mod)
{
	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to remember that klp_module_going() has been
	 * called, because we never know which module a future patch will
	 * want to patch.
	 */
	mod->klp_alive = false;

	klp_cleanup_module_patches_limited(mod, NULL);

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);
1219