// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/completion.h>
#include <linux/memory.h>
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
#include "state.h"
#include "transition.h"

/*
 * klp_mutex is a coarse lock which serializes access to klp data.  All
 * accesses to klp-related variables and structures must have mutex protection,
 * except within the following functions which carefully avoid the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 */
DEFINE_MUTEX(klp_mutex);

/*
 * Actively used patches: enabled or in transition. Note that replaced
 * or disabled patches are not listed even though the related kernel
 * module can still be loaded.
 */
LIST_HEAD(klp_patches);

static struct kobject *klp_root_kobj;

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not mess with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics
	 * of the functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

static struct klp_func *klp_find_func(struct klp_object *obj,
				      struct klp_func *old_func)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if ((strcmp(old_func->old_name, func->old_name) == 0) &&
		    (old_func->old_sympos == func->old_sympos)) {
			return func;
		}
	}

	return NULL;
}

static struct klp_object *klp_find_object(struct klp_patch *patch,
					  struct klp_object *old_obj)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj) {
		if (klp_is_module(old_obj)) {
			if (klp_is_module(obj) &&
			    strcmp(old_obj->name, obj->name) == 0) {
				return obj;
			}
		} else if (!klp_is_module(obj)) {
			return obj;
		}
	}

	return NULL;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	if (objname)
		module_kallsyms_on_each_symbol(klp_find_callback, &args);
	else
		kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
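
/*
 * An illustrative sketch of the sympos convention implemented above
 * (the symbol and module names here are hypothetical):
 *
 *	unsigned long addr;
 *
 *	// sympos == 0: the symbol must be unique in the object.
 *	klp_find_object_symbol(NULL, "unique_func", 0, &addr);
 *
 *	// sympos == n: pick the nth occurrence of a duplicated symbol,
 *	// e.g. the 2nd static "cleanup" in module "foo".
 *	klp_find_object_symbol("foo", "cleanup", 2, &addr);
 *
 * Both calls return 0 and fill in *addr on success, or -EINVAL after
 * logging one of the pr_err() messages above.
 */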

static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.objname.symname,sympos */
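		/* e.g. ".klp.sym.vmlinux.printk,0" (a hypothetical example) */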
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     objname, symname, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}

static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
		 */
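		/* e.g. ".klp.rela.ext4.text" would yield sec_objname "ext4" (hypothetical) */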
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name\n",
			       secname);
			ret = -EINVAL;
			break;
		}

		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			break;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			break;
	}

	return ret;
}

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/transition
 * /sys/kernel/livepatch/<patch>/force
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
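
/*
 * A usage sketch from a shell, assuming a loaded patch module named
 * "livepatch_sample" (the name is hypothetical):
 *
 *	# cat /sys/kernel/livepatch/livepatch_sample/enabled
 *	1
 *	# echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 *	# cat /sys/kernel/livepatch/livepatch_sample/transition
 *	1
 *	# echo 1 > /sys/kernel/livepatch/livepatch_sample/force
 *
 * "enabled" accepts 0/1 per enabled_store() below, "transition" reports
 * whether the patch is klp_transition_patch, and writing 1 to "force"
 * invokes klp_force_transition().
 */
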
static int __klp_disable_patch(struct klp_patch *patch);

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool enabled;

	ret = kstrtobool(buf, &enabled);
	if (ret)
		return ret;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (patch->enabled == enabled) {
		/* already in requested state */
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Allow a pending transition to be reversed in either direction. It
	 * might be necessary to complete the transition without forcing it
	 * and breaking the system integrity.
	 *
	 * Do not allow a disabled patch to be re-enabled.
	 */
	if (patch == klp_transition_patch)
		klp_reverse_transition();
	else if (!enabled)
		ret = __klp_disable_patch(patch);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&klp_mutex);

	if (ret)
		return ret;
	return count;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
}

static ssize_t transition_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n",
			patch == klp_transition_patch);
}

static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool val;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	if (!val)
		return count;

	mutex_lock(&klp_mutex);

	patch = container_of(kobj, struct klp_patch, kobj);
	if (patch != klp_transition_patch) {
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	klp_force_transition();

	mutex_unlock(&klp_mutex);

	return count;
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	&transition_kobj_attr.attr,
	&force_kobj_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(klp_patch);

static void klp_free_object_dynamic(struct klp_object *obj)
{
	kfree(obj->name);
	kfree(obj);
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func);
static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj);

static struct klp_object *klp_alloc_object_dynamic(const char *name,
						   struct klp_patch *patch)
{
	struct klp_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (name) {
		obj->name = kstrdup(name, GFP_KERNEL);
		if (!obj->name) {
			kfree(obj);
			return NULL;
		}
	}

	klp_init_object_early(patch, obj);
	obj->dynamic = true;

	return obj;
}

static void klp_free_func_nop(struct klp_func *func)
{
	kfree(func->old_name);
	kfree(func);
}

static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
					   struct klp_object *obj)
{
	struct klp_func *func;

	func = kzalloc(sizeof(*func), GFP_KERNEL);
	if (!func)
		return NULL;

	if (old_func->old_name) {
		func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
		if (!func->old_name) {
			kfree(func);
			return NULL;
		}
	}

	klp_init_func_early(obj, func);
	/*
	 * func->new_func is the same as func->old_func. These addresses are
	 * set when the object is loaded, see klp_init_object_loaded().
	 */
	func->old_sympos = old_func->old_sympos;
	func->nop = true;

	return func;
}

static int klp_add_object_nops(struct klp_patch *patch,
			       struct klp_object *old_obj)
{
	struct klp_object *obj;
	struct klp_func *func, *old_func;

	obj = klp_find_object(patch, old_obj);

	if (!obj) {
		obj = klp_alloc_object_dynamic(old_obj->name, patch);
		if (!obj)
			return -ENOMEM;
	}

	klp_for_each_func(old_obj, old_func) {
		func = klp_find_func(obj, old_func);
		if (func)
			continue;

		func = klp_alloc_func_nop(old_func, obj);
		if (!func)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Add 'nop' functions which simply return to the caller to run
 * the original function. The 'nop' functions are added to a
 * patch to facilitate a 'replace' mode.
 */
static int klp_add_nops(struct klp_patch *patch)
{
	struct klp_patch *old_patch;
	struct klp_object *old_obj;

	klp_for_each_patch(old_patch) {
		klp_for_each_object(old_patch, old_obj) {
			int err;

			err = klp_add_object_nops(patch, old_obj);
			if (err)
				return err;
		}
	}

	return 0;
}

static void klp_kobj_release_patch(struct kobject *kobj)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	complete(&patch->finish);
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = klp_patch_groups,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
	struct klp_object *obj;

	obj = container_of(kobj, struct klp_object, kobj);

	if (obj->dynamic)
		klp_free_object_dynamic(obj);
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
	struct klp_func *func;

	func = container_of(kobj, struct klp_func, kobj);

	if (func->nop)
		klp_free_func_nop(func);
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func, *tmp_func;

	klp_for_each_func_safe(obj, func, tmp_func) {
		if (nops_only && !func->nop)
			continue;

		list_del(&func->node);
		kobject_put(&func->kobj);
	}
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func) {
		func->old_func = NULL;

		if (func->nop)
			func->new_func = NULL;
	}
}

static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj, *tmp_obj;

	klp_for_each_object_safe(patch, obj, tmp_obj) {
		__klp_free_funcs(obj, nops_only);

		if (nops_only && !obj->dynamic)
			continue;

		list_del(&obj->node);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_objects(struct klp_patch *patch)
{
	__klp_free_objects(patch, false);
}

static void klp_free_objects_dynamic(struct klp_patch *patch)
{
	__klp_free_objects(patch, true);
}

/*
 * This function implements the free operations that can be called safely
 * under klp_mutex.
 *
 * The operation must be completed by calling klp_free_patch_finish()
 * outside klp_mutex.
 */
static void klp_free_patch_start(struct klp_patch *patch)
{
	if (!list_empty(&patch->list))
		list_del(&patch->list);

	klp_free_objects(patch);
}

/*
 * This function implements the free part that must be called outside
 * klp_mutex.
 *
 * It must be called after klp_free_patch_start(), and it has to be the
 * last function accessing the livepatch structures when the patch gets
 * disabled.
 */
static void klp_free_patch_finish(struct klp_patch *patch)
{
	/*
	 * Avoid deadlock with enabled_store() sysfs callback by
	 * calling this outside klp_mutex. It is safe because
	 * this is called when the patch gets disabled and it
	 * cannot get enabled again.
	 */
	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	/* Put the module after the last access to struct klp_patch. */
	if (!patch->forced)
		module_put(patch->mod);
}

/*
 * The livepatch might be freed from the sysfs interface created by the
 * patch. This work allows waiting until the interface is destroyed in a
 * separate context.
 */
static void klp_free_patch_work_fn(struct work_struct *work)
{
	struct klp_patch *patch =
		container_of(work, struct klp_patch, free_work);

	klp_free_patch_finish(patch);
}

void klp_free_patch_async(struct klp_patch *patch)
{
	klp_free_patch_start(patch);
	schedule_work(&patch->free_work);
}

void klp_free_replaced_patches_async(struct klp_patch *new_patch)
{
	struct klp_patch *old_patch, *tmp_patch;

	klp_for_each_patch_safe(old_patch, tmp_patch) {
		if (old_patch == new_patch)
			return;
		klp_free_patch_async(old_patch);
	}
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name)
		return -EINVAL;

	/*
	 * NOPs get the address later. The patched module must be loaded
	 * first; see klp_init_object_loaded().
	 */
	if (!func->new_func && !func->nop)
		return -EINVAL;

	if (strlen(func->old_name) >= KSYM_NAME_LEN)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->patched = false;
	func->transition = false;

	/*
	 * The format for the sysfs directory is <function,sympos> where
	 * sympos is the nth occurrence of this symbol in kallsyms for the
	 * patched object. If the user selects 0 for old_sympos, then 1 will
	 * be used since a unique symbol will be the first occurrence.
	 */
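	/* e.g. a unique symbol "meminfo_proc_show" gets the directory "meminfo_proc_show,1" (hypothetical) */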
	return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
			   func->old_name,
			   func->old_sympos ? func->old_sympos : 1);
}

/* Arches may override this to finish any remaining arch-specific tasks */
void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
					struct klp_object *obj)
{
}

/* Parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	mutex_lock(&text_mutex);

	module_disable_ro(patch->mod);
	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret) {
		module_enable_ro(patch->mod, true);
		mutex_unlock(&text_mutex);
		return ret;
	}

	arch_klp_init_object_loaded(patch, obj);
	module_enable_ro(patch->mod, true);

	mutex_unlock(&text_mutex);

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     (unsigned long *)&func->old_func);
		if (ret)
			return ret;

		ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
						  &func->old_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s'\n",
			       func->old_name);
			return -ENOENT;
		}

		if (func->nop)
			func->new_func = func->old_func;

		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
						  &func->new_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s' replacement\n",
			       func->old_name);
			return -ENOENT;
		}
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
		return -EINVAL;

	obj->patched = false;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			return ret;
	}

	if (klp_is_object_loaded(obj))
		ret = klp_init_object_loaded(patch, obj);

	return ret;
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func)
{
	kobject_init(&func->kobj, &klp_ktype_func);
	list_add_tail(&func->node, &obj->func_list);
}

static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj)
{
	INIT_LIST_HEAD(&obj->func_list);
	kobject_init(&obj->kobj, &klp_ktype_object);
	list_add_tail(&obj->node, &patch->obj_list);
}

static int klp_init_patch_early(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;

	if (!patch->objs)
		return -EINVAL;

	INIT_LIST_HEAD(&patch->list);
	INIT_LIST_HEAD(&patch->obj_list);
	kobject_init(&patch->kobj, &klp_ktype_patch);
	patch->enabled = false;
	patch->forced = false;
	INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
	init_completion(&patch->finish);

	klp_for_each_object_static(patch, obj) {
		if (!obj->funcs)
			return -EINVAL;

		klp_init_object_early(patch, obj);

		klp_for_each_func_static(obj, func) {
			klp_init_func_early(obj, func);
		}
	}

	if (!try_module_get(patch->mod))
		return -ENODEV;

	return 0;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		return ret;

	if (patch->replace) {
		ret = klp_add_nops(patch);
		if (ret)
			return ret;
	}

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			return ret;
	}

	list_add_tail(&patch->list, &klp_patches);

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	if (WARN_ON(!patch->enabled))
		return -EINVAL;

	if (klp_transition_patch)
		return -EBUSY;

	klp_init_transition(patch, KLP_UNPATCHED);

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_pre_unpatch_callback(obj);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
	 * klp_start_transition().  In the rare case where klp_ftrace_handler()
	 * is called shortly after klp_update_patch_state() switches the task,
	 * this ensures the handler sees that func->transition is set.
	 */
	smp_wmb();

	klp_start_transition();
	patch->enabled = false;
	klp_try_complete_transition();

	return 0;
}

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (klp_transition_patch)
		return -EBUSY;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_init_transition(patch, KLP_PATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the ops->func_stack writes in
	 * klp_patch_object(), so that klp_ftrace_handler() will see the
	 * func->transition updates before the handler is registered and the
	 * new funcs become visible to the handler.
	 */
	smp_wmb();

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_pre_patch_callback(obj);
		if (ret) {
			pr_warn("pre-patch callback failed for object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}

		ret = klp_patch_object(obj);
		if (ret) {
			pr_warn("failed to patch object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}
	}

	klp_start_transition();
	patch->enabled = true;
	klp_try_complete_transition();

	return 0;
err:
	pr_warn("failed to enable patch '%s'\n", patch->mod->name);

	klp_cancel_transition();
	return ret;
}

/**
 * klp_enable_patch() - enable the livepatch
 * @patch:	patch to be enabled
 *
 * Initializes the data structure associated with the patch, creates the sysfs
 * interface, performs the needed symbol lookups and code relocations, and
 * registers the patched functions with ftrace.
 *
 * This function is supposed to be called from the livepatch module_init()
 * callback.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	if (!patch || !patch->mod)
		return -EINVAL;

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	if (!klp_have_reliable_stack()) {
		pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
		pr_warn("The livepatch transition may never complete.\n");
	}

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_compatible(patch)) {
		pr_err("Livepatch patch (%s) is not compatible with the already installed livepatches.\n",
			patch->mod->name);
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	ret = klp_init_patch_early(patch);
	if (ret) {
		mutex_unlock(&klp_mutex);
		return ret;
	}

	ret = klp_init_patch(patch);
	if (ret)
		goto err;

	ret = __klp_enable_patch(patch);
	if (ret)
		goto err;

	mutex_unlock(&klp_mutex);

	return 0;

err:
	klp_free_patch_start(patch);

	mutex_unlock(&klp_mutex);

	klp_free_patch_finish(patch);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);
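
/*
 * A minimal caller sketch, modelled loosely on
 * samples/livepatch/livepatch-sample.c; the patched symbol and the
 * replacement function here are hypothetical:
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			// name == NULL means vmlinux
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 *	module_init(livepatch_init);
 *
 * The module must also carry MODULE_INFO(livepatch, "Y") so that
 * is_livepatch_module() above accepts it.
 */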

/*
 * This function unpatches objects from the replaced livepatches.
 *
 * We can be pretty aggressive here. It is called in a situation where
 * these structures are no longer accessed from the ftrace handler.
 * All functions are redirected by klp_transition_patch: they either
 * use the new code or stay in the original code because of the
 * special nop function patches.
 *
 * The only exception is when the transition was forced. In this case,
 * klp_ftrace_handler() might still see the replaced patch on the stack.
 * Fortunately, it is carefully designed to work with removed functions
 * thanks to RCU. We only have to keep the patches on the system. This
 * is handled transparently by patch->forced, which makes
 * klp_free_patch_finish() skip the module_put() call.
 */
void klp_unpatch_replaced_patches(struct klp_patch *new_patch)
{
	struct klp_patch *old_patch;

	klp_for_each_patch(old_patch) {
		if (old_patch == new_patch)
			return;

		old_patch->enabled = false;
		klp_unpatch_objects(old_patch);
	}
}

/*
 * This function removes the dynamically allocated 'nop' functions.
 *
 * We can be pretty aggressive. NOPs do not change the existing behavior
 * except for the unnecessary delay added by the ftrace handler.
 *
 * It is safe even when the transition was forced. The ftrace handler
 * will see a valid ops->func_stack entry thanks to RCU.
 *
 * We can even free the NOP structures. They must be the last entry
 * in ops->func_stack. Therefore unregister_ftrace_function() is called.
 * It does the same as klp_synchronize_transition() to make sure that
 * nobody is inside the ftrace handler once the operation finishes.
 *
 * IMPORTANT: It must be called right after removing the replaced patches!
 */
void klp_discard_nops(struct klp_patch *new_patch)
{
	klp_unpatch_objects_dynamic(klp_transition_patch);
	klp_free_objects_dynamic(klp_transition_patch);
}

/*
 * Remove parts of patches that touch a given kernel module. The set of
 * patches processed can be limited: when limit is NULL, all patches
 * are handled.
 */
static void klp_cleanup_module_patches_limited(struct module *mod,
					       struct klp_patch *limit)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	klp_for_each_patch(patch) {
		if (patch == limit)
			break;

		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch != klp_transition_patch)
				klp_pre_unpatch_callback(obj);

			pr_notice("reverting patch '%s' on unloading module '%s'\n",
				  patch->mod->name, obj->mod->name);
			klp_unpatch_object(obj);

			klp_post_unpatch_callback(obj);

			klp_free_object_loaded(obj);
			break;
		}
	}
}

int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know which module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	klp_for_each_patch(patch) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_pre_patch_callback(obj);
			if (ret) {
				pr_warn("pre-patch callback failed for object '%s'\n",
					obj->name);
				goto err;
			}

			ret = klp_patch_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);

				klp_post_unpatch_callback(obj);
				goto err;
			}

			if (patch != klp_transition_patch)
				klp_post_patch_callback(obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return the
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	obj->mod = NULL;
	klp_cleanup_module_patches_limited(mod, patch);
	mutex_unlock(&klp_mutex);

	return ret;
}

void klp_module_going(struct module *mod)
{
	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know which module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	klp_cleanup_module_patches_limited(mod, NULL);

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);