// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

static LIST_HEAD(klp_ops);

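/*
 * klp_find_ops() - look up the klp_ops entry for a patched function
 *
 * Each entry on the klp_ops list tracks one ftrace_ops registered for a
 * patched function location, together with the stack of klp_funcs (one per
 * patch touching that function, newest first).  All funcs on a given stack
 * share the same old_func, so comparing against the first entry is enough.
 */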
struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}

static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct ftrace_regs *fregs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;
	int bit;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * The ftrace_test_recursion_trylock() will disable preemption,
	 * which is required for the variant of synchronize_rcu() that is
	 * used to allow patching functions where RCU is not watching.
	 * See klp_synchronize_transition() for more details.
	 */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (WARN_ON_ONCE(bit < 0))
		return;

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads. The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path. In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered. But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads. Otherwise we could read an
		 * out-of-date task state and pick the wrong function. The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

	ftrace_regs_set_instruction_pointer(fregs, (unsigned long)func->new_func);

unlock:
	ftrace_test_recursion_unlock(bit);
}
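
/*
 * Illustrative example of the selection logic above (not taken from this
 * file): suppose patch P1 and then patch P2 both patch the same function.
 * ops->func_stack is then P2's func followed by P1's func (newest first).
 * A task whose patch_state is KLP_PATCHED runs P2's replacement; a task
 * still in KLP_UNPATCHED during P2's transition falls back to the next
 * stack entry, i.e. P1's replacement, and only runs the original code once
 * the stack is exhausted.
 */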

static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

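/*
 * klp_patch_func() - redirect a function to its new implementation
 *
 * For the first patch of a given function, allocate a klp_ops, point its
 * ftrace_ops at klp_ftrace_handler() and register it on the function's
 * ftrace location.  For subsequent patches of the same function, just push
 * the new klp_func onto the existing func_stack.
 */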
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc = ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_DYNAMIC |
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
				  FTRACE_OPS_FL_SAVE_REGS |
#endif
				  FTRACE_OPS_FL_IPMODIFY |
				  FTRACE_OPS_FL_PERMANENT;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

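/*
 * When nops_only is true, only the dynamically allocated no-op functions
 * are unpatched.  These are created by an atomic-replace patch to cover
 * functions of the patches being replaced, and are removed again once the
 * transition has finished (see klp_unpatch_objects_dynamic() below).
 */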
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}


void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}

int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}
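
/*
 * Illustrative sketch (not part of this file): the klp_object/klp_func
 * structures consumed by klp_patch_object() above are defined by a
 * livepatch module, loosely modelled on samples/livepatch/livepatch-sample.c.
 * The patched function and its replacement below are just placeholders.
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			// a NULL name means the object is vmlinux
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 *	module_init(livepatch_init);
 *
 * The livepatch core then calls klp_patch_object() for each object whose
 * module (or vmlinux) is loaded.
 */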

static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}

void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}