xref: /openbmc/linux/kernel/livepatch/patch.c (revision eb50fd3a)
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "patch.h"
#include "transition.h"

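/*
 * List of struct klp_ops entries, one for each function address that
 * currently has a livepatch ftrace handler registered.  Each entry stacks
 * all klp_func structures that patch that address.
 */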
static LIST_HEAD(klp_ops);

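/*
 * Find the klp_ops struct (and thus the ftrace_ops) already set up for the
 * given function address, or NULL if the address is not currently patched.
 * All functions on a func_stack share the same old_addr, so checking the
 * first entry is sufficient.
 */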
struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}

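/*
 * This is the ftrace handler registered for every patched function.  It
 * normally redirects execution to the new function on top of the func_stack.
 * While a transition is in progress, it instead consults the current task's
 * patch state to decide between the new, the previously patched, or the
 * original implementation.
 */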
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * A variant of synchronize_sched() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_sched() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads.  The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path.  In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered.  But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads.  Otherwise we could read an
		 * out-of-date task state and pick the wrong function.  The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	preempt_enable_notrace();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif
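
/*
 * For illustration only: an architecture that needs a non-default location
 * could provide an override along these lines in its arch headers (the
 * exact search range below is illustrative, not taken from this file):
 *
 *	#define klp_get_ftrace_location klp_get_ftrace_location
 *	static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
 *	{
 *		return ftrace_location_range(faddr, faddr + 16);
 *	}
 */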

/*
 * Stop redirecting calls from func->old_addr to this klp_func.  If it was
 * the only patch stacked on that address, also unregister the ftrace
 * handler, clear its filter and free the klp_ops entry.
 */
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

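/*
 * Redirect func->old_addr to func->new_func.  The first patch for a given
 * address allocates a klp_ops entry and registers the ftrace handler; later
 * patches for the same address are simply pushed on top of the existing
 * func_stack.
 */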
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
				func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

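/*
 * Unpatch every currently patched function in the object (vmlinux or a
 * module) and mark the object as unpatched.
 */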
void klp_unpatch_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->patched)
			klp_unpatch_func(func);

	obj->patched = false;
}

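/*
 * Patch every function in the object.  On failure, roll the object back to
 * a completely unpatched state.
 */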
int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}

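/*
 * Unpatch every currently patched object belonging to the given patch.
 */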
void klp_unpatch_objects(struct klp_patch *patch)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_unpatch_object(obj);
}