xref: /openbmc/linux/kernel/livepatch/transition.c (revision a2818ee4)
1 /*
2  * transition.c - Kernel Live Patching transition functions
3  *
4  * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version 2
9  * of the License, or (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 
22 #include <linux/cpu.h>
23 #include <linux/stacktrace.h>
24 #include "core.h"
25 #include "patch.h"
26 #include "transition.h"
27 #include "../sched/sched.h"
28 
29 #define MAX_STACK_ENTRIES  100
30 #define STACK_ERR_BUF_SIZE 128
31 
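/*
 * Rough transition lifecycle, as driven from core.c with klp_mutex held
 * (a simplified sketch; object patching and the pre/post callbacks are
 * omitted):
 *
 *   klp_init_transition(patch, KLP_PATCHED or KLP_UNPATCHED);
 *   klp_start_transition();
 *   klp_try_complete_transition();   <- retried via klp_transition_work
 *
 * klp_cancel_transition() backs out a transition that has been initialized
 * but not yet started; klp_reverse_transition() flips the direction of one
 * that is already in progress.
 */
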
32 struct klp_patch *klp_transition_patch;
33 
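/*
 * The patch state that all tasks are being migrated to: KLP_PATCHED or
 * KLP_UNPATCHED while a transition is in progress, KLP_UNDEFINED otherwise.
 * Written with klp_mutex held; read locklessly (READ_ONCE()) by
 * klp_update_patch_state().
 */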
34 static int klp_target_state = KLP_UNDEFINED;
35 
36 /*
37  * This work can be performed periodically to finish patching or unpatching any
38  * "straggler" tasks which failed to transition in the first attempt.
39  */
40 static void klp_transition_work_fn(struct work_struct *work)
41 {
42 	mutex_lock(&klp_mutex);
43 
44 	if (klp_transition_patch)
45 		klp_try_complete_transition();
46 
47 	mutex_unlock(&klp_mutex);
48 }
49 static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
50 
51 /*
52  * This function is just a stub to implement a hard force
53  * of synchronize_rcu(). This requires synchronizing
54  * tasks even in userspace and idle.
55  */
56 static void klp_sync(struct work_struct *work)
57 {
58 }
59 
60 /*
61  * We also allow patching functions where RCU is not watching,
62  * e.g. before user_exit(). We cannot rely on the RCU infrastructure
63  * to do the synchronization there. Instead, hard-force the sched synchronization.
64  *
65  * This approach allows RCU functions to be used safely for manipulating
66  * the func_stack.
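 *
 * schedule_on_each_cpu() waits for a dummy work item to run on every CPU,
 * which guarantees that every CPU has scheduled at least once.  That is
 * sufficient here because both klp_ftrace_handler() and
 * klp_update_patch_state() run with preemption disabled.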
67  */
68 static void klp_synchronize_transition(void)
69 {
70 	schedule_on_each_cpu(klp_sync);
71 }
72 
73 /*
74  * The transition to the target patch state is complete.  Clean up the data
75  * structures.
76  */
77 static void klp_complete_transition(void)
78 {
79 	struct klp_object *obj;
80 	struct klp_func *func;
81 	struct task_struct *g, *task;
82 	unsigned int cpu;
83 
84 	pr_debug("'%s': completing %s transition\n",
85 		 klp_transition_patch->mod->name,
86 		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
87 
88 	if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
89 		klp_discard_replaced_patches(klp_transition_patch);
90 		klp_discard_nops(klp_transition_patch);
91 	}
92 
93 	if (klp_target_state == KLP_UNPATCHED) {
94 		/*
95 		 * All tasks have transitioned to KLP_UNPATCHED so we can now
96 		 * remove the new functions from the func_stack.
97 		 */
98 		klp_unpatch_objects(klp_transition_patch);
99 
100 		/*
101 		 * Make sure klp_ftrace_handler() can no longer see functions
102 		 * from this patch on the ops->func_stack.  Otherwise, after
103 		 * func->transition gets cleared, the handler may choose a
104 		 * removed function.
105 		 */
106 		klp_synchronize_transition();
107 	}
108 
109 	klp_for_each_object(klp_transition_patch, obj)
110 		klp_for_each_func(obj, func)
111 			func->transition = false;
112 
113 	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
114 	if (klp_target_state == KLP_PATCHED)
115 		klp_synchronize_transition();
116 
117 	read_lock(&tasklist_lock);
118 	for_each_process_thread(g, task) {
119 		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
120 		task->patch_state = KLP_UNDEFINED;
121 	}
122 	read_unlock(&tasklist_lock);
123 
124 	for_each_possible_cpu(cpu) {
125 		task = idle_task(cpu);
126 		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
127 		task->patch_state = KLP_UNDEFINED;
128 	}
129 
130 	klp_for_each_object(klp_transition_patch, obj) {
131 		if (!klp_is_object_loaded(obj))
132 			continue;
133 		if (klp_target_state == KLP_PATCHED)
134 			klp_post_patch_callback(obj);
135 		else if (klp_target_state == KLP_UNPATCHED)
136 			klp_post_unpatch_callback(obj);
137 	}
138 
139 	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
140 		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
141 
142 	klp_target_state = KLP_UNDEFINED;
143 	klp_transition_patch = NULL;
144 }
145 
146 /*
147  * This is called in the error path, to cancel a transition before it has
148  * started, i.e. klp_init_transition() has been called but
149  * klp_start_transition() hasn't.  If the transition *has* been started,
150  * klp_reverse_transition() should be used instead.
151  */
152 void klp_cancel_transition(void)
153 {
154 	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
155 		return;
156 
157 	pr_debug("'%s': canceling patching transition, going to unpatch\n",
158 		 klp_transition_patch->mod->name);
159 
160 	klp_target_state = KLP_UNPATCHED;
161 	klp_complete_transition();
162 }
163 
164 /*
165  * Switch the patched state of the task to the set of functions in the target
166  * patch state.
167  *
168  * NOTE: If task is not 'current', the caller must ensure the task is inactive.
169  * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
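 *
 * The switch points themselves live outside this file: a task calls this on
 * its way out of the kernel or from the idle loop (see the comments in
 * klp_start_transition()), and klp_force_transition() below calls it for any
 * remaining stragglers.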
170  */
171 void klp_update_patch_state(struct task_struct *task)
172 {
173 	/*
174 	 * A variant of synchronize_rcu() is used to allow patching functions
175 	 * where RCU is not watching, see klp_synchronize_transition().
176 	 */
177 	preempt_disable_notrace();
178 
179 	/*
180 	 * This test_and_clear_tsk_thread_flag() call also serves as a read
181 	 * barrier (smp_rmb) for two cases:
182 	 *
183 	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
184 	 *    klp_target_state read.  The corresponding write barrier is in
185 	 *    klp_init_transition().
186 	 *
187 	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
188 	 *    of func->transition, if klp_ftrace_handler() is called later on
189 	 *    the same CPU.  See __klp_disable_patch().
190 	 */
191 	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
192 		task->patch_state = READ_ONCE(klp_target_state);
193 
194 	preempt_enable_notrace();
195 }
196 
197 /*
198  * Determine whether the given stack trace includes any references to a
199  * to-be-patched or to-be-unpatched function.
200  */
201 static int klp_check_stack_func(struct klp_func *func,
202 				struct stack_trace *trace)
203 {
204 	unsigned long func_addr, func_size, address;
205 	struct klp_ops *ops;
206 	int i;
207 
208 	for (i = 0; i < trace->nr_entries; i++) {
209 		address = trace->entries[i];
210 
211 		if (klp_target_state == KLP_UNPATCHED) {
212 			 /*
213 			  * Check for the to-be-unpatched function
214 			  * (the func itself).
215 			  */
216 			func_addr = (unsigned long)func->new_func;
217 			func_size = func->new_size;
218 		} else {
219 			/*
220 			 * Check for the to-be-patched function
221 			 * (the previous func).
222 			 */
223 			ops = klp_find_ops(func->old_func);
224 
225 			if (list_is_singular(&ops->func_stack)) {
226 				/* original function */
227 				func_addr = (unsigned long)func->old_func;
228 				func_size = func->old_size;
229 			} else {
230 				/* previously patched function */
231 				struct klp_func *prev;
232 
233 				prev = list_next_entry(func, stack_node);
234 				func_addr = (unsigned long)prev->new_func;
235 				func_size = prev->new_size;
236 			}
237 		}
238 
239 		if (address >= func_addr && address < func_addr + func_size)
240 			return -EAGAIN;
241 	}
242 
243 	return 0;
244 }
245 
246 /*
247  * Determine whether it's safe to transition the task to the target patch state
248  * by looking for any to-be-patched or to-be-unpatched functions on its stack.
249  */
250 static int klp_check_stack(struct task_struct *task, char *err_buf)
251 {
252 	static unsigned long entries[MAX_STACK_ENTRIES];
253 	struct stack_trace trace;
254 	struct klp_object *obj;
255 	struct klp_func *func;
256 	int ret;
257 
258 	trace.skip = 0;
259 	trace.nr_entries = 0;
260 	trace.max_entries = MAX_STACK_ENTRIES;
261 	trace.entries = entries;
262 	ret = save_stack_trace_tsk_reliable(task, &trace);
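	/* -ENOSYS means the arch provides no reliable stacktrace implementation. */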
263 	WARN_ON_ONCE(ret == -ENOSYS);
264 	if (ret) {
265 		snprintf(err_buf, STACK_ERR_BUF_SIZE,
266 			 "%s: %s:%d has an unreliable stack\n",
267 			 __func__, task->comm, task->pid);
268 		return ret;
269 	}
270 
271 	klp_for_each_object(klp_transition_patch, obj) {
272 		if (!obj->patched)
273 			continue;
274 		klp_for_each_func(obj, func) {
275 			ret = klp_check_stack_func(func, &trace);
276 			if (ret) {
277 				snprintf(err_buf, STACK_ERR_BUF_SIZE,
278 					 "%s: %s:%d is sleeping on function %s\n",
279 					 __func__, task->comm, task->pid,
280 					 func->old_name);
281 				return ret;
282 			}
283 		}
284 	}
285 
286 	return 0;
287 }
288 
289 /*
290  * Try to safely switch a task to the target patch state.  If it's currently
291  * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
292  * if the stack is unreliable, return false.
293  */
294 static bool klp_try_switch_task(struct task_struct *task)
295 {
296 	struct rq *rq;
297 	struct rq_flags flags;
298 	int ret;
299 	bool success = false;
300 	char err_buf[STACK_ERR_BUF_SIZE];
301 
302 	err_buf[0] = '\0';
303 
304 	/* check if this task has already switched over */
305 	if (task->patch_state == klp_target_state)
306 		return true;
307 
308 	/*
309 	 * Now try to check the stack for any to-be-patched or to-be-unpatched
310 	 * functions.  If all goes well, switch the task to the target patch
311 	 * state.
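	 *
	 * Holding the task's rq lock keeps it from being scheduled in or
	 * migrated while its stack is examined.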
312 	 */
313 	rq = task_rq_lock(task, &flags);
314 
315 	if (task_running(rq, task) && task != current) {
316 		snprintf(err_buf, STACK_ERR_BUF_SIZE,
317 			 "%s: %s:%d is running\n", __func__, task->comm,
318 			 task->pid);
319 		goto done;
320 	}
321 
322 	ret = klp_check_stack(task, err_buf);
323 	if (ret)
324 		goto done;
325 
326 	success = true;
327 
328 	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
329 	task->patch_state = klp_target_state;
330 
331 done:
332 	task_rq_unlock(rq, task, &flags);
333 
334 	/*
335 	 * Due to console deadlock issues, pr_debug() can't be used while
336 	 * holding the task rq lock.  Instead we have to use a temporary buffer
337 	 * and print the debug message after releasing the lock.
338 	 */
339 	if (err_buf[0] != '\0')
340 		pr_debug("%s", err_buf);
341 
342 	return success;
343 
344 }
345 
346 /*
347  * Try to switch all remaining tasks to the target patch state by walking the
348  * stacks of sleeping tasks and looking for any to-be-patched or
349  * to-be-unpatched functions.  If such functions are found, the task can't be
350  * switched yet.
351  *
352  * If any tasks are still stuck in the initial patch state, schedule a retry.
353  */
354 void klp_try_complete_transition(void)
355 {
356 	unsigned int cpu;
357 	struct task_struct *g, *task;
358 	struct klp_patch *patch;
359 	bool complete = true;
360 
361 	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
362 
363 	/*
364 	 * Try to switch the tasks to the target patch state by walking their
365 	 * stacks and looking for any to-be-patched or to-be-unpatched
366 	 * functions.  If such functions are found on a stack, or if the stack
367 	 * is deemed unreliable, the task can't be switched yet.
368 	 *
369 	 * Usually this will transition most (or all) of the tasks on a system
370 	 * unless the patch includes changes to a very common function.
371 	 */
372 	read_lock(&tasklist_lock);
373 	for_each_process_thread(g, task)
374 		if (!klp_try_switch_task(task))
375 			complete = false;
376 	read_unlock(&tasklist_lock);
377 
378 	/*
379 	 * Ditto for the idle "swapper" tasks.
380 	 */
381 	get_online_cpus();
382 	for_each_possible_cpu(cpu) {
383 		task = idle_task(cpu);
384 		if (cpu_online(cpu)) {
385 			if (!klp_try_switch_task(task))
386 				complete = false;
387 		} else if (task->patch_state != klp_target_state) {
388 			/* offline idle tasks can be switched immediately */
389 			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
390 			task->patch_state = klp_target_state;
391 		}
392 	}
393 	put_online_cpus();
394 
395 	if (!complete) {
396 		/*
397 		 * Some tasks weren't able to be switched over.  Try again
398 		 * later and/or wait for other methods like kernel exit
399 		 * switching.
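		 * The retry runs roughly once per second.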
400 		 */
401 		schedule_delayed_work(&klp_transition_work,
402 				      round_jiffies_relative(HZ));
403 		return;
404 	}
405 
406 	/* we're done, now clean up the data structures */
407 	patch = klp_transition_patch;
408 	klp_complete_transition();
409 
410 	/*
411 	 * It would make more sense to free the patch in
412 	 * klp_complete_transition(), but that function is also
413 	 * called from klp_cancel_transition().
414 	 */
415 	if (!patch->enabled) {
416 		klp_free_patch_start(patch);
417 		schedule_work(&patch->free_work);
418 	}
419 }
420 
421 /*
422  * Start the transition to the specified target patch state so tasks can begin
423  * switching to it.
424  */
425 void klp_start_transition(void)
426 {
427 	struct task_struct *g, *task;
428 	unsigned int cpu;
429 
430 	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
431 
432 	pr_notice("'%s': starting %s transition\n",
433 		  klp_transition_patch->mod->name,
434 		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
435 
436 	/*
437 	 * Mark all normal tasks as needing a patch state update.  They'll
438 	 * switch either in klp_try_complete_transition() or as they exit the
439 	 * kernel.
440 	 */
441 	read_lock(&tasklist_lock);
442 	for_each_process_thread(g, task)
443 		if (task->patch_state != klp_target_state)
444 			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
445 	read_unlock(&tasklist_lock);
446 
447 	/*
448 	 * Mark all idle tasks as needing a patch state update.  They'll switch
449 	 * either in klp_try_complete_transition() or at the idle loop switch
450 	 * point.
451 	 */
452 	for_each_possible_cpu(cpu) {
453 		task = idle_task(cpu);
454 		if (task->patch_state != klp_target_state)
455 			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
456 	}
457 }
458 
459 /*
460  * Initialize the global target patch state and all tasks to the initial patch
461  * state, and initialize all function transition states to true in preparation
462  * for patching or unpatching.
463  */
464 void klp_init_transition(struct klp_patch *patch, int state)
465 {
466 	struct task_struct *g, *task;
467 	unsigned int cpu;
468 	struct klp_object *obj;
469 	struct klp_func *func;
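	/* KLP_UNPATCHED and KLP_PATCHED are 0 and 1, so !state is the opposite state. */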
470 	int initial_state = !state;
471 
472 	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);
473 
474 	klp_transition_patch = patch;
475 
476 	/*
477 	 * Set the global target patch state which tasks will switch to.  This
478 	 * has no effect until the TIF_PATCH_PENDING flags get set later.
479 	 */
480 	klp_target_state = state;
481 
482 	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
483 		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
484 
485 	/*
486 	 * Initialize all tasks to the initial patch state to prepare them for
487 	 * switching to the target state.
488 	 */
489 	read_lock(&tasklist_lock);
490 	for_each_process_thread(g, task) {
491 		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
492 		task->patch_state = initial_state;
493 	}
494 	read_unlock(&tasklist_lock);
495 
496 	/*
497 	 * Ditto for the idle "swapper" tasks.
498 	 */
499 	for_each_possible_cpu(cpu) {
500 		task = idle_task(cpu);
501 		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
502 		task->patch_state = initial_state;
503 	}
504 
505 	/*
506 	 * Enforce the order of the task->patch_state initializations and the
507 	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
508 	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
509 	 *
510 	 * Also enforce the order of the klp_target_state write and future
511 	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
512 	 * set a task->patch_state to KLP_UNDEFINED.
513 	 */
514 	smp_wmb();
515 
516 	/*
517 	 * Set the func transition states so klp_ftrace_handler() will know to
518 	 * switch to the transition logic.
519 	 *
520 	 * When patching, the funcs aren't yet in the func_stack and will be
521 	 * made visible to the ftrace handler shortly by the calls to
522 	 * klp_patch_object().
523 	 *
524 	 * When unpatching, the funcs are already in the func_stack and so are
525 	 * already visible to the ftrace handler.
526 	 */
527 	klp_for_each_object(patch, obj)
528 		klp_for_each_func(obj, func)
529 			func->transition = true;
530 }
531 
532 /*
533  * This function can be called in the middle of an existing transition to
534  * reverse the direction of the target patch state.  This can be done to
535  * effectively cancel an existing enable or disable operation if there are any
536  * tasks which are stuck in the initial patch state.
537  */
538 void klp_reverse_transition(void)
539 {
540 	unsigned int cpu;
541 	struct task_struct *g, *task;
542 
543 	pr_debug("'%s': reversing transition from %s\n",
544 		 klp_transition_patch->mod->name,
545 		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
546 						   "unpatching to patching");
547 
548 	klp_transition_patch->enabled = !klp_transition_patch->enabled;
549 
550 	klp_target_state = !klp_target_state;
551 
552 	/*
553 	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
554 	 * klp_update_patch_state() running in parallel with
555 	 * klp_start_transition().
556 	 */
557 	read_lock(&tasklist_lock);
558 	for_each_process_thread(g, task)
559 		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
560 	read_unlock(&tasklist_lock);
561 
562 	for_each_possible_cpu(cpu)
563 		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);
564 
565 	/* Let any remaining calls to klp_update_patch_state() complete */
566 	klp_synchronize_transition();
567 
568 	klp_start_transition();
569 }
570 
571 /* Called from copy_process() during fork */
572 void klp_copy_process(struct task_struct *child)
573 {
574 	child->patch_state = current->patch_state;
575 
576 	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
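
	/*
	 * The child therefore starts in its parent's patch state; if a
	 * transition is in progress, TIF_PATCH_PENDING makes sure the child
	 * gets caught by the same switch points as every other task.
	 */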
577 }
578 
579 /*
580  * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
581  * Kthreads with TIF_PATCH_PENDING set are woken up. Currently, only the
582  * administrator can request this action.
583  */
584 void klp_send_signals(void)
585 {
586 	struct task_struct *g, *task;
587 
588 	pr_notice("signaling remaining tasks\n");
589 
590 	read_lock(&tasklist_lock);
591 	for_each_process_thread(g, task) {
592 		if (!klp_patch_pending(task))
593 			continue;
594 
595 		/*
596 		 * There is a small race here. We could see TIF_PATCH_PENDING
597 		 * set and decide to wake up a kthread or send a fake signal.
598 		 * Meanwhile the task could migrate itself and the action
599 		 * would be meaningless. It is not serious though.
600 		 */
601 		if (task->flags & PF_KTHREAD) {
602 			/*
603 			 * Wake up a kthread which sleeps interruptibly and
604 			 * has not yet been migrated.
605 			 */
606 			wake_up_state(task, TASK_INTERRUPTIBLE);
607 		} else {
608 			/*
609 			 * Send a fake signal to all non-kthread tasks which have
610 			 * not yet been migrated.
611 			 */
612 			spin_lock_irq(&task->sighand->siglock);
613 			signal_wake_up(task, 0);
614 			spin_unlock_irq(&task->sighand->siglock);
615 		}
616 	}
617 	read_unlock(&tasklist_lock);
618 }
619 
620 /*
621  * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an
622  * existing transition to finish.
623  *
624  * NOTE: klp_update_patch_state(task) requires the task to be inactive or
625  * 'current'. This is not the case here and the consistency model could be
626  * broken. The administrator, who is the only one able to trigger
627  * klp_force_transition(), has to be aware of this.
628  */
629 void klp_force_transition(void)
630 {
631 	struct klp_patch *patch;
632 	struct task_struct *g, *task;
633 	unsigned int cpu;
634 
635 	pr_warn("forcing remaining tasks to the patched state\n");
636 
637 	read_lock(&tasklist_lock);
638 	for_each_process_thread(g, task)
639 		klp_update_patch_state(task);
640 	read_unlock(&tasklist_lock);
641 
642 	for_each_possible_cpu(cpu)
643 		klp_update_patch_state(idle_task(cpu));
644 
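	/*
	 * Once forced, a patch can never be removed safely again: core.c keeps
	 * the module reference of a forced patch for good.
	 */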
645 	list_for_each_entry(patch, &klp_patches, list)
646 		patch->forced = true;
647 }
648