// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include <linux/tracehook.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

#define MAX_STACK_ENTRIES  100
#define STACK_ERR_BUF_SIZE 128

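/*
 * Number of transition-work retries (roughly one per second, see
 * klp_try_complete_transition()) after which still-blocked tasks are
 * nudged with a fake signal, see klp_send_signals().
 */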
#define SIGNALS_TIMEOUT 15

struct klp_patch *klp_transition_patch;

static int klp_target_state = KLP_UNDEFINED;

static unsigned int klp_signals_cnt;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub to implement a hard force
 * of synchronize_rcu(). This requires synchronizing
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We also allow patching functions where RCU is not watching,
 * e.g. before user_exit(). We cannot rely on the RCU infrastructure
 * to do the synchronization there. Instead, hard-force the scheduler
 * synchronization.
 *
 * This approach allows RCU functions to be used safely for manipulating
 * func_stack.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}
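
/*
 * Why this works: schedule_on_each_cpu() queues the empty klp_sync() work
 * on every CPU and waits for all of it to finish.  A CPU can only run the
 * work after passing through the scheduler, so by the time the call
 * returns, every preempt-disabled section that was in flight -- including
 * the preempt_disable_notrace() section in klp_update_patch_state() -- has
 * completed, even where RCU is not watching.
 */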

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
		klp_unpatch_replaced_patches(klp_transition_patch);
		klp_discard_nops(klp_transition_patch);
	}

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	klp_for_each_object(klp_transition_patch, obj)
		klp_for_each_func(obj, func)
			func->transition = false;

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

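	/*
	 * No task has TIF_PATCH_PENDING set yet, so flipping the target
	 * state and completing the transition simply tears down whatever
	 * has been patched so far and resets the bookkeeping.
	 */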
	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the task's patch state so that it uses the set of functions
 * associated with the target patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
				unsigned int nr_entries)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	for (i = 0; i < nr_entries; i++) {
		address = entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_func);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = (unsigned long)func->old_func;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

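		/*
		 * [func_addr, func_addr + func_size) covers the function
		 * being replaced; a return address in that range means the
		 * task is sleeping in, or will return through, that function.
		 */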
		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
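	/*
	 * The static 'entries' buffer is shared by all checked tasks, which
	 * is safe because every caller runs under klp_mutex.
	 */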
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct klp_object *obj;
	struct klp_func *func;
	int ret, nr_entries;

	ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
	if (ret < 0) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		return ret;
	}
	nr_entries = ret;

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, entries, nr_entries);
			if (ret) {
				snprintf(err_buf, STACK_ERR_BUF_SIZE,
					 "%s: %s:%d is sleeping on function %s\n",
					 __func__, task->comm, task->pid,
					 func->old_name);
				return ret;
			}
		}
	}

	return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	static char err_buf[STACK_ERR_BUF_SIZE];
	struct rq *rq;
	struct rq_flags flags;
	int ret;
	bool success = false;

	err_buf[0] = '\0';

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
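	/*
	 * (Reliable stack traces require CONFIG_HAVE_RELIABLE_STACKTRACE,
	 * which only a few architectures, e.g. x86_64, s390 and powerpc,
	 * implement.)
	 */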
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	rq = task_rq_lock(task, &flags);

	if (task_running(rq, task) && task != current) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d is running\n", __func__, task->comm,
			 task->pid);
		goto done;
	}

	ret = klp_check_stack(task, err_buf);
	if (ret)
		goto done;

	success = true;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;

done:
	task_rq_unlock(rq, task, &flags);

	/*
	 * Due to console deadlock issues, pr_debug() can't be used while
	 * holding the task rq lock.  Instead we have to use a temporary buffer
	 * and print the debug message after releasing the lock.
	 */
	if (err_buf[0] != '\0')
		pr_debug("%s", err_buf);

	return success;
}

/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.
 */
static void klp_send_signals(void)
{
	struct task_struct *g, *task;

	if (klp_signals_cnt == SIGNALS_TIMEOUT)
		pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		if (!klp_patch_pending(task))
			continue;

		/*
		 * There is a small race here. We could see TIF_PATCH_PENDING
		 * set and decide to wake up a kthread or send a fake signal.
		 * Meanwhile the task could migrate itself and the action
		 * would be meaningless. It is not serious though.
		 */
		if (task->flags & PF_KTHREAD) {
			/*
			 * Wake up a kthread which sleeps interruptibly and
			 * still has not been migrated.
			 */
			wake_up_state(task, TASK_INTERRUPTIBLE);
		} else {
			/*
			 * Send a fake signal to all non-kthread tasks which
			 * are still not migrated.  The fake signal only makes
			 * the task pass through the exit-to-usermode slow
			 * path, where klp_update_patch_state() handles the
			 * pending flag; no signal is actually delivered.
			 */
			set_notify_signal(task);
		}
	}
	read_unlock(&tasklist_lock);
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	struct klp_patch *patch;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	get_online_cpus();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task))
				complete = false;
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	put_online_cpus();

	if (!complete) {
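		/*
		 * Nudge the remaining tasks every SIGNALS_TIMEOUT-th retry,
		 * i.e. roughly every 15 seconds given the one-second retry
		 * period used below.
		 */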
		if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
			klp_send_signals();
		klp_signals_cnt++;

		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

	/* we're done, now clean up the data structures */
	patch = klp_transition_patch;
	klp_complete_transition();

	/*
	 * It would make more sense to free the unused patches in
	 * klp_complete_transition(), but it is also called from
	 * klp_cancel_transition().
	 */
	if (!patch->enabled)
		klp_free_patch_async(patch);
	else if (patch->replace)
		klp_free_replaced_patches_async(patch);
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}

	klp_signals_cnt = 0;
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
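	/* KLP_PATCHED (1) and KLP_UNPATCHED (0) are logical complements. */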
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
						   "unpatching to patching");

	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	klp_synchronize_transition();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}

/*
 * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an
 * existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'. This is not the case here, so the consistency model could be
 * broken. The administrator, who is the only one who can trigger
 * klp_force_transition(), has to be aware of this.
 */
void klp_force_transition(void)
{
	struct klp_patch *patch;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_warn("forcing remaining tasks to the patched state\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		klp_update_patch_state(task);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		klp_update_patch_state(idle_task(cpu));

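	/*
	 * Mark all patches as forced: a forced patch module can no longer be
	 * safely removed (rmmod), because the consistency model may have
	 * been violated.
	 */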
	klp_for_each_patch(patch)
		patch->forced = true;
}