// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

#define MAX_STACK_ENTRIES	100
#define STACK_ERR_BUF_SIZE	128

#define SIGNALS_TIMEOUT	15

struct klp_patch *klp_transition_patch;

static int klp_target_state = KLP_UNDEFINED;

static unsigned int klp_signals_cnt;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub to implement a hard force
 * of synchronize_rcu(). This requires synchronizing
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We also allow patching functions where RCU is not watching,
 * e.g. before user_exit(). We cannot rely on the RCU infrastructure
 * to do the synchronization; instead, hard force the sched synchronization.
 *
 * This approach makes it safe to use the RCU functions for manipulating
 * func_stack.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}

/*
 * The transition to the target patch state is complete. Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
		klp_discard_replaced_patches(klp_transition_patch);
		klp_discard_nops(klp_transition_patch);
	}

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack. Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	klp_for_each_object(klp_transition_patch, obj)
		klp_for_each_func(obj, func)
			func->transition = false;

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't. If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read. The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU. See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
				unsigned int nr_entries)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	for (i = 0; i < nr_entries; i++) {
		address = entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_func);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = (unsigned long)func->old_func;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct klp_object *obj;
	struct klp_func *func;
	int ret, nr_entries;

	ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
	WARN_ON_ONCE(ret == -ENOSYS);
	if (ret < 0) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		return ret;
	}
	nr_entries = ret;

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, entries, nr_entries);
			if (ret) {
				snprintf(err_buf, STACK_ERR_BUF_SIZE,
					 "%s: %s:%d is sleeping on function %s\n",
					 __func__, task->comm, task->pid,
					 func->old_name);
				return ret;
			}
		}
	}

	return 0;
}

/*
 * Try to safely switch a task to the target patch state. If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	struct rq *rq;
	struct rq_flags flags;
	int ret;
	bool success = false;
	char err_buf[STACK_ERR_BUF_SIZE];

	err_buf[0] = '\0';

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions. If all goes well, switch the task to the target patch
	 * state.
	 */
	rq = task_rq_lock(task, &flags);

	if (task_running(rq, task) && task != current) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d is running\n", __func__, task->comm,
			 task->pid);
		goto done;
	}

	ret = klp_check_stack(task, err_buf);
	if (ret)
		goto done;

	success = true;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;

done:
	task_rq_unlock(rq, task, &flags);

	/*
	 * Due to console deadlock issues, pr_debug() can't be used while
	 * holding the task rq lock. Instead we have to use a temporary buffer
	 * and print the debug message after releasing the lock.
	 */
	if (err_buf[0] != '\0')
		pr_debug("%s", err_buf);

	return success;
}

/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.
 */
static void klp_send_signals(void)
{
	struct task_struct *g, *task;

	if (klp_signals_cnt == SIGNALS_TIMEOUT)
		pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		if (!klp_patch_pending(task))
			continue;

		/*
		 * There is a small race here. We could see TIF_PATCH_PENDING
		 * set and decide to wake up a kthread or send a fake signal.
		 * Meanwhile the task could migrate itself and the action
		 * would be meaningless. It is not serious though.
		 */
		if (task->flags & PF_KTHREAD) {
			/*
			 * Wake up a kthread which is sleeping interruptibly
			 * and has still not migrated.
			 */
			wake_up_state(task, TASK_INTERRUPTIBLE);
		} else {
			/*
			 * Send a fake signal to all non-kthread tasks which
			 * have still not migrated.
			 */
			spin_lock_irq(&task->sighand->siglock);
			signal_wake_up(task, 0);
			spin_unlock_irq(&task->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions. If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	struct klp_patch *patch;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions. If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	get_online_cpus();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task))
				complete = false;
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	put_online_cpus();

	if (!complete) {
		if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
			klp_send_signals();
		klp_signals_cnt++;

		/*
		 * Some tasks weren't able to be switched over. Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

	/* we're done, now cleanup the data structures */
	patch = klp_transition_patch;
	klp_complete_transition();

	/*
	 * It would make more sense to free the patch in
	 * klp_complete_transition(), but it is also called
	 * from klp_cancel_transition().
	 */
	if (!patch->enabled) {
		klp_free_patch_start(patch);
		schedule_work(&patch->free_work);
	}
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Mark all normal tasks as needing a patch state update. They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update. They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}

	klp_signals_cnt = 0;
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to. This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state. This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
						   "unpatching to patching");

	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	klp_synchronize_transition();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}

/*
 * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an
 * existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'. This is not the case here and the consistency model could be
 * broken. The administrator, who is the only one able to trigger
 * klp_force_transition(), has to be aware of this.
 */
void klp_force_transition(void)
{
	struct klp_patch *patch;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_warn("forcing remaining tasks to the patched state\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		klp_update_patch_state(task);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		klp_update_patch_state(idle_task(cpu));

	klp_for_each_patch(patch)
		patch->forced = true;
}
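
/*
 * Rough, non-authoritative sketch of how a caller in core.c (assumed to be an
 * __klp_enable_patch()-style path; see core.c for the real sequence, pre-patch
 * callbacks and error handling) is expected to drive the entry points above.
 * Kept as a comment only; the ordering below follows the comments in
 * klp_init_transition() and klp_transition_work_fn():
 *
 *	mutex_lock(&klp_mutex);
 *	klp_init_transition(patch, KLP_PATCHED);
 *	klp_for_each_object(patch, obj)
 *		if (klp_is_object_loaded(obj))
 *			klp_patch_object(obj);	// funcs become visible to ftrace
 *	klp_start_transition();			// sets TIF_PATCH_PENDING
 *	klp_try_complete_transition();		// stragglers are retried later
 *	mutex_unlock(&klp_mutex);		// from klp_transition_work_fn()
 */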