// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 */
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
	unsigned long		caller;
	cpu_stop_fn_t		fn;
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

void print_stop_info(const char *log_lvl, struct task_struct *task)
{
	/*
	 * If @task is a stopper task, it cannot migrate and task_cpu() is
	 * stable.
	 */
	struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task));

	if (task != stopper->thread)
		return;

	printk("%sStopper: %pS <- %pS\n", log_lvl, stopper->fn, (void *)stopper->caller);
}

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
				  struct cpu_stop_work *work,
				  struct wake_q_head *wakeq)
{
	list_add_tail(&work->list, &stopper->works);
	wake_q_add(wakeq, stopper->thread);
}

/* queue @work to @stopper.  If offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	DEFINE_WAKE_Q(wakeq);
	unsigned long flags;
	bool enabled;

	preempt_disable();
	raw_spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work, &wakeq);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	wake_up_q(&wakeq);
	preempt_enable();

	return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority, preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done, .caller = _RET_IP_ };

	cpu_stop_init_done(&done, 1);
	if (!cpu_stop_queue_work(cpu, &work))
		return -ENOENT;
	/*
	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
	 * cycle by doing a preemption:
	 */
	cond_resched();
	wait_for_completion(&done.completion);
	return done.ret;
}
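/*
 * Illustrative sketch of a stop_one_cpu() caller.  The callback runs on
 * the target CPU at stopper priority with preemption disabled, so it
 * must be short and must not sleep.  All example_* names below are
 * hypothetical, for illustration only.
 */
static int example_read_cpu(void *arg)
{
	/* Executes on the target CPU; atomic context, keep it brief. */
	*(int *)arg = smp_processor_id();
	return 0;
}

static int example_query_cpu(unsigned int cpu)
{
	int seen = -1;
	int ret;

	/* -ENOENT here means @cpu's stopper was disabled (CPU offline). */
	ret = stop_one_cpu(cpu, example_read_cpu, &seen);
	return ret ? ret : seen;
}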
/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	WRITE_ONCE(msdata->state, newstate);
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}

notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
{
	cpu_relax();
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	const struct cpumask *cpumask;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus) {
		cpumask = cpu_online_mask;
		is_active = cpu == cpumask_first(cpumask);
	} else {
		cpumask = msdata->active_cpus;
		is_active = cpumask_test_cpu(cpu, cpumask);
	}

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		stop_machine_yield(cpumask);
		newstate = READ_ONCE(msdata->state);
		if (newstate != curstate) {
			curstate = newstate;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		} else if (curstate > MULTI_STOP_PREPARE) {
			/*
			 * At this stage all other CPUs we depend on must spin
			 * in the same loop.  Any reason for hard-lockup should
			 * be detected and reported on their side.
			 */
			touch_nmi_watchdog();
		}
		rcu_momentary_dyntick_idle();
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}
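/*
 * Illustrative walk-through of the state machine above: every thread
 * acks each state via ack_state(), and the thread whose decrement takes
 * thread_ack to zero advances all of them with set_state():
 *
 *	PREPARE -> DISABLE_IRQ -> RUN -> EXIT
 *
 * A sketch of a callback suitable for multi_cpu_stop(); by the time
 * MULTI_STOP_RUN is reached, every participating CPU has IRQs
 * hard-disabled.  The example_* name is hypothetical.
 */
static int example_flip_flag(void *arg)
{
	/* IRQs are off on all participating CPUs at this point. */
	WRITE_ONCE(*(bool *)arg, true);
	return 0;
}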
static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	DEFINE_WAKE_Q(wakeq);
	int err;

retry:
	/*
	 * The waking up of stopper threads has to happen in the same
	 * scheduling context as the queueing.  Otherwise, there is a
	 * possibility of one of the above stoppers being woken up by another
	 * CPU, and preempting us.  This would cause us to never wake up the
	 * other stopper.
	 */
	preempt_disable();
	raw_spin_lock_irq(&stopper1->lock);
	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	if (!stopper1->enabled || !stopper2->enabled) {
		err = -ENOENT;
		goto unlock;
	}

	/*
	 * Ensure that if we race with __stop_cpus() the stoppers won't get
	 * queued up in reverse order, leading to system deadlock.
	 *
	 * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
	 * queued a work on cpu1 but not on cpu2; we hold both locks.
	 *
	 * It can be falsely true but it is safe to spin until it is cleared;
	 * queue_stop_cpus_work() does everything under preempt_disable().
	 */
	if (unlikely(stop_cpus_in_progress)) {
		err = -EDEADLK;
		goto unlock;
	}

	err = 0;
	__cpu_stop_queue_work(stopper1, work1, &wakeq);
	__cpu_stop_queue_work(stopper2, work2, &wakeq);

unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);

	if (unlikely(err == -EDEADLK)) {
		preempt_enable();

		while (stop_cpus_in_progress)
			cpu_relax();

		goto retry;
	}

	wake_up_q(&wakeq);
	preempt_enable();

	return err;
}

/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both @cpu1 and @cpu2 and runs @fn on @cpu1.
 *
 * Returns when both works have completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done,
		.caller = _RET_IP_,
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
		return -ENOENT;

	wait_for_completion(&done.completion);
	return done.ret;
}
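/*
 * Illustrative sketch of a stop_two_cpus() caller, in the spirit of the
 * scheduler's migrate_swap().  @fn runs on @cpu1 while @cpu2 spins in
 * multi_cpu_stop() with IRQs off, so neither CPU executes anything else
 * meanwhile.  All example_* names are hypothetical.
 */
struct example_pair {
	unsigned int src_cpu, dst_cpu;
};

static int example_pair_fn(void *arg)
{
	struct example_pair *p = arg;

	/* Both CPUs are quiesced here; their state can be swapped safely. */
	pr_debug("pairing cpu%u with cpu%u\n", p->src_cpu, p->dst_cpu);
	return 0;
}

static int example_pair_cpus(unsigned int a, unsigned int b)
{
	struct example_pair p = { .src_cpu = a, .dst_cpu = b };

	return stop_two_cpus(a, b, example_pair_fn, &p);
}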
/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, .caller = _RET_IP_, };
	return cpu_stop_queue_work(cpu, work_buf);
}
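/*
 * Illustrative fire-and-forget usage, modeled on the scheduler's active
 * load balancing (which queues active_load_balance_cpu_stop() this way).
 * @work_buf must stay untouched until the callback runs, hence per-CPU
 * storage rather than the caller's stack.  All example_* names are
 * hypothetical.
 */
static DEFINE_PER_CPU(struct cpu_stop_work, example_kick_work);

static int example_kick_fn(void *arg)
{
	return 0;	/* runs later on the target CPU */
}

static void example_kick_cpu(unsigned int cpu)
{
	/* A false return means @cpu's stopper is disabled (CPU going down). */
	stop_one_cpu_nowait(cpu, example_kick_fn, NULL,
			    &per_cpu(example_kick_work, cpu));
}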
static bool queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
	bool queued = false;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	stop_cpus_in_progress = true;
	barrier();
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		if (cpu_stop_queue_work(cpu, work))
			queued = true;
	}
	barrier();
	stop_cpus_in_progress = false;
	preempt_enable();

	return queued;
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority,
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized, making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, or a non-zero return value if any execution returned
 * non-zero.
 */
static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	raw_spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

		/* cpu stop callbacks must not sleep, make in_atomic() == T */
		stopper->caller = work->caller;
		stopper->fn = fn;
		preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
				done->ret = ret;
			cpu_stop_signal_done(done);
		}
		preempt_count_dec();
		stopper->fn = NULL;
		stopper->caller = 0;
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
		goto repeat;
	}
}
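/*
 * Note on the preempt_count_inc()/preempt_count_dec() bracket in
 * cpu_stopper_thread(): stop callbacks run in the stopper kthread's
 * process context but must behave like atomic context; in_atomic() is
 * true while @fn runs.  An illustrative *buggy* callback that the
 * WARN_ONCE above would catch (example_* is hypothetical):
 */
static int example_leaky_fn(void *arg)
{
	preempt_disable();	/* bug: no matching preempt_enable() */
	return 0;		/* leaves preempt_count() elevated -> WARN */
}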
void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	/*
	 * Lockless.  cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks; until then it is fine to queue
	 * the new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.park			= cpu_stop_park,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		raw_spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_unpark(raw_smp_processor_id());
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
			    const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	lockdep_assert_cpus_held();

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called early in
		 * boot, before the stopper threads have been initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	cpus_read_lock();
	ret = stop_machine_cpuslocked(fn, data, cpus);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
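/*
 * Illustrative stop_machine() caller: the callback runs on one CPU while
 * every other online CPU spins with IRQs disabled, so the paired updates
 * below appear atomic to all observers.  The example_* globals are
 * hypothetical.
 */
static int example_cfg_a, example_cfg_b;

static int example_update_cfg(void *arg)
{
	int val = *(int *)arg;

	/* No CPU can observe a half-updated pair. */
	example_cfg_a = val;
	example_cfg_b = val * 2;
	return 0;
}

static int example_set_cfg(int val)
{
	/* NULL @cpus: @fn runs on the first online CPU. */
	return stop_machine(example_update_cfg, &val, NULL);
}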
/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start), is not marked active, and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly on the
 * local CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, or a non-zero return value if
 * any execution returned non-zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}
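/*
 * Illustrative caller of stop_machine_from_inactive_cpu(): early
 * CPU-online code that must synchronize with the active CPUs before the
 * incoming CPU can schedule (x86's MTRR rendezvous is the historical
 * user).  The example_* names are hypothetical.
 */
static int example_sync_fn(void *arg)
{
	/* Runs with IRQs hard-disabled on every participating CPU. */
	return 0;
}

static void example_cpu_starting_sync(void)
{
	/* Must be called on a CPU that is coming up and not yet active. */
	stop_machine_from_inactive_cpu(example_sync_fn, NULL, NULL);
}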