/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell <rusty@rustcorp.com.au>
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
				  struct cpu_stop_work *work,
				  struct wake_q_head *wakeq)
{
	list_add_tail(&work->list, &stopper->works);
	wake_q_add(wakeq, stopper->thread);
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	DEFINE_WAKE_Q(wakeq);
	unsigned long flags;
	bool enabled;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work, &wakeq);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	wake_up_q(&wakeq);

	return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	if (!cpu_stop_queue_work(cpu, &work))
		return -ENOENT;
	/*
	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
	 * cycle by doing a preemption:
	 */
	cond_resched();
	wait_for_completion(&done.completion);
	return done.ret;
}

/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	msdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus)
		is_active = cpu == cpumask_first(cpu_online_mask);
	else
		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		cpu_relax_yield();
		if (msdata->state != curstate) {
			curstate = msdata->state;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		} else if (curstate > MULTI_STOP_PREPARE) {
			/*
			 * At this stage all other CPUs we depend on must spin
			 * in the same loop. Any reason for hard-lockup should
			 * be detected and reported on their side.
			 */
			touch_nmi_watchdog();
		}
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	DEFINE_WAKE_Q(wakeq);
	int err;
retry:
	raw_spin_lock_irq(&stopper1->lock);
	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	err = -ENOENT;
	if (!stopper1->enabled || !stopper2->enabled)
		goto unlock;
	/*
	 * Ensure that if we race with __stop_cpus() the stoppers won't get
	 * queued up in reverse order leading to system deadlock.
	 *
	 * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
	 * queued a work on cpu1 but not on cpu2, we hold both locks.
	 *
	 * It can be falsely true but it is safe to spin until it is cleared,
	 * queue_stop_cpus_work() does everything under preempt_disable().
	 */
	err = -EDEADLK;
	if (unlikely(stop_cpus_in_progress))
		goto unlock;

	err = 0;
	__cpu_stop_queue_work(stopper1, work1, &wakeq);
	__cpu_stop_queue_work(stopper2, work2, &wakeq);
unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);

	if (unlikely(err == -EDEADLK)) {
		while (stop_cpus_in_progress)
			cpu_relax();
		goto retry;
	}

	if (!err) {
		preempt_disable();
		wake_up_q(&wakeq);
		preempt_enable();
	}

	return err;
}

/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both the current and specified CPU and runs @fn on one of them.
 *
 * returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
		return -ENOENT;

	wait_for_completion(&done.completion);
	return done.ret;
}

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	return cpu_stop_queue_work(cpu, work_buf);
}

static bool queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
	bool queued = false;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	stop_cpus_in_progress = true;
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		if (cpu_stop_queue_work(cpu, work))
			queued = true;
	}
	stop_cpus_in_progress = false;
	preempt_enable();

	return queued;
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, any non
 * zero return value if any returned non zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	raw_spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

		/* cpu stop callbacks must not sleep, make in_atomic() == T */
		preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
				done->ret = ret;
			cpu_stop_signal_done(done);
		}
		preempt_count_dec();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %pf(%p) leaked preempt count\n", fn, arg);
		goto repeat;
	}
}

void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	/*
	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks, until then it is fine to queue
	 * the new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.park			= cpu_stop_park,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		raw_spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_unpark(raw_smp_processor_id());
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
			    const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	lockdep_assert_cpus_held();

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before stop_machine() has been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	cpus_read_lock();
	ret = stop_machine_cpuslocked(fn, data, cpus);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);

/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}
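
/*
 * Illustrative usage sketch, compiled out: roughly how a caller might use
 * stop_machine() to update state that must never be observed half-written
 * by any online CPU.  The names below (feature_enabled, __set_feature,
 * set_feature) are hypothetical and are not part of this file or of any
 * in-tree caller.
 */
#if 0
/* Hypothetical flag that must change atomically w.r.t. all online CPUs. */
static bool feature_enabled;

static int __set_feature(void *data)
{
	/*
	 * Runs on one CPU while every other online CPU spins in
	 * multi_cpu_stop() with interrupts disabled, so no CPU can run
	 * code that observes an intermediate state of the flag.
	 */
	feature_enabled = *(bool *)data;
	return 0;
}

static int set_feature(bool enable)
{
	/* NULL cpumask: run __set_feature() on the first online CPU. */
	return stop_machine(__set_feature, &enable, NULL);
}
#endif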