/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/lglock.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	bool			executed;	/* actually executed? */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

/*
 * Avoids a race between stop_two_cpus and global stop_cpus, where
 * the stoppers could get queued up in reverse order, leading to
 * system deadlock. Using an lglock means stop_two_cpus remains
 * relatively cheap.
 */
DEFINE_STATIC_LGLOCK(stop_cpus_lock);

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
{
	if (done) {
		if (executed)
			done->executed = true;
		if (atomic_dec_and_test(&done->nr_todo))
			complete(&done->completion);
	}
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;

	spin_lock_irqsave(&stopper->lock, flags);

	if (stopper->enabled) {
		list_add_tail(&work->list, &stopper->works);
		wake_up_process(stopper->thread);
	} else
		cpu_stop_signal_done(work->done, false);

	spin_unlock_irqrestore(&stopper->lock, flags);
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	cpu_stop_queue_work(cpu, &work);
	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}

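/*
 * Usage sketch (illustrative only, not part of this file's API): run a
 * short, non-sleeping callback on one particular CPU and check whether it
 * actually ran.  The names "poke_cpu" and "poke_arg" are made up for this
 * example.
 *
 *	static int poke_cpu(void *arg)
 *	{
 *		struct poke_arg *p = arg;
 *
 *		p->seen_on = smp_processor_id();	// must not sleep here
 *		return 0;
 *	}
 *
 *	ret = stop_one_cpu(cpu, poke_cpu, &arg);
 *	if (ret == -ENOENT)
 *		;	// @cpu was offline, the callback never ran
 */
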
/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	msdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus)
		is_active = cpu == cpumask_first(cpu_online_mask);
	else
		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		cpu_relax();
		if (msdata->state != curstate) {
			curstate = msdata->state;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		}
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}

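/*
 * Illustrative sketch of how this state machine is driven (it mirrors what
 * __stop_machine() does further down; the callback name "do_work" is made
 * up for this example).  A caller fills in multi_stop_data, sets the
 * initial state and queues multi_cpu_stop() on every participating CPU:
 *
 *	static int do_work(void *data)
 *	{
 *		// per-cpu work; runs on the CPUs in ->active_cpus while
 *		// every participant spins with interrupts disabled
 *		return 0;
 *	}
 *
 *	struct multi_stop_data msdata = {
 *		.fn		= do_work,
 *		.data		= NULL,
 *		.num_threads	= num_online_cpus(),
 *		.active_cpus	= cpu_online_mask,
 *	};
 *
 *	set_state(&msdata, MULTI_STOP_PREPARE);
 *	stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
 *
 * Each stopper thread then steps through PREPARE -> DISABLE_IRQ -> RUN ->
 * EXIT in lock step; the last thread to ack a state advances all of them
 * to the next one.
 */
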
/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both the current and specified CPU and runs @fn on one of them.
 *
 * Returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	preempt_disable();
	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	/*
	 * If we observe both CPUs active we know _cpu_down() cannot yet have
	 * queued its stop_machine works and therefore ours will get executed
	 * first.  Or it's not either one of our CPUs that's getting
	 * unplugged, in which case we don't care.
	 *
	 * This relies on the stopper workqueues being FIFO.
	 */
	if (!cpu_active(cpu1) || !cpu_active(cpu2)) {
		preempt_enable();
		return -ENOENT;
	}

	lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
	cpu_stop_queue_work(cpu1, &work1);
	cpu_stop_queue_work(cpu2, &work2);
	lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);

	preempt_enable();

	wait_for_completion(&done.completion);

	return done.executed ? done.ret : -ENOENT;
}

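/*
 * Usage sketch (illustrative only): exchange state between two CPUs, e.g.
 * the way a task-swap style caller might.  "swap_fn" and "swap_arg" are
 * invented names for this example.
 *
 *	static int swap_fn(void *arg)
 *	{
 *		struct swap_arg *s = arg;
 *
 *		// both CPUs are stopped with interrupts disabled; only the
 *		// CPU passed as @cpu1 actually runs this callback
 *		return 0;
 *	}
 *
 *	ret = stop_two_cpus(src_cpu, dst_cpu, swap_fn, &arg);
 *	if (ret == -ENOENT)
 *		;	// one of the CPUs was not active, nothing ran
 */
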
/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 */
void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	cpu_stop_queue_work(cpu, work_buf);
}

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);

static void queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	lg_global_lock(&stop_cpus_lock);
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		cpu_stop_queue_work(cpu, work);
	}
	lg_global_unlock(&stop_cpus_lock);
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	queue_stop_cpus_work(cpumask, fn, arg, &done);
	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, any non
 * zero return value if any returned non zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

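/*
 * Usage sketch (illustrative only): run a non-sleeping callback on every
 * online CPU in a mask.  The per-cpu invocations may run concurrently but
 * are not synchronized in lock step; use stop_machine() below for that.
 * "sync_counters" is a made-up callback name.
 *
 *	static int sync_counters(void *unused)
 *	{
 *		// per-cpu work goes here; must not sleep
 *		return 0;
 *	}
 *
 *	ret = stop_cpus(cpu_online_mask, sync_counters, NULL);
 *
 *	// or, when blocking on another stop_cpus() user is not acceptable:
 *	ret = try_stop_cpus(cpu_online_mask, sync_counters, NULL);
 */
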
static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;
	int ret;

repeat:
	work = NULL;
	spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		char ksym_buf[KSYM_NAME_LEN] __maybe_unused;

		/* cpu stop callbacks are not allowed to sleep */
		preempt_disable();

		ret = fn(arg);
		if (ret)
			done->ret = ret;

		/* restore preemption and check it's still balanced */
		preempt_enable();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %s(%p) leaked preempt count\n",
			  kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
					  ksym_buf), arg);

		cpu_stop_signal_done(done, true);
		goto repeat;
	}
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work, *tmp;
	unsigned long flags;

	/* drain remaining works */
	spin_lock_irqsave(&stopper->lock, flags);
	list_for_each_entry_safe(work, tmp, &stopper->works, list) {
		list_del_init(&work->list);
		cpu_stop_signal_done(work->done, false);
	}
	stopper->enabled = false;
	spin_unlock_irqrestore(&stopper->lock, flags);
}

static void cpu_stop_unpark(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	spin_lock_irq(&stopper->lock);
	stopper->enabled = true;
	spin_unlock_irq(&stopper->lock);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.setup			= cpu_stop_unpark,
	.park			= cpu_stop_park,
	.pre_unpark		= cpu_stop_unpark,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

#ifdef CONFIG_STOP_MACHINE

static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before stop_machine() has been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	get_online_cpus();
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);

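/*
 * Usage sketch (illustrative only): update global state while every online
 * CPU spins with interrupts disabled, so nothing else can observe the
 * intermediate state.  "apply_patch" and "patch_desc" are invented names
 * for this example.
 *
 *	static int apply_patch(void *arg)
 *	{
 *		struct patch_desc *p = arg;
 *
 *		// all online CPUs are stopped with irqs off; because @cpus
 *		// is NULL below, only the first online CPU runs this
 *		return 0;
 *	}
 *
 *	ret = stop_machine(apply_patch, &desc, NULL);
 */
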
/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}

#endif	/* CONFIG_STOP_MACHINE */