/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>

#include "smpboot.h"

enum {
	CSD_FLAG_LOCK		= 0x01,
	CSD_FLAG_SYNCHRONOUS	= 0x02,
};

struct call_function_data {
	struct call_single_data	__percpu *csd;
	cpumask_var_t		cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	cfd->csd = alloc_percpu(struct call_single_data);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_percpu(cfd->csd);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	flush_smp_call_function_queue(false);
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(struct call_single_data *csd)
{
	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

static __always_inline void csd_lock(struct call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(struct call_single_data *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
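
/*
 * Illustrative sketch (not taken verbatim from any caller): the typical
 * lifetime of a csd for a synchronous cross-CPU call, as implemented by
 * the helpers above and the queue flushing below, looks like this:
 *
 *	sender CPU				target CPU
 *	----------				----------
 *	csd_lock(csd);
 *	csd->func = f; csd->info = arg;
 *	llist_add(&csd->llist, queue);
 *	arch_send_call_function_single_ipi();
 *	csd_lock_wait(csd);	// spins	f(arg);
 *						csd_unlock(csd); // releases waiter
 *
 * For asynchronous calls the target unlocks the csd *before* running f(),
 * so the sender may reuse the csd as soon as CSD_FLAG_LOCK clears, but
 * must not assume f() has finished.
 */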

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct call_single_data *csd,
			       smp_call_func_t func, void *info)
{
	if (cpu == smp_processor_id()) {
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	csd->func = func;
	csd->info = info;

	/*
	 * The list addition should be visible before sending the IPI
	 * handler locks the list to pull the entry off it because of
	 * normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	struct call_single_data *csd, *csd_next;
	static bool warned;

	WARN_ON(!irqs_disabled());

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			func(info);
			csd_unlock(csd);
		} else {
			csd_unlock(csd);
			func(info);
		}
	}

	/*
	 * Handle irq works queued remotely by irq_work_queue_on().
	 * Smp functions above are typically synchronous so they
	 * better run first since some other CPUs may be busy waiting
	 * for them.
	 */
	irq_work_run();
}
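
/*
 * For reference, a hypothetical architecture wires its "call function
 * single" IPI vector to the generic handler above; the handler name below
 * is illustrative only, not an interface defined in this file:
 *
 *	void handle_ipi_call_func_single(struct pt_regs *regs)
 *	{
 *		irq_enter();
 *		generic_smp_call_function_single_interrupt();
 *		irq_exit();
 *	}
 *
 * The only contract this file relies on is that the arch handler runs with
 * interrupts disabled and eventually calls the generic handler.
 */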

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data *csd;
	struct call_single_data csd_stack = {
		.flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	err = generic_exec_single(cpu, csd, func, info);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *				     specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes his own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
	int err = 0;

	preempt_disable();

	/* We could deadlock if we have to wait here with interrupts disabled! */
	if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
		csd_lock_wait(csd);

	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd, csd->func, csd->info);
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
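
/*
 * Illustrative use of smp_call_function_single_async() (hypothetical
 * caller, not part of this file): the csd is embedded in the caller's
 * object and set up once, so the call itself is safe from contexts with
 * interrupts disabled:
 *
 *	struct my_dev {
 *		struct call_single_data csd;
 *		...
 *	};
 *
 *	// setup, e.g. at probe time:
 *	dev->csd.func = my_remote_func;
 *	dev->csd.info = dev;
 *
 *	// later, possibly from hardirq context:
 *	smp_call_function_single_async(target_cpu, &dev->csd);
 *
 * The caller must guarantee the previous invocation has finished
 * (CSD_FLAG_LOCK cleared) before reusing the same csd.
 */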

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
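
/*
 * Illustrative example (hypothetical caller): read a per-package register
 * that is only accessible from CPUs inside the package; any CPU in the
 * package's mask will do, and the current CPU is preferred if it qualifies:
 *
 *	static void read_pkg_reg(void *info)
 *	{
 *		*(u64 *)info = my_arch_read_reg();	// hypothetical helper
 *	}
 *
 *	u64 val;
 *	int err = smp_call_function_any(pkg_mask, read_pkg_reg, &val, 1);
 */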

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	for_each_cpu(cpu, cfd->cpumask) {
		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);

		csd_lock(csd);
		if (wait)
			csd->flags |= CSD_FLAG_SYNCHRONOUS;
		csd->func = func;
		csd->info = info;
		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			struct call_single_data *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}
EXPORT_SYMBOL(smp_call_function_many);

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}
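
/*
 * Example boot command lines (illustrative): "maxcpus=4" limits how many
 * CPUs smp_init() below brings online, "nr_cpus=4" additionally caps
 * nr_cpu_ids (the hard limit on usable processor ids), and "nosmp" (or
 * "maxcpus=0") disables SMP activation entirely.
 */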

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;
	unsigned int cpu;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	num_nodes = num_online_nodes();
	num_cpus = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus, (num_cpus > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors. May be used during early boot while
 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		      void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
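
/*
 * Illustrative example (hypothetical caller): flush a per-CPU software
 * cache on every online CPU, including the local one, and wait for
 * completion. Unlike smp_call_function(), on_each_cpu() also runs the
 * callback locally:
 *
 *	static void flush_local_cache(void *unused)
 *	{
 *		// runs on each CPU with interrupts disabled
 *	}
 *
 *	on_each_cpu(flush_local_cache, NULL, 1);
 *
 * on_each_cpu_mask() does the same for an arbitrary cpumask.
 */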

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a non
 * atomic allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
							       info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);
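
/*
 * Illustrative example (hypothetical caller): only interrupt CPUs that
 * actually have dirty per-CPU state. The predicate runs with preemption
 * disabled; the IPI callback (the hypothetical flush_local_cache() from
 * the example above) does the flushing:
 *
 *	static bool cpu_has_dirty_state(int cpu, void *info)
 *	{
 *		return per_cpu(my_dirty_flag, cpu);	// hypothetical flag
 *	}
 *
 *	on_each_cpu_cond(cpu_has_dirty_state, flush_local_cache, NULL,
 *			 true, GFP_KERNEL);
 */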

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * wake_up_all_idle_cpus tries to break all CPUs out of idle, including
 * CPUs that are idle-polling; CPUs that are not idle are left untouched.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
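
/*
 * Illustrative use of smp_call_on_cpu() (hypothetical caller): run a
 * sleepable query on a particular CPU and collect its return value.
 * With @phys set, the work is additionally pinned to that physical CPU
 * via the hypervisor, which matters for per-package queries under
 * virtualization:
 *
 *	static int read_board_temp(void *arg)
 *	{
 *		*(int *)arg = my_read_sensor();		// hypothetical helper
 *		return 0;
 *	}
 *
 *	int temp;
 *	int err = smp_call_on_cpu(2, read_board_temp, &temp, false);
 *
 * Unlike smp_call_function_single(), @func runs from a workqueue, so it
 * may sleep; the caller blocks until it completes.
 */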