/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>

#include "smpboot.h"

enum {
	CSD_FLAG_LOCK		= 0x01,
	CSD_FLAG_SYNCHRONOUS	= 0x02,
};

struct call_function_data {
	struct call_single_data	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->csd = alloc_percpu(struct call_single_data);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->csd);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	flush_smp_call_function_queue(false);
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
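/*
 * Illustrative lifecycle of csd->flags (a sketch of the protocol
 * implemented below, not additional API):
 *
 *	csd_lock(csd);		// set CSD_FLAG_LOCK; the smp_wmb() orders
 *				// subsequent ->func/->info stores after it
 *	csd->func = f;
 *	csd->info = i;
 *	<queue csd, send IPI>	// remote CPU runs f(i), then csd_unlock()
 *	csd_lock_wait(csd);	// acquire-spin until CSD_FLAG_LOCK clears
 */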
static __always_inline void csd_lock_wait(struct call_single_data *csd)
{
	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

static __always_inline void csd_lock(struct call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(struct call_single_data *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct call_single_data *csd,
			       smp_call_func_t func, void *info)
{
	if (cpu == smp_processor_id()) {
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU.
		 */
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	csd->func = func;
	csd->info = info;

	/*
	 * The list addition should be visible before we send the IPI, so
	 * that when the IPI handler locks the list to pull the entry off,
	 * it observes our addition; this follows from the normal cache
	 * coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol in an architecture, sufficient synchronisation should
	 * be added to arch code to make it appear to obey cache coherency
	 * WRT locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}
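/*
 * Arch wiring sketch (the handler name here is hypothetical, for
 * illustration only): an architecture's call-function-single IPI vector
 * is expected to run with interrupts disabled and forward into the
 * generic handler above, e.g.:
 *
 *	void arch_handle_cfs_ipi(void)
 *	{
 *		irq_enter();
 *		generic_smp_call_function_single_interrupt();
 *		irq_exit();
 *	}
 */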
/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	struct call_single_data *csd, *csd_next;
	static bool warned;

	WARN_ON(!irqs_disabled());

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			func(info);
			csd_unlock(csd);
		} else {
			csd_unlock(csd);
			func(info);
		}
	}

	/*
	 * Handle irq works queued remotely by irq_work_queue_on().
	 * Smp functions above are typically synchronous so they
	 * better run first since some other CPUs may be busy waiting
	 * for them.
	 */
	irq_work_run();
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data *csd;
	struct call_single_data csd_stack = {
		.flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send an smp call function interrupt to this CPU and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	err = generic_exec_single(cpu, csd, func, info);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
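/*
 * Usage sketch for smp_call_function_single() (the callback and the
 * variables are hypothetical):
 *
 *	static void count_hit(void *info)
 *	{
 *		atomic_inc(info);	// runs on the target CPU, IRQs off
 *	}
 *
 *	atomic_t hits = ATOMIC_INIT(0);
 *	int err = smp_call_function_single(1, count_hit, &hits, 1);
 *
 * With @wait == 1, a zero return means the callback has completed on
 * CPU 1 by the time the call returns.
 */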
/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *				     specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (i.e., embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
	int err = 0;

	preempt_disable();

	/* We could deadlock if we have to wait here with interrupts disabled! */
	if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
		csd_lock_wait(csd);

	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd, csd->func, csd->info);
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no CPUs were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
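/*
 * Async usage sketch for smp_call_function_single_async() (the embedding
 * structure and handler are hypothetical): the caller owns the csd and
 * must not reuse it until the previous invocation has run and unlocked it.
 *
 *	struct my_req {
 *		struct call_single_data	csd;
 *		int			payload;
 *	};
 *
 *	req->csd.func = my_req_handler;
 *	req->csd.info = req;
 *	smp_call_function_single_async(cpu, &req->csd);	// OK with IRQs off
 */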
/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send an smp call function interrupt to this CPU and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online CPUs? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	__cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other CPUs changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	cpumask_clear(cfd->cpumask_ipi);
	for_each_cpu(cpu, cfd->cpumask) {
		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);

		csd_lock(csd);
		if (wait)
			csd->flags |= CSD_FLAG_SYNCHRONOUS;
		csd->func = func;
		csd->info = info;
		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			struct call_single_data *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}
EXPORT_SYMBOL(smp_call_function_many);

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);
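/*
 * Usage sketch for smp_call_function() (the callback name is
 * hypothetical): run a fast, non-blocking callback on every other
 * online CPU and wait for completion:
 *
 *	smp_call_function(drain_local_counters, NULL, 1);
 *
 * The callback runs in IPI (hardirq) context on the remote CPUs, so it
 * must not sleep or take sleeping locks.
 */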
/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is a hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;
	unsigned int cpu;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	num_nodes = num_online_nodes();
	num_cpus = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus, (num_cpus > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors. May be used during early boot while
 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
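/*
 * Usage sketch for on_each_cpu() (the callback name is hypothetical):
 * unlike smp_call_function(), this also runs @func on the calling CPU,
 * with interrupts disabled:
 *
 *	on_each_cpu(reset_local_stats, NULL, 1);	// all CPUs, wait
 */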
/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		      void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a non-atomic
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		      smp_call_func_t func, void *info, bool wait,
		      gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
							       info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);
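/*
 * Usage sketch for on_each_cpu_cond() (the predicate, the callback and
 * the per-cpu flag are hypothetical): IPI only the CPUs whose per-cpu
 * flag is set:
 *
 *	static bool cpu_is_dirty(int cpu, void *info)
 *	{
 *		return per_cpu(dirty_flag, cpu);
 *	}
 *
 *	on_each_cpu_cond(cpu_is_dirty, flush_dirty, NULL, true, GFP_KERNEL);
 */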
static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * wake_up_all_idle_cpus tries to break all CPUs out of idle, including
 * CPUs that are polling in idle; CPUs that are not idle are left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
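/*
 * Usage sketch for smp_call_on_cpu() (the function name is hypothetical):
 * run a sleepable function on CPU 0 and, with @phys set, keep the vCPU
 * pinned to the matching physical CPU while it runs:
 *
 *	static int read_platform_reg(void *arg)
 *	{
 *		return 0;	// may sleep: runs from a workqueue, not an IPI
 *	}
 *
 *	int ret = smp_call_on_cpu(0, read_platform_reg, NULL, true);
 */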