// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>

#include "smpboot.h"

enum {
	CSD_FLAG_LOCK		= 0x01,
	CSD_FLAG_SYNCHRONOUS	= 0x02,
};

struct call_function_data {
	call_single_data_t	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->csd = alloc_percpu(call_single_data_t);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->csd);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	flush_smp_call_function_queue(false);
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

static __always_inline void csd_lock(call_single_data_t *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data_t structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(call_single_data_t *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. The csd must already be
 * locked (CSD_FLAG_LOCK set); ->func and ->info are filled
 * in here.
 */
static int generic_exec_single(int cpu, call_single_data_t *csd,
			       smp_call_func_t func, void *info)
{
	if (cpu == smp_processor_id()) {
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU.
		 */
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	csd->func = func;
	csd->info = info;

	/*
	 * The list addition should be visible before we send the IPI: the
	 * handler locks the list to pull the entry off it, and normal
	 * cache coherency rules implied by spinlocks provide that
	 * visibility.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol on an architecture, sufficient synchronisation should
	 * be added to arch code to make it appear to obey cache coherency
	 * WRT locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	call_single_data_t *csd, *csd_next;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			func(info);
			csd_unlock(csd);
		} else {
			csd_unlock(csd);
			func(info);
		}
	}

	/*
	 * Handle irq works queued remotely by irq_work_queue_on().
	 * SMP functions above are typically synchronous, so run them
	 * first, since some other CPUs may be busy waiting for them.
	 */
	irq_work_run();
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the target CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	err = generic_exec_single(cpu, csd, func, info);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
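
/*
 * Usage sketch (illustrative only, not used by this file): run a short
 * callback on one CPU and wait for it to finish. The callback and variable
 * names below (report_cpu(), id) are hypothetical.
 *
 *	static void report_cpu(void *info)
 *	{
 *		*(int *)info = smp_processor_id();	// runs on the target CPU, IRQs off
 *	}
 *
 *	int id = -1;
 *	int err = smp_call_function_single(3, report_cpu, &id, 1);
 *
 * Because @wait is 1, smp_call_function_single() returns only after
 * report_cpu() has completed on CPU 3, so @id may safely live on the
 * caller's stack; err is -ENXIO if CPU 3 is not online.
 */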

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 * specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	int err = 0;

	preempt_disable();

	/* We could deadlock if we have to wait here with interrupts disabled! */
	if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
		csd_lock_wait(csd);

	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd, csd->func, csd->info);
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
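
/*
 * Usage sketch (illustrative only, not used by this file): a caller that
 * embeds a call_single_data_t in its own object and re-arms it only after
 * the previous call has run. The structure and function names below
 * (struct my_dev, my_remote_work(), kick_remote()) are hypothetical.
 *
 *	struct my_dev {
 *		call_single_data_t csd;
 *		int payload;
 *	};
 *
 *	static void my_remote_work(void *info)
 *	{
 *		struct my_dev *dev = info;
 *
 *		dev->payload++;		// runs on the target CPU, IRQs off
 *	}
 *
 *	static void kick_remote(struct my_dev *dev, int cpu)
 *	{
 *		dev->csd.func = my_remote_work;
 *		dev->csd.info = dev;
 *		smp_call_function_single_async(cpu, &dev->csd);
 *	}
 *
 * The caller must guarantee that kick_remote() is not invoked again for
 * the same csd until my_remote_work() has run; nothing here checks that
 * beyond the WARN_ON_ONCE() above.
 */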

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	__cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	cpumask_clear(cfd->cpumask_ipi);
	for_each_cpu(cpu, cfd->cpumask) {
		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

		csd_lock(csd);
		if (wait)
			csd->flags |= CSD_FLAG_SYNCHRONOUS;
		csd->func = func;
		csd->info = info;
		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}
EXPORT_SYMBOL(smp_call_function_many);

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);
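
/*
 * Usage sketch for smp_call_function_many() (illustrative only, not used by
 * this file): run a callback on an explicit set of CPUs, with preemption
 * disabled as required. The names below (poke_cpu(), target_mask) are
 * hypothetical.
 *
 *	static void poke_cpu(void *info)
 *	{
 *		atomic_inc(info);	// runs on each targeted CPU, IRQs off
 *	}
 *
 *	atomic_t hits = ATOMIC_INIT(0);
 *
 *	get_cpu();				// disables preemption
 *	smp_call_function_many(target_mask, poke_cpu, &hits, true);
 *	put_cpu();
 *
 * Only online CPUs in target_mask are targeted, and the calling CPU is
 * skipped even if it is in the mask; with @wait == true the call returns
 * once every targeted CPU has run poke_cpu().
 */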

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is the hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;
	unsigned int cpu;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	num_nodes = num_online_nodes();
	num_cpus = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus, (num_cpus > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors. May be used during early boot while
 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
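
/*
 * Usage sketch for on_each_cpu() (illustrative only, not used by this
 * file): run a callback on every online CPU, including the caller's, and
 * wait for completion, e.g. to drop a per-CPU cached pointer after a
 * global data structure changed. The names below (cached_entry,
 * drop_cached_entry()) are hypothetical.
 *
 *	static DEFINE_PER_CPU(void *, cached_entry);
 *
 *	static void drop_cached_entry(void *info)
 *	{
 *		this_cpu_write(cached_entry, NULL);	// runs with IRQs off on each CPU
 *	}
 *
 *	on_each_cpu(drop_cached_entry, NULL, 1);
 *
 * Unlike smp_call_function(), the calling CPU also runs the callback, and
 * with @wait == 1 the call returns only after every CPU that was online
 * has run it.
 */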

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		      void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);

/*
 * on_each_cpu_cond_mask(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 * @mask:	The set of CPUs to consider.
 *
 * The function might sleep if the GFP flags indicate that a non-atomic
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
			   smp_call_func_t func, void *info, bool wait,
			   gfp_t gfp_flags, const struct cpumask *mask)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_cpu(cpu, mask)
			if (cond_func(cpu, info))
				__cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_cpu(cpu, mask)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
							       info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);

void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		      smp_call_func_t func, void *info, bool wait,
		      gfp_t gfp_flags)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
			      cpu_online_mask);
}
EXPORT_SYMBOL(on_each_cpu_cond);
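
/*
 * Usage sketch for on_each_cpu_cond() (illustrative only, not used by this
 * file): only IPI CPUs whose per-CPU state actually needs the work. The
 * names below (my_pending, needs_flush(), do_flush()) are hypothetical.
 *
 *	static DEFINE_PER_CPU(bool, my_pending);
 *
 *	static bool needs_flush(int cpu, void *info)
 *	{
 *		return per_cpu(my_pending, cpu);	// called with preemption disabled
 *	}
 *
 *	static void do_flush(void *info)
 *	{
 *		this_cpu_write(my_pending, false);	// runs on each selected CPU
 *	}
 *
 *	on_each_cpu_cond(needs_flush, do_flush, NULL, true, GFP_KERNEL);
 *
 * With GFP_KERNEL the internal cpumask allocation may sleep, so this form
 * must be called from process context; GFP_ATOMIC avoids sleeping at the
 * cost of possibly falling back to the one-by-one path above.
 */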

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Wakes up every CPU that is currently idle, including CPUs that are
 * idle polling; CPUs that are not idle are left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
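
/*
 * Usage sketch for smp_call_on_cpu() (illustrative only, not used by this
 * file): run a function on a given CPU and collect its return value. The
 * names below (struct my_state, read_on_cpu()) are hypothetical.
 *
 *	struct my_state { int value; };
 *
 *	static int read_on_cpu(void *data)
 *	{
 *		struct my_state *s = data;
 *
 *		s->value = smp_processor_id();	// runs in a kworker bound to that CPU
 *		return 0;			// unlike an IPI callback, this may sleep
 *	}
 *
 *	struct my_state s = { };
 *	int err = smp_call_on_cpu(2, read_on_cpu, &s, false);
 *
 * With @phys == true the hypervisor is additionally asked, via
 * hypervisor_pin_vcpu(), to keep the vCPU on the matching physical CPU
 * while the function runs; err is -ENXIO if the CPU is not online.
 */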