// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>

#include "smpboot.h"

enum {
	CSD_FLAG_LOCK		= 0x01,
	CSD_FLAG_SYNCHRONOUS	= 0x02,
};

struct call_function_data {
	call_single_data_t	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->csd = alloc_percpu(call_single_data_t);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->csd);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	flush_smp_call_function_queue(false);
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

static __always_inline void csd_lock(call_single_data_t *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data_t structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(call_single_data_t *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, call_single_data_t *csd,
			       smp_call_func_t func, void *info)
{
	if (cpu == smp_processor_id()) {
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	csd->func = func;
	csd->info = info;

	/*
	 * The list addition should be visible to the IPI handler (which
	 * locks the list to pull the entry off it) before the IPI is sent;
	 * this ordering follows from the normal cache coherency rules
	 * implied by spinlocks.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol on an architecture, sufficient synchronisation should be
	 * added to the arch code to make it appear to obey cache coherency
	 * WRT locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}
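/*
 * Illustrative sketch (not part of the original file): how an architecture
 * might wire the generic single-call path into its IPI plumbing.  The
 * names IPI_CALL_FUNC_SINGLE, send_ipi_to_cpu() and handle_arch_ipi() are
 * hypothetical placeholders; only arch_send_call_function_single_ipi() and
 * generic_smp_call_function_single_interrupt() are the real hooks.
 *
 *	void arch_send_call_function_single_ipi(int cpu)
 *	{
 *		send_ipi_to_cpu(cpu, IPI_CALL_FUNC_SINGLE);
 *	}
 *
 *	static void handle_arch_ipi(unsigned int ipinr)
 *	{
 *		if (ipinr == IPI_CALL_FUNC_SINGLE)
 *			generic_smp_call_function_single_interrupt();
 *	}
 */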
/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	call_single_data_t *csd, *csd_next;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			func(info);
			csd_unlock(csd);
		} else {
			csd_unlock(csd);
			func(info);
		}
	}

	/*
	 * Handle irq works queued remotely by irq_work_queue_on().
	 * Smp functions above are typically synchronous so they
	 * should run first, since some other CPUs may be busy waiting
	 * for them.
	 */
	irq_work_run();
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock(), because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	err = generic_exec_single(cpu, csd, func, info);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
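/*
 * Illustrative sketch (not part of the original file): a minimal caller of
 * smp_call_function_single().  The helpers read_cpu_id() and
 * remote_read_cpu_id() are hypothetical; the call runs read_cpu_id() on
 * @cpu with interrupts disabled and, because @wait is 1, only returns once
 * the callback has completed.
 *
 *	static void read_cpu_id(void *info)
 *	{
 *		*(int *)info = smp_processor_id();
 *	}
 *
 *	static int remote_read_cpu_id(int cpu)
 *	{
 *		int id = -1;
 *
 *		smp_call_function_single(cpu, read_cpu_id, &id, 1);
 *		return id;
 *	}
 */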
/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *			specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	int err = 0;

	preempt_disable();

	/* We could deadlock if we have to wait here with interrupts disabled! */
	if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
		csd_lock_wait(csd);

	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd, csd->func, csd->info);
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
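/*
 * Illustrative sketch (not part of the original file): using
 * smp_call_function_single_async() with a csd embedded in a caller-owned
 * object, as described above.  struct poke_state, poke_target() and
 * poke_other_cpu() are hypothetical.  The caller itself serializes reuse
 * of the csd, here by only re-arming once the previous callback has
 * cleared the busy flag.
 *
 *	struct poke_state {
 *		call_single_data_t	csd;
 *		atomic_t		busy;
 *	};
 *
 *	static void poke_target(void *info)
 *	{
 *		struct poke_state *ps = info;
 *
 *		atomic_set(&ps->busy, 0);
 *	}
 *
 *	static void poke_other_cpu(struct poke_state *ps, int cpu)
 *	{
 *		if (atomic_cmpxchg(&ps->busy, 0, 1))
 *			return;
 *		ps->csd.func = poke_target;
 *		ps->csd.info = ps;
 *		smp_call_function_single_async(cpu, &ps->csd);
 *	}
 */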
/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);

static void smp_call_function_many_cond(const struct cpumask *mask,
					smp_call_func_t func, void *info,
					bool wait, smp_cond_func_t cond_func)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock(), because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	/* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		if (!cond_func || cond_func(cpu, info))
			smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	__cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	cpumask_clear(cfd->cpumask_ipi);
	for_each_cpu(cpu, cfd->cpumask) {
		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

		if (cond_func && !cond_func(cpu, info))
			continue;

		csd_lock(csd);
		if (wait)
			csd->flags |= CSD_FLAG_SYNCHRONOUS;
		csd->func = func;
		csd->info = info;
		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	smp_call_function_many_cond(mask, func, info, wait, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);
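/*
 * Illustrative sketch (not part of the original file): calling
 * smp_call_function_many() with preemption disabled.  Because the calling
 * CPU is excluded from the mask handling, the caller runs the function
 * locally itself when needed (which is exactly what on_each_cpu_mask()
 * below packages up).  The names acks, count_ack() and signal_cpus() are
 * hypothetical.
 *
 *	static atomic_t acks;
 *
 *	static void count_ack(void *info)
 *	{
 *		atomic_inc(&acks);
 *	}
 *
 *	static void signal_cpus(const struct cpumask *target_mask)
 *	{
 *		int cpu = get_cpu();
 *
 *		smp_call_function_many(target_mask, count_ack, NULL, true);
 *		if (cpumask_test_cpu(cpu, target_mask))
 *			count_ack(NULL);
 *		put_cpu();
 *	}
 */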
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;
	unsigned int cpu;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	num_nodes = num_online_nodes();
	num_cpus = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus, (num_cpus > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors. May be used during early boot while
 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
void on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;

	preempt_disable();
	smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu);
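/*
 * Illustrative sketch (not part of the original file): a typical
 * on_each_cpu() caller that bumps a per-CPU counter everywhere, including
 * the local CPU, and waits for completion.  The names bump_count, bump()
 * and bump_all() are hypothetical.
 *
 *	static DEFINE_PER_CPU(unsigned long, bump_count);
 *
 *	static void bump(void *info)
 *	{
 *		this_cpu_inc(bump_count);
 *	}
 *
 *	static void bump_all(void)
 *	{
 *		on_each_cpu(bump, NULL, 1);
 *	}
 */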
/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		      void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	int cpu = get_cpu();

	smp_call_function_many_cond(mask, func, info, wait, cond_func);
	if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);

void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
		      void *info, bool wait)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
}
EXPORT_SYMBOL(on_each_cpu_cond);

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
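/*
 * Illustrative sketch (not part of the original file): using
 * on_each_cpu_cond() so that only CPUs with pending work are sent an IPI.
 * The per-CPU variable has_pending and the helpers cpu_has_pending(),
 * drain_pending() and drain_all_pending() are hypothetical.
 *
 *	static DEFINE_PER_CPU(bool, has_pending);
 *
 *	static bool cpu_has_pending(int cpu, void *info)
 *	{
 *		return per_cpu(has_pending, cpu);
 *	}
 *
 *	static void drain_pending(void *info)
 *	{
 *		this_cpu_write(has_pending, false);
 *	}
 *
 *	static void drain_all_pending(void)
 *	{
 *		on_each_cpu_cond(cpu_has_pending, drain_pending, NULL, true);
 *	}
 */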
/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Try to break all online CPUs, including CPUs that are idle polling,
 * out of the idle state. CPUs that are not idle are left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
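/*
 * Illustrative sketch (not part of the original file): smp_call_on_cpu()
 * runs the callback from a workqueue worker on the target CPU, so unlike
 * the IPI based helpers above, the callback may sleep.  The helpers
 * read_board_temp() and query_sensor_on_cpu() are hypothetical, and the
 * value written is only a placeholder for a real (possibly sleeping) read.
 *
 *	static int read_board_temp(void *arg)
 *	{
 *		int *temp = arg;
 *
 *		*temp = 42;
 *		return 0;
 *	}
 *
 *	static int query_sensor_on_cpu(unsigned int cpu)
 *	{
 *		int temp = 0;
 *		int ret;
 *
 *		ret = smp_call_on_cpu(cpu, read_board_temp, &temp, false);
 *		return ret ? ret : temp;
 *	}
 */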