Lines Matching +full:function +full:- +full:mask (kernel/smp.c)
1 // SPDX-License-Identifier: GPL-2.0-only
37 #define CSD_TYPE(_csd) ((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)
57 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, in smpcfd_prepare_cpu()
59 return -ENOMEM; in smpcfd_prepare_cpu()
60 if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL, in smpcfd_prepare_cpu()
62 free_cpumask_var(cfd->cpumask); in smpcfd_prepare_cpu()
63 return -ENOMEM; in smpcfd_prepare_cpu()
65 cfd->csd = alloc_percpu(call_single_data_t); in smpcfd_prepare_cpu()
66 if (!cfd->csd) { in smpcfd_prepare_cpu()
67 free_cpumask_var(cfd->cpumask); in smpcfd_prepare_cpu()
68 free_cpumask_var(cfd->cpumask_ipi); in smpcfd_prepare_cpu()
69 return -ENOMEM; in smpcfd_prepare_cpu()
79 free_cpumask_var(cfd->cpumask); in smpcfd_dead_cpu()
80 free_cpumask_var(cfd->cpumask_ipi); in smpcfd_dead_cpu()
81 free_percpu(cfd->csd); in smpcfd_dead_cpu()
88 * The IPIs for the smp-call-function callbacks queued by other in smpcfd_dying_cpu()
90 * because this CPU disabled interrupts (inside stop-machine) in smpcfd_dying_cpu()
122 send_call_function_ipi_mask(struct cpumask *mask) in send_call_function_ipi_mask() argument
124 trace_ipi_send_cpumask(mask, _RET_IP_, in send_call_function_ipi_mask()
126 arch_send_call_function_ipi_mask(mask); in send_call_function_ipi_mask()
186 __this_cpu_write(cur_csd_func, csd->func); in __csd_lock_record()
187 __this_cpu_write(cur_csd_info, csd->info); in __csd_lock_record()
190 smp_mb(); /* Update cur_csd before function call. */ in __csd_lock_record()
206 return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */ in csd_lock_wait_getcpu()
207 return -1; in csd_lock_wait_getcpu()
217 int cpu = -1; in csd_lock_wait_toolong()
222 unsigned int flags = READ_ONCE(csd->node.u_flags); in csd_lock_wait_toolong()
236 ts_delta = ts2 - *ts1; in csd_lock_wait_toolong()
250 ts_delta = ts2 - ts0; in csd_lock_wait_toolong()
251 …pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\… in csd_lock_wait_toolong()
253 cpu, csd->func, csd->info); in csd_lock_wait_toolong()
272 …pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_proces… in csd_lock_wait_toolong()
284 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
286 * For non-synchronous ipi calls the csd can still be in use by the
287 * previous function call. For multi-cpu calls it's even more interesting
311 smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK)); in csd_lock_wait()
320 smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK)); in csd_lock_wait()
327 csd->node.u_flags |= CSD_FLAG_LOCK; in csd_lock()
331 * to ->flags with any subsequent assignments to other in csd_lock()
339 WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK)); in csd_unlock()
344 smp_store_release(&csd->node.u_flags, 0); in csd_unlock()
364 sched_ttwu_pending : csd->func; in __smp_call_single_queue()
388 * ->func, ->info, and ->flags set.
393 smp_call_func_t func = csd->func; in generic_exec_single()
394 void *info = csd->info; in generic_exec_single()
398 * We can unlock early even for the synchronous on-stack case, in generic_exec_single()
412 return -ENXIO; in generic_exec_single()
415 __smp_call_single_queue(cpu, &csd->node.llist); in generic_exec_single()
421 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
423 * Invoked by arch to handle an IPI for call function single.
432 * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks
437 * Flush any pending smp-call-function callbacks queued on this CPU. This is
479 csd->func); in __flush_smp_call_function_queue()
483 pr_warn("IPI task-wakeup sent to offline CPU\n"); in __flush_smp_call_function_queue()
501 smp_call_func_t func = csd->func; in __flush_smp_call_function_queue()
502 void *info = csd->info; in __flush_smp_call_function_queue()
505 prev->next = &csd_next->node.llist; in __flush_smp_call_function_queue()
507 entry = &csd_next->node.llist; in __flush_smp_call_function_queue()
515 prev = &csd->node.llist; in __flush_smp_call_function_queue()
531 prev->next = &csd_next->node.llist; in __flush_smp_call_function_queue()
533 entry = &csd_next->node.llist; in __flush_smp_call_function_queue()
537 smp_call_func_t func = csd->func; in __flush_smp_call_function_queue()
538 void *info = csd->info; in __flush_smp_call_function_queue()
549 prev = &csd->node.llist; in __flush_smp_call_function_queue()
564 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
570 * handle queued SMP function calls before scheduling.
594 * smp_call_function_single - Run a function on a specific CPU
595 * @func: The function to run. This must be fast and non-blocking.
596 * @info: An arbitrary pointer to pass to the function.
597 * @wait: If true, wait until function has completed on other CPUs.
620 * send smp call function interrupt to this cpu and as such deadlocks in smp_call_function_single()
640 csd->func = func; in smp_call_function_single()
641 csd->info = info; in smp_call_function_single()
643 csd->node.src = smp_processor_id(); in smp_call_function_single()
644 csd->node.dst = cpu; in smp_call_function_single()
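
A minimal usage sketch for smp_call_function_single(), assuming a hypothetical bump_counter() callback and counter that are not part of this file; the callback runs in IPI context on the target CPU, so it must be fast and must not sleep:

#include <linux/atomic.h>
#include <linux/smp.h>

static atomic_t hits = ATOMIC_INIT(0);

/* Runs on the target CPU with interrupts disabled: keep it short, never sleep. */
static void bump_counter(void *info)
{
	atomic_inc(info);
}

static int example_single(void)
{
	/* Run bump_counter() on CPU 2 and wait for it to finish there;
	 * returns -ENXIO if CPU 2 is not online. */
	return smp_call_function_single(2, bump_counter, &hits, 1);
}
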
659 * smp_call_function_single_async() - Run an asynchronous function on a
662 * @csd: Pre-allocated and setup data structure
667 * The caller passes his own pre-allocated data structure
671 * If the function is called with one csd which has not yet been
673 * function will return immediately with -EBUSY showing that the csd
687 if (csd->node.u_flags & CSD_FLAG_LOCK) { in smp_call_function_single_async()
688 err = -EBUSY; in smp_call_function_single_async()
692 csd->node.u_flags = CSD_FLAG_LOCK; in smp_call_function_single_async()
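
A sketch of the asynchronous variant, assuming the INIT_CSD() initializer from <linux/smp.h> and an illustrative poke() callback; the csd has to stay valid until the callback has run, and a second submission before then fails with -EBUSY as described above:

#include <linux/smp.h>

/* Runs in IPI context on the target CPU; must not sleep. */
static void poke(void *info)
{
}

/* Static storage: the csd must stay valid until poke() has executed. */
static call_single_data_t poke_csd;

static void example_async_setup(void)
{
	INIT_CSD(&poke_csd, poke, NULL);
}

static int example_async_kick(int cpu)
{
	/* Queue poke() on @cpu and return without waiting; -EBUSY means the
	 * previous request on this csd has not been handled yet. */
	return smp_call_function_single_async(cpu, &poke_csd);
}
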
705 * smp_call_function_any - Run a function on any of the given cpus
706 * @mask: The mask of cpus it can run on.
707 * @func: The function to run. This must be fast and non-blocking.
708 * @info: An arbitrary pointer to pass to the function.
709 * @wait: If true, wait until function has completed.
714 * 1) current cpu if in @mask
715 * 2) any cpu of current node if in @mask
716 * 3) any other online cpu in @mask
718 int smp_call_function_any(const struct cpumask *mask, in smp_call_function_any() argument
727 if (cpumask_test_cpu(cpu, mask)) in smp_call_function_any()
732 for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids; in smp_call_function_any()
733 cpu = cpumask_next_and(cpu, nodemask, mask)) { in smp_call_function_any()
739 cpu = cpumask_any_and(mask, cpu_online_mask); in smp_call_function_any()
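
A sketch for smp_call_function_any(), with an illustrative read_package_reg() callback; per the selection order above, the current CPU is preferred, then a CPU on the local node, then any other online CPU in the mask:

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/types.h>

/* Stand-in for reading some per-package state; runs in IPI context. */
static void read_package_reg(void *info)
{
	*(u32 *)info = 0;
}

static int example_any(const struct cpumask *mask)
{
	u32 val;

	/* wait=1 so &val is not used after it goes out of scope. */
	return smp_call_function_any(mask, read_package_reg, &val, 1);
}
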
750 * %SCF_WAIT: Wait until function execution is completed
756 static void smp_call_function_many_cond(const struct cpumask *mask, in smp_call_function_many_cond() argument
773 * send smp call function interrupt to this cpu and as such deadlocks in smp_call_function_many_cond()
789 if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask)) in smp_call_function_many_cond()
793 cpu = cpumask_first_and(mask, cpu_online_mask); in smp_call_function_many_cond()
795 cpu = cpumask_next_and(cpu, mask, cpu_online_mask); in smp_call_function_many_cond()
801 cpumask_and(cfd->cpumask, mask, cpu_online_mask); in smp_call_function_many_cond()
802 __cpumask_clear_cpu(this_cpu, cfd->cpumask); in smp_call_function_many_cond()
804 cpumask_clear(cfd->cpumask_ipi); in smp_call_function_many_cond()
805 for_each_cpu(cpu, cfd->cpumask) { in smp_call_function_many_cond()
806 call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu); in smp_call_function_many_cond()
809 __cpumask_clear_cpu(cpu, cfd->cpumask); in smp_call_function_many_cond()
815 csd->node.u_flags |= CSD_TYPE_SYNC; in smp_call_function_many_cond()
816 csd->func = func; in smp_call_function_many_cond()
817 csd->info = info; in smp_call_function_many_cond()
819 csd->node.src = smp_processor_id(); in smp_call_function_many_cond()
820 csd->node.dst = cpu; in smp_call_function_many_cond()
824 if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) { in smp_call_function_many_cond()
825 __cpumask_set_cpu(cpu, cfd->cpumask_ipi); in smp_call_function_many_cond()
834 * provided mask. in smp_call_function_many_cond()
839 send_call_function_ipi_mask(cfd->cpumask_ipi); in smp_call_function_many_cond()
851 for_each_cpu(cpu, cfd->cpumask) { in smp_call_function_many_cond()
854 csd = per_cpu_ptr(cfd->csd, cpu); in smp_call_function_many_cond()
861 * smp_call_function_many(): Run a function on a set of CPUs.
862 * @mask: The set of cpus to run on (only runs on online subset).
863 * @func: The function to run. This must be fast and non-blocking.
864 * @info: An arbitrary pointer to pass to the function.
866 * (atomically) until function has completed on other CPUs. If
867 * %SCF_RUN_LOCAL is set, the function will also be run locally
872 * You must not call this function with disabled interrupts or from a
874 * must be disabled when calling this function.
876 void smp_call_function_many(const struct cpumask *mask, in smp_call_function_many() argument
879 smp_call_function_many_cond(mask, func, info, wait * SCF_WAIT, NULL); in smp_call_function_many()
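
A usage sketch for smp_call_function_many(), with an illustrative flush_local_state() callback; preemption is disabled around the call as the comment above requires, and the callback does not run on the calling CPU:

#include <linux/cpumask.h>
#include <linux/preempt.h>
#include <linux/smp.h>

/* Runs on each selected remote CPU in IPI context. */
static void flush_local_state(void *info)
{
}

static void example_many(const struct cpumask *mask)
{
	preempt_disable();
	smp_call_function_many(mask, flush_local_state, NULL, true);
	preempt_enable();
}
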
884 * smp_call_function(): Run a function on all other CPUs.
885 * @func: The function to run. This must be fast and non-blocking.
886 * @info: An arbitrary pointer to pass to the function.
887 * @wait: If true, wait (atomically) until function has completed
895 * You must not call this function with disabled interrupts or from a
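
A usage sketch for smp_call_function() itself, with an illustrative drain_remote_cache() callback; wait=1 blocks until every other online CPU has finished the callback:

#include <linux/smp.h>

/* Executed on every online CPU except the caller, in IPI context. */
static void drain_remote_cache(void *info)
{
}

static void example_all(void)
{
	smp_call_function(drain_remote_cache, NULL, 1);
}
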
914 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
917 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
993 * on_each_cpu_cond(): Call a function on each processor for which
994 * the supplied function cond_func returns true, optionally waiting
997 * @cond_func: A callback function that is passed a cpu id and
998 * the info parameter. The function is called
999 * with preemption disabled. The function should
1002 * @func: The function to run on all applicable CPUs.
1003 * This must be fast and non-blocking.
1005 * @wait: If true, wait (atomically) until function has
1011 * You must not call this function with disabled interrupts or
1015 void *info, bool wait, const struct cpumask *mask) in on_each_cpu_cond_mask() argument
1023 smp_call_function_many_cond(mask, func, info, scf_flags, cond_func); in on_each_cpu_cond_mask()
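
A sketch for on_each_cpu_cond_mask(), assuming illustrative helpers cpu_needs_flush() and do_flush(); only CPUs for which the condition callback returns true are sent the IPI, and the condition is evaluated with preemption disabled:

#include <linux/cpumask.h>
#include <linux/smp.h>

/* Decides, per CPU and with preemption disabled, whether to send the IPI. */
static bool cpu_needs_flush(int cpu, void *info)
{
	return cpumask_test_cpu(cpu, info);
}

/* Runs on each CPU that cpu_needs_flush() selected. */
static void do_flush(void *info)
{
}

static void example_cond(struct cpumask *dirty)
{
	on_each_cpu_cond_mask(cpu_needs_flush, do_flush, dirty, true,
			      cpu_online_mask);
}
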
1033 * kick_all_cpus_sync - Force all cpus out of idle
1035 * Used to synchronize the update of pm_idle function pointer. It's
1037 * callback function has been executed on all cpus. The execution of
1038 * the function can only happen on the remote cpus after they have
1039 * left the idle function which had been called via pm_idle function
1052 * wake_up_all_idle_cpus - break all cpus out of idle
1054 * including idle polling cpus, for non-idle cpus, we will do nothing
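
A sketch of the pattern the kick_all_cpus_sync() comment describes; the callback-pointer update itself is elided, only the synchronizing kick is shown:

#include <linux/smp.h>

static void example_after_callback_update(void)
{
	/* ... publish the new idle callback pointer here (not shown) ... */

	/* Interrupt every CPU, including ones busy-polling in idle, so each
	 * leaves the idle routine it entered before the update. */
	kick_all_cpus_sync();
}
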
1071 * struct smp_call_on_cpu_struct - Call a function on a specific CPU
1074 * @func: function to call
1075 * @data: function's data argument
1077 * @cpu: target CPU (%-1 for any CPU)
1079 * Used to call a function on a specific cpu and wait for it to return.
1097 if (sscs->cpu >= 0) in smp_call_on_cpu_callback()
1098 hypervisor_pin_vcpu(sscs->cpu); in smp_call_on_cpu_callback()
1099 sscs->ret = sscs->func(sscs->data); in smp_call_on_cpu_callback()
1100 if (sscs->cpu >= 0) in smp_call_on_cpu_callback()
1101 hypervisor_pin_vcpu(-1); in smp_call_on_cpu_callback()
1103 complete(&sscs->done); in smp_call_on_cpu_callback()
1112 .cpu = phys ? cpu : -1, in smp_call_on_cpu()
1118 return -ENXIO; in smp_call_on_cpu()
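
Finally, a sketch for smp_call_on_cpu(), with an illustrative slow_probe() callback; unlike the IPI-based calls above, the callback runs from process context (a work item bound to the target CPU) and may therefore sleep, and phys=true would additionally ask the hypervisor to pin the vCPU as the callback above does:

#include <linux/delay.h>
#include <linux/smp.h>

/* Runs from process context on the chosen CPU, so sleeping is allowed. */
static int slow_probe(void *data)
{
	msleep(10);
	return 0;
}

static int example_on_cpu(unsigned int cpu)
{
	/* phys=false: no hypervisor vCPU pinning requested. */
	return smp_call_on_cpu(cpu, slow_probe, NULL, false);
}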