xref: /openbmc/linux/kernel/smp.c (revision ae3473231e77a3f1909d48cd144cebe5e1d049b3)
/*
 * Generic helpers for SMP IPI calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/hypervisor.h>

#include "smpboot.h"

enum {
	CSD_FLAG_LOCK		= 0x01,
	CSD_FLAG_SYNCHRONOUS	= 0x02,
};

struct call_function_data {
	struct call_single_data	__percpu *csd;
	cpumask_var_t		cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	cfd->csd = alloc_percpu(struct call_single_data);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_percpu(cfd->csd);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	flush_smp_call_function_queue(false);
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources
 *
 * For non-synchronous IPI calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(struct call_single_data *csd)
{
	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

static __always_inline void csd_lock(struct call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(struct call_single_data *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}

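/*
 * Illustrative lifecycle sketch (not part of the original file): how the
 * csd lock protocol is used by the callers below.
 *
 *	csd_lock(csd);			// owner marks the csd in flight
 *	csd->func = f; csd->info = i;	// ordered after CSD_FLAG_LOCK (smp_wmb)
 *	llist_add(&csd->llist, queue);	// publish to the target CPU
 *	...				// target runs f(i), then csd_unlock()
 *	csd_lock_wait(csd);		// synchronous callers block here
 */
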
static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. The caller must already
 * hold the csd lock (->flags has CSD_FLAG_LOCK set); ->func
 * and ->info are filled in here from @func and @info.
 */
static int generic_exec_single(int cpu, struct call_single_data *csd,
			       smp_call_func_t func, void *info)
{
	if (cpu == smp_processor_id()) {
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU.
		 */
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	csd->func = func;
	csd->info = info;

	/*
	 * The list addition should be visible before sending the IPI, so
	 * that when the handler locks the list to pull the entry off it,
	 * the addition is observed; this follows from normal cache
	 * coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol in an architecture, sufficient synchronisation should
	 * be added to arch code to make it appear to obey cache coherency
	 * WRT locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	struct call_single_data *csd, *csd_next;
	static bool warned;

	WARN_ON(!irqs_disabled());

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			func(info);
			csd_unlock(csd);
		} else {
			csd_unlock(csd);
			func(info);
		}
	}

	/*
	 * Handle irq work items queued remotely by irq_work_queue_on().
	 * The SMP function calls above are typically synchronous, so run
	 * them first; some other CPUs may be busy-waiting for them.
	 */
	irq_work_run();
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data *csd;
	struct call_single_data csd_stack = {
		.flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send an smp call function interrupt to this cpu and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	err = generic_exec_single(cpu, csd, func, info);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);

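/*
 * Illustrative usage sketch (not part of the original file; the example_*
 * names are hypothetical): read a value on a remote CPU and wait for the
 * result.
 */
static void example_read_id(void *info)
{
	*(unsigned int *)info = smp_processor_id();	/* runs on target CPU */
}

static int __maybe_unused example_query_cpu(int cpu, unsigned int *id)
{
	/* wait=1: does not return until example_read_id() has run on @cpu */
	return smp_call_function_single(cpu, example_read_id, id, 1);
}
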
/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *				     specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
	int err = 0;

	preempt_disable();

	/* We could deadlock if we have to wait here with interrupts disabled! */
	if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
		csd_lock_wait(csd);

	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd, csd->func, csd->info);
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);

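/*
 * Illustrative sketch (not part of the original file; the example_* names
 * are hypothetical): embedding a csd in an object so it can be kicked from
 * contexts with interrupts disabled. The caller must guarantee the csd is
 * not reused before the previous call has completed.
 */
struct example_dev {
	struct call_single_data	csd;
	int			pending;
};

static void example_dev_poke(void *info)
{
	((struct example_dev *)info)->pending = 0;
}

static int __maybe_unused example_kick(struct example_dev *dev, int cpu)
{
	dev->csd.func = example_dev_poke;
	dev->csd.info = dev;
	/* returns immediately; example_dev_poke() runs on @cpu later */
	return smp_call_function_single_async(cpu, &dev->csd);
}
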
/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);

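/*
 * Illustrative sketch (not part of the original file; the example_* name is
 * hypothetical): run a callback on whichever CPU of a given mask is cheapest
 * to reach, preferring the current CPU and then its node, per the selection
 * preference documented above.
 */
static int __maybe_unused example_run_nearby(const struct cpumask *mask,
					     smp_call_func_t fn, void *arg)
{
	/* wait=1; returns -ENXIO if no CPU in @mask is online */
	return smp_call_function_any(mask, fn, arg, 1);
}
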
/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send an smp call function interrupt to this cpu and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	for_each_cpu(cpu, cfd->cpumask) {
		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);

		csd_lock(csd);
		if (wait)
			csd->flags |= CSD_FLAG_SYNCHRONOUS;
		csd->func = func;
		csd->info = info;
		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			struct call_single_data *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}
EXPORT_SYMBOL(smp_call_function_many);

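/*
 * Illustrative sketch (not part of the original file; the example_* names
 * are hypothetical): run a flush callback on a set of CPUs. Preemption must
 * be disabled across the call, hence get_cpu()/put_cpu().
 */
static void example_flush_local(void *info)
{
	/* runs on each targeted CPU, from the IPI handler */
}

static void __maybe_unused example_flush_mask(const struct cpumask *mask)
{
	get_cpu();	/* disable preemption, as the API requires */
	smp_call_function_many(mask, example_flush_local, NULL, true);
	put_cpu();
	/* note: the current CPU is never targeted; call locally if needed */
}
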
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is the hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;
	unsigned int cpu;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	num_nodes = num_online_nodes();
	num_cpus  = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus,  (num_cpus  > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);

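/*
 * Illustrative sketch (not part of the original file; the example_* names
 * are hypothetical): invalidate a global cache everywhere, including on the
 * calling CPU, and wait for completion.
 */
static void example_invalidate(void *info)
{
	/* runs on every online CPU; locally with IRQs saved/restored */
}

static void __maybe_unused example_invalidate_all(void)
{
	on_each_cpu(example_invalidate, NULL, 1);
}
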
/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate that a
 * non-atomic allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
								info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);

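/*
 * Illustrative sketch (not part of the original file; the example_* names
 * are hypothetical): only IPI the CPUs that actually have queued work.
 */
static DEFINE_PER_CPU(int, example_queued);

static bool example_has_work(int cpu, void *info)
{
	return per_cpu(example_queued, cpu) != 0;	/* preemption disabled */
}

static void example_drain(void *info)
{
	this_cpu_write(example_queued, 0);
}

static void __maybe_unused example_drain_busy_cpus(void)
{
	on_each_cpu_cond(example_has_work, example_drain, NULL, true,
			 GFP_KERNEL);
}
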
static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Wakes every online CPU that is currently idle, including CPUs that
 * are idle-polling; CPUs that are not idle are left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
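
/*
 * Illustrative sketch (not part of the original file; the example_* names
 * are hypothetical): run a function that may sleep on a specific CPU,
 * pinning the vCPU when running virtualized.
 */
static int example_slow_query(void *arg)
{
	/* runs in process context (a workqueue worker) on the target CPU */
	return 0;
}

static int __maybe_unused example_query_phys_cpu(unsigned int cpu)
{
	/* phys=true: ask the hypervisor to pin the vCPU while we run */
	return smp_call_on_cpu(cpu, example_slow_query, NULL, true);
}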
785