xref: /openbmc/linux/kernel/smp.c (revision 3932b9ca)
1 /*
2  * Generic helpers for smp ipi calls
3  *
4  * (C) Jens Axboe <jens.axboe@oracle.com> 2008
5  */
6 #include <linux/irq_work.h>
7 #include <linux/rcupdate.h>
8 #include <linux/rculist.h>
9 #include <linux/kernel.h>
10 #include <linux/export.h>
11 #include <linux/percpu.h>
12 #include <linux/init.h>
13 #include <linux/gfp.h>
14 #include <linux/smp.h>
15 #include <linux/cpu.h>
16 
17 #include "smpboot.h"
18 
19 enum {
20 	CSD_FLAG_LOCK		= 0x01,
21 	CSD_FLAG_WAIT		= 0x02,
22 };
23 
24 struct call_function_data {
25 	struct call_single_data	__percpu *csd;
26 	cpumask_var_t		cpumask;
27 };
28 
29 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
30 
31 static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
32 
33 static void flush_smp_call_function_queue(bool warn_cpu_offline);
34 
35 static int
36 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
37 {
38 	long cpu = (long)hcpu;
39 	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
40 
41 	switch (action) {
42 	case CPU_UP_PREPARE:
43 	case CPU_UP_PREPARE_FROZEN:
44 		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
45 				cpu_to_node(cpu)))
46 			return notifier_from_errno(-ENOMEM);
47 		cfd->csd = alloc_percpu(struct call_single_data);
48 		if (!cfd->csd) {
49 			free_cpumask_var(cfd->cpumask);
50 			return notifier_from_errno(-ENOMEM);
51 		}
52 		break;
53 
54 #ifdef CONFIG_HOTPLUG_CPU
55 	case CPU_UP_CANCELED:
56 	case CPU_UP_CANCELED_FROZEN:
57 		/* Fall-through to the CPU_DEAD[_FROZEN] case. */
58 
59 	case CPU_DEAD:
60 	case CPU_DEAD_FROZEN:
61 		free_cpumask_var(cfd->cpumask);
62 		free_percpu(cfd->csd);
63 		break;
64 
65 	case CPU_DYING:
66 	case CPU_DYING_FROZEN:
67 		/*
68 		 * The IPIs for the smp-call-function callbacks queued by other
69 		 * CPUs might arrive late, either due to hardware latencies or
70 		 * because this CPU disabled interrupts (inside stop-machine)
71 		 * before the IPIs were sent. So flush out any pending callbacks
72 		 * explicitly (without waiting for the IPIs to arrive), to
73 		 * ensure that the outgoing CPU doesn't go offline with work
74 		 * still pending.
75 		 */
76 		flush_smp_call_function_queue(false);
77 		break;
78 #endif
79 	}
80 
81 	return NOTIFY_OK;
82 }
83 
84 static struct notifier_block hotplug_cfd_notifier = {
85 	.notifier_call		= hotplug_cfd,
86 };
87 
88 void __init call_function_init(void)
89 {
90 	void *cpu = (void *)(long)smp_processor_id();
91 	int i;
92 
93 	for_each_possible_cpu(i)
94 		init_llist_head(&per_cpu(call_single_queue, i));
95 
96 	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
97 	register_cpu_notifier(&hotplug_cfd_notifier);
98 }
99 
100 /*
101  * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
102  *
103  * For non-synchronous ipi calls the csd can still be in use by the
104  * previous function call. For multi-cpu calls it's even more interesting
105  * as we'll have to ensure no other cpu is observing our csd.
106  */
107 static void csd_lock_wait(struct call_single_data *csd)
108 {
109 	while (csd->flags & CSD_FLAG_LOCK)
110 		cpu_relax();
111 }
112 
113 static void csd_lock(struct call_single_data *csd)
114 {
115 	csd_lock_wait(csd);
116 	csd->flags |= CSD_FLAG_LOCK;
117 
118 	/*
119 	 * prevent CPU from reordering the above assignment
120 	 * to ->flags with any subsequent assignments to other
121 	 * fields of the specified call_single_data structure:
122 	 */
123 	smp_mb();
124 }
125 
126 static void csd_unlock(struct call_single_data *csd)
127 {
128 	WARN_ON((csd->flags & CSD_FLAG_WAIT) && !(csd->flags & CSD_FLAG_LOCK));
129 
130 	/*
131 	 * ensure we're all done before releasing data:
132 	 */
133 	smp_mb();
134 
135 	csd->flags &= ~CSD_FLAG_LOCK;
136 }
137 
138 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
139 
140 /*
141  * Run @func on the given CPU: directly (with interrupts disabled) if it
142  * is the local CPU, otherwise via a call_single_data queued there plus an
143  * IPI.  If @csd is NULL an on-stack (wait) or per-cpu (no-wait) csd is used.
144  */
145 static int generic_exec_single(int cpu, struct call_single_data *csd,
146 			       smp_call_func_t func, void *info, int wait)
147 {
148 	struct call_single_data csd_stack = { .flags = 0 };
149 	unsigned long flags;
150 
151 
152 	if (cpu == smp_processor_id()) {
153 		local_irq_save(flags);
154 		func(info);
155 		local_irq_restore(flags);
156 		return 0;
157 	}
158 
159 
160 	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu))
161 		return -ENXIO;
162 
163 
164 	if (!csd) {
165 		csd = &csd_stack;
166 		if (!wait)
167 			csd = &__get_cpu_var(csd_data);
168 	}
169 
170 	csd_lock(csd);
171 
172 	csd->func = func;
173 	csd->info = info;
174 
175 	if (wait)
176 		csd->flags |= CSD_FLAG_WAIT;
177 
178 	/*
179 	 * The list addition should be visible to the target CPU before the IPI
180 	 * is sent, so the handler finds the new entry when it pulls entries off
181 	 * the list; normal cache coherency, as implied by spinlocks, guarantees
182 	 * this.
183 	 *
184 	 * If IPIs can arrive out of order with respect to the cache coherency
185 	 * protocol on an architecture, sufficient synchronisation should be
186 	 * added to arch code to make it appear to obey cache coherency WRT
187 	 * locking and barrier primitives; generic code isn't equipped to.
188 	 */
189 	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
190 		arch_send_call_function_single_ipi(cpu);
191 
192 	if (wait)
193 		csd_lock_wait(csd);
194 
195 	return 0;
196 }
197 
198 /**
199  * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
200  *
201  * Invoked by arch to handle an IPI for call function single.
202  * Must be called with interrupts disabled.
203  */
204 void generic_smp_call_function_single_interrupt(void)
205 {
206 	flush_smp_call_function_queue(true);
207 }
208 
209 /**
210  * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
211  *
212  * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
213  *		      offline CPU. Skip this check if set to 'false'.
214  *
215  * Flush any pending smp-call-function callbacks queued on this CPU. This is
216  * invoked by the generic IPI handler, as well as by a CPU about to go offline,
217  * to ensure that all pending IPI callbacks are run before it goes completely
218  * offline.
219  *
220  * Loop through the call_single_queue and run all the queued callbacks.
221  * Must be called with interrupts disabled.
222  */
223 static void flush_smp_call_function_queue(bool warn_cpu_offline)
224 {
225 	struct llist_head *head;
226 	struct llist_node *entry;
227 	struct call_single_data *csd, *csd_next;
228 	static bool warned;
229 
230 	WARN_ON(!irqs_disabled());
231 
232 	head = &__get_cpu_var(call_single_queue);
233 	entry = llist_del_all(head);
234 	entry = llist_reverse_order(entry);
235 
236 	/* There shouldn't be any pending callbacks on an offline CPU. */
237 	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
238 		     !warned && !llist_empty(head))) {
239 		warned = true;
240 		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
241 
242 		/*
243 		 * We don't have to use the _safe() variant here
244 		 * because we are not invoking the IPI handlers yet.
245 		 */
246 		llist_for_each_entry(csd, entry, llist)
247 			pr_warn("IPI callback %pS sent to offline CPU\n",
248 				csd->func);
249 	}
250 
251 	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
252 		csd->func(csd->info);
253 		csd_unlock(csd);
254 	}
255 
256 	/*
257 	 * Handle irq works queued remotely by irq_work_queue_on().
258 	 * The smp-call-function callbacks above are typically synchronous,
259 	 * so they are run first: some other CPU may be busy-waiting
260 	 * for them to complete.
261 	 */
262 	irq_work_run();
263 }
264 
265 /*
266  * smp_call_function_single - Run a function on a specific CPU
267  * @func: The function to run. This must be fast and non-blocking.
268  * @info: An arbitrary pointer to pass to the function.
269  * @wait: If true, wait until function has completed on other CPUs.
270  *
271  * Returns 0 on success, else a negative status code.
272  */
273 int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
274 			     int wait)
275 {
276 	int this_cpu;
277 	int err;
278 
279 	/*
280 	 * prevent preemption and reschedule on another processor,
281 	 * as well as CPU removal
282 	 */
283 	this_cpu = get_cpu();
284 
285 	/*
286 	 * Can deadlock when called with interrupts disabled.
287 	 * We allow CPUs that are not yet online though, as no one else can
288 	 * send an smp-call-function IPI to this CPU, so deadlocks
289 	 * can't happen.
290 	 */
291 	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
292 		     && !oops_in_progress);
293 
294 	err = generic_exec_single(cpu, NULL, func, info, wait);
295 
296 	put_cpu();
297 
298 	return err;
299 }
300 EXPORT_SYMBOL(smp_call_function_single);
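
/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * synchronous use of smp_call_function_single().  All "demo_*" names are
 * hypothetical.  The callback runs in IPI (hardirq) context on the target
 * CPU, so it must be fast and must not sleep.
 */
static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo_read_counter(void *info)
{
	*(unsigned long *)info = __this_cpu_read(demo_counter);
}

static unsigned long demo_counter_on(int cpu)
{
	unsigned long val = 0;

	/* wait == 1: returns only after demo_read_counter() ran on @cpu */
	smp_call_function_single(cpu, demo_read_counter, &val, 1);
	return val;
}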
301 
302 /**
303  * smp_call_function_single_async(): Run an asynchronous function on a
304  * 			         specific CPU.
305  * @cpu: The CPU to run on.
306  * @csd: Pre-allocated and setup data structure
307  *
308  * Like smp_call_function_single(), but the call is asynchronous and
309  * can thus be done from contexts with disabled interrupts.
310  *
311  * The caller passes its own pre-allocated data structure
312  * (i.e. one embedded in an object) and is responsible for synchronizing it
313  * such that the IPIs performed on the @csd are strictly serialized.
314  *
315  * NOTE: Be careful, there is unfortunately no current debugging facility to
316  * validate the correctness of this serialization.
317  */
318 int smp_call_function_single_async(int cpu, struct call_single_data *csd)
319 {
320 	int err = 0;
321 
322 	preempt_disable();
323 	err = generic_exec_single(cpu, csd, csd->func, csd->info, 0);
324 	preempt_enable();
325 
326 	return err;
327 }
328 EXPORT_SYMBOL_GPL(smp_call_function_single_async);
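
/*
 * Editor's illustrative sketch (not part of the original file): the async
 * variant with the csd embedded in the caller's own object, as required
 * above.  "struct demo_dev" is hypothetical and assumed zero-initialized
 * (so csd.flags starts out 0); the caller must not re-arm the csd until
 * the previous demo_dev_poke() invocation has completed.
 */
struct demo_dev {
	struct call_single_data csd;
	unsigned long pending;
};

static void demo_dev_poke(void *info)
{
	struct demo_dev *dev = info;

	/* fast, non-blocking handling on the target CPU */
	dev->pending = 0;
}

static void demo_dev_init(struct demo_dev *dev)
{
	dev->csd.func = demo_dev_poke;
	dev->csd.info = dev;
}

static void demo_dev_kick(struct demo_dev *dev, int cpu)
{
	dev->pending = 1;
	/* Never waits, so this is safe from contexts with interrupts off. */
	smp_call_function_single_async(cpu, &dev->csd);
}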
329 
330 /*
331  * smp_call_function_any - Run a function on any of the given cpus
332  * @mask: The mask of cpus it can run on.
333  * @func: The function to run. This must be fast and non-blocking.
334  * @info: An arbitrary pointer to pass to the function.
335  * @wait: If true, wait until function has completed.
336  *
337  * Returns 0 on success, else a negative status code (if no cpus were online).
338  *
339  * Selection preference:
340  *	1) current cpu if in @mask
341  *	2) any cpu of current node if in @mask
342  *	3) any other online cpu in @mask
343  */
344 int smp_call_function_any(const struct cpumask *mask,
345 			  smp_call_func_t func, void *info, int wait)
346 {
347 	unsigned int cpu;
348 	const struct cpumask *nodemask;
349 	int ret;
350 
351 	/* Try for same CPU (cheapest) */
352 	cpu = get_cpu();
353 	if (cpumask_test_cpu(cpu, mask))
354 		goto call;
355 
356 	/* Try for same node. */
357 	nodemask = cpumask_of_node(cpu_to_node(cpu));
358 	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
359 	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
360 		if (cpu_online(cpu))
361 			goto call;
362 	}
363 
364 	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
365 	cpu = cpumask_any_and(mask, cpu_online_mask);
366 call:
367 	ret = smp_call_function_single(cpu, func, info, wait);
368 	put_cpu();
369 	return ret;
370 }
371 EXPORT_SYMBOL_GPL(smp_call_function_any);
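
/*
 * Editor's illustrative sketch (not part of the original file): let the
 * kernel pick the cheapest CPU from a caller-supplied mask, preferring the
 * current CPU and then its node as documented above.  "demo_*" names are
 * hypothetical.
 */
static void demo_whoami(void *info)
{
	*(int *)info = smp_processor_id();
}

static int demo_any_cpu_of(const struct cpumask *mask)
{
	int who = -1;

	/* Returns a negative error if no CPU in @mask is online. */
	if (smp_call_function_any(mask, demo_whoami, &who, 1))
		return -1;
	return who;
}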
372 
373 /**
374  * smp_call_function_many(): Run a function on a set of other CPUs.
375  * @mask: The set of cpus to run on (only runs on online subset).
376  * @func: The function to run. This must be fast and non-blocking.
377  * @info: An arbitrary pointer to pass to the function.
378  * @wait: If true, wait (atomically) until function has completed
379  *        on other CPUs.
380  *
381  * If @wait is true, then returns once @func has returned.
382  *
383  * You must not call this function with disabled interrupts or from a
384  * hardware interrupt handler or from a bottom half handler. Preemption
385  * must be disabled when calling this function.
386  */
387 void smp_call_function_many(const struct cpumask *mask,
388 			    smp_call_func_t func, void *info, bool wait)
389 {
390 	struct call_function_data *cfd;
391 	int cpu, next_cpu, this_cpu = smp_processor_id();
392 
393 	/*
394 	 * Can deadlock when called with interrupts disabled.
395 	 * We allow CPUs that are not yet online though, as no one else can
396 	 * send an smp-call-function IPI to this CPU, so deadlocks
397 	 * can't happen.
398 	 */
399 	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
400 		     && !oops_in_progress && !early_boot_irqs_disabled);
401 
402 	/* Try the fastpath: find the first online CPU they want, other than this one. */
403 	cpu = cpumask_first_and(mask, cpu_online_mask);
404 	if (cpu == this_cpu)
405 		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
406 
407 	/* No online cpus?  We're done. */
408 	if (cpu >= nr_cpu_ids)
409 		return;
410 
411 	/* Do we have another CPU which isn't us? */
412 	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
413 	if (next_cpu == this_cpu)
414 		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
415 
416 	/* Fastpath: do that cpu by itself. */
417 	if (next_cpu >= nr_cpu_ids) {
418 		smp_call_function_single(cpu, func, info, wait);
419 		return;
420 	}
421 
422 	cfd = &__get_cpu_var(cfd_data);
423 
424 	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
425 	cpumask_clear_cpu(this_cpu, cfd->cpumask);
426 
427 	/* Some callers race with other cpus changing the passed mask */
428 	if (unlikely(!cpumask_weight(cfd->cpumask)))
429 		return;
430 
431 	for_each_cpu(cpu, cfd->cpumask) {
432 		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
433 
434 		csd_lock(csd);
435 		csd->func = func;
436 		csd->info = info;
437 		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
438 	}
439 
440 	/* Send a message to all CPUs in the map */
441 	arch_send_call_function_ipi_mask(cfd->cpumask);
442 
443 	if (wait) {
444 		for_each_cpu(cpu, cfd->cpumask) {
445 			struct call_single_data *csd;
446 
447 			csd = per_cpu_ptr(cfd->csd, cpu);
448 			csd_lock_wait(csd);
449 		}
450 	}
451 }
452 EXPORT_SYMBOL(smp_call_function_many);
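
/*
 * Editor's illustrative sketch (not part of the original file): push a new
 * generation number to every online CPU in @mask.  smp_call_function_many()
 * skips the local CPU and requires preemption to be disabled, so the caller
 * handles both here via get_cpu()/put_cpu().  "demo_*" names are
 * hypothetical; on_each_cpu_mask() below packages this same pattern.
 */
static DEFINE_PER_CPU(unsigned long, demo_cache_gen);

static void demo_set_gen(void *info)
{
	__this_cpu_write(demo_cache_gen, (unsigned long)info);
}

static void demo_push_gen(const struct cpumask *mask, unsigned long gen)
{
	int cpu = get_cpu();

	/* wait=true: all remote callbacks have finished when this returns */
	smp_call_function_many(mask, demo_set_gen, (void *)gen, true);
	if (cpumask_test_cpu(cpu, mask))
		demo_set_gen((void *)gen);
	put_cpu();
}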
453 
454 /**
455  * smp_call_function(): Run a function on all other CPUs.
456  * @func: The function to run. This must be fast and non-blocking.
457  * @info: An arbitrary pointer to pass to the function.
458  * @wait: If true, wait (atomically) until function has completed
459  *        on other CPUs.
460  *
461  * Returns 0.
462  *
463  * If @wait is true, then returns once @func has returned; otherwise
464  * it returns just before the target cpu calls @func.
465  *
466  * You must not call this function with disabled interrupts or from a
467  * hardware interrupt handler or from a bottom half handler.
468  */
469 int smp_call_function(smp_call_func_t func, void *info, int wait)
470 {
471 	preempt_disable();
472 	smp_call_function_many(cpu_online_mask, func, info, wait);
473 	preempt_enable();
474 
475 	return 0;
476 }
477 EXPORT_SYMBOL(smp_call_function);
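
/*
 * Editor's illustrative sketch (not part of the original file): run a fast
 * callback on every *other* online CPU and wait for it.  The local CPU is
 * not included; use on_each_cpu() below when it must run everywhere.
 * "demo_*" names are hypothetical.
 */
static DEFINE_PER_CPU(int, demo_need_resample);

static void demo_resample(void *unused)
{
	__this_cpu_write(demo_need_resample, 0);
}

static void demo_resample_others(void)
{
	/* wait=1: every other online CPU has run demo_resample() on return */
	smp_call_function(demo_resample, NULL, 1);
}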
478 
479 /* Setup configured maximum number of CPUs to activate */
480 unsigned int setup_max_cpus = NR_CPUS;
481 EXPORT_SYMBOL(setup_max_cpus);
482 
483 
484 /*
485  * Setup routine for controlling SMP activation
486  *
487  * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
488  * activation entirely (the MPS table probe still happens, though).
489  *
490  * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
491  * greater than 0, limits the maximum number of CPUs activated in
492  * SMP mode to <NUM>.
493  */
494 
495 void __weak arch_disable_smp_support(void) { }
496 
497 static int __init nosmp(char *str)
498 {
499 	setup_max_cpus = 0;
500 	arch_disable_smp_support();
501 
502 	return 0;
503 }
504 
505 early_param("nosmp", nosmp);
506 
507 /* this is a hard limit on nr_cpu_ids */
508 static int __init nrcpus(char *str)
509 {
510 	int nr_cpus;
511 
512 	get_option(&str, &nr_cpus);
513 	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
514 		nr_cpu_ids = nr_cpus;
515 
516 	return 0;
517 }
518 
519 early_param("nr_cpus", nrcpus);
520 
521 static int __init maxcpus(char *str)
522 {
523 	get_option(&str, &setup_max_cpus);
524 	if (setup_max_cpus == 0)
525 		arch_disable_smp_support();
526 
527 	return 0;
528 }
529 
530 early_param("maxcpus", maxcpus);
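
/*
 * Editor's note (illustrative, not part of the original file): with the
 * options above, booting with "nr_cpus=4 maxcpus=2" caps nr_cpu_ids at 4
 * possible CPUs but brings only 2 of them online during smp_init(); with
 * CONFIG_HOTPLUG_CPU the remaining present CPUs can be onlined later via
 * /sys/devices/system/cpu/cpuN/online.
 */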
531 
532 /* Setup number of possible processor ids */
533 int nr_cpu_ids __read_mostly = NR_CPUS;
534 EXPORT_SYMBOL(nr_cpu_ids);
535 
536 /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
537 void __init setup_nr_cpu_ids(void)
538 {
539 	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
540 }
541 
542 void __weak smp_announce(void)
543 {
544 	printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
545 }
546 
547 /* Called by boot processor to activate the rest. */
548 void __init smp_init(void)
549 {
550 	unsigned int cpu;
551 
552 	idle_threads_init();
553 
554 	/* FIXME: This should be done in userspace --RR */
555 	for_each_present_cpu(cpu) {
556 		if (num_online_cpus() >= setup_max_cpus)
557 			break;
558 		if (!cpu_online(cpu))
559 			cpu_up(cpu);
560 	}
561 
562 	/* Any cleanup work */
563 	smp_announce();
564 	smp_cpus_done(setup_max_cpus);
565 }
566 
567 /*
568  * Call a function on all processors.  May be used during early boot while
569  * early_boot_irqs_disabled is set, because it uses local_irq_save/restore()
570  * rather than local_irq_disable/enable() for the local call.
571  */
572 int on_each_cpu(void (*func) (void *info), void *info, int wait)
573 {
574 	unsigned long flags;
575 	int ret = 0;
576 
577 	preempt_disable();
578 	ret = smp_call_function(func, info, wait);
579 	local_irq_save(flags);
580 	func(info);
581 	local_irq_restore(flags);
582 	preempt_enable();
583 	return ret;
584 }
585 EXPORT_SYMBOL(on_each_cpu);
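
/*
 * Editor's illustrative sketch (not part of the original file): reset a
 * hypothetical per-CPU statistic everywhere, including the local CPU, which
 * on_each_cpu() handles with local_irq_save()/restore() as noted above.
 */
static DEFINE_PER_CPU(unsigned long, demo_stat);

static void demo_clear_stat(void *unused)
{
	__this_cpu_write(demo_stat, 0);
}

static void demo_clear_all_stats(void)
{
	/* wait=1: all CPUs, local included, are done when this returns */
	on_each_cpu(demo_clear_stat, NULL, 1);
}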
586 
587 /**
588  * on_each_cpu_mask(): Run a function on processors specified by
589  * cpumask, which may include the local processor.
590  * @mask: The set of cpus to run on (only runs on online subset).
591  * @func: The function to run. This must be fast and non-blocking.
592  * @info: An arbitrary pointer to pass to the function.
593  * @wait: If true, wait (atomically) until function has completed
594  *        on other CPUs.
595  *
596  * If @wait is true, then returns once @func has returned.
597  *
598  * You must not call this function with disabled interrupts or from a
599  * hardware interrupt handler or from a bottom half handler.  The
600  * exception is that it may be used during early boot while
601  * early_boot_irqs_disabled is set.
602  */
603 void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
604 			void *info, bool wait)
605 {
606 	int cpu = get_cpu();
607 
608 	smp_call_function_many(mask, func, info, wait);
609 	if (cpumask_test_cpu(cpu, mask)) {
610 		unsigned long flags;
611 		local_irq_save(flags);
612 		func(info);
613 		local_irq_restore(flags);
614 	}
615 	put_cpu();
616 }
617 EXPORT_SYMBOL(on_each_cpu_mask);
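
/*
 * Editor's illustrative sketch (not part of the original file):
 * on_each_cpu_mask() packages the "remote CPUs plus maybe myself" pattern
 * shown after smp_call_function_many() above.  "demo_*" names are
 * hypothetical.
 */
static DEFINE_PER_CPU(unsigned long, demo_mask_hits);

static void demo_mask_hit(void *unused)
{
	__this_cpu_inc(demo_mask_hits);
}

static void demo_poke_mask(const struct cpumask *mask)
{
	/* Runs on every online CPU in @mask, including this one if it's set. */
	on_each_cpu_mask(mask, demo_mask_hit, NULL, true);
}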
618 
619 /*
620  * on_each_cpu_cond(): Call a function on each processor for which
621  * the supplied function cond_func returns true, optionally waiting
622  * for all the required CPUs to finish. This may include the local
623  * processor.
624  * @cond_func:	A callback function that is passed a cpu id and
625  *		the info parameter. The function is called
626  *		with preemption disabled. The function should
627  *		return a boolean value indicating whether to IPI
628  *		the specified CPU.
629  * @func:	The function to run on all applicable CPUs.
630  *		This must be fast and non-blocking.
631  * @info:	An arbitrary pointer to pass to both functions.
632  * @wait:	If true, wait (atomically) until function has
633  *		completed on other CPUs.
634  * @gfp_flags:	GFP flags to use when allocating the cpumask
635  *		used internally by the function.
636  *
637  * The function might sleep if the GFP flags indicate that a
638  * non-atomic allocation is allowed.
639  *
640  * Preemption is disabled to protect against CPUs going offline but not online.
641  * CPUs going online during the call will not be seen or sent an IPI.
642  *
643  * You must not call this function with disabled interrupts or
644  * from a hardware interrupt handler or from a bottom half handler.
645  */
646 void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
647 			smp_call_func_t func, void *info, bool wait,
648 			gfp_t gfp_flags)
649 {
650 	cpumask_var_t cpus;
651 	int cpu, ret;
652 
653 	might_sleep_if(gfp_flags & __GFP_WAIT);
654 
655 	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
656 		preempt_disable();
657 		for_each_online_cpu(cpu)
658 			if (cond_func(cpu, info))
659 				cpumask_set_cpu(cpu, cpus);
660 		on_each_cpu_mask(cpus, func, info, wait);
661 		preempt_enable();
662 		free_cpumask_var(cpus);
663 	} else {
664 		/*
665 		 * No free cpumask, bother. No matter, we'll
666 		 * just have to IPI them one by one.
667 		 */
668 		preempt_disable();
669 		for_each_online_cpu(cpu)
670 			if (cond_func(cpu, info)) {
671 				ret = smp_call_function_single(cpu, func,
672 								info, wait);
673 				WARN_ON_ONCE(ret);
674 			}
675 		preempt_enable();
676 	}
677 }
678 EXPORT_SYMBOL(on_each_cpu_cond);
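
/*
 * Editor's illustrative sketch (not part of the original file): only
 * interrupt CPUs that actually have work pending, as judged by a
 * hypothetical per-CPU "dirty" flag.
 */
static DEFINE_PER_CPU(bool, demo_dirty);

static bool demo_cpu_is_dirty(int cpu, void *info)
{
	return per_cpu(demo_dirty, cpu);
}

static void demo_clean_cpu(void *info)
{
	/* fast, non-blocking cleanup on the IPI'd CPU */
	__this_cpu_write(demo_dirty, false);
}

static void demo_clean_dirty_cpus(void)
{
	/* GFP_KERNEL: the internal cpumask allocation may sleep */
	on_each_cpu_cond(demo_cpu_is_dirty, demo_clean_cpu, NULL, true,
			 GFP_KERNEL);
}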
679 
680 static void do_nothing(void *unused)
681 {
682 }
683 
684 /**
685  * kick_all_cpus_sync - Force all cpus out of idle
686  *
687  * Used to synchronize the update of the pm_idle function pointer. It is
688  * called after the pointer is updated and returns after the dummy
689  * callback function has been executed on all CPUs. The remote CPUs can
690  * only execute the callback after they have left the idle loop they
691  * entered through the old pm_idle pointer, so once this function
692  * returns it is guaranteed that nothing uses the previous pointer
693  * anymore.
694  */
695 void kick_all_cpus_sync(void)
696 {
697 	/* Make sure the change is visible before we kick the cpus */
698 	smp_mb();
699 	smp_call_function(do_nothing, NULL, 1);
700 }
701 EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
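
/*
 * Editor's illustrative sketch (not part of the original file): publish a
 * new callback pointer and make sure no CPU can still be running the old
 * one before it is torn down.  "demo_idle_hook" is hypothetical.
 */
static void (*demo_idle_hook)(void);

static void demo_set_idle_hook(void (*new_hook)(void))
{
	demo_idle_hook = new_hook;
	/*
	 * kick_all_cpus_sync() publishes the store (smp_mb()) and returns
	 * only after a dummy callback ran on every CPU, i.e. after every
	 * CPU has left any code reached through the old pointer.
	 */
	kick_all_cpus_sync();
}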
702