xref: /openbmc/linux/kernel/smp.c (revision 7b7dfdd2)
1 /*
2  * Generic helpers for smp ipi calls
3  *
4  * (C) Jens Axboe <jens.axboe@oracle.com> 2008
5  */
6 #include <linux/rcupdate.h>
7 #include <linux/rculist.h>
8 #include <linux/kernel.h>
9 #include <linux/export.h>
10 #include <linux/percpu.h>
11 #include <linux/init.h>
12 #include <linux/gfp.h>
13 #include <linux/smp.h>
14 #include <linux/cpu.h>
15 
16 #include "smpboot.h"
17 
18 enum {
19 	CSD_FLAG_LOCK		= 0x01,
20 	CSD_FLAG_WAIT		= 0x02,
21 };
22 
23 struct call_function_data {
24 	struct call_single_data	__percpu *csd;
25 	cpumask_var_t		cpumask;
26 };
27 
28 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
29 
30 static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
31 
32 static int
33 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
34 {
35 	long cpu = (long)hcpu;
36 	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
37 
38 	switch (action) {
39 	case CPU_UP_PREPARE:
40 	case CPU_UP_PREPARE_FROZEN:
41 		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
42 				cpu_to_node(cpu)))
43 			return notifier_from_errno(-ENOMEM);
44 		cfd->csd = alloc_percpu(struct call_single_data);
45 		if (!cfd->csd) {
46 			free_cpumask_var(cfd->cpumask);
47 			return notifier_from_errno(-ENOMEM);
48 		}
49 		break;
50 
51 #ifdef CONFIG_HOTPLUG_CPU
52 	case CPU_UP_CANCELED:
53 	case CPU_UP_CANCELED_FROZEN:
54 
55 	case CPU_DEAD:
56 	case CPU_DEAD_FROZEN:
57 		free_cpumask_var(cfd->cpumask);
58 		free_percpu(cfd->csd);
59 		break;
60 #endif
61 	};
62 
63 	return NOTIFY_OK;
64 }
65 
66 static struct notifier_block hotplug_cfd_notifier = {
67 	.notifier_call		= hotplug_cfd,
68 };
69 
70 void __init call_function_init(void)
71 {
72 	void *cpu = (void *)(long)smp_processor_id();
73 	int i;
74 
75 	for_each_possible_cpu(i)
76 		init_llist_head(&per_cpu(call_single_queue, i));
77 
78 	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
79 	register_cpu_notifier(&hotplug_cfd_notifier);
80 }
81 
82 /*
83  * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
84  *
85  * For non-synchronous ipi calls the csd can still be in use by the
86  * previous function call. For multi-cpu calls it's even more interesting
87  * as we'll have to ensure no other cpu is observing our csd.
88  */
89 static void csd_lock_wait(struct call_single_data *csd)
90 {
91 	while (csd->flags & CSD_FLAG_LOCK)
92 		cpu_relax();
93 }
94 
95 static void csd_lock(struct call_single_data *csd)
96 {
97 	csd_lock_wait(csd);
98 	csd->flags |= CSD_FLAG_LOCK;
99 
100 	/*
101 	 * prevent CPU from reordering the above assignment
102 	 * to ->flags with any subsequent assignments to other
103 	 * fields of the specified call_single_data structure:
104 	 */
105 	smp_mb();
106 }
107 
108 static void csd_unlock(struct call_single_data *csd)
109 {
110 	WARN_ON((csd->flags & CSD_FLAG_WAIT) && !(csd->flags & CSD_FLAG_LOCK));
111 
112 	/*
113 	 * ensure we're all done before releasing data:
114 	 */
115 	smp_mb();
116 
117 	csd->flags &= ~CSD_FLAG_LOCK;
118 }
119 
120 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
121 
122 /*
123  * Insert a call_single_data element for execution on the given CPU.
124  * If @csd is NULL, an on-stack (for synchronous calls) or per-cpu (for
125  * asynchronous calls) structure is used instead; ->func and ->info are
126  * filled in below.
 */
127 static int generic_exec_single(int cpu, struct call_single_data *csd,
128 			       smp_call_func_t func, void *info, int wait)
129 {
130 	struct call_single_data csd_stack = { .flags = 0 };
131 	unsigned long flags;
132 
133 
134 	if (cpu == smp_processor_id()) {
135 		local_irq_save(flags);
136 		func(info);
137 		local_irq_restore(flags);
138 		return 0;
139 	}
140 
141 
142 	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu))
143 		return -ENXIO;
144 
145 
146 	if (!csd) {
147 		csd = &csd_stack;
148 		if (!wait)
149 			csd = &__get_cpu_var(csd_data);
150 	}
151 
152 	csd_lock(csd);
153 
154 	csd->func = func;
155 	csd->info = info;
156 
157 	if (wait)
158 		csd->flags |= CSD_FLAG_WAIT;
159 
160 	/*
161 	 * The list addition should be visible to the IPI handler, which
162 	 * pulls the entry off the list, before the IPI is sent; this follows
163 	 * from normal cache coherency rules implied by spinlocks.
164 	 *
165 	 * If IPIs can be delivered out of order with respect to the cache
166 	 * coherency protocol on an architecture, sufficient synchronisation
167 	 * should be added to arch code to make it appear to obey cache
168 	 * coherency WRT locking and barrier primitives. Generic code isn't
169 	 * really equipped to do the right thing...
170 	 */
171 	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
172 		arch_send_call_function_single_ipi(cpu);
173 
174 	if (wait)
175 		csd_lock_wait(csd);
176 
177 	return 0;
178 }
179 
180 /*
181  * Invoked by arch to handle an IPI for call function single. Must be
182  * called from the arch with interrupts disabled.
183  */
184 void generic_smp_call_function_single_interrupt(void)
185 {
186 	struct llist_node *entry;
187 	struct call_single_data *csd, *csd_next;
188 	static bool warned;
189 
190 	entry = llist_del_all(&__get_cpu_var(call_single_queue));
191 	entry = llist_reverse_order(entry);
192 
193 	/*
194 	 * Shouldn't receive this interrupt on a cpu that is not yet online.
195 	 */
196 	if (unlikely(!cpu_online(smp_processor_id()) && !warned)) {
197 		warned = true;
198 		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
199 
200 		/*
201 		 * We don't have to use the _safe() variant here
202 		 * because we are not invoking the IPI handlers yet.
203 		 */
204 		llist_for_each_entry(csd, entry, llist)
205 			pr_warn("IPI callback %pS sent to offline CPU\n",
206 				csd->func);
207 	}
208 
209 	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
210 		csd->func(csd->info);
211 		csd_unlock(csd);
212 	}
213 }
214 
215 /*
216  * smp_call_function_single - Run a function on a specific CPU
217  * @func: The function to run. This must be fast and non-blocking.
218  * @info: An arbitrary pointer to pass to the function.
219  * @wait: If true, wait until function has completed on other CPUs.
220  *
221  * Returns 0 on success, else a negative status code.
222  */
223 int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
224 			     int wait)
225 {
226 	int this_cpu;
227 	int err;
228 
229 	/*
230 	 * prevent preemption and reschedule on another processor,
231 	 * as well as CPU removal
232 	 */
233 	this_cpu = get_cpu();
234 
235 	/*
236 	 * Can deadlock when called with interrupts disabled.
237 	 * We allow CPUs that are not yet online though, as no one else can
238 	 * send an smp call function interrupt to this CPU and as such
239 	 * deadlocks can't happen.
240 	 */
241 	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
242 		     && !oops_in_progress);
243 
244 	err = generic_exec_single(cpu, NULL, func, info, wait);
245 
246 	put_cpu();
247 
248 	return err;
249 }
250 EXPORT_SYMBOL(smp_call_function_single);
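/*
 * Illustrative usage sketch: run a trivial callback on CPU 3 and wait for
 * it to finish.  remote_ident() and 'id' below are made-up names; any fast,
 * non-blocking function works.  With wait=1 the caller spins in
 * csd_lock_wait() until the target CPU has run the callback from its IPI
 * handler.
 *
 *	static void remote_ident(void *info)
 *	{
 *		*(int *)info = smp_processor_id();
 *	}
 *
 *	int id, err;
 *
 *	err = smp_call_function_single(3, remote_ident, &id, 1);
 *	if (!err)
 *		pr_info("callback ran on CPU %d\n", id);
 */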
251 
252 /**
253  * smp_call_function_single_async(): Run an asynchronous function on a
254  * 			         specific CPU.
255  * @cpu: The CPU to run on.
256  * @csd: Pre-allocated and setup data structure
257  *
258  * Like smp_call_function_single(), but the call is asynchronous and
259  * can thus be done from contexts with disabled interrupts.
260  *
261  * The caller passes its own pre-allocated data structure
262  * (i.e. embedded in an object) and is responsible for synchronizing it
263  * such that the IPIs performed on the @csd are strictly serialized.
264  *
265  * NOTE: Be careful, there is unfortunately no current debugging facility to
266  * validate the correctness of this serialization.
267  */
268 int smp_call_function_single_async(int cpu, struct call_single_data *csd)
269 {
270 	int err = 0;
271 
272 	preempt_disable();
273 	err = generic_exec_single(cpu, csd, csd->func, csd->info, 0);
274 	preempt_enable();
275 
276 	return err;
277 }
278 EXPORT_SYMBOL_GPL(smp_call_function_single_async);
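/*
 * Illustrative usage sketch: struct my_dev, my_dev_kick() and target_cpu
 * are made-up names.  The call_single_data lives in a zero-initialised
 * caller object, is set up once, and must not be reused until the previous
 * IPI has completed (see the serialization note above).
 *
 *	struct my_dev {
 *		struct call_single_data csd;
 *		unsigned long pending;
 *	};
 *
 *	static void my_dev_kick(void *info)
 *	{
 *		struct my_dev *dev = info;
 *
 *		dev->pending = 0;
 *	}
 *
 *	dev->csd.func = my_dev_kick;
 *	dev->csd.info = dev;
 *	smp_call_function_single_async(target_cpu, &dev->csd);
 */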
279 
280 /*
281  * smp_call_function_any - Run a function on any of the given cpus
282  * @mask: The mask of cpus it can run on.
283  * @func: The function to run. This must be fast and non-blocking.
284  * @info: An arbitrary pointer to pass to the function.
285  * @wait: If true, wait until function has completed.
286  *
287  * Returns 0 on success, else a negative status code (if no cpus were online).
288  *
289  * Selection preference:
290  *	1) current cpu if in @mask
291  *	2) any cpu of current node if in @mask
292  *	3) any other online cpu in @mask
293  */
294 int smp_call_function_any(const struct cpumask *mask,
295 			  smp_call_func_t func, void *info, int wait)
296 {
297 	unsigned int cpu;
298 	const struct cpumask *nodemask;
299 	int ret;
300 
301 	/* Try for same CPU (cheapest) */
302 	cpu = get_cpu();
303 	if (cpumask_test_cpu(cpu, mask))
304 		goto call;
305 
306 	/* Try for same node. */
307 	nodemask = cpumask_of_node(cpu_to_node(cpu));
308 	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
309 	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
310 		if (cpu_online(cpu))
311 			goto call;
312 	}
313 
314 	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
315 	cpu = cpumask_any_and(mask, cpu_online_mask);
316 call:
317 	ret = smp_call_function_single(cpu, func, info, wait);
318 	put_cpu();
319 	return ret;
320 }
321 EXPORT_SYMBOL_GPL(smp_call_function_any);
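/*
 * Illustrative usage sketch: run a callback on whichever CPU of a
 * caller-supplied mask is cheapest to reach, preferring the local CPU and
 * then the local node as described above.  probe_func, allowed and result
 * are made-up names.
 *
 *	err = smp_call_function_any(allowed, probe_func, &result, 1);
 *	if (err)
 *		pr_warn("no online CPU in the mask\n");
 */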
322 
323 /**
324  * smp_call_function_many(): Run a function on a set of other CPUs.
325  * @mask: The set of cpus to run on (only runs on online subset).
326  * @func: The function to run. This must be fast and non-blocking.
327  * @info: An arbitrary pointer to pass to the function.
328  * @wait: If true, wait (atomically) until function has completed
329  *        on other CPUs.
330  *
331  * If @wait is true, then returns once @func has returned.
332  *
333  * You must not call this function with disabled interrupts or from a
334  * hardware interrupt handler or from a bottom half handler. Preemption
335  * must be disabled when calling this function.
336  */
337 void smp_call_function_many(const struct cpumask *mask,
338 			    smp_call_func_t func, void *info, bool wait)
339 {
340 	struct call_function_data *cfd;
341 	int cpu, next_cpu, this_cpu = smp_processor_id();
342 
343 	/*
344 	 * Can deadlock when called with interrupts disabled.
345 	 * We allow CPUs that are not yet online though, as no one else can
346 	 * send an smp call function interrupt to this CPU and as such
347 	 * deadlocks can't happen.
348 	 */
349 	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
350 		     && !oops_in_progress && !early_boot_irqs_disabled);
351 
352 	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
353 	cpu = cpumask_first_and(mask, cpu_online_mask);
354 	if (cpu == this_cpu)
355 		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
356 
357 	/* No online cpus?  We're done. */
358 	if (cpu >= nr_cpu_ids)
359 		return;
360 
361 	/* Do we have another CPU which isn't us? */
362 	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
363 	if (next_cpu == this_cpu)
364 		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
365 
366 	/* Fastpath: do that cpu by itself. */
367 	if (next_cpu >= nr_cpu_ids) {
368 		smp_call_function_single(cpu, func, info, wait);
369 		return;
370 	}
371 
372 	cfd = &__get_cpu_var(cfd_data);
373 
374 	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
375 	cpumask_clear_cpu(this_cpu, cfd->cpumask);
376 
377 	/* Some callers race with other cpus changing the passed mask */
378 	if (unlikely(!cpumask_weight(cfd->cpumask)))
379 		return;
380 
381 	for_each_cpu(cpu, cfd->cpumask) {
382 		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
383 
384 		csd_lock(csd);
385 		csd->func = func;
386 		csd->info = info;
387 		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
388 	}
389 
390 	/* Send a message to all CPUs in the map */
391 	arch_send_call_function_ipi_mask(cfd->cpumask);
392 
393 	if (wait) {
394 		for_each_cpu(cpu, cfd->cpumask) {
395 			struct call_single_data *csd;
396 
397 			csd = per_cpu_ptr(cfd->csd, cpu);
398 			csd_lock_wait(csd);
399 		}
400 	}
401 }
402 EXPORT_SYMBOL(smp_call_function_many);
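/*
 * Illustrative usage sketch: drain_func and target_mask are made-up names.
 * Preemption must be disabled around the call, and the current CPU is
 * always skipped even if it is in the mask.
 *
 *	get_cpu();
 *	smp_call_function_many(target_mask, drain_func, NULL, true);
 *	put_cpu();
 */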
403 
404 /**
405  * smp_call_function(): Run a function on all other CPUs.
406  * @func: The function to run. This must be fast and non-blocking.
407  * @info: An arbitrary pointer to pass to the function.
408  * @wait: If true, wait (atomically) until function has completed
409  *        on other CPUs.
410  *
411  * Returns 0.
412  *
413  * If @wait is true, then returns once @func has returned; otherwise
414  * it returns just before the target cpu calls @func.
415  *
416  * You must not call this function with disabled interrupts or from a
417  * hardware interrupt handler or from a bottom half handler.
418  */
419 int smp_call_function(smp_call_func_t func, void *info, int wait)
420 {
421 	preempt_disable();
422 	smp_call_function_many(cpu_online_mask, func, info, wait);
423 	preempt_enable();
424 
425 	return 0;
426 }
427 EXPORT_SYMBOL(smp_call_function);
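/*
 * Illustrative usage sketch: flush_local_state is a made-up callback name.
 * Note that the calling CPU does not run the callback itself; use
 * on_each_cpu() below when it should.
 *
 *	smp_call_function(flush_local_state, NULL, 1);
 */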
428 
429 /* Setup configured maximum number of CPUs to activate */
430 unsigned int setup_max_cpus = NR_CPUS;
431 EXPORT_SYMBOL(setup_max_cpus);
432 
433 
434 /*
435  * Setup routine for controlling SMP activation
436  *
437  * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
438  * activation entirely (the MPS table probe still happens, though).
439  *
440  * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
441  * greater than 0, limits the maximum number of CPUs activated in
442  * SMP mode to <NUM>.
443  */
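/*
 * For example: booting with "maxcpus=2" activates at most two CPUs during
 * smp_init(), "nosmp" or "maxcpus=0" disables SMP bring-up entirely, and
 * "nr_cpus=4" additionally caps nr_cpu_ids, which is a hard limit on the
 * CPU ids the kernel will ever use.
 */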
444 
445 void __weak arch_disable_smp_support(void) { }
446 
447 static int __init nosmp(char *str)
448 {
449 	setup_max_cpus = 0;
450 	arch_disable_smp_support();
451 
452 	return 0;
453 }
454 
455 early_param("nosmp", nosmp);
456 
457 /* this is a hard limit */
458 static int __init nrcpus(char *str)
459 {
460 	int nr_cpus;
461 
462 	get_option(&str, &nr_cpus);
463 	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
464 		nr_cpu_ids = nr_cpus;
465 
466 	return 0;
467 }
468 
469 early_param("nr_cpus", nrcpus);
470 
471 static int __init maxcpus(char *str)
472 {
473 	get_option(&str, &setup_max_cpus);
474 	if (setup_max_cpus == 0)
475 		arch_disable_smp_support();
476 
477 	return 0;
478 }
479 
480 early_param("maxcpus", maxcpus);
481 
482 /* Setup number of possible processor ids */
483 int nr_cpu_ids __read_mostly = NR_CPUS;
484 EXPORT_SYMBOL(nr_cpu_ids);
485 
486 /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
487 void __init setup_nr_cpu_ids(void)
488 {
489 	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
490 }
491 
492 void __weak smp_announce(void)
493 {
494 	printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
495 }
496 
497 /* Called by boot processor to activate the rest. */
498 void __init smp_init(void)
499 {
500 	unsigned int cpu;
501 
502 	idle_threads_init();
503 
504 	/* FIXME: This should be done in userspace --RR */
505 	for_each_present_cpu(cpu) {
506 		if (num_online_cpus() >= setup_max_cpus)
507 			break;
508 		if (!cpu_online(cpu))
509 			cpu_up(cpu);
510 	}
511 
512 	/* Any cleanup work */
513 	smp_announce();
514 	smp_cpus_done(setup_max_cpus);
515 }
516 
517 /*
518  * Call a function on all processors.  May be used during early boot while
519  * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
520  * of local_irq_disable/enable().
521  */
522 int on_each_cpu(void (*func) (void *info), void *info, int wait)
523 {
524 	unsigned long flags;
525 	int ret = 0;
526 
527 	preempt_disable();
528 	ret = smp_call_function(func, info, wait);
529 	local_irq_save(flags);
530 	func(info);
531 	local_irq_restore(flags);
532 	preempt_enable();
533 	return ret;
534 }
535 EXPORT_SYMBOL(on_each_cpu);
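/*
 * Illustrative usage sketch: bump_generation is a made-up callback name.
 * Unlike smp_call_function(), the local CPU also runs the callback, with
 * interrupts disabled.
 *
 *	on_each_cpu(bump_generation, NULL, 1);
 */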
536 
537 /**
538  * on_each_cpu_mask(): Run a function on processors specified by
539  * cpumask, which may include the local processor.
540  * @mask: The set of cpus to run on (only runs on online subset).
541  * @func: The function to run. This must be fast and non-blocking.
542  * @info: An arbitrary pointer to pass to the function.
543  * @wait: If true, wait (atomically) until function has completed
544  *        on other CPUs.
545  *
546  * If @wait is true, then returns once @func has returned.
547  *
548  * You must not call this function with disabled interrupts or from a
549  * hardware interrupt handler or from a bottom half handler.  The
550  * exception is that it may be used during early boot while
551  * early_boot_irqs_disabled is set.
552  */
553 void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
554 			void *info, bool wait)
555 {
556 	int cpu = get_cpu();
557 
558 	smp_call_function_many(mask, func, info, wait);
559 	if (cpumask_test_cpu(cpu, mask)) {
560 		unsigned long flags;
561 		local_irq_save(flags);
562 		func(info);
563 		local_irq_restore(flags);
564 	}
565 	put_cpu();
566 }
567 EXPORT_SYMBOL(on_each_cpu_mask);
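/*
 * Illustrative usage sketch: flush_node_state and nid are made-up names.
 * The callback runs on every online CPU of one NUMA node, including the
 * local CPU if it belongs to that node.
 *
 *	on_each_cpu_mask(cpumask_of_node(nid), flush_node_state, NULL, true);
 */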
568 
569 /*
570  * on_each_cpu_cond(): Call a function on each processor for which
571  * the supplied function cond_func returns true, optionally waiting
572  * for all the required CPUs to finish. This may include the local
573  * processor.
574  * @cond_func:	A callback function that is passed a cpu id and
575  *		the info parameter. The function is called
576  *		with preemption disabled. The function should
577  *		return a boolean value indicating whether to IPI
578  *		the specified CPU.
579  * @func:	The function to run on all applicable CPUs.
580  *		This must be fast and non-blocking.
581  * @info:	An arbitrary pointer to pass to both functions.
582  * @wait:	If true, wait (atomically) until function has
583  *		completed on other CPUs.
584  * @gfp_flags:	GFP flags to use when allocating the cpumask
585  *		used internally by the function.
586  *
587  * The function might sleep if the GFP flags indicate that a
588  * non-atomic allocation is allowed.
589  *
590  * Preemption is disabled to protect against CPUs going offline but not online.
591  * CPUs going online during the call will not be seen or sent an IPI.
592  *
593  * You must not call this function with disabled interrupts or
594  * from a hardware interrupt handler or from a bottom half handler.
595  */
596 void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
597 			smp_call_func_t func, void *info, bool wait,
598 			gfp_t gfp_flags)
599 {
600 	cpumask_var_t cpus;
601 	int cpu, ret;
602 
603 	might_sleep_if(gfp_flags & __GFP_WAIT);
604 
605 	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
606 		preempt_disable();
607 		for_each_online_cpu(cpu)
608 			if (cond_func(cpu, info))
609 				cpumask_set_cpu(cpu, cpus);
610 		on_each_cpu_mask(cpus, func, info, wait);
611 		preempt_enable();
612 		free_cpumask_var(cpus);
613 	} else {
614 		/*
615 		 * No free cpumask, bother. No matter, we'll
616 		 * just have to IPI them one by one.
617 		 */
618 		preempt_disable();
619 		for_each_online_cpu(cpu)
620 			if (cond_func(cpu, info)) {
621 				ret = smp_call_function_single(cpu, func,
622 								info, wait);
623 				WARN_ON_ONCE(ret);
624 			}
625 		preempt_enable();
626 	}
627 }
628 EXPORT_SYMBOL(on_each_cpu_cond);
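/*
 * Illustrative usage sketch: cpu_has_work(), drain_work() and the per-cpu
 * 'pending' counter are made-up names.  Only CPUs for which the predicate
 * returns true receive the IPI.
 *
 *	static DEFINE_PER_CPU(unsigned int, pending);
 *
 *	static bool cpu_has_work(int cpu, void *info)
 *	{
 *		return per_cpu(pending, cpu) != 0;
 *	}
 *
 *	on_each_cpu_cond(cpu_has_work, drain_work, NULL, true, GFP_KERNEL);
 */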
629 
630 static void do_nothing(void *unused)
631 {
632 }
633 
634 /**
635  * kick_all_cpus_sync - Force all cpus out of idle
636  *
637  * Used to synchronize the update of pm_idle function pointer. It's
638  * called after the pointer is updated and returns after the dummy
639  * callback function has been executed on all cpus. The execution of
640  * the function can only happen on the remote cpus after they have
641  * left the idle function which had been called via pm_idle function
642  * pointer. So it's guaranteed that nothing uses the previous pointer
643  * anymore.
644  */
645 void kick_all_cpus_sync(void)
646 {
647 	/* Make sure the change is visible before we kick the cpus */
648 	smp_mb();
649 	smp_call_function(do_nothing, NULL, 1);
650 }
651 EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
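/*
 * Illustrative usage sketch: idle_hook is a made-up function pointer.
 * After the store, kick_all_cpus_sync() guarantees that no CPU is still
 * executing the function the pointer used to reference.
 *
 *	idle_hook = new_idle_hook;
 *	kick_all_cpus_sync();
 */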
652