xref: /openbmc/linux/kernel/smp.c (revision 49531192)
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>

static struct {
	struct list_head	queue;
	raw_spinlock_t		lock;
} call_function __cacheline_aligned_in_smp =
	{
		.queue		= LIST_HEAD_INIT(call_function.queue),
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
	};

enum {
	CSD_FLAG_LOCK		= 0x01,
};

struct call_function_data {
	struct call_single_data	csd;
	atomic_t		refs;
	cpumask_var_t		cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

struct call_single_queue {
	struct list_head	list;
	raw_spinlock_t		lock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		break;
#endif
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};

static int __cpuinit init_call_single_data(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		raw_spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);

	return 0;
}
early_initcall(init_call_single_data);

/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources.
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *data)
{
	while (data->flags & CSD_FLAG_LOCK)
		cpu_relax();
}

static void csd_lock(struct call_single_data *data)
{
	csd_lock_wait(data);
	data->flags = CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_mb();
}

static void csd_unlock(struct call_single_data *data)
{
	WARN_ON(!(data->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_mb();

	data->flags &= ~CSD_FLAG_LOCK;
}

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static
void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	unsigned long flags;
	int ipi;

	raw_spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	raw_spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * The list addition should be visible to the IPI handler, which
	 * locks the list to pull the entry off it, by the time the IPI
	 * is taken; the normal cache coherency rules implied by the
	 * spinlock guarantee that.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol on an architecture, sufficient synchronisation should
	 * be added to arch code to make IPIs appear to obey cache
	 * coherency WRT locking and barrier primitives. Generic code
	 * isn't really equipped to do the right thing...
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(data);
}

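/*
 * Illustrative sketch (not part of the original file): how one asynchronous
 * use of a csd is bracketed by the CSD_FLAG_LOCK protocol described above.
 * The caller claims the csd with csd_lock() before filling in ->func/->info;
 * with wait == 0 the csd is handed off, and the IPI handler drops the lock
 * bit via csd_unlock() once ->func has run, making the slot reusable.
 * example_fn() and example_exec_async() are hypothetical names.
 */
static void example_fn(void *info)
{
	/* runs on the target CPU, in interrupt context */
}

static void __maybe_unused example_exec_async(struct call_single_data *csd,
					      int target_cpu)
{
	csd_lock(csd);			/* wait for any previous user, then claim it */
	csd->func = example_fn;
	csd->info = NULL;
	generic_exec_single(target_cpu, csd, 0);	/* unlocked later by the IPI handler */
}
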
/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = smp_processor_id();

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(cpu));

	/*
	 * Ensure entry is visible on call_function_queue after we have
	 * entered the IPI. See comment in smp_call_function_many.
	 * If we don't have this, then we may miss an entry on the list
	 * and never get another IPI to process it.
	 */
	smp_mb();

	/*
	 * It's ok to use list_for_each_entry_rcu() here even though we may
	 * delete 'data', since list_del_rcu() doesn't clear ->next
	 */
	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
		int refs;

		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
			continue;

		data->csd.func(data->csd.info);

		refs = atomic_dec_return(&data->refs);
		WARN_ON(refs < 0);
		if (!refs) {
			raw_spin_lock(&call_function.lock);
			list_del_rcu(&data->csd.list);
			raw_spin_unlock(&call_function.lock);
		}

		if (refs)
			continue;

		csd_unlock(&data->csd);
	}
}

/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	unsigned int data_flags;
	LIST_HEAD(list);

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(smp_processor_id()));

	raw_spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	raw_spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *data;

		data = list_entry(list.next, struct call_single_data, list);
		list_del(&data->list);

		/*
		 * 'data' can be invalid after data->func() below has run if
		 * flags == 0 (when called through generic_exec_single()),
		 * so save the flags before making the call:
		 */
		data_flags = data->flags;

		data->func(data->info);

		/*
		 * Unlocked CSDs are valid through generic_exec_single():
		 */
		if (data_flags & CSD_FLAG_LOCK)
			csd_unlock(data);
	}
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/**
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	struct call_single_data d = {
		.flags = 0,
	};
	unsigned long flags;
	int this_cpu;
	int err = 0;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send an smp call function interrupt to this CPU and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else {
		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
			struct call_single_data *data = &d;

			if (!wait)
				data = &__get_cpu_var(csd_data);

			csd_lock(data);

			data->func = func;
			data->info = info;
			generic_exec_single(cpu, data, wait);
		} else {
			err = -ENXIO;	/* CPU not online */
		}
	}

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
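
/*
 * Minimal usage sketch (illustrative only, not part of the original file):
 * pass arguments and collect a result through the 'info' pointer. Because
 * wait is non-zero, the on-stack 'args' stays valid until the remote call
 * has completed. example_add(), example_add_on() and struct example_args
 * are hypothetical names.
 */
struct example_args {
	int a, b;
	int sum;
};

static void example_add(void *info)
{
	struct example_args *args = info;

	args->sum = args->a + args->b;	/* runs on the target CPU */
}

static int __maybe_unused example_add_on(int target_cpu, int a, int b, int *sum)
{
	struct example_args args = { .a = a, .b = b };
	int err;

	err = smp_call_function_single(target_cpu, example_add, &args, 1);
	if (!err)
		*sum = args.sum;
	return err;
}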

/**
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 * Note that @wait will be implicitly turned on in case of allocation failures,
 * since we fall back to on-stack allocation.
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  void (*func)(void *info), void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
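
/*
 * Illustrative sketch (not part of the original file): let the selection
 * preference above pick the cheapest CPU out of a caller-supplied mask and
 * report, via 'info', which CPU actually serviced the call. The names
 * example_service() and example_service_near() are hypothetical.
 */
static void example_service(void *info)
{
	int *serviced_on = info;

	*serviced_on = smp_processor_id();	/* whichever CPU was selected */
}

static int __maybe_unused example_service_near(const struct cpumask *allowed)
{
	int serviced_on = -1;
	int err;

	/* wait = 1 so 'serviced_on' is stable when we return */
	err = smp_call_function_any(allowed, example_service, &serviced_on, 1);
	return err ? err : serviced_on;
}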

/**
 * __smp_call_function_single(): Run a function on another CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 * @wait: If true, wait until the function has completed on the specified CPU.
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data,
				int wait)
{
	csd_lock(data);

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send an smp call function interrupt to this CPU and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
		     && !oops_in_progress);

	generic_exec_single(cpu, data, wait);
}
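
/*
 * Illustrative sketch (not part of the original file): embedding the
 * call_single_data inside a caller-owned object, as the comment above
 * suggests. With wait == 0 the object must stay alive until the IPI
 * handler has run ->func and unlocked the csd. All names here
 * (struct example_work, example_work_fn, example_queue_on) are
 * hypothetical.
 */
struct example_work {
	struct call_single_data	csd;
	int			payload;
};

static void example_work_fn(void *info)
{
	struct example_work *w = info;

	/* consume w->payload on the target CPU */
	(void)w->payload;
}

static void __maybe_unused example_queue_on(int target_cpu,
					    struct example_work *w)
{
	w->csd.flags = 0;		/* start unlocked; csd_lock() is taken inside */
	w->csd.func = example_work_fn;
	w->csd.info = w;
	__smp_call_function_single(target_cpu, &w->csd, 0);
}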

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    void (*func)(void *), void *info, bool wait)
{
	struct call_function_data *data;
	unsigned long flags;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send an smp call function interrupt to this CPU and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/* Pick the first online CPU in @mask, skipping this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	data = &__get_cpu_var(cfd_data);
	csd_lock(&data->csd);

	data->csd.func = func;
	data->csd.info = info;
	cpumask_and(data->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, data->cpumask);
	atomic_set(&data->refs, cpumask_weight(data->cpumask));

	raw_spin_lock_irqsave(&call_function.lock, flags);
	/*
	 * Place entry at the _HEAD_ of the list, so that any cpu still
	 * observing the entry in generic_smp_call_function_interrupt()
	 * will not miss any other list entries:
	 */
	list_add_rcu(&data->csd.list, &call_function.queue);
	raw_spin_unlock_irqrestore(&call_function.lock, flags);

	/*
	 * Make the list addition visible before sending the ipi.
	 * (IPIs must obey or appear to obey normal Linux cache
	 * coherency rules -- see comment in generic_exec_single).
	 */
	smp_mb();

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(data->cpumask);

	/* Optionally wait for the CPUs to complete */
	if (wait)
		csd_lock_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);
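
/*
 * Illustrative sketch (not part of the original file): run a flush on every
 * online CPU named in a mask. smp_call_function_many() deliberately skips
 * the calling CPU, so the caller runs the function locally if needed, with
 * preemption disabled across the whole sequence as required above.
 * example_flush() and example_flush_mask() are hypothetical names.
 */
static void example_flush(void *info)
{
	/* invalidate this CPU's private state; 'info' is unused here */
}

static void __maybe_unused example_flush_mask(const struct cpumask *cpus)
{
	preempt_disable();
	smp_call_function_many(cpus, example_flush, NULL, true);
	if (cpumask_test_cpu(smp_processor_id(), cpus))
		example_flush(NULL);	/* _many skipped us; run it locally */
	preempt_enable();
}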

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *), void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);
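
/*
 * Minimal sketch (illustrative only, not part of the original file): count
 * how many other online CPUs answer a broadcast. With wait = 1 the on-stack
 * counter is safe to read once the call returns. example_count() and
 * example_broadcast() are hypothetical names.
 */
static void example_count(void *info)
{
	atomic_t *responders = info;

	atomic_inc(responders);
}

static int __maybe_unused example_broadcast(void)
{
	atomic_t responders = ATOMIC_INIT(0);

	smp_call_function(example_count, &responders, 1);
	return atomic_read(&responders);	/* every other online CPU ran it */
}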

void ipi_call_lock(void)
{
	raw_spin_lock(&call_function.lock);
}

void ipi_call_unlock(void)
{
	raw_spin_unlock(&call_function.lock);
}

void ipi_call_lock_irq(void)
{
	raw_spin_lock_irq(&call_function.lock);
}

void ipi_call_unlock_irq(void)
{
	raw_spin_unlock_irq(&call_function.lock);
}
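
/*
 * Illustrative sketch (not part of the original file): the ipi_call_lock*
 * helpers above exist for architecture SMP bringup code, which holds
 * call_function.lock while marking a CPU online so the new CPU cannot be
 * targeted by a function-call IPI halfway through coming up. This is only
 * a rough sketch of that usage; the function name is hypothetical and real
 * arch code performs additional, architecture-specific steps.
 */
static void __maybe_unused example_mark_self_online(void)
{
	ipi_call_lock_irq();
	set_cpu_online(smp_processor_id(), true);
	ipi_call_unlock_irq();
}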