xref: /openbmc/linux/kernel/irq_work.c (revision c0e980a4)
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/processor.h>

/*
 * An entry can be in one of four states:
 *
 * free	     NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL
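/*
 * The numeric values in the state table above are combinations of these
 * bits: 3 is PENDING | BUSY (i.e. IRQ_WORK_FLAGS), 2 is BUSY alone.
 */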

static DEFINE_PER_CPU(struct llist_head, irq_work_list);
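/*
 * irq_work_list is a per-CPU lock-less list of pending entries; llist_add()
 * and llist_del_all() are cmpxchg based, which is what makes the enqueue
 * path NMI-safe as advertised in the header comment.
 */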

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start with our best guess at the current flags, but only trust
	 * the flag value returned by cmpxchg().
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
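		/*
		 * Someone else set PENDING first: the work is already
		 * queued (or about to be), so this claim fails.
		 */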
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}

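/*
 * Architectures that can raise a self-interrupt override this weak hook,
 * typically by sending a self-IPI whose handler calls irq_work_run().
 * Without an override, queued work is only noticed from the timer tick.
 */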
void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *work)
{
	bool empty;

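	/*
	 * Disable preemption so that the list we add to and the
	 * self-interrupt we may raise both target the same CPU.
	 */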
	preempt_disable();

	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
	/* The list was empty, raise self-interrupt to start processing. */
	if (empty)
		arch_irq_work_raise();

	preempt_enable();
}

/*
 * Enqueue the irq_work @entry, returns true on success, false when the
 * @entry was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
	if (!irq_work_claim(work)) {
		/*
		 * Already enqueued, can't do!
		 */
		return false;
	}

	__irq_work_queue(work);
	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
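/*
 * Illustrative usage sketch (not part of the original file), assuming the
 * init_irq_work() helper from <linux/irq_work.h>:
 *
 *	static void my_func(struct irq_work *work)
 *	{
 *		pr_info("deferred work running in hardirq context\n");
 *	}
 *
 *	static struct irq_work my_work;
 *
 * Once, at init time:
 *
 *	init_irq_work(&my_work, my_func);
 *
 * Later, from NMI or hardirq context:
 *
 *	irq_work_queue(&my_work);
 *
 * The callback then runs from irq_work_run(), either in the self-interrupt
 * raised by arch_irq_work_raise() or from the next timer tick.
 */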

bool irq_work_needs_cpu(void)
{
	struct llist_head *this_list;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return false;

	return true;
}
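/*
 * Note: irq_work_needs_cpu() above lets callers (e.g. the nohz tick-stop
 * path) keep a CPU from going fully idle while it still has irq_work
 * queued, which matters on architectures that rely on the tick to run it.
 */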

static void __irq_work_run(void)
{
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!irqs_disabled());

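	/*
	 * Atomically grab the whole list. Anything queued after this point
	 * sees an empty list again and raises a fresh self-interrupt.
	 */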
	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		xchg(&work->flags, IRQ_WORK_BUSY);
		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
	}
}

/*
 * Run the irq_work entries on this cpu. Must be called from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	BUG_ON(!in_irq());
	__irq_work_run();
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
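	/*
	 * Waiting with IRQs disabled is a bug: if the work is still queued
	 * on this CPU it can never run, and the BUSY bit never clears.
	 */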
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);

#ifdef CONFIG_HOTPLUG_CPU
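/*
 * When a CPU goes offline, flush any irq_work still queued on it from the
 * CPU_DYING notifier (which runs on that CPU under stop_machine), so that
 * no pending callbacks are lost.
 */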
static int irq_work_cpu_notify(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_DYING:
		/* Called from stop_machine */
		if (WARN_ON_ONCE(cpu != smp_processor_id()))
			break;
		__irq_work_run();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block cpu_notify;

static __init int irq_work_init_cpu_notifier(void)
{
	cpu_notify.notifier_call = irq_work_cpu_notify;
	cpu_notify.priority = 0;
	register_cpu_notifier(&cpu_notify);
	return 0;
}
device_initcall(irq_work_init_cpu_notifier);

#endif /* CONFIG_HOTPLUG_CPU */