xref: /openbmc/linux/kernel/irq_work.c (revision c02cf5f8)
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <asm/processor.h>

/*
 * An entry can be in one of four states:
 *
 * free	     NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL
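
/*
 * In terms of the flag bits above: a claim takes the flags from 0, or from
 * BUSY (2), to PENDING | BUSY (3); irq_work_run() drops PENDING just before
 * invoking the callback, leaving BUSY (2); once the callback returns, BUSY
 * is cleared again (back to 0) unless somebody re-claimed the work in the
 * meantime.
 */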

static DEFINE_PER_CPU(struct llist_head, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start from our best-case assumption about the current flags, but
	 * only trust a flag value once it has been observed through a
	 * cmpxchg() result.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}
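
/*
 * Illustrative sketch (not part of this file): an architecture with a
 * self-interrupt facility would typically override the weak stub above
 * along these lines.  my_arch_send_self_ipi() and MY_ARCH_IRQ_WORK_VECTOR
 * are hypothetical names standing in for the arch-specific primitives;
 * the corresponding interrupt handler then calls irq_work_run().
 *
 *	void arch_irq_work_raise(void)
 *	{
 *		my_arch_send_self_ipi(MY_ARCH_IRQ_WORK_VECTOR);
 *	}
 */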

/*
 * Enqueue the irq_work @work unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
void irq_work_queue(struct irq_work *work)
{
	bool empty;

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();

	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
	/* If the list was empty, raise a self-interrupt to start processing. */
	if (empty)
		arch_irq_work_raise();

	preempt_enable();
}
EXPORT_SYMBOL_GPL(irq_work_queue);
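
/*
 * Usage sketch (illustrative, not part of this file): a caller initializes
 * the work item once with init_irq_work() from <linux/irq_work.h> and may
 * then queue it from NMI or hardirq context; the callback runs later from
 * hardirq context via irq_work_run().  my_callback and my_work are
 * hypothetical names made up for this example.
 *
 *	static void my_callback(struct irq_work *work)
 *	{
 *		pr_info("irq_work callback ran\n");
 *	}
 *
 *	static struct irq_work my_work;
 *
 *	init_irq_work(&my_work, my_callback);
 *	...
 *	irq_work_queue(&my_work);
 *
 * irq_work_queue() is the NMI-safe part; init_irq_work() should be done
 * once, before the work can be queued.
 */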

/*
 * Run the irq_work entries on this cpu. Must be called from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit; after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		xchg(&work->flags, IRQ_WORK_BUSY);
		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
	}
}
EXPORT_SYMBOL_GPL(irq_work_run);
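
/*
 * Illustrative sketch (not part of this file): on architectures where
 * arch_irq_work_raise() sends a self-interrupt, the matching handler
 * typically just wraps irq_work_run() in irq_enter()/irq_exit(); interrupts
 * are still disabled at that point and irq_enter() makes in_irq() true,
 * which satisfies the BUG_ON() checks above.  my_arch_irq_work_interrupt()
 * is a hypothetical handler name.
 *
 *	void my_arch_irq_work_interrupt(void)
 *	{
 *		irq_enter();
 *		irq_work_run();
 *		irq_exit();
 *	}
 *
 * Architectures relying on the weak arch_irq_work_raise() stub instead get
 * irq_work_run() called from the timer tick, as noted above.
 */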

/*
 * Synchronize against the irq_work @work, ensuring the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
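
/*
 * Usage note (illustrative): irq_work_sync() spins until the BUSY bit is
 * clear, so a caller that must tear down or reuse a work item, and that has
 * made sure it can no longer be re-queued, would typically do:
 *
 *	irq_work_sync(&my_work);
 *
 * after which my_work's callback is guaranteed not to be running anymore.
 * my_work is the hypothetical work item from the earlier example.
 */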