xref: /openbmc/linux/include/linux/irq_work.h (revision 165f2d28)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQ_WORK_H
#define _LINUX_IRQ_WORK_H

#include <linux/bits.h>
#include <linux/llist.h>

/*
 * An entry can be in one of four states:
 *
 * free	     NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING	BIT(0)
#define IRQ_WORK_BUSY		BIT(1)

/* Doesn't want IPI, wait for tick: */
#define IRQ_WORK_LAZY		BIT(2)
/* Run in hard IRQ context, even on RT */
#define IRQ_WORK_HARD_IRQ	BIT(3)

#define IRQ_WORK_CLAIMED	(IRQ_WORK_PENDING | IRQ_WORK_BUSY)

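/*
 * IRQ_WORK_CLAIMED is the "3" in the state table above (PENDING | BUSY).
 * A sketch of how enqueueing claims an entry, modeled on irq_work_claim()
 * in kernel/irq_work.c (illustrative only; the authoritative copy lives
 * there).  If PENDING was already set, the entry is already queued and the
 * claim fails:
 *
 *	static bool example_claim(struct irq_work *work)
 *	{
 *		int oflags = atomic_fetch_or(IRQ_WORK_CLAIMED, &work->flags);
 *
 *		return !(oflags & IRQ_WORK_PENDING);
 *	}
 */
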
struct irq_work {
	atomic_t flags;			/* IRQ_WORK_* state bits */
	struct llist_node llnode;	/* entry on a per-CPU work list */
	void (*func)(struct irq_work *);
};

static inline
void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
{
	atomic_set(&work->flags, 0);
	work->func = func;
}

#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = {	\
		.flags = ATOMIC_INIT(0),			\
		.func  = (_f)					\
}

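/*
 * Usage sketch (illustrative, not part of this header).  The callback gets
 * the irq_work pointer back, so the item is typically embedded in a larger
 * object and recovered with container_of().  The names my_dev, my_dev_init
 * and my_irq_work_fn below are hypothetical:
 *
 *	static void my_irq_work_fn(struct irq_work *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *
 *		wake_up(&dev->wait);
 *	}
 *
 *	static void my_dev_init(struct my_dev *dev)
 *	{
 *		init_irq_work(&dev->work, my_irq_work_fn);
 *	}
 *
 * A standalone, statically initialized item can use DEFINE_IRQ_WORK():
 *
 *	static DEFINE_IRQ_WORK(my_work, my_irq_work_fn);
 */
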
/* Claim @work and queue it on the current CPU; false if already pending. */
bool irq_work_queue(struct irq_work *work);
/* As above, but queue on @cpu; this may involve sending an IPI. */
bool irq_work_queue_on(struct irq_work *work, int cpu);

void irq_work_tick(void);
/* Wait until a pending or running callback of @work has finished. */
void irq_work_sync(struct irq_work *work);

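/*
 * Queueing sketch (illustrative): irq_work_queue() is safe to call from any
 * context, including NMI, and returns false if @work was already pending.
 * Before freeing an object that embeds an irq_work, irq_work_sync() waits
 * for a callback that may still be pending or running.  my_dev, my_nmi_event
 * and my_dev_destroy are hypothetical:
 *
 *	static void my_nmi_event(struct my_dev *dev)
 *	{
 *		irq_work_queue(&dev->work);
 *	}
 *
 *	static void my_dev_destroy(struct my_dev *dev)
 *	{
 *		irq_work_sync(&dev->work);
 *		kfree(dev);
 *	}
 */
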
#ifdef CONFIG_IRQ_WORK
#include <asm/irq_work.h>

void irq_work_run(void);
bool irq_work_needs_cpu(void);
#else
static inline bool irq_work_needs_cpu(void) { return false; }
static inline void irq_work_run(void) { }
#endif

#endif /* _LINUX_IRQ_WORK_H */