/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQ_WORK_H
#define _LINUX_IRQ_WORK_H

#include <linux/llist.h>

/*
 * An irq_work entry can be in one of four states, encoded in ->flags
 * (shown as "llnode.next value, flags value -> reachable states"):
 *
 * free      NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 *
 * (3 == PENDING|BUSY, 2 == BUSY; see the flag definitions below.)
 */

/*
 * flags share CSD_FLAG_ space: struct irq_work overlays the generic
 * smp call_single_data layout, so these bits must not collide with
 * the CSD_FLAG_* values used by the smp cross-call code.
 */

#define IRQ_WORK_PENDING	BIT(0)
#define IRQ_WORK_BUSY		BIT(1)

/* Doesn't want IPI, wait for tick: */
#define IRQ_WORK_LAZY		BIT(2)
/* Run hard IRQ context, even on RT */
#define IRQ_WORK_HARD_IRQ	BIT(3)

/* Both bits set at once: the "claimed" state in the diagram above. */
#define IRQ_WORK_CLAIMED	(IRQ_WORK_PENDING | IRQ_WORK_BUSY)

/*
 * structure shares layout with single_call_data_t.
 */
struct irq_work {
	struct llist_node llnode;	/* link on the per-CPU pending list */
	atomic_t flags;			/* IRQ_WORK_* state bits, see above */
	void (*func)(struct irq_work *);/* callback run from IRQ context */
};

/**
 * init_irq_work - runtime initializer for an irq_work entry
 * @work: entry to initialize
 * @func: callback to invoke when the work runs
 *
 * Resets the flags to 0 (the "free" state) and installs @func.
 * Note: ->llnode is deliberately left untouched; it is only
 * meaningful while the entry is queued.
 */
static inline
void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
{
	atomic_set(&work->flags, 0);
	work->func = func;
}

/* Static (compile-time) counterpart of init_irq_work(). */
#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = {	\
	.flags = ATOMIC_INIT(0),				\
	.func  = (_f)						\
}

/*
 * Queueing interface — implemented in kernel/irq_work.c (not visible
 * here); presumably the bool return indicates whether the entry was
 * newly enqueued vs. already pending.
 */
bool irq_work_queue(struct irq_work *work);
bool irq_work_queue_on(struct irq_work *work, int cpu);

void irq_work_tick(void);
void irq_work_sync(struct irq_work *work);

#ifdef CONFIG_IRQ_WORK
#include <asm/irq_work.h>

void irq_work_run(void);
bool irq_work_needs_cpu(void);
void irq_work_single(void *arg);
#else
/* !CONFIG_IRQ_WORK: stub out the run-side hooks so callers need no ifdefs. */
static inline bool irq_work_needs_cpu(void) { return false; }
static inline void irq_work_run(void) { }
static inline void irq_work_single(void *arg) { }
#endif

#endif /* _LINUX_IRQ_WORK_H */