/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/hardirq.h>

/*
 * An entry can be in one of four states:
 *
 * free      NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL

/* Per-cpu lock-less list of pending entries; llist push is NMI-safe. */
static DEFINE_PER_CPU(struct llist_head, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 *
 * Returns false when @work is already PENDING (queued but its callback has
 * not yet started); otherwise atomically sets both PENDING and BUSY via
 * cmpxchg() and returns true.  A BUSY-only entry (callback currently
 * executing) may legitimately be re-claimed here, per the state table above.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, nflags;

	for (;;) {
		/* Re-read flags on every iteration so a lost race retries
		 * against the current state rather than a stale snapshot. */
		flags = work->flags;
		if (flags & IRQ_WORK_PENDING)
			return false;
		nflags = flags | IRQ_WORK_FLAGS;
		if (cmpxchg(&work->flags, flags, nflags) == flags)
			break;
		cpu_relax();
	}

	return true;
}

/*
 * Weak default; architectures override this to raise a self-interrupt
 * that will invoke irq_work_run() promptly.
 */
void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *work)
{
	bool empty;

	/* Pin to this CPU so the raise below targets the list we pushed to. */
	preempt_disable();

	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
	/* The list was empty, raise self-interrupt to start processing. */
	if (empty)
		arch_irq_work_raise();

	preempt_enable();
}

/*
 * Enqueue the irq_work @entry, returns true on success, failure when the
 * @entry was already enqueued by someone else.
75e360adbeSPeter Zijlstra * 76e360adbeSPeter Zijlstra * Can be re-enqueued while the callback is still in progress. 77e360adbeSPeter Zijlstra */ 7838aaf809SHuang Ying bool irq_work_queue(struct irq_work *work) 79e360adbeSPeter Zijlstra { 8038aaf809SHuang Ying if (!irq_work_claim(work)) { 81e360adbeSPeter Zijlstra /* 82e360adbeSPeter Zijlstra * Already enqueued, can't do! 83e360adbeSPeter Zijlstra */ 84e360adbeSPeter Zijlstra return false; 85e360adbeSPeter Zijlstra } 86e360adbeSPeter Zijlstra 8738aaf809SHuang Ying __irq_work_queue(work); 88e360adbeSPeter Zijlstra return true; 89e360adbeSPeter Zijlstra } 90e360adbeSPeter Zijlstra EXPORT_SYMBOL_GPL(irq_work_queue); 91e360adbeSPeter Zijlstra 92e360adbeSPeter Zijlstra /* 93e360adbeSPeter Zijlstra * Run the irq_work entries on this cpu. Requires to be ran from hardirq 94e360adbeSPeter Zijlstra * context with local IRQs disabled. 95e360adbeSPeter Zijlstra */ 96e360adbeSPeter Zijlstra void irq_work_run(void) 97e360adbeSPeter Zijlstra { 9838aaf809SHuang Ying struct irq_work *work; 9938aaf809SHuang Ying struct llist_head *this_list; 10038aaf809SHuang Ying struct llist_node *llnode; 101e360adbeSPeter Zijlstra 10238aaf809SHuang Ying this_list = &__get_cpu_var(irq_work_list); 10338aaf809SHuang Ying if (llist_empty(this_list)) 104e360adbeSPeter Zijlstra return; 105e360adbeSPeter Zijlstra 106e360adbeSPeter Zijlstra BUG_ON(!in_irq()); 107e360adbeSPeter Zijlstra BUG_ON(!irqs_disabled()); 108e360adbeSPeter Zijlstra 10938aaf809SHuang Ying llnode = llist_del_all(this_list); 11038aaf809SHuang Ying while (llnode != NULL) { 11138aaf809SHuang Ying work = llist_entry(llnode, struct irq_work, llnode); 11220b87691SChristoph Lameter 11338aaf809SHuang Ying llnode = llnode->next; 114e360adbeSPeter Zijlstra 115e360adbeSPeter Zijlstra /* 11638aaf809SHuang Ying * Clear the PENDING bit, after this point the @work 117e360adbeSPeter Zijlstra * can be re-used. 
118e360adbeSPeter Zijlstra */ 11938aaf809SHuang Ying work->flags = IRQ_WORK_BUSY; 12038aaf809SHuang Ying work->func(work); 121e360adbeSPeter Zijlstra /* 122e360adbeSPeter Zijlstra * Clear the BUSY bit and return to the free state if 123e360adbeSPeter Zijlstra * no-one else claimed it meanwhile. 124e360adbeSPeter Zijlstra */ 12538aaf809SHuang Ying (void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0); 126e360adbeSPeter Zijlstra } 127e360adbeSPeter Zijlstra } 128e360adbeSPeter Zijlstra EXPORT_SYMBOL_GPL(irq_work_run); 129e360adbeSPeter Zijlstra 130e360adbeSPeter Zijlstra /* 131e360adbeSPeter Zijlstra * Synchronize against the irq_work @entry, ensures the entry is not 132e360adbeSPeter Zijlstra * currently in use. 133e360adbeSPeter Zijlstra */ 13438aaf809SHuang Ying void irq_work_sync(struct irq_work *work) 135e360adbeSPeter Zijlstra { 136e360adbeSPeter Zijlstra WARN_ON_ONCE(irqs_disabled()); 137e360adbeSPeter Zijlstra 13838aaf809SHuang Ying while (work->flags & IRQ_WORK_BUSY) 139e360adbeSPeter Zijlstra cpu_relax(); 140e360adbeSPeter Zijlstra } 141e360adbeSPeter Zijlstra EXPORT_SYMBOL_GPL(irq_work_sync); 142