/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>


static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it. Returns false
 * if the work is already claimed (IRQ_WORK_PENDING set).
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start with our best wish as a premise but only trust any
	 * flag value after a cmpxchg() result.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}
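/*
 * Illustrative sketch only, not part of this file: an architecture
 * with self-IPI support would override the weak stub above along
 * these lines, loosely modeled on x86. The exact vector name and
 * handler wiring are per-arch details assumed for the example:
 *
 *	void arch_irq_work_raise(void)
 *	{
 *		// Fire a self-IPI; the vector's handler invokes
 *		// irq_work_run() from hardirq context ASAP.
 *		apic->send_IPI_self(IRQ_WORK_VECTOR);
 *	}
 */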
#ifdef CONFIG_SMP
/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Arch remote IPI send/receive backends aren't NMI safe */
	WARN_ON_ONCE(in_nmi());

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue_on);
#endif

/* Enqueue the irq_work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();

	/* If the work is "lazy", handle it from the next tick, if any */
	if (work->flags & IRQ_WORK_LAZY) {
		if (llist_add(&work->llnode, &__get_cpu_var(lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->llnode, &__get_cpu_var(raised_list)))
			arch_irq_work_raise();
	}

	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

/*
 * Tell the caller (e.g. the nohz tick-stop path) whether this CPU
 * still has pending irq_work to run.
 */
bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = &__get_cpu_var(raised_list);
	lazy = &__get_cpu_var(lazy_list);
	if (llist_empty(raised) && llist_empty(lazy))
		return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}
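/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): contexts that cannot take locks or wake tasks, such as NMI
 * handlers, queue an irq_work instead and do the heavy lifting from
 * its callback, which runs later in hardirq context. The my_* names
 * below are assumptions for the example:
 *
 *	static void my_wakeup_func(struct irq_work *work)
 *	{
 *		wake_up(&my_waitq);		// safe here, not in NMI
 *	}
 *	static struct irq_work my_work;
 *
 *	// setup (once):	init_irq_work(&my_work, my_wakeup_func);
 *	// from NMI context:	irq_work_queue(&my_work);
 */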
static void irq_work_run_list(struct llist_head *list)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_node *llnode;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		/*
		 * Advance before clearing PENDING: the node may be
		 * reused as soon as the flag is cleared.
		 */
		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

/*
 * hotplug calls this through:
 * hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(&__get_cpu_var(raised_list));
	irq_work_run_list(&__get_cpu_var(lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Synchronize against the irq_work @work: ensure it is not currently
 * in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
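/*
 * Illustrative teardown sketch (hypothetical caller): the callback may
 * still be executing after PENDING is cleared, so busy-wait for
 * IRQ_WORK_BUSY to drop before freeing the containing object. Must be
 * called with IRQs enabled, per the WARN_ON_ONCE() above:
 *
 *	irq_work_sync(&obj->work);
 *	kfree(obj);
 */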