// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>


static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	int oflags;

	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->flags);
	/*
	 * If the work is already pending, no need to raise the IPI.
	 * The pairing atomic_fetch_andnot() in irq_work_run() makes sure
	 * everything we did before is visible.
	 */
	if (oflags & IRQ_WORK_PENDING)
		return false;
	return true;
}
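
/*
 * A sketch of the claim protocol, assuming the flag definitions in
 * <linux/irq_work.h> and <linux/smp.h>: IRQ_WORK_CLAIMED is
 * IRQ_WORK_PENDING | IRQ_WORK_BUSY, so the single atomic_fetch_or()
 * above marks the work both queued and in flight in one atomic RMW.
 * CSD_TYPE_IRQ_WORK tags work->llnode so that the generic
 * smp_call_function queue can tell irq_work entries apart from
 * ordinary CSDs and route them to irq_work_single().
 */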

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
	/* If the work is "lazy", handle it from next tick if any */
	if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
			arch_irq_work_raise();
	}
}

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();
	__irq_work_queue_local(work);
	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
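
/*
 * Example usage (an illustrative sketch only; the callback and work item
 * names are made up, DEFINE_IRQ_WORK() comes from <linux/irq_work.h>):
 *
 *	static void my_irq_work_func(struct irq_work *work)
 *	{
 *		pr_info("deferred out of NMI context\n");
 *	}
 *	static DEFINE_IRQ_WORK(my_work, my_irq_work_func);
 *
 * Then, from NMI or any other context:
 *
 *	irq_work_queue(&my_work);
 *
 * The callback runs later in hardirq context, either from the self-IPI
 * or from irq_work_tick() on architectures without a dedicated
 * irq_work interrupt.
 */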

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
	return irq_work_queue(work);

#else /* CONFIG_SMP: */
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	preempt_disable();
	if (cpu != smp_processor_id()) {
		/* Arch remote IPI send/receive backends aren't NMI safe */
		WARN_ON_ONCE(in_nmi());
		__smp_call_single_queue(cpu, &work->llnode);
	} else {
		__irq_work_queue_local(work);
	}
	preempt_enable();

	return true;
#endif /* CONFIG_SMP */
}


bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

void irq_work_single(void *arg)
{
	struct irq_work *work = arg;
	int flags;

	/*
	 * Clear the PENDING bit, after this point the @work
	 * can be re-used.
	 * Make it immediately visible so that other CPUs trying
	 * to claim that work don't rely on us to handle their data
	 * while we are in the middle of the func.
	 */
	flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->flags);

	lockdep_irq_work_enter(work);
	work->func(work);
	lockdep_irq_work_exit(work);
	/*
	 * Clear the BUSY bit and return to the free state if
	 * no-one else claimed it meanwhile.
	 */
	flags &= ~IRQ_WORK_PENDING;
	(void)atomic_cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
}
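
/*
 * Flag lifecycle, a summary derived from the code above:
 *
 *	free -> irq_work_claim():  PENDING | BUSY set in one atomic RMW
 *	     -> irq_work_single(): PENDING cleared before the callback,
 *	                           so the callback may safely re-queue
 *	     -> final cmpxchg():   BUSY cleared only if no-one re-claimed
 *	                           the work while the callback ran
 *
 * irq_work_sync() below relies on BUSY staying set for the whole
 * duration of the callback.
 */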

static void irq_work_run_list(struct llist_head *list)
{
	struct irq_work *work, *tmp;
	struct llist_node *llnode;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	llist_for_each_entry_safe(work, tmp, llnode, llnode)
		irq_work_single(work);
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @work: ensure the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	lockdep_assert_irqs_enabled();

	while (atomic_read(&work->flags) & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);