// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>

static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	int oflags;

	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED, &work->flags);
	/*
	 * If the work is already pending, no need to raise the IPI.
	 * The pairing atomic_xchg() in irq_work_run() makes sure
	 * everything we did before is visible.
	 */
	if (oflags & IRQ_WORK_PENDING)
		return false;
	return true;
}
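/*
 * Flag lifecycle, sketched from the bits used in this file (see
 * <linux/irq_work.h> for the authoritative definitions, where
 * IRQ_WORK_CLAIMED is PENDING | BUSY):
 *
 *	0		free, claimable
 *	PENDING | BUSY	claimed and queued
 *	BUSY		callback running; can already be claimed again
 *	0		free again, unless re-claimed meanwhile
 *
 * The claim is a single atomic_fetch_or(), with no locks and no
 * spinning, which is what makes enqueueing safe from NMI context.
 */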
void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
	/* If the work is "lazy", handle it from the next tick, if any */
	if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
			arch_irq_work_raise();
	}
}

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();
	__irq_work_queue_local(work);
	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
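/*
 * Illustrative usage sketch (the handler and work names below are made
 * up for the example; init_irq_work() is declared in <linux/irq_work.h>):
 *
 *	static void my_handler(struct irq_work *work)
 *	{
 *		... runs later in hardirq context, with IRQs disabled ...
 *	}
 *
 *	static struct irq_work my_work;
 *
 *	init_irq_work(&my_work, my_handler);
 *	irq_work_queue(&my_work);	// safe even from NMI context
 */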
/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
	return irq_work_queue(work);

#else /* CONFIG_SMP: */
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	preempt_disable();
	if (cpu != smp_processor_id()) {
		/* Arch remote IPI send/receive backends aren't NMI safe */
		WARN_ON_ONCE(in_nmi());
		if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
			arch_send_call_function_single_ipi(cpu);
	} else {
		__irq_work_queue_local(work);
	}
	preempt_enable();

	return true;
#endif /* CONFIG_SMP */
}

bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}
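/*
 * Run every claimed entry on @list with IRQs disabled. Raised work is
 * normally handled from the irq_work IPI (or, on architectures without
 * one, from the tick), lazy work from the next timer tick; see
 * irq_work_run() and irq_work_tick() below.
 */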
static void irq_work_run_list(struct llist_head *list)
{
	struct irq_work *work, *tmp;
	struct llist_node *llnode;
	int flags;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	llist_for_each_entry_safe(work, tmp, llnode, llnode) {
		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = atomic_read(&work->flags) & ~IRQ_WORK_PENDING;
		atomic_xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)atomic_cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @work, ensures the work is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	lockdep_assert_irqs_enabled();

	while (atomic_read(&work->flags) & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
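/*
 * Typical teardown sketch for an object embedding an irq_work ("my_obj"
 * and its fields are placeholders for the example, and it is assumed
 * nothing can re-queue the work anymore):
 *
 *	irq_work_sync(&my_obj->work);	// wait out a running callback
 *	kfree(my_obj);			// now safe to free
 *
 * Note that irq_work_sync() busy-waits, so it must be called from a
 * context with IRQs enabled (enforced by the lockdep assertion above).
 */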