// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <asm/processor.h>
#include <linux/kasan.h>

/*
 * Per-CPU lock-less lists of pending work items:
 *  - raised_list: items run from the arch irq_work interrupt (hardirq).
 *  - lazy_list:   items deferred to the next timer tick, or — on
 *                 PREEMPT_RT — handed to the per-CPU irq_workd thread.
 */
static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);
/* Per-CPU kthread that processes lazy_list in preemptible context (RT). */
static DEFINE_PER_CPU(struct task_struct *, irq_workd);

/*
 * Wake this CPU's irq_workd thread, but only if there is lazy work
 * pending and the thread has already been created (it is registered
 * via an early_initcall, so very early callers may find it NULL).
 */
static void wake_irq_workd(void)
{
	struct task_struct *tsk = __this_cpu_read(irq_workd);

	if (!llist_empty(this_cpu_ptr(&lazy_list)) && tsk)
		wake_up_process(tsk);
}

#ifdef CONFIG_SMP
/* Callback for the per-CPU wakeup item below: just kick irq_workd. */
static void irq_work_wake(struct irq_work *entry)
{
	wake_irq_workd();
}

/*
 * Pre-claimed HARD work item used by irq_work_queue_on() to wake the
 * remote CPU's irq_workd thread after adding to its lazy_list.
 */
static DEFINE_PER_CPU(struct irq_work, irq_work_wakeup) =
	IRQ_WORK_INIT_HARD(irq_work_wake);
#endif

/* smpboot hook: run irq_workd whenever this CPU's lazy_list is non-empty. */
static int irq_workd_should_run(unsigned int cpu)
{
	return !llist_empty(this_cpu_ptr(&lazy_list));
}

/*
 * Claim the entry so that no one else will poke at it.
 *
 * Returns true if we took ownership (PENDING was clear), false if the
 * work was already pending on some list.
 */
static bool irq_work_claim(struct irq_work *work)
{
	int oflags;

	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
	/*
	 * If the work is already pending, no need to raise the IPI.
	 * The pairing smp_mb() in irq_work_single() makes sure
	 * everything we did before is visible.
	 */
	if (oflags & IRQ_WORK_PENDING)
		return false;
	return true;
}

/*
 * Architectures override this to raise a self-IPI that runs
 * irq_work_run() from hardirq context.
 */
void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
	struct llist_head *list;
	bool rt_lazy_work = false;
	bool lazy_work = false;
	int work_flags;

	work_flags = atomic_read(&work->node.a_flags);
	if (work_flags & IRQ_WORK_LAZY)
		lazy_work = true;
	else if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
		 !(work_flags & IRQ_WORK_HARD_IRQ))
		rt_lazy_work = true;

	/*
	 * Lazy items, and on PREEMPT_RT everything not explicitly marked
	 * HARD, go to lazy_list; the rest goes to raised_list and is run
	 * from the irq_work interrupt.
	 */
	if (lazy_work || rt_lazy_work)
		list = this_cpu_ptr(&lazy_list);
	else
		list = this_cpu_ptr(&raised_list);

	/* llist_add() returns false if the list was already non-empty. */
	if (!llist_add(&work->node.llist, list))
		return;

	/* If the work is "lazy", handle it from next tick if any */
	if (!lazy_work || tick_nohz_tick_stopped())
		arch_irq_work_raise();
}

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();
	__irq_work_queue_local(work);
	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
	return irq_work_queue(work);

#else /* CONFIG_SMP: */
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Record the enqueue stack for KASAN use-after-free reports. */
	kasan_record_aux_stack_noalloc(work);

	preempt_disable();
	if (cpu != smp_processor_id()) {
		/* Arch remote IPI send/receive backend aren't NMI safe */
		WARN_ON_ONCE(in_nmi());

		/*
		 * On PREEMPT_RT the items which are not marked as
		 * IRQ_WORK_HARD_IRQ are added to the lazy list and a HARD work
		 * item is used on the remote CPU to wake the thread.
		 */
		if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
		    !(atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ)) {

			/* List already non-empty: a wakeup is already in flight. */
			if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu)))
				goto out;

			/* Claim failed: wakeup item already pending, no IPI needed. */
			work = &per_cpu(irq_work_wakeup, cpu);
			if (!irq_work_claim(work))
				goto out;
		}

		__smp_call_single_queue(cpu, &work->node.llist);
	} else {
		__irq_work_queue_local(work);
	}
out:
	preempt_enable();

	return true;
#endif /* CONFIG_SMP */
}

/*
 * Called from the timer tick path: does this CPU still have irq_work
 * that needs the tick to run it?
 */
bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

	/*
	 * raised_list only needs the tick when the arch has no dedicated
	 * irq_work interrupt; lazy_list always relies on the tick.
	 */
	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

/* Run a single claimed work item; also used as a CSD callback. */
void irq_work_single(void *arg)
{
	struct irq_work *work = arg;
	int flags;

	/*
	 * Clear the PENDING bit, after this point the @work can be re-used.
	 * The PENDING bit acts as a lock, and we own it, so we can clear it
	 * without atomic ops.
	 */
	flags = atomic_read(&work->node.a_flags);
	flags &= ~IRQ_WORK_PENDING;
	atomic_set(&work->node.a_flags, flags);

	/*
	 * See irq_work_claim().
	 */
	smp_mb();

	lockdep_irq_work_enter(flags);
	work->func(work);
	lockdep_irq_work_exit(flags);

	/*
	 * Clear the BUSY bit, if set, and return to the free state if no-one
	 * else claimed it meanwhile.
	 */
	(void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);

	/* Wake irq_work_sync() waiters that sleep instead of spinning. */
	if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
	    !arch_irq_work_has_interrupt())
		rcuwait_wake_up(&work->irqwait);
}

/* Detach @list atomically and run every item on it. */
static void irq_work_run_list(struct llist_head *list)
{
	struct irq_work *work, *tmp;
	struct llist_node *llnode;

	/*
	 * On PREEMPT_RT IRQ-work which is not marked as HARD will be processed
	 * in a per-CPU thread in preemptible context. Only the items which are
	 * marked as IRQ_WORK_HARD_IRQ will be processed in hardirq context.
	 */
	BUG_ON(!irqs_disabled() && !IS_ENABLED(CONFIG_PREEMPT_RT));

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	llist_for_each_entry_safe(work, tmp, llnode, node.llist)
		irq_work_single(work);
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	/* On RT the lazy list is drained by irq_workd, not in hardirq. */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		irq_work_run_list(this_cpu_ptr(&lazy_list));
	else
		wake_irq_workd();
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Timer-tick fallback: run raised work on archs without an irq_work
 * interrupt, and drain (or hand off) the lazy list.
 */
void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);

	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		irq_work_run_list(this_cpu_ptr(&lazy_list));
	else
		wake_irq_workd();
}

/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 *
 * May sleep; must not be called with IRQs disabled.
 */
void irq_work_sync(struct irq_work *work)
{
	lockdep_assert_irqs_enabled();
	might_sleep();

	/*
	 * When the work runs in task context (RT non-hard items) or the
	 * arch lacks an irq_work interrupt, busy-waiting could spin on a
	 * preempted worker — sleep on the rcuwait instead. The wakeup
	 * comes from irq_work_single().
	 */
	if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
	    !arch_irq_work_has_interrupt()) {
		rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work),
				   TASK_UNINTERRUPTIBLE);
		return;
	}

	/* Otherwise the callback runs in hardirq and finishes soon: spin. */
	while (irq_work_is_busy(work))
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);

/* irq_workd thread function: drain this CPU's lazy list (preemptible). */
static void run_irq_workd(unsigned int cpu)
{
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/* One-time per-thread setup: give irq_workd low FIFO RT priority. */
static void irq_workd_setup(unsigned int cpu)
{
	sched_set_fifo_low(current);
}

static struct smp_hotplug_thread irqwork_threads = {
	.store                  = &irq_workd,
	.setup                  = irq_workd_setup,
	.thread_should_run      = irq_workd_should_run,
	.thread_fn              = run_irq_workd,
	.thread_comm            = "irq_work/%u",
};

/* The irq_workd threads are only needed on PREEMPT_RT. */
static __init int irq_work_init_threads(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		BUG_ON(smpboot_register_percpu_thread(&irqwork_threads));
	return 0;
}
early_initcall(irq_work_init_threads);