/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>


static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start with our best wish as a premise but only trust any
	 * flag value after cmpxchg() result.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}
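/*
 * Illustrative note (added here, not part of the original source): with
 * the flag layout in <linux/irq_work.h> of this era, IRQ_WORK_PENDING is
 * bit 0, IRQ_WORK_BUSY is bit 1 and IRQ_WORK_FLAGS is both together, so
 * the claim loop above walks a work item through these transitions:
 *
 *	0 (free)		-> cmpxchg succeeds -> PENDING | BUSY
 *	BUSY (callback running)	-> cmpxchg succeeds -> PENDING | BUSY
 *	PENDING | BUSY (queued)	-> claim refused, return false
 *
 * Only states with PENDING set are refused, which is what permits
 * re-enqueueing a work whose callback is still in progress; other bits
 * such as IRQ_WORK_LAZY are carried through untouched by flags | 3.
 */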
void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

#ifdef CONFIG_SMP
/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Arch remote IPI send/receive backends aren't NMI-safe */
	WARN_ON_ONCE(in_nmi());

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue_on);
#endif

/* Enqueue the irq_work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();

	/* If the work is "lazy", handle it from the next tick, if any */
	if (work->flags & IRQ_WORK_LAZY) {
		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
			arch_irq_work_raise();
	}

	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

static void irq_work_run_list(struct llist_head *list)
{
	struct irq_work *work, *tmp;
	struct llist_node *llnode;
	unsigned long flags;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	llist_for_each_entry_safe(work, tmp, llnode, llnode) {
		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of running the function.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

/*
 * hotplug calls this through:
 * hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @work, ensuring the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
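/*
 * Usage sketch (added for illustration, not part of the original file;
 * my_func and my_work are hypothetical names). A caller embeds a
 * struct irq_work, initializes it with init_irq_work() from
 * <linux/irq_work.h> or a static initializer, and may then queue it from
 * any context, including NMI; the callback runs later in hardirq context:
 *
 *	static void my_func(struct irq_work *work)
 *	{
 *		pr_info("irq_work callback ran in hardirq context\n");
 *	}
 *
 *	static struct irq_work my_work = { .func = my_func };
 *
 *	From NMI or any other atomic context:
 *		irq_work_queue(&my_work);
 *
 *	Later, from sleepable context, wait for the callback to finish:
 *		irq_work_sync(&my_work);
 */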