/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/processor.h>


static DEFINE_PER_CPU(struct llist_head, irq_work_list);
static DEFINE_PER_CPU(int, irq_work_raised);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
        unsigned long flags, oflags, nflags;

        /*
         * Start from our optimistic guess, but only trust a flag value
         * once cmpxchg() has confirmed it.
         */
        flags = work->flags & ~IRQ_WORK_PENDING;
        for (;;) {
                nflags = flags | IRQ_WORK_FLAGS;
                oflags = cmpxchg(&work->flags, flags, nflags);
                if (oflags == flags)
                        break;
                if (oflags & IRQ_WORK_PENDING)
                        return false;
                flags = oflags;
                cpu_relax();
        }

        return true;
}

void __weak arch_irq_work_raise(void)
{
        /*
         * Lame architectures will get the timer tick callback
         */
}

/*
 * Enqueue the irq_work @work unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
void irq_work_queue(struct irq_work *work)
{
        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return;

        /* Queue the entry and raise the IPI if needed. */
        preempt_disable();

        llist_add(&work->llnode, &__get_cpu_var(irq_work_list));

        /*
         * If the work is not "lazy" or the tick is stopped, raise the irq
         * work interrupt (if supported by the arch), otherwise, just wait
         * for the next tick.
         */
        if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
                if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
                        arch_irq_work_raise();
        }

        preempt_enable();
}
EXPORT_SYMBOL_GPL(irq_work_queue);

bool irq_work_needs_cpu(void)
{
        struct llist_head *this_list;

        this_list = &__get_cpu_var(irq_work_list);
        if (llist_empty(this_list))
                return false;

        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

        return true;
}

static void __irq_work_run(void)
{
        unsigned long flags;
        struct irq_work *work;
        struct llist_head *this_list;
        struct llist_node *llnode;

        /*
         * Reset the "raised" state right before we check the list because
         * an NMI may enqueue after we find the list empty from the runner.
         */
        __this_cpu_write(irq_work_raised, 0);
        barrier();

        this_list = &__get_cpu_var(irq_work_list);
        if (llist_empty(this_list))
                return;

        BUG_ON(!irqs_disabled());

        llnode = llist_del_all(this_list);
        while (llnode != NULL) {
                work = llist_entry(llnode, struct irq_work, llnode);

                llnode = llist_next(llnode);

                /*
                 * Clear the PENDING bit; after this point the @work
                 * can be re-used.
                 * Make it immediately visible so that other CPUs trying
                 * to claim that work don't rely on us to handle their data
                 * while we are in the middle of the func.
                 */
                flags = work->flags & ~IRQ_WORK_PENDING;
                xchg(&work->flags, flags);

                work->func(work);
                /*
                 * Clear the BUSY bit and return to the free state if
                 * no-one else claimed it meanwhile.
                 */
                (void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
        }
}

/*
 * Run the irq_work entries on this cpu. Must be called from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
        BUG_ON(!in_irq());
        __irq_work_run();
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Synchronize against the irq_work @work: ensure the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
        WARN_ON_ONCE(irqs_disabled());

        while (work->flags & IRQ_WORK_BUSY)
                cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);

#ifdef CONFIG_HOTPLUG_CPU
static int irq_work_cpu_notify(struct notifier_block *self,
                               unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_DYING:
                /* Called from stop_machine */
                if (WARN_ON_ONCE(cpu != smp_processor_id()))
                        break;
                __irq_work_run();
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block cpu_notify;

static __init int irq_work_init_cpu_notifier(void)
{
        cpu_notify.notifier_call = irq_work_cpu_notify;
        cpu_notify.priority = 0;
        register_cpu_notifier(&cpu_notify);
        return 0;
}
device_initcall(irq_work_init_cpu_notifier);

#endif /* CONFIG_HOTPLUG_CPU */
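
/*
 * Illustrative usage sketch, not part of this file and kept compiled out.
 * It shows how a caller might drive the API above: initialise an irq_work,
 * queue it from NMI/hardirq context, and synchronize before teardown.  The
 * names my_work, my_func, my_nmi_handler and my_teardown are hypothetical;
 * init_irq_work() is assumed to be provided by <linux/irq_work.h>, which is
 * already included above.
 */
#if 0
static struct irq_work my_work;

/* Runs later in hardirq context, once the self-IPI (or the tick) fires. */
static void my_func(struct irq_work *work)
{
        pr_info("irq_work callback ran\n");
}

static void my_setup(void)
{
        init_irq_work(&my_work, my_func);
}

/*
 * NMI-safe producer: irq_work_claim() uses cmpxchg() and llist_add() is
 * lock-free, so queueing is legal even from NMI context.  A work item that
 * is already pending is simply not queued twice.
 */
static void my_nmi_handler(void)
{
        irq_work_queue(&my_work);
}

/*
 * Before freeing the work item or unloading its module, wait until the
 * callback is no longer running.  Must be called with IRQs enabled, as
 * irq_work_sync() warns otherwise.
 */
static void my_teardown(void)
{
        irq_work_sync(&my_work);
}
#endif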