/*
 * linux/kernel/irq/spurious.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains spurious interrupt handling.
 */

#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>

static int irqfixup __read_mostly;

#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(unsigned long dummy);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);

/*
 * Recovery handler for misrouted interrupts.
 */
static int try_one_irq(int irq, struct irq_desc *desc)
{
	struct irqaction *action;
	int ok = 0, work = 0;

	spin_lock(&desc->lock);
	/* Already running on another processor */
	if (desc->status & IRQ_INPROGRESS) {
		/*
		 * Already running: If it is shared get the other
		 * CPU to go looking for our mystery interrupt too
		 */
		if (desc->action && (desc->action->flags & IRQF_SHARED))
			desc->status |= IRQ_PENDING;
		spin_unlock(&desc->lock);
		return ok;
	}
	/* Honour the normal IRQ locking */
	desc->status |= IRQ_INPROGRESS;
	action = desc->action;
	spin_unlock(&desc->lock);

	while (action) {
		/* Only shared IRQ handlers are safe to call */
		if (action->flags & IRQF_SHARED) {
			if (action->handler(irq, action->dev_id) ==
					IRQ_HANDLED)
				ok = 1;
		}
		action = action->next;
	}
	local_irq_disable();
	/* Now clean up the flags */
	spin_lock(&desc->lock);
	action = desc->action;

	/*
	 * While we were looking for a fixup someone queued a real
	 * IRQ clashing with our walk:
	 */
	while ((desc->status & IRQ_PENDING) && action) {
		/*
		 * Perform real IRQ processing for the IRQ we deferred
		 */
		work = 1;
		spin_unlock(&desc->lock);
		handle_IRQ_event(irq, action);
		spin_lock(&desc->lock);
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;
	/*
	 * If we did actual work for the real IRQ line we must let the
	 * IRQ controller clean up too
	 */
	if (work && desc->chip && desc->chip->end)
		desc->chip->end(irq);
	spin_unlock(&desc->lock);

	return ok;
}

static int misrouted_irq(int irq)
{
	struct irq_desc *desc;
	int i, ok = 0;

	for_each_irq_desc(i, desc) {
		if (!i)
			continue;

		if (i == irq)	/* Already tried */
			continue;

		if (try_one_irq(i, desc))
			ok = 1;
	}
	/* So the caller can adjust the irq error counts */
	return ok;
}

static void poll_spurious_irqs(unsigned long dummy)
{
	struct irq_desc *desc;
	int i;

	for_each_irq_desc(i, desc) {
		unsigned int status;

		if (!i)
			continue;

		/* Racy but it doesn't matter */
		status = desc->status;
		barrier();
		if (!(status & IRQ_SPURIOUS_DISABLED))
			continue;

		try_one_irq(i, desc);
	}

	mod_timer(&poll_spurious_irq_timer,
		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
 * and try to turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly
 *  functioning device sharing an IRQ with the failing one)
 *
 * Called under desc->lock
 */

static void
__report_bad_irq(unsigned int irq, struct irq_desc *desc,
		 irqreturn_t action_ret)
{
	struct irqaction *action;

	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared (try booting with "
				"the \"irqpoll\" option)\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");

	action = desc->action;
	while (action) {
		printk(KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)",
			(unsigned long)action->handler);
		printk("\n");
		action = action->next;
	}
}

static void
report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count > 0) {
		count--;
		__report_bad_irq(irq, desc, action_ret);
	}
}

static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
		  irqreturn_t action_ret)
{
	struct irqaction *action;

	if (!irqfixup)
		return 0;

	/* We didn't actually handle the IRQ - see if it was misrouted? */
	if (action_ret == IRQ_NONE)
		return 1;

	/*
	 * But for 'irqfixup == 2' we also do it for handled interrupts if
	 * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
	 * traditional PC timer interrupt.. Legacy)
	 */
	if (irqfixup < 2)
		return 0;

	if (!irq)
		return 1;

	/*
	 * Since we don't get the descriptor lock, "action" can
	 * change under us. We don't really care, but we don't
	 * want to follow a NULL pointer. So tell the compiler to
	 * just load it once by using a barrier.
	 */
	action = desc->action;
	barrier();
	return action && (action->flags & IRQF_IRQPOLL);
}

void note_interrupt(unsigned int irq, struct irq_desc *desc,
		    irqreturn_t action_ret)
{
	if (unlikely(action_ret != IRQ_HANDLED)) {
		/*
		 * If we are seeing only the odd spurious IRQ caused by
		 * bus asynchronicity then don't eventually trigger an error,
		 * otherwise the counter becomes a doomsday timer for otherwise
		 * working systems
		 */
		if (time_after(jiffies, desc->last_unhandled + HZ/10))
			desc->irqs_unhandled = 1;
		else
			desc->irqs_unhandled++;
		desc->last_unhandled = jiffies;
		if (unlikely(action_ret != IRQ_NONE))
			report_bad_irq(irq, desc, action_ret);
	}

	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
		int ok = misrouted_irq(irq);
		if (action_ret == IRQ_NONE)
			desc->irqs_unhandled -= ok;
	}

	desc->irq_count++;
	if (likely(desc->irq_count < 100000))
		return;

	desc->irq_count = 0;
	if (unlikely(desc->irqs_unhandled > 99900)) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
		desc->depth++;
		desc->chip->disable(irq);

		mod_timer(&poll_spurious_irq_timer,
			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
	}
	desc->irqs_unhandled = 0;
}

int noirqdebug __read_mostly;

int noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk(KERN_INFO "IRQ lockup detection disabled\n");

	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");

static int __init irqfixup_setup(char *str)
{
	irqfixup = 1;
	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
	printk(KERN_WARNING "This may impact system performance.\n");

	return 1;
}

__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);
MODULE_PARM_DESC(irqfixup, "0: No fixup, 1: irqfixup mode, 2: irqpoll mode");

static int __init irqpoll_setup(char *str)
{
	irqfixup = 2;
	printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
		"enabled\n");
	printk(KERN_WARNING "This may significantly impact system "
		"performance\n");
	return 1;
}

__setup("irqpoll", irqpoll_setup);
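
/*
 * Illustrative sketch: try_one_irq() and poll_spurious_irqs() above only
 * invoke handlers registered with IRQF_SHARED, and try_misrouted_irq() with
 * irqfixup == 2 additionally looks for IRQF_IRQPOLL. A driver that wants to
 * cooperate with "irqfixup"/"irqpoll" would therefore register its handler
 * roughly as below. The struct, helpers and IRQ number here are hypothetical
 * and exist only to show the registration pattern; the block is not built.
 */
#if 0	/* example only */
static irqreturn_t example_dev_interrupt(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;

	/*
	 * A shared/polled handler must report honestly: return IRQ_NONE
	 * when the device did not raise the interrupt, so the unhandled-IRQ
	 * accounting in note_interrupt() stays accurate.
	 */
	if (!example_dev_irq_pending(dev))
		return IRQ_NONE;

	example_dev_ack_irq(dev);
	return IRQ_HANDLED;
}

static int example_dev_request_irq(struct example_dev *dev)
{
	/*
	 * IRQF_SHARED allows the polling code to call this handler;
	 * IRQF_IRQPOLL opts it in to polling of handled interrupts
	 * when booted with irqfixup=2 / irqpoll.
	 */
	return request_irq(dev->irq, example_dev_interrupt,
			   IRQF_SHARED | IRQF_IRQPOLL, "example_dev", dev);
}
#endif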