/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:	the interrupt number
 * @desc:	description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void
handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
	print_irq_desc(irq, desc);
	kstat_this_cpu.irqs[irq]++;
	ack_bad_irq(irq);
}

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status = IRQ_DISABLED,
		.chip = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth = 1,
		.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
#ifdef CONFIG_SMP
		.affinity = CPU_MASK_ALL
#endif
	}
};
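/*
 * Illustrative sketch, not part of the generic code: a platform would
 * normally replace the no_irq_chip/handle_bad_irq defaults installed
 * above by binding each of its interrupt lines to a real chip and flow
 * handler from its init code.  All "example_*" names and the line
 * count of 16 are hypothetical; set_irq_chip_and_handler() and
 * handle_level_irq() are the generic-irq interfaces declared in
 * <linux/irq.h>.
 */
static void example_mask_irq(unsigned int irq)
{
	/* A real chip would write the controller's mask register here */
}

static void example_unmask_irq(unsigned int irq)
{
	/* ... and clear the mask bit again here */
}

static struct irq_chip example_chip = {
	.name	= "example",
	.ack	= example_mask_irq,
	.mask	= example_mask_irq,
	.unmask	= example_unmask_irq,
};

static void __init example_init_irqs(void)
{
	int irq;

	/* Bind each line to the example chip and the level flow handler */
	for (irq = 0; irq < 16; irq++)
		set_irq_chip_and_handler(irq, &example_chip, handle_level_irq);
}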
/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this itself.
 */
static void ack_bad(unsigned int irq)
{
	print_irq_desc(irq, irq_desc + irq);
	ack_bad_irq(irq);
}

/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
	return 0;
}

/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
	.name		= "none",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= ack_bad,
	.end		= noop,
};

/*
 * Generic dummy implementation which can be used for
 * really dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
	.name		= "dummy",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= noop,
	.mask		= noop,
	.unmask		= noop,
	.end		= noop,
};

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}

/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

	handle_dynamic_tick(action);

	if (!(action->flags & IRQF_DISABLED))
		local_irq_enable_in_hardirq();

	do {
		ret = action->handler(irq, action->dev_id);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}
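/*
 * Illustrative sketch, not part of the generic code: the irqaction
 * chain walked by handle_IRQ_event() above is built by drivers calling
 * request_irq().  A typical handler on that chain looks like this;
 * "struct example_device" and its "pending" flag are hypothetical
 * stand-ins for real per-device state.
 */
struct example_device {
	int	irq;
	int	pending;	/* hypothetical "my device raised an irq" flag */
};

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct example_device *dev = dev_id;

	/*
	 * A (shared) handler must report IRQ_NONE when its device did
	 * not raise the interrupt, so that note_interrupt() can keep
	 * spotting genuinely spurious interrupts.
	 */
	if (!dev->pending)
		return IRQ_NONE;

	/* ... acknowledge and service the device here ... */
	dev->pending = 0;

	return IRQ_HANDLED;
}

/*
 * The handler would be placed on the chain with something like:
 *
 *	err = request_irq(dev->irq, example_interrupt, IRQF_SHARED,
 *			  "example", dev);
 */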
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
/**
 * __do_IRQ - original all-in-one high-level IRQ handler
 * @irq:	the interrupt number
 *
 * __do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	struct irqaction *action;
	unsigned int status;

	kstat_this_cpu.irqs[irq]++;
	if (CHECK_IRQ_PER_CPU(desc->status)) {
		irqreturn_t action_ret;

		/*
		 * No locking required for CPU-local interrupts:
		 */
		if (desc->chip->ack)
			desc->chip->ack(irq);
		if (likely(!(desc->status & IRQ_DISABLED))) {
			action_ret = handle_IRQ_event(irq, desc->action);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
		}
		desc->chip->end(irq);
		return 1;
	}

	spin_lock(&desc->lock);
	if (desc->chip->ack)
		desc->chip->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);

		spin_lock(&desc->lock);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
	spin_unlock(&desc->lock);

	return 1;
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

void early_init_irq_lock_class(void)
{
	int i;

	for (i = 0; i < NR_IRQS; i++)
		lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class);
}

#endif
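/*
 * Illustrative sketch, not part of this file: on an architecture that
 * still uses __do_IRQ() rather than the per-IRQ flow handlers, the
 * low-level entry code typically decodes the vector and hands every
 * device interrupt to __do_IRQ(), roughly like this (the signature and
 * the get_irq_number() helper are hypothetical, arch-specific details):
 *
 *	asmlinkage void do_IRQ(struct pt_regs *regs)
 *	{
 *		struct pt_regs *old_regs = set_irq_regs(regs);
 *		unsigned int irq = get_irq_number(regs);
 *
 *		irq_enter();
 *		__do_IRQ(irq);
 *		irq_exit();
 *
 *		set_irq_regs(old_regs);
 *	}
 */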