/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:	the interrupt number
 * @desc:	description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void fastcall
handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
	print_irq_desc(irq, desc);
	kstat_this_cpu.irqs[irq]++;
	ack_bad_irq(irq);
}

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status = IRQ_DISABLED,
		.chip = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth = 1,
		.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
#ifdef CONFIG_SMP
		.affinity = CPU_MASK_ALL
#endif
	}
};

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this itself.
 */
static void ack_bad(unsigned int irq)
{
	print_irq_desc(irq, irq_desc + irq);
	ack_bad_irq(irq);
}

/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
	return 0;
}

/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
	.name		= "none",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= ack_bad,
	.end		= noop,
};

/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
	.name		= "dummy",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= noop,
	.mask		= noop,
	.unmask		= noop,
	.end		= noop,
};

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}

/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

	handle_dynamic_tick(action);

	if (!(action->flags & IRQF_DISABLED))
		local_irq_enable_in_hardirq();

	do {
		ret = action->handler(irq, action->dev_id);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}
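
/*
 * Illustrative sketch only (not part of the original file): a typical
 * driver handler sitting in the action chain that handle_IRQ_event()
 * walks above. On a shared (IRQF_SHARED) line each handler must first
 * check whether its own device raised the interrupt and return IRQ_NONE
 * if it did not, so that the aggregated return value and the spurious-irq
 * accounting in note_interrupt() stay meaningful. The device structure,
 * register offsets and bit names below are hypothetical.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_device *dev = dev_id;
 *		u32 stat = readl(dev->regs + FOO_IRQ_STATUS);
 *
 *		if (!(stat & FOO_IRQ_PENDING))
 *			return IRQ_NONE;
 *
 *		writel(stat & FOO_IRQ_PENDING, dev->regs + FOO_IRQ_ACK);
 *		return IRQ_HANDLED;
 *	}
 *
 * Such a handler would typically be wired into the chain with
 * request_irq(irq, foo_interrupt, IRQF_SHARED, "foo", dev).
 */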

#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
/**
 * __do_IRQ - original all-in-one high-level IRQ handler
 * @irq:	the interrupt number
 *
 * __do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
fastcall unsigned int __do_IRQ(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	struct irqaction *action;
	unsigned int status;

	kstat_this_cpu.irqs[irq]++;
	if (CHECK_IRQ_PER_CPU(desc->status)) {
		irqreturn_t action_ret;

		/*
		 * No locking required for CPU-local interrupts:
		 */
		if (desc->chip->ack)
			desc->chip->ack(irq);
		action_ret = handle_IRQ_event(irq, desc->action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
		desc->chip->end(irq);
		return 1;
	}

	spin_lock(&desc->lock);
	if (desc->chip->ack)
		desc->chip->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge-triggered interrupts need to remember pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);

		spin_lock(&desc->lock);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
	spin_unlock(&desc->lock);

	return 1;
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

void early_init_irq_lock_class(void)
{
	int i;

	for (i = 0; i < NR_IRQS; i++)
		lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class);
}

#endif
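
/*
 * Illustrative sketch only (not part of the original file): how an
 * architecture's low-level interrupt entry typically hands a decoded
 * Linux irq number to this generic layer. Architectures still using the
 * all-in-one flow call __do_IRQ() as shown; with
 * CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ the per-irq flow handler
 * (desc->handle_irq) is invoked instead. The function name arch_do_IRQ
 * below is hypothetical.
 *
 *	asmlinkage void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
 *	{
 *		struct pt_regs *old_regs = set_irq_regs(regs);
 *
 *		irq_enter();
 *		__do_IRQ(irq);
 *		irq_exit();
 *
 *		set_irq_regs(old_regs);
 *	}
 */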