/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/config.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>

#include "internals.h"

#ifdef CONFIG_SMP

cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };

#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
#endif

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;

        if (irq >= NR_IRQS)
                return;

        while (desc->status & IRQ_INPROGRESS)
                cpu_relax();
}

EXPORT_SYMBOL(synchronize_irq);

#endif

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        unsigned long flags;

        if (irq >= NR_IRQS)
                return;

        spin_lock_irqsave(&desc->lock, flags);
        if (!desc->depth++) {
                desc->status |= IRQ_DISABLED;
                desc->handler->disable(irq);
        }
        spin_unlock_irqrestore(&desc->lock, flags);
}

EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;

        if (irq >= NR_IRQS)
                return;

        disable_irq_nosync(irq);
        if (desc->action)
                synchronize_irq(irq);
}

EXPORT_SYMBOL(disable_irq);
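/*
 * Illustrative sketch (not part of the original file): how a driver
 * typically nests disable_irq()/enable_irq() around work that must not
 * race with its interrupt handler.  Each disable_irq() must be balanced
 * by one enable_irq(); the line is only re-enabled when the disable
 * depth drops back to zero.  The device structure, register offset and
 * IRQ number below are hypothetical, so the block is compiled out.
 */
#if 0
static void example_reprogram_device(struct example_dev *dev)
{
        disable_irq(dev->irq);                  /* waits for a running handler */
        writel(0, dev->regs + EXAMPLE_CTRL);    /* hypothetical register poke */
        enable_irq(dev->irq);                   /* balances the disable above */
}
#endif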
/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        unsigned long flags;

        if (irq >= NR_IRQS)
                return;

        spin_lock_irqsave(&desc->lock, flags);
        switch (desc->depth) {
        case 0:
                WARN_ON(1);
                break;
        case 1: {
                unsigned int status = desc->status & ~IRQ_DISABLED;

                desc->status = status;
                if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
                        desc->status = status | IRQ_REPLAY;
                        hw_resend_irq(desc->handler,irq);
                }
                desc->handler->enable(irq);
                /* fall-through */
        }
        default:
                desc->depth--;
        }
        spin_unlock_irqrestore(&desc->lock, flags);
}

EXPORT_SYMBOL(enable_irq);

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
        struct irqaction *action;

        if (irq >= NR_IRQS)
                return 0;

        action = irq_desc[irq].action;
        if (action)
                if (irqflags & action->flags & SA_SHIRQ)
                        action = NULL;

        return !action;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
int setup_irq(unsigned int irq, struct irqaction * new)
{
        struct irq_desc *desc = irq_desc + irq;
        struct irqaction *old, **p;
        unsigned long flags;
        int shared = 0;

        if (irq >= NR_IRQS)
                return -EINVAL;

        if (desc->handler == &no_irq_type)
                return -ENOSYS;
        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
        if (new->flags & SA_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, we want to call it first,
                 * outside of the atomic block.
                 * Yes, this might clear the entropy pool if the wrong
                 * driver is attempted to be loaded, without actually
                 * installing a new handler, but is this really a problem,
                 * only the sysadmin is able to do this.
                 */
                rand_initialize_irq(irq);
        }

        /*
         * The following block of code has to be executed atomically
         */
        spin_lock_irqsave(&desc->lock,flags);
        p = &desc->action;
        if ((old = *p) != NULL) {
                /* Can't share interrupts unless both agree to */
                if (!(old->flags & new->flags & SA_SHIRQ)) {
                        spin_unlock_irqrestore(&desc->lock,flags);
                        return -EBUSY;
                }

                /* add new interrupt at end of irq queue */
                do {
                        p = &old->next;
                        old = *p;
                } while (old);
                shared = 1;
        }

        *p = new;

        if (!shared) {
                desc->depth = 0;
                desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT |
                                  IRQ_WAITING | IRQ_INPROGRESS);
                if (desc->handler->startup)
                        desc->handler->startup(irq);
                else
                        desc->handler->enable(irq);
        }
        spin_unlock_irqrestore(&desc->lock,flags);

        new->irq = irq;
        register_irq_proc(irq);
        new->dir = NULL;
        register_handler_proc(irq, new);

        return 0;
}
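/*
 * Illustrative sketch (not part of the original file): architecture code
 * typically calls setup_irq() with a statically allocated irqaction, e.g.
 * for the timer tick, instead of going through request_irq().  The handler,
 * names and IRQ number below are hypothetical, so the block is compiled out.
 */
#if 0
static irqreturn_t example_timer_interrupt(int irq, void *dev_id,
                                           struct pt_regs *regs)
{
        /* acknowledge the tick in the (hypothetical) timer hardware here */
        return IRQ_HANDLED;
}

static struct irqaction example_timer_irqaction = {
        .handler = example_timer_interrupt,
        .flags   = SA_INTERRUPT,        /* run with local interrupts disabled */
        .name    = "example timer",
};

static void __init example_time_init(void)
{
        /* IRQ 0 is only an example; the real number is board specific */
        setup_irq(0, &example_timer_irqaction);
}
#endif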
/**
 * free_irq - free an interrupt
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
        struct irq_desc *desc;
        struct irqaction **p;
        unsigned long flags;

        if (irq >= NR_IRQS)
                return;

        desc = irq_desc + irq;
        spin_lock_irqsave(&desc->lock,flags);
        p = &desc->action;
        for (;;) {
                struct irqaction * action = *p;

                if (action) {
                        struct irqaction **pp = p;

                        p = &action->next;
                        if (action->dev_id != dev_id)
                                continue;

                        /* Found it - now remove it from the list of entries */
                        *pp = action->next;

                        /* Currently used only by UML, might disappear one day.*/
#ifdef CONFIG_IRQ_RELEASE_METHOD
                        if (desc->handler->release)
                                desc->handler->release(irq, dev_id);
#endif

                        if (!desc->action) {
                                desc->status |= IRQ_DISABLED;
                                if (desc->handler->shutdown)
                                        desc->handler->shutdown(irq);
                                else
                                        desc->handler->disable(irq);
                        }
                        spin_unlock_irqrestore(&desc->lock,flags);
                        unregister_handler_proc(irq, action);

                        /* Make sure it's not being used on another CPU */
                        synchronize_irq(irq);
                        kfree(action);
                        return;
                }
                printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
                spin_unlock_irqrestore(&desc->lock,flags);
                return;
        }
}

EXPORT_SYMBOL(free_irq);
/**
 * request_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *      SA_SHIRQ                Interrupt is shared
 *      SA_INTERRUPT            Disable local interrupts while processing
 *      SA_SAMPLE_RANDOM        The interrupt can be used for entropy
 *
 */
int request_irq(unsigned int irq,
                irqreturn_t (*handler)(int, void *, struct pt_regs *),
                unsigned long irqflags, const char * devname, void *dev_id)
{
        struct irqaction * action;
        int retval;

        /*
         * Sanity-check: shared interrupts must pass in a real dev-ID,
         * otherwise we'll have trouble later trying to figure out
         * which interrupt is which (messes up the interrupt freeing
         * logic etc).
         */
        if ((irqflags & SA_SHIRQ) && !dev_id)
                return -EINVAL;
        if (irq >= NR_IRQS)
                return -EINVAL;
        if (!handler)
                return -EINVAL;

        action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->flags = irqflags;
        cpus_clear(action->mask);
        action->name = devname;
        action->next = NULL;
        action->dev_id = dev_id;

        select_smp_affinity(irq);

        retval = setup_irq(irq, action);
        if (retval)
                kfree(action);

        return retval;
}

EXPORT_SYMBOL(request_irq);
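/*
 * Illustrative sketch (not part of the original file): the usual driver
 * pattern around request_irq()/free_irq().  dev_id points at the driver's
 * per-device structure so the handler can tell a shared line apart, and
 * the same pointer must be passed to free_irq().  All names, register
 * offsets and fields below are hypothetical, so the block is compiled out.
 */
#if 0
static irqreturn_t example_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct example_dev *dev = dev_id;

        /* on a shared line, first check whether our device really raised it */
        if (!(readl(dev->regs + EXAMPLE_STATUS) & EXAMPLE_IRQ_PENDING))
                return IRQ_NONE;

        writel(EXAMPLE_IRQ_PENDING, dev->regs + EXAMPLE_ACK);   /* clear it */
        return IRQ_HANDLED;
}

static int example_setup(struct example_dev *dev)
{
        /* the hardware must be quiesced before this call enables the line */
        return request_irq(dev->irq, example_interrupt, SA_SHIRQ,
                           "example", dev);
}

static void example_teardown(struct example_dev *dev)
{
        free_irq(dev->irq, dev);        /* same dev_id as passed above */
}
#endif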