/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
{
	struct irq_desc *desc;
	unsigned long flags;

	desc = irq_to_desc(irq);
	if (!desc) {
		WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
		return;
	}

	/* Ensure we don't have leftover values from a previous use of this irq */
	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->status = IRQ_DISABLED;
	desc->chip = &no_irq_chip;
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->msi_desc = NULL;
	desc->handler_data = NULL;
	if (!keep_chip_data)
		desc->chip_data = NULL;
	desc->action = NULL;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
#ifdef CONFIG_SMP
	cpumask_setall(desc->affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
#endif
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

/**
 *	dynamic_irq_init - initialize a dynamically allocated irq
 *	@irq:	irq number to initialize
 */
void dynamic_irq_init(unsigned int irq)
{
	dynamic_irq_init_x(irq, false);
}

/**
 *	dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
 *	@irq:	irq number to initialize
 *
 *	does not set irq_to_desc(irq)->chip_data to NULL
 */
void dynamic_irq_init_keep_chip_data(unsigned int irq)
{
	dynamic_irq_init_x(irq, true);
}

static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc) {
		WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
		return;
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->action) {
		raw_spin_unlock_irqrestore(&desc->lock, flags);
		WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
			irq);
		return;
	}
	desc->msi_desc = NULL;
	desc->handler_data = NULL;
	if (!keep_chip_data)
		desc->chip_data = NULL;
	desc->handle_irq = handle_bad_irq;
	desc->chip = &no_irq_chip;
	desc->name = NULL;
	clear_kstat_irqs(desc);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

/**
 *	dynamic_irq_cleanup - cleanup a dynamically allocated irq
 *	@irq:	irq number to clean up
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	dynamic_irq_cleanup_x(irq, false);
}

/**
 *	dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
 *	@irq:	irq number to clean up
 *
 *	does not set irq_to_desc(irq)->chip_data to NULL
 */
void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
{
	dynamic_irq_cleanup_x(irq, true);
}
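
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * that manages its own interrupt numbers could reset a descriptor before
 * use and restore it afterwards roughly like this. The example_* helpers
 * are hypothetical stand-ins for however the platform hands out and takes
 * back free irq numbers.
 *
 *	unsigned int irq = example_alloc_irq_number();
 *
 *	dynamic_irq_init(irq);
 *	set_irq_chip_and_handler(irq, &example_chip, handle_edge_irq);
 *	... request_irq() / free_irq() by the user of the interrupt ...
 *	dynamic_irq_cleanup(irq);
 *	example_free_irq_number(irq);
 */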


/**
 *	set_irq_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 */
int set_irq_chip(unsigned int irq, struct irq_chip *chip)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc) {
		WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
		return -EINVAL;
	}

	if (!chip)
		chip = &no_irq_chip;

	raw_spin_lock_irqsave(&desc->lock, flags);
	irq_chip_set_defaults(chip);
	desc->chip = chip;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(set_irq_chip);

/**
 *	set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int set_irq_type(unsigned int irq, unsigned int type)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret = -ENXIO;

	if (!desc) {
		printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
		return -ENODEV;
	}

	type &= IRQ_TYPE_SENSE_MASK;
	if (type == IRQ_TYPE_NONE)
		return 0;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = __irq_set_trigger(desc, irq, type);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_irq_type);
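
/*
 * Example (illustrative sketch, not part of the original file): a board or
 * driver would typically pick the trigger type before requesting the line;
 * example_handler and example_dev are hypothetical.
 *
 *	set_irq_type(irq, IRQ_TYPE_EDGE_FALLING);
 *	ret = request_irq(irq, example_handler, 0, "example", example_dev);
 */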

/**
 *	set_irq_data - set irq handler data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the hardware irq controller data for an irq
 */
int set_irq_data(unsigned int irq, void *data)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc) {
		printk(KERN_ERR
		       "Trying to install controller data for IRQ%d\n", irq);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->handler_data = data;
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
EXPORT_SYMBOL(set_irq_data);
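
/*
 * Example (illustrative sketch, not part of the original file): the stored
 * pointer is a per-irq cookie, typically fetched back with get_irq_data()
 * from a flow handler or chained handler. example_cookie is hypothetical.
 *
 *	set_irq_data(irq, example_cookie);
 *	...
 *	struct example_cookie *p = get_irq_data(irq);
 */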

/**
 *	set_irq_msi - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq
 */
int set_irq_msi(unsigned int irq, struct msi_desc *entry)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc) {
		printk(KERN_ERR
		       "Trying to install msi data for IRQ%d\n", irq);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->msi_desc = entry;
	if (entry)
		entry->irq = irq;
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}

/**
 *	set_irq_chip_data - set irq chip data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 */
int set_irq_chip_data(unsigned int irq, void *data)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc) {
		printk(KERN_ERR
		       "Trying to install chip data for IRQ%d\n", irq);
		return -EINVAL;
	}

	if (!desc->chip) {
		printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->chip_data = data;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(set_irq_chip_data);
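
/*
 * Example (illustrative sketch, not part of the original file): chip_data
 * usually holds the per-instance state of the interrupt controller, e.g. a
 * register base, which the chip callbacks fetch back with
 * get_irq_chip_data(). All example_* names are hypothetical.
 *
 *	set_irq_chip_data(irq, example_ctrl);
 *
 *	static void example_mask(unsigned int irq)
 *	{
 *		struct example_ctrl *ctrl = get_irq_chip_data(irq);
 *
 *		writel(1 << (irq - ctrl->irq_base), ctrl->base + EXAMPLE_MASK);
 *	}
 */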

/**
 *	set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq
 *
 *	@irq:	Interrupt number
 *	@nest:	0 to clear / 1 to set the IRQ_NESTED_THREAD flag
 *
 *	The IRQ_NESTED_THREAD flag indicates that on
 *	request_threaded_irq() no separate interrupt thread should be
 *	created for the irq, as the handler is called nested in the
 *	context of a demultiplexing interrupt handler thread.
 */
void set_irq_nested_thread(unsigned int irq, int nest)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (nest)
		desc->status |= IRQ_NESTED_THREAD;
	else
		desc->status &= ~IRQ_NESTED_THREAD;
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(set_irq_nested_thread);

/*
 * default enable function
 */
static void default_enable(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	desc->chip->unmask(irq);
	desc->status &= ~IRQ_MASKED;
}

/*
 * default disable function
 */
static void default_disable(unsigned int irq)
{
}

/*
 * default startup function
 */
static unsigned int default_startup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	desc->chip->enable(irq);
	return 0;
}

/*
 * default shutdown function
 */
static void default_shutdown(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	desc->chip->mask(irq);
	desc->status |= IRQ_MASKED;
}

/*
 * Fixup enable/disable function pointers
 */
void irq_chip_set_defaults(struct irq_chip *chip)
{
	if (!chip->enable)
		chip->enable = default_enable;
	if (!chip->disable)
		chip->disable = default_disable;
	if (!chip->startup)
		chip->startup = default_startup;
	/*
	 * Use chip->disable as the shutdown function when the user
	 * provided one. When chip->disable is still default_disable, we
	 * need to use default_shutdown instead, otherwise the irq line
	 * is not disabled on free_irq():
	 */
	if (!chip->shutdown)
		chip->shutdown = chip->disable != default_disable ?
			chip->disable : default_shutdown;
	if (!chip->name)
		chip->name = chip->typename;
	if (!chip->end)
		chip->end = dummy_irq_chip.end;
}
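
/*
 * Example (illustrative sketch, not part of the original file): thanks to
 * the defaults installed above, a minimal chip only has to supply the
 * callbacks that really touch hardware; enable/disable/startup/shutdown
 * are filled in by irq_chip_set_defaults() when the chip is registered
 * through set_irq_chip(). The example_* callbacks are hypothetical.
 *
 *	static struct irq_chip example_chip = {
 *		.name	= "example",
 *		.ack	= example_ack,
 *		.mask	= example_mask,
 *		.unmask	= example_unmask,
 *	};
 */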

static inline void mask_ack_irq(struct irq_desc *desc, int irq)
{
	if (desc->chip->mask_ack)
		desc->chip->mask_ack(irq);
	else {
		desc->chip->mask(irq);
		if (desc->chip->ack)
			desc->chip->ack(irq);
	}
}

/*
 *	handle_nested_irq - Handle a nested irq from an irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
		goto out_unlock;

	desc->status |= IRQ_INPROGRESS;
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
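
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * user is an irq chip sitting behind a slow bus, e.g. an I2C GPIO expander.
 * Its child interrupts are marked nested at setup time, and the expander's
 * threaded handler demultiplexes them by calling handle_nested_irq() for
 * each pending child. All example_* names are hypothetical.
 *
 *	set_irq_chip_and_handler(child_irq, &example_expander_chip,
 *				 handle_simple_irq);
 *	set_irq_nested_thread(child_irq, 1);
 *
 *	static irqreturn_t example_demux_thread(int irq, void *data)
 *	{
 *		unsigned long pending = example_read_pending(data);
 *		int bit;
 *
 *		for (bit = 0; bit < EXAMPLE_NR_CHILD_IRQS; bit++)
 *			if (pending & (1 << bit))
 *				handle_nested_irq(example_child_irq_base + bit);
 *		return IRQ_HANDLED;
 *	}
 */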

/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irqaction *action;
	irqreturn_t action_ret;

	raw_spin_lock(&desc->lock);

	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out_unlock;
	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
		goto out_unlock;

	desc->status |= IRQ_INPROGRESS;
	raw_spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
out_unlock:
	raw_spin_unlock(&desc->lock);
}

/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require masking the interrupt and
 *	unmasking it after the associated handler has acknowledged the
 *	device, so that the interrupt line goes back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irqaction *action;
	irqreturn_t action_ret;

	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc, irq);

	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out_unlock;
	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here
	 */
	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
		goto out_unlock;

	desc->status |= IRQ_INPROGRESS;
	raw_spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;

	if (unlikely(desc->status & IRQ_ONESHOT))
		desc->status |= IRQ_MASKED;
	else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
		desc->chip->unmask(irq);
out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
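
/*
 * Example (illustrative sketch, not part of the original file): a level
 * triggered line is normally wired up by the interrupt controller code so
 * that handle_level_irq() performs the mask/ack/unmask sequence around the
 * device handler; example_chip is hypothetical.
 *
 *	set_irq_chip_and_handler_name(irq, &example_chip, handle_level_irq,
 *				      "level");
 */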

/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irqaction *action;
	irqreturn_t action_ret;

	raw_spin_lock(&desc->lock);

	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out;

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
		desc->status |= IRQ_PENDING;
		if (desc->chip->mask)
			desc->chip->mask(irq);
		goto out;
	}

	desc->status |= IRQ_INPROGRESS;
	desc->status &= ~IRQ_PENDING;
	raw_spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
out:
	desc->chip->eoi(irq);

	raw_spin_unlock(&desc->lock);
}
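
/*
 * Example (illustrative sketch, not part of the original file): controllers
 * that only need a single end-of-interrupt notification provide an ->eoi()
 * callback and use this flow handler; example_eoi_chip and its callbacks
 * are hypothetical.
 *
 *	static struct irq_chip example_eoi_chip = {
 *		.name	= "example-eoi",
 *		.mask	= example_mask,
 *		.unmask	= example_unmask,
 *		.eoi	= example_eoi,
 *	};
 *
 *	set_irq_chip_and_handler(irq, &example_eoi_chip, handle_fasteoi_irq);
 */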

/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupts occur on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be re-enabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires re-enabling the interrupt inside
 *	the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
		    !desc->action)) {
		desc->status |= (IRQ_PENDING | IRQ_MASKED);
		mask_ack_irq(desc, irq);
		goto out_unlock;
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	if (desc->chip->ack)
		desc->chip->ack(irq);

	/* Mark the IRQ currently in progress. */
	desc->status |= IRQ_INPROGRESS;

	do {
		struct irqaction *action = desc->action;
		irqreturn_t action_ret;

		if (unlikely(!action)) {
			desc->chip->mask(irq);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely((desc->status &
			       (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
			      (IRQ_PENDING | IRQ_MASKED))) {
			desc->chip->unmask(irq);
			desc->status &= ~IRQ_MASKED;
		}

		desc->status &= ~IRQ_PENDING;
		raw_spin_unlock(&desc->lock);
		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
		raw_spin_lock(&desc->lock);

	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

	desc->status &= ~IRQ_INPROGRESS;
out_unlock:
	raw_spin_unlock(&desc->lock);
}
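
/*
 * Example (illustrative sketch, not part of the original file): an edge
 * triggered line needs an ->ack() callback so the latched event can be
 * cleared before the handler runs; registration then looks roughly like
 * this, with example_chip hypothetical.
 *
 *	set_irq_chip_and_handler_name(irq, &example_chip, handle_edge_irq,
 *				      "edge");
 *	set_irq_type(irq, IRQ_TYPE_EDGE_BOTH);
 */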

/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	irqreturn_t action_ret;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (desc->chip->ack)
		desc->chip->ack(irq);

	action_ret = handle_IRQ_event(irq, desc->action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	if (desc->chip->eoi)
		desc->chip->eoi(irq);
}

void
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc) {
		printk(KERN_ERR
		       "Trying to install type control for IRQ%d\n", irq);
		return;
	}

	if (!handle)
		handle = handle_bad_irq;
	else if (desc->chip == &no_irq_chip) {
		printk(KERN_WARNING "Trying to install %sinterrupt handler "
		       "for IRQ%d\n", is_chained ? "chained " : "", irq);
		/*
		 * Some ARM implementations install a handler for really dumb
		 * interrupt hardware without setting an irq_chip. This worked
		 * with the ARM no_irq_chip but the check in setup_irq would
		 * prevent us from setting up the interrupt at all. Switch it
		 * to dummy_irq_chip for easy transition.
		 */
		desc->chip = &dummy_irq_chip;
	}

	chip_bus_lock(irq, desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->chip != &no_irq_chip)
			mask_ack_irq(desc, irq);
		desc->status |= IRQ_DISABLED;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		desc->status &= ~IRQ_DISABLED;
		desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
		desc->depth = 0;
		desc->chip->startup(irq);
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL_GPL(__set_irq_handler);
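
/*
 * Example (illustrative sketch, not part of the original file): a secondary
 * (cascaded) interrupt controller such as a GPIO bank typically installs a
 * chained handler on its parent interrupt via set_irq_chained_handler(),
 * which is a wrapper around __set_irq_handler(..., 1, ...). The chained
 * handler demultiplexes and re-enters the core for every pending child
 * irq. All example_* names are hypothetical.
 *
 *	static void example_bank_demux(unsigned int irq, struct irq_desc *desc)
 *	{
 *		struct example_bank *bank = get_irq_data(irq);
 *		unsigned long pending = example_read_pending(bank);
 *		int bit;
 *
 *		for (bit = 0; bit < EXAMPLE_IRQS_PER_BANK; bit++)
 *			if (pending & (1 << bit))
 *				generic_handle_irq(bank->irq_base + bit);
 *	}
 *
 *	set_irq_data(parent_irq, bank);
 *	set_irq_chained_handler(parent_irq, example_bank_demux);
 */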

void
set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
			 irq_flow_handler_t handle)
{
	set_irq_chip(irq, chip);
	__set_irq_handler(irq, handle, 0, NULL);
}

void
set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	set_irq_chip(irq, chip);
	__set_irq_handler(irq, handle, 0, name);
}

void __init set_irq_noprobe(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc) {
		printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq);
		return;
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->status |= IRQ_NOPROBE;
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

void __init set_irq_probe(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc) {
		printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq);
		return;
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->status &= ~IRQ_NOPROBE;
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
748