xref: /openbmc/linux/kernel/irq/migration.c (revision 9ac8d3fb)

#include <linux/irq.h>

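/*
 * set_pending_irq - record a pending affinity change for an irq
 *
 * The new mask is stored under desc->lock and IRQ_MOVE_PENDING is set.
 * The actual migration is deferred to move_native_irq()/move_masked_irq(),
 * which run when the interrupt is next handled and can reprogram it safely.
 */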
void set_pending_irq(unsigned int irq, cpumask_t mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	desc->status |= IRQ_MOVE_PENDING;
	desc->pending_mask = mask;
	spin_unlock_irqrestore(&desc->lock, flags);
}

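/*
 * move_masked_irq - perform a deferred affinity change
 *
 * Called with desc->lock held and the interrupt masked.  Applies the
 * mask recorded by set_pending_irq(), restricted to the CPUs that are
 * actually online.
 */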
void move_masked_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	cpumask_t tmp;

	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (CHECK_IRQ_PER_CPU(desc->status)) {
		WARN_ON(1);
		return;
	}

	desc->status &= ~IRQ_MOVE_PENDING;

	if (unlikely(cpus_empty(desc->pending_mask)))
		return;

	if (!desc->chip->set_affinity)
		return;

	assert_spin_locked(&desc->lock);

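	/* Drop CPUs that have gone offline since the move was requested. */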
	cpus_and(tmp, desc->pending_mask, cpu_online_map);

	/*
	 * If there is a valid mask to work with, follow the
	 * disable, re-program, enable sequence.  This is not
	 * particularly important for level-triggered interrupts,
	 * but in the edge-triggered case we might otherwise set
	 * the RTE while an active trigger is coming in, which
	 * could cause some IO-APICs to malfunction.
	 *
	 * For correct operation this depends on the caller
	 * masking the irq.
	 */
	if (likely(!cpus_empty(tmp)))
		desc->chip->set_affinity(irq, tmp);
	cpus_clear(desc->pending_mask);
}

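/*
 * move_native_irq - mask the irq and perform any pending affinity change
 *
 * Wrapper around move_masked_irq() that provides the masking the
 * reprogramming sequence above relies on.
 */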
void move_native_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

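	/*
	 * If the interrupt is currently disabled, leave IRQ_MOVE_PENDING
	 * set; the move will be retried on a later call, once the
	 * interrupt is enabled again.
	 */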
	if (unlikely(desc->status & IRQ_DISABLED))
		return;

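	/*
	 * Mask the interrupt around the move so that move_masked_irq()
	 * sees it masked, as its reprogramming sequence requires.
	 */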
	desc->chip->mask(irq);
	move_masked_irq(irq);
	desc->chip->unmask(irq);
}