xref: /openbmc/linux/kernel/irq/migration.c (revision 7211ec63)
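/*
 * Handling of deferred interrupt affinity moves (CONFIG_GENERIC_PENDING_IRQ):
 * a requested affinity change is parked in desc->pending_mask and applied
 * later, from interrupt context with the interrupt masked, by the helpers
 * below.
 */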

#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"

/**
 * irq_fixup_move_pending - Cleanup irq move pending from a dying CPU
 * @desc:		Interrupt descriptor to clean up
 * @force_clear:	If set, clear the move pending bit unconditionally.
 *			If not set, clear it only when the dying CPU is the
 *			last one in the pending mask.
 *
 * Returns true if the pending bit was set and the pending mask contains an
 * online CPU other than the dying CPU.
 */
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);

	if (!irqd_is_setaffinity_pending(data))
		return false;

	/*
	 * The outgoing CPU might be the last online target in a pending
	 * interrupt move. If that's the case, clear the pending move bit.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {
		irqd_clr_move_pending(data);
		return false;
	}
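	/*
	 * With force_clear the caller takes over the pending move (CPU
	 * hotplug does this when it migrates the interrupt away itself),
	 * so drop the pending bit unconditionally.
	 */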
	if (force_clear)
		irqd_clr_move_pending(data);
	return true;
}
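
/*
 * Usage sketch (illustrative, not part of the original file): a CPU-hotplug
 * migration path such as migrate_one_irq() in kernel/irq/cpuhotplug.c can
 * use the return value to pick the mask the interrupt should move to:
 *
 *	struct irq_data *d = irq_desc_get_irq_data(desc);
 *	const struct cpumask *affinity;
 *
 *	if (irq_fixup_move_pending(desc, true))
 *		affinity = irq_desc_get_pending_mask(desc);
 *	else
 *		affinity = irq_data_get_affinity_mask(d);
 */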

void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_chip *chip = desc->irq_data.chip;

	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
		return;

	irqd_clr_move_pending(&desc->irq_data);

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (irqd_is_per_cpu(&desc->irq_data)) {
		WARN_ON(1);
		return;
	}

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there is a valid mask to work with, do the disable,
	 * re-program, enable sequence. This is *not* particularly
	 * important for level-triggered interrupts, but in an
	 * edge-triggered case we might be writing the RTE while an
	 * active trigger is coming in, which can make some IOAPICs
	 * malfunction. Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller masking
	 * the irqs.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);

	cpumask_clear(desc->pending_mask);
}
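
/*
 * Caller contract sketch (illustrative, not part of the original file):
 * desc->lock must be held and the interrupt must be masked. irq_move_irq()
 * below provides exactly that pattern from flow-handler context:
 *
 *	(desc->lock already held by the flow handler)
 *	chip->irq_mask(&desc->irq_data);
 *	irq_move_masked_irq(&desc->irq_data);
 *	chip->irq_unmask(&desc->irq_data);
 */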

void irq_move_irq(struct irq_data *idata)
{
	bool masked;

	/*
	 * Get the top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is
	 * enabled. The lookup should be optimized away when
	 * CONFIG_IRQ_DOMAIN_HIERARCHY is disabled, so we avoid an
	 * "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" here.
	 */
	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));

	if (likely(!irqd_is_setaffinity_pending(idata)))
		return;

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}
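
/*
 * Usage sketch (illustrative, hypothetical names): architectures typically
 * call irq_move_irq() from an edge-ack irq_chip callback, which runs with
 * desc->lock held, e.g. in the style of x86's apic_ack_edge():
 *
 *	static void my_ack_edge(struct irq_data *data)
 *	{
 *		irq_move_irq(data);
 *		my_hw_ack(data);
 *	}
 *
 * my_ack_edge() and my_hw_ack() are made-up names for this sketch.
 */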