// SPDX-License-Identifier: GPL-2.0
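/*
 * Helpers for carrying out a pending interrupt affinity change and for
 * cleaning up pending moves when a CPU goes offline.
 */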

#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"

/**
 * irq_fixup_move_pending - Clean up a pending irq move from a dying CPU
 * @desc:		Interrupt descriptor to clean up
 * @force_clear:	If set, clear the move pending bit unconditionally.
 *			If not set, clear it only when the dying CPU is the
 *			last one in the pending mask.
 *
 * Returns true if the pending bit was set and the pending mask contains an
 * online CPU other than the dying CPU.
 */
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);

	if (!irqd_is_setaffinity_pending(data))
		return false;

	/*
	 * The outgoing CPU might be the last online target in a pending
	 * interrupt move. If that's the case, clear the pending move bit.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {
		irqd_clr_move_pending(data);
		return false;
	}
	if (force_clear)
		irqd_clr_move_pending(data);
	return true;
}

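/*
 * irq_move_masked_irq - Carry out a pending affinity change for @idata.
 *
 * The caller must hold desc->lock and have the interrupt masked; the new
 * target CPUs are taken from desc->pending_mask.
 */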
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_data *data = &desc->irq_data;
	struct irq_chip *chip = data->chip;

	if (likely(!irqd_is_setaffinity_pending(data)))
		return;

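	/*
	 * Clear the pending bit up front; it is set again below when the
	 * move has to be redone because the underlying vector management
	 * has a cleanup pending (-EBUSY).
	 */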
	irqd_clr_move_pending(data);

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (irqd_is_per_cpu(data)) {
		WARN_ON(1);
		return;
	}

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there is a valid mask to work with, do the disable,
	 * re-program, enable sequence.
	 * This is not particularly important for level-triggered
	 * interrupts, but in the edge-triggered case we might be writing
	 * the RTE while an active trigger is coming in, which could cause
	 * some IO-APICs to malfunction.
	 * Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller masking
	 * the irqs.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
		int ret;

		ret = irq_do_set_affinity(data, desc->pending_mask, false);
		/*
		 * If there is a cleanup pending in the underlying
		 * vector management, reschedule the move for the next
		 * interrupt. Leave desc->pending_mask intact.
		 */
		if (ret == -EBUSY) {
			irqd_set_move_pending(data);
			return;
		}
	}
	cpumask_clear(desc->pending_mask);
}

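/*
 * __irq_move_irq - Perform a pending affinity change, masking @idata
 * around the move when it is not masked already.
 *
 * Disabled interrupts are left alone; an already masked interrupt is not
 * unmasked afterwards, which avoids an interrupt storm with ONESHOT
 * threaded handlers.
 */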
void __irq_move_irq(struct irq_data *idata)
{
	bool masked;

	/*
	 * Get the top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is
	 * enabled; the lookup should be optimized away when it is
	 * disabled, so an "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" is not
	 * needed here.
	 */
	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}