xref: /openbmc/linux/kernel/irq/cpuhotplug.c (revision 31368ce8)
/*
 * Generic CPU hot-unplug interrupt migration code, copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>

#include "internals.h"

/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at the general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);

	return cpumask_test_cpu(smp_processor_id(), m);
}
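
/*
 * For reference, a condensed sketch of the helper used above (the
 * authoritative definitions live in include/linux/irq.h): with
 * CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK it returns the mask the chip
 * actually programmed, otherwise it falls back to the general affinity
 * mask, which is what the comment above refers to:
 *
 *	static inline struct cpumask *
 *	irq_data_get_effective_affinity_mask(struct irq_data *d)
 *	{
 *	#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
 *		return d->common->effective_affinity;
 *	#else
 *		return d->common->affinity;
 *	#endif
 *	}
 */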

static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;

	/*
	 * The IRQ chip might already be torn down, but the irq descriptor
	 * is still in the radix tree. Also, if the chip has no affinity
	 * setter, nothing can be done here.
	 */
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}

	/*
	 * No move is required if:
	 * - the interrupt is per-CPU
	 * - the interrupt is not started
	 * - the affinity mask does not include this CPU
	 *
	 * Note: Do not check desc->action, as this might be a chained
	 * interrupt.
	 */
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
		/*
		 * If an irq move is pending, abort it if the dying CPU is
		 * the sole target.
		 */
		irq_fixup_move_pending(desc, false);
		return false;
	}

	/*
	 * Complete any pending irq move cleanup. If this interrupt was
	 * moved in hard interrupt context, leftover vectors still need
	 * to be cleaned up; that cleanup cannot wait until the interrupt
	 * next fires on this CPU, because the CPU is going down.
	 */
	irq_force_complete_move(desc);

	/*
	 * If an affinity change is pending, try to reuse the pending
	 * mask so the most recent affinity setting does not get lost. If
	 * no move is pending, or the pending mask does not contain any
	 * online CPU, use the current affinity mask.
	 */
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);

	/* Mask the chip for interrupts which cannot be moved in process context */
	if (maskchip && chip->irq_mask)
		chip->irq_mask(d);

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If the interrupt is managed, then shut it down and leave
		 * the affinity untouched.
		 */
		if (irqd_affinity_is_managed(d)) {
			irqd_set_managed_shutdown(d);
			irq_shutdown(desc);
			return false;
		}
		affinity = cpu_online_mask;
		brokeaff = true;
	}

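	/*
	 * Try to move the interrupt to the selected mask. If this fails,
	 * the affinity was left untouched, so clear brokeaff and do not
	 * report the interrupt as no longer affine to this CPU.
	 */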
	err = irq_do_set_affinity(d, affinity, true);
	if (err) {
		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
				    d->irq, err);
		brokeaff = false;
	}

	if (maskchip && chip->irq_unmask)
		chip->irq_unmask(d);

	return brokeaff;
}

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from the offline CPU
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken) {
			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					    irq, smp_processor_id());
		}
	}
}
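
/*
 * Typical call site: on arm/arm64 the dying CPU invokes this from its
 * __cpu_disable() callback, after clearing itself from cpu_online_mask
 * and with hardirqs already disabled (which is why the plain
 * raw_spin_lock() above suffices). A sketch of that pattern; the
 * surrounding arch details are illustrative, not part of this file:
 *
 *	int __cpu_disable(void)
 *	{
 *		unsigned int cpu = smp_processor_id();
 *
 *		set_cpu_online(cpu, false);
 *		irq_migrate_all_off_this_cpu();
 *		return 0;
 *	}
 */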

static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

	if (!irqd_affinity_is_managed(data) || !desc->action ||
	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
		return;

	if (irqd_is_managed_and_shutdown(data)) {
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		return;
	}

	/*
	 * If the interrupt can only be directed to a single target
	 * CPU then it is already assigned to a CPU in the affinity
	 * mask. No point in trying to move it around.
	 */
	if (!irqd_is_single_target(data))
		irq_set_affinity_locked(data, affinity, false);
}
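
/*
 * Note the round trip with migrate_one_irq() above: when the last
 * online CPU in a managed interrupt's affinity mask goes down, the
 * interrupt is flagged via irqd_set_managed_shutdown() and shut down.
 * Once a CPU from that mask comes back online, the
 * irqd_is_managed_and_shutdown() check above restarts the interrupt
 * instead of merely reprogramming its affinity.
 */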

/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu:	Upcoming CPU for which interrupts should be restored
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq_lock_sparse();
	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		raw_spin_lock_irq(&desc->lock);
		irq_restore_affinity_of_irq(desc, cpu);
		raw_spin_unlock_irq(&desc->lock);
	}
	irq_unlock_sparse();

	return 0;
}
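
/*
 * irq_affinity_online_cpu() is invoked via the CPU hotplug state
 * machine. A sketch of the registration as found in kernel/cpu.c
 * (the exact table layout varies between kernel versions):
 *
 *	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
 *		.name			= "irq/affinity:online",
 *		.startup.single		= irq_affinity_online_cpu,
 *		.teardown.single	= NULL,
 *	},
 *
 * It runs in the hotplug thread with interrupts enabled, hence the
 * raw_spin_lock_irq() above, in contrast to the plain raw_spin_lock()
 * in irq_migrate_all_off_this_cpu(), which runs on the dying CPU with
 * interrupts already disabled.
 */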