/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>

#include "internals.h"

/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);

	return cpumask_test_cpu(smp_processor_id(), m);
}

static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;

	/*
	 * IRQ chip might be already torn down, but the irq descriptor is
	 * still in the radix tree. Also if the chip has no affinity setter,
	 * nothing can be done here.
	 */
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}

	/*
	 * No move required, if:
	 *  - Interrupt is per cpu
	 *  - Interrupt is not started
	 *  - Affinity mask does not include this CPU.
	 *
	 * Note: Do not check desc->action as this might be a chained
	 * interrupt.
	 */
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
		/*
		 * If an irq move is pending, abort it if the dying CPU is
		 * the sole target.
		 */
		irq_fixup_move_pending(desc, false);
		return false;
	}

	/*
	 * Complete an eventually pending irq move cleanup. If this
	 * interrupt was moved in hard irq context, then the vectors need
	 * to be cleaned up. It can't wait until this interrupt actually
	 * happens and this CPU was involved.
	 */
	irq_force_complete_move(desc);

	/*
	 * If there is a setaffinity pending, then try to reuse the pending
	 * mask, so the last change of the affinity does not get lost. If
	 * there is no move pending or the pending mask does not contain
	 * any online CPU, use the current affinity mask.
	 */
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);

	/* Mask the chip for interrupts which cannot move in process context */
	if (maskchip && chip->irq_mask)
		chip->irq_mask(d);

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If the interrupt is managed, then shut it down and leave
		 * the affinity untouched.
		 */
		if (irqd_affinity_is_managed(d)) {
			irqd_set_managed_shutdown(d);
			irq_shutdown(desc);
			return false;
		}
		affinity = cpu_online_mask;
		brokeaff = true;
	}
	/*
	 * Do not set the force argument of irq_do_set_affinity() as this
	 * disables the masking of offline CPUs from the supplied affinity
	 * mask and therefore might keep/reassign the irq to the outgoing
	 * CPU.
	 */
	err = irq_do_set_affinity(d, affinity, false);
	if (err) {
		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
				    d->irq, err);
		brokeaff = false;
	}

	if (maskchip && chip->irq_unmask)
		chip->irq_unmask(d);

	return brokeaff;
}

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken) {
			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					    irq, smp_processor_id());
		}
	}
}

static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

	if (!irqd_affinity_is_managed(data) || !desc->action ||
	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
		return;

	if (irqd_is_managed_and_shutdown(data)) {
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		return;
	}

	/*
	 * If the interrupt can only be directed to a single target
	 * CPU then it is already assigned to a CPU in the affinity
	 * mask. No point in trying to move it around.
	 */
	if (!irqd_is_single_target(data))
		irq_set_affinity_locked(data, affinity, false);
}

/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu: Upcoming CPU for which interrupts should be restored
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq_lock_sparse();
	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		raw_spin_lock_irq(&desc->lock);
		irq_restore_affinity_of_irq(desc, cpu);
		raw_spin_unlock_irq(&desc->lock);
	}
	irq_unlock_sparse();

	return 0;
}
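
/*
 * Usage sketch (illustrative, not part of this file): the online path is
 * driven by the CPU hotplug state machine, where irq_affinity_online_cpu()
 * is registered as the startup callback of CPUHP_AP_IRQ_AFFINITY_ONLINE in
 * kernel/cpu.c, roughly:
 *
 *	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
 *		.name			= "irq/affinity:online",
 *		.startup.single		= irq_affinity_online_cpu,
 *		.teardown.single	= NULL,
 *	},
 *
 * The offline path is not a hotplug state: irq_migrate_all_off_this_cpu()
 * is called directly from the architecture's CPU disable code (e.g.
 * __cpu_disable() on arm/arm64) with interrupts disabled on the outgoing
 * CPU, which is why desc->lock is taken with a plain raw_spin_lock() there.
 */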