#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "smp.h"

static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) =
	{ .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}

/*
 * Unbind and free the per-cpu IPI and VIRQ interrupts that were set up
 * for @cpu by xen_smp_intr_init().
 */
void xen_smp_intr_free(unsigned int cpu)
{
	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
		per_cpu(xen_resched_irq, cpu).irq = -1;
		kfree(per_cpu(xen_resched_irq, cpu).name);
		per_cpu(xen_resched_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
		per_cpu(xen_callfunc_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfunc_irq, cpu).name);
		per_cpu(xen_callfunc_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
		per_cpu(xen_debug_irq, cpu).irq = -1;
		kfree(per_cpu(xen_debug_irq, cpu).name);
		per_cpu(xen_debug_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
				       NULL);
		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
		per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
	}
}

/*
 * Bind the reschedule, call-function and call-function-single IPIs plus
 * the debug VIRQ for @cpu.  On failure anything bound so far is torn down
 * again via xen_smp_intr_free().
 */
int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu).irq = rc;
	per_cpu(xen_resched_irq, cpu).name = resched_name;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu).irq = rc;
	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_debug_irq, cpu).irq = rc;
	per_cpu(xen_debug_irq, cpu).name = debug_name;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;

	return 0;

 fail:
	xen_smp_intr_free(cpu);
	return rc;
}

/*
 * Without vcpu_info placement there are only MAX_VIRT_CPUS vcpu_info
 * slots in the shared_info page, so any vcpu numbered at or above that
 * limit is taken offline again here.
 */
void __init xen_smp_cpus_done(unsigned int max_cpus)
{
	int cpu, rc, count = 0;

	if (xen_hvm_domain())
		native_smp_cpus_done(max_cpus);

	if (xen_have_vcpu_info_placement)
		return;

	for_each_online_cpu(cpu) {
		if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
			continue;

		rc = cpu_down(cpu);

		if (rc == 0) {
			/*
			 * Reset vcpu_info so this cpu cannot be onlined again.
			 */
			xen_vcpu_info_reset(cpu);
			count++;
		} else {
			pr_warn("%s: failed to bring CPU %d down, error %d\n",
				__func__, cpu, rc);
		}
	}
	WARN(count, "%s: brought %d CPUs offline\n", __func__, count);
}

void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

static void __xen_send_IPI_mask(const struct cpumask *mask,
				int vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			    XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

/*
 * Translate a native x86 IPI vector into the corresponding Xen IPI
 * vector, or return -1 if there is no Xen equivalent.
 */
static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
#ifdef CONFIG_X86_64
	case NMI_VECTOR:
	case APIC_DM_NMI:	/* Some use that instead of NMI_VECTOR */
		xen_vector = XEN_NMI_VECTOR;
		break;
#endif
	default:
		xen_vector = -1;
		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
			vector);
	}

	return xen_vector;
}

void xen_send_IPI_mask(const struct cpumask *mask,
		       int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}

void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				  int vector)
{
	unsigned cpu;
	unsigned int this_cpu = smp_processor_id();
	int xen_vector = xen_map_vector(vector);

	if (!(num_online_cpus() > 1) || (xen_vector < 0))
		return;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		xen_send_IPI_one(cpu, xen_vector);
	}
}

void xen_send_IPI_allbutself(int vector)
{
	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}