xref: /openbmc/linux/arch/x86/xen/smp.c (revision fcc8487d)
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "smp.h"

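/*
 * Per-cpu bookkeeping for the Xen IPI/VIRQ bindings.  An irq of -1
 * means "not bound", which lets xen_smp_intr_free() run safely even if
 * xen_smp_intr_init() failed partway through.
 */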
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule callback.  Account the IPI and kick the scheduler via
 * scheduler_ipi().
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}

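/*
 * Tear down the per-cpu IPI/VIRQ bindings.  Safe to call on a partially
 * initialized CPU: each slot is only unbound if its irq is valid, and is
 * reset to -1 afterwards, so the function is idempotent.
 */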
void xen_smp_intr_free(unsigned int cpu)
{
	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
		per_cpu(xen_resched_irq, cpu).irq = -1;
		kfree(per_cpu(xen_resched_irq, cpu).name);
		per_cpu(xen_resched_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
		per_cpu(xen_callfunc_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfunc_irq, cpu).name);
		per_cpu(xen_callfunc_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
		per_cpu(xen_debug_irq, cpu).irq = -1;
		kfree(per_cpu(xen_debug_irq, cpu).name);
		per_cpu(xen_debug_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
				       NULL);
		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
		per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
	}
}

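/*
 * Bind the reschedule, call-function and call-function-single IPIs plus
 * the VIRQ_DEBUG virq for @cpu.  On any failure, everything bound so far
 * is released through xen_smp_intr_free() and the error is returned.
 */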
int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu).irq = rc;
	per_cpu(xen_resched_irq, cpu).name = resched_name;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu).irq = rc;
	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_debug_irq, cpu).irq = rc;
	per_cpu(xen_debug_irq, cpu).name = debug_name;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;

	return 0;

 fail:
	xen_smp_intr_free(cpu);
	return rc;
}

void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

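/* Deliver an already-translated Xen vector to every online CPU in @mask. */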
static void __xen_send_IPI_mask(const struct cpumask *mask,
				int vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

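/*
 * Send the call-function IPI to every CPU in @mask.  If any target vCPU
 * is currently preempted by the hypervisor (its time is "stolen"), yield
 * this vCPU so the target can run and service the IPI sooner.
 */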
void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			    XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

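/*
 * Translate a native x86 IPI vector into the Xen IPI enumeration used by
 * the event channel layer.  Returns -1 (and logs an error) for vectors
 * that have no Xen equivalent.
 */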
static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
#ifdef CONFIG_X86_64
	case NMI_VECTOR:
	case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
		xen_vector = XEN_NMI_VECTOR;
		break;
#endif
	default:
		xen_vector = -1;
		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
			vector);
	}

	return xen_vector;
}

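/*
 * The senders below take native x86 vector numbers and translate them
 * through xen_map_vector() before delivery, dropping any vector that
 * does not map.
 */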
void xen_send_IPI_mask(const struct cpumask *mask,
		       int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}

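/* As above, but skip the sending CPU even when it appears in @mask. */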
void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				  int vector)
{
	unsigned cpu;
	unsigned int this_cpu = smp_processor_id();
	int xen_vector = xen_map_vector(vector);

	if (num_online_cpus() <= 1 || xen_vector < 0)
		return;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		xen_send_IPI_one(cpu, xen_vector);
	}
}

void xen_send_IPI_allbutself(int vector)
{
	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

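/*
 * Call-function IPI handlers.  The generic smp_call_function core does
 * the real work; these wrappers just account the interrupt and bracket
 * it with irq_enter()/irq_exit().
 */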
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}