// SPDX-License-Identifier: GPL-2.0
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "smp.h"

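/*
 * Per-CPU bookkeeping for the Xen IPI/VIRQ bindings: .irq holds the
 * Linux irq number returned by the bind call (-1 while unbound) and
 * .name the kasprintf()-allocated string handed to the handler.
 */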
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule callback.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}

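/*
 * Tear down the per-CPU bindings set up by xen_smp_intr_init(). Safe to
 * call on a partially initialized CPU: names not yet allocated are NULL
 * (kfree(NULL) is a no-op) and unbound irqs are still -1.
 */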
void xen_smp_intr_free(unsigned int cpu)
{
	kfree(per_cpu(xen_resched_irq, cpu).name);
	per_cpu(xen_resched_irq, cpu).name = NULL;
	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
		per_cpu(xen_resched_irq, cpu).irq = -1;
	}
	kfree(per_cpu(xen_callfunc_irq, cpu).name);
	per_cpu(xen_callfunc_irq, cpu).name = NULL;
	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
		per_cpu(xen_callfunc_irq, cpu).irq = -1;
	}
	kfree(per_cpu(xen_debug_irq, cpu).name);
	per_cpu(xen_debug_irq, cpu).name = NULL;
	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
		per_cpu(xen_debug_irq, cpu).irq = -1;
	}
	kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
	per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
				       NULL);
		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
	}
}

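/*
 * Bind the rescheduling and call-function IPIs for @cpu, plus the debug
 * VIRQ when the FIFO event-channel ABI is not in use. On any failure,
 * everything bound so far is unwound via xen_smp_intr_free() and an
 * error code is returned.
 */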
int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	if (!resched_name)
		goto fail_mem;
	per_cpu(xen_resched_irq, cpu).name = resched_name;
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_PERCPU | IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu).irq = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	if (!callfunc_name)
		goto fail_mem;
	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_PERCPU | IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu).irq = rc;

	if (!xen_fifo_events) {
		debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
		if (!debug_name)
			goto fail_mem;

		per_cpu(xen_debug_irq, cpu).name = debug_name;
		rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
					     xen_debug_interrupt,
					     IRQF_PERCPU | IRQF_NOBALANCING,
					     debug_name, NULL);
		if (rc < 0)
			goto fail;
		per_cpu(xen_debug_irq, cpu).irq = rc;
	}

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	if (!callfunc_name)
		goto fail_mem;

	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_PERCPU | IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;

	return 0;

 fail_mem:
	rc = -ENOMEM;
 fail:
	xen_smp_intr_free(cpu);
	return rc;
}
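
/*
 * Usage sketch (illustration only, not part of this file): the real
 * callers live in the PV/HVM SMP bringup code; a minimal hotplug-style
 * caller would look roughly like the hypothetical pair below. The
 * example_* names are made up for this sketch.
 */
#if 0
static int example_xen_cpu_up(unsigned int cpu)
{
	return xen_smp_intr_init(cpu);	/* bind the per-cpu interrupts */
}

static int example_xen_cpu_dead(unsigned int cpu)
{
	xen_smp_intr_free(cpu);		/* unbind and free the names */
	return 0;
}
#endif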
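/*
 * Called once all CPUs are up: HVM guests still use the native path,
 * while PV guests only need the logical-package bookkeeping.
 */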
void __init xen_smp_cpus_done(unsigned int max_cpus)
{
	if (xen_hvm_domain())
		native_smp_cpus_done(max_cpus);
	else
		calculate_max_logical_packages();
}

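/* Kick @cpu by sending it the rescheduling IPI. */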
void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

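/*
 * Deliver a Xen IPI vector to every online CPU in @mask, one
 * xen_send_IPI_one() notification per target.
 */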
static void __xen_send_IPI_mask(const struct cpumask *mask, int vector)
{
	unsigned int cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

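/*
 * Send the call-function IPI to @mask. If any target vCPU is currently
 * preempted by Xen, yield once so it gets a chance to run and respond.
 */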
void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

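/*
 * Translate a native x86 IPI vector number into the corresponding
 * Xen vector, or -1 (after logging an error) if there is no mapping.
 */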
static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
#ifdef CONFIG_X86_64
	case NMI_VECTOR:
	case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
		xen_vector = XEN_NMI_VECTOR;
		break;
#endif
	default:
		xen_vector = -1;
		pr_err("xen: vector 0x%x is not implemented\n", vector);
	}

	return xen_vector;
}

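/*
 * xen_send_IPI_* entry points taking native vector numbers: each maps
 * the vector via xen_map_vector() and drops vectors with no Xen
 * equivalent (xen_map_vector() has already logged the error).
 */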
void xen_send_IPI_mask(const struct cpumask *mask, int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}

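/*
 * Send @vector to the online CPUs in @mask except the sending CPU.
 * Nothing to do on a single-CPU system or for an unmapped vector.
 */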
void xen_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int cpu;
	unsigned int this_cpu = smp_processor_id();
	int xen_vector = xen_map_vector(vector);

	if (num_online_cpus() <= 1 || xen_vector < 0)
		return;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		xen_send_IPI_one(cpu, xen_vector);
	}
}

void xen_send_IPI_allbutself(int vector)
{
	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

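/*
 * Handlers for the two call-function IPIs: hand off to the generic
 * smp_call_function machinery and account the interrupt.
 */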
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);

	return IRQ_HANDLED;
}