xref: /openbmc/linux/arch/x86/xen/smp.c (revision f7777dcc)
/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>
#include <linux/tick.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"

cpumask_var_t xen_cpu_initialized_map;

struct xen_common_irq {
	int irq;
	char *name;
};
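/*
 * Per-cpu bookkeeping for the Xen IPI/VIRQ bindings below: .irq is -1
 * while unbound, and .name keeps the kasprintf()ed string passed to
 * bind_*_to_irqhandler() alive so it can be kfree()d on teardown.
 */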
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}

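/*
 * First code run by a freshly started secondary vCPU.  It is reached
 * via cpu_bringup_and_idle(), which cpu_initialize_context() below
 * installs as the vCPU's initial instruction pointer.
 */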
static void cpu_bringup(void)
{
	int cpu;

	cpu_init();
	touch_softlockup_watchdog();
	preempt_disable();

	xen_enable_sysenter();
	xen_enable_syscall();

	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	cpu_data(cpu).x86_max_cores = 1;
	set_cpu_sibling_map(cpu);

	xen_setup_cpu_clockevents();

	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	this_cpu_write(cpu_state, CPU_ONLINE);

	wmb();

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();			/* make sure everything is out */
}

static void cpu_bringup_and_idle(void)
{
	cpu_bringup();
	cpu_startup_entry(CPUHP_ONLINE);
}

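/*
 * Undo the bindings made by xen_smp_intr_init().  Note the early return
 * for HVM domains: there the irq_work IPI stays on the native path, so
 * xen_irq_work was never bound.
 */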
static void xen_smp_intr_free(unsigned int cpu)
{
	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
		per_cpu(xen_resched_irq, cpu).irq = -1;
		kfree(per_cpu(xen_resched_irq, cpu).name);
		per_cpu(xen_resched_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
		per_cpu(xen_callfunc_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfunc_irq, cpu).name);
		per_cpu(xen_callfunc_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
		per_cpu(xen_debug_irq, cpu).irq = -1;
		kfree(per_cpu(xen_debug_irq, cpu).name);
		per_cpu(xen_debug_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
				       NULL);
		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
		per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
	}
	if (xen_hvm_domain())
		return;

	if (per_cpu(xen_irq_work, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
		per_cpu(xen_irq_work, cpu).irq = -1;
		kfree(per_cpu(xen_irq_work, cpu).name);
		per_cpu(xen_irq_work, cpu).name = NULL;
	}
}
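/*
 * Bind the per-cpu IPIs and the VIRQ_DEBUG virtual interrupt to event
 * channels for this cpu.  Each successful binding is recorded so that
 * a failure partway through can unwind via xen_smp_intr_free().
 */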
static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu).irq = rc;
	per_cpu(xen_resched_irq, cpu).name = resched_name;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu).irq = rc;
	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_debug_irq, cpu).irq = rc;
	per_cpu(xen_debug_irq, cpu).name = debug_name;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;

	/*
	 * The IRQ worker on PVHVM goes through the native path and uses the
	 * IPI mechanism.
	 */
	if (xen_hvm_domain())
		return 0;

	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
				    cpu,
				    xen_irq_work_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_irq_work, cpu).irq = rc;
	per_cpu(xen_irq_work, cpu).name = callfunc_name;

	return 0;

 fail:
	xen_smp_intr_free(cpu);
	return rc;
}

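/*
 * On domU, discover the vcpus the toolstack configured by probing each
 * id with VCPUOP_is_up (a negative return means the vcpu does not
 * exist) and mark them possible.  dom0 is handled separately by
 * xen_filter_cpu_maps().
 */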
static void __init xen_fill_possible_map(void)
{
	int i, rc;

	if (xen_initial_domain())
		return;

	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		}
	}
}

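/*
 * On dom0 the ACPI tables enumerate the physical CPUs, which can exceed
 * the vcpus Xen actually provides (e.g. with dom0_max_vcpus=X).  Redo
 * the possible/present masks using VCPUOP_is_up so they match what the
 * hypervisor will really let us run.
 */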
static void __init xen_filter_cpu_maps(void)
{
	int i, rc;
	unsigned int subtract = 0;

	if (!xen_initial_domain())
		return;

	num_processors = 0;
	disabled_cpus = 0;
	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		} else {
			set_cpu_possible(i, false);
			set_cpu_present(i, false);
			subtract++;
		}
	}
#ifdef CONFIG_HOTPLUG_CPU
	/* This is akin to using 'nr_cpus' on the Linux command line.
	 * With 'dom0_max_vcpus=X' we can have at most X vcpus, while
	 * nr_cpu_ids may be greater than X.  That is normally not a
	 * problem, except when CPU hotplugging is involved: the guest
	 * could then be told to bring up more than X CPUs, which cannot
	 * work, as there is no hypercall to expand the maximum number of
	 * vcpus of an already running guest.  So cap nr_cpu_ids at X. */
	if (subtract)
		nr_cpu_ids = nr_cpu_ids - subtract;
#endif
}

static void __init xen_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	if (xen_pv_domain()) {
		/* We've switched to the "real" per-cpu gdt, so make sure the
		   old memory can be recycled */
		make_lowmem_page_readwrite(xen_initial_gdt);

#ifdef CONFIG_X86_32
		/*
		 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
		 * expects __USER_DS
		 */
		loadsegment(ds, __USER_DS);
		loadsegment(es, __USER_DS);
#endif

		xen_filter_cpu_maps();
		xen_setup_vcpu_info_placement();
	}
	/*
	 * The alternative logic (which patches the unlock/lock) runs before
	 * the smp bootup code is activated. Hence we need to set this up
	 * before the core kernel is patched. Otherwise we will have only
	 * modules patched but not core code.
	 */
	xen_init_spinlocks();
}

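/*
 * PV replacement for native_smp_prepare_cpus(): there is no IO-APIC to
 * program, so a leftover "nosmp"/"noapic" setup is fatal here; the rest
 * is the usual sibling-map and boot-cpu IPI setup, plus clamping the
 * possible map to max_cpus.
 */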
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;
	unsigned int i;

	if (skip_ioapic_setup) {
		char *m = (max_cpus == 0) ?
			"The nosmp parameter is incompatible with Xen; " \
			"use Xen dom0_max_vcpus=1 parameter" :
			"The noapic parameter is incompatible with Xen";

		xen_raw_printk(m);
		panic(m);
	}
	xen_init_lock_cpu(0);

	smp_store_boot_cpu_info();
	cpu_data(0).x86_max_cores = 1;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu(cpu)
		set_cpu_present(cpu, true);
}

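/*
 * Build the initial vcpu_guest_context (registers, GDT frame, callback
 * entry points, cr3) for a secondary vcpu and register it with
 * VCPUOP_initialise.  Xen will not accept a writable GDT frame, which
 * is why the page is made read-only in both of its mappings first.
 */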
static int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#else
	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	{
		ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
		ctxt->user_regs.ds = __USER_DS;
		ctxt->user_regs.es = __USER_DS;

		xen_copy_trap_info(ctxt->trap_ctxt);

		ctxt->ldt_ents = 0;

		BUG_ON((unsigned long)gdt & ~PAGE_MASK);

		gdt_mfn = arbitrary_virt_to_mfn(gdt);
		make_lowmem_page_readonly(gdt);
		make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

		ctxt->gdt_frames[0] = gdt_mfn;
		ctxt->gdt_ents      = GDT_ENTRIES;

		ctxt->kernel_ss = __KERNEL_DS;
		ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
		ctxt->event_callback_cs     = __KERNEL_CS;
		ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
		ctxt->event_callback_eip    =
					(unsigned long)xen_hypervisor_callback;
		ctxt->failsafe_callback_eip =
					(unsigned long)xen_failsafe_callback;
	}
	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}

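/*
 * PV cpu_up: prepare the per-cpu state, have Xen initialise the new
 * vcpu, kick it off with VCPUOP_up, and then yield until cpu_bringup()
 * on the new cpu reports CPU_ONLINE.
 */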
static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int rc;

	per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
	irq_ctx_init(cpu);
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
	per_cpu(kernel_stack, cpu) =
		(unsigned long)task_stack_page(idle) -
		KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		/* Just in case we booted with a single CPU. */
		alternatives_enable_smp();

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
		barrier();
	}

	return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	if (cpu == 0)
		return -EBUSY;

	cpu_disable_common();

	load_cr3(swapper_pg_dir);
	return 0;
}

static void xen_cpu_die(unsigned int cpu)
{
	while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	xen_smp_intr_free(cpu);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);
}

static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	cpu_bringup();
	/*
	 * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
	 * clears certain data that the cpu_idle loop (which called us
	 * and that we return from) expects. The only way to get that
	 * data back is to call:
	 */
	tick_nohz_idle_enter();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
	return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
	BUG();
}

static void xen_play_dead(void)
{
	BUG();
}

#endif
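
/*
 * Runs on every cpu targeted by xen_stop_other_cpus(): stop pinning the
 * current page tables, then take the vcpu offline with VCPUOP_down.
 * The hypercall should not return; if it does, something is badly
 * wrong, hence the BUG().
 */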
static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	set_cpu_online(cpu, false);

	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();
}

static void xen_stop_other_cpus(int wait)
{
	smp_call_function(stop_self, NULL, wait);
}

static void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

static void __xen_send_IPI_mask(const struct cpumask *mask,
			      int vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

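/*
 * Map a native IPI vector onto its Xen equivalent, e.g.
 * RESCHEDULE_VECTOR -> XEN_RESCHEDULE_VECTOR, or -1 if there is none.
 */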
static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
#ifdef CONFIG_X86_64
	case NMI_VECTOR:
	case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
		xen_vector = XEN_NMI_VECTOR;
		break;
#endif
	default:
		xen_vector = -1;
		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
			vector);
	}

	return xen_vector;
}

void xen_send_IPI_mask(const struct cpumask *mask,
			      int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}

void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				int vector)
{
	unsigned cpu;
	unsigned int this_cpu = smp_processor_id();
	int xen_vector = xen_map_vector(vector);

	if (!(num_online_cpus() > 1) || (xen_vector < 0))
		return;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		xen_send_IPI_one(cpu, xen_vector);
	}
}

void xen_send_IPI_allbutself(int vector)
{
	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
	irq_enter();
	irq_work_run();
	inc_irq_stat(apic_irq_work_irqs);
	irq_exit();

	return IRQ_HANDLED;
}

static const struct smp_ops xen_smp_ops __initconst = {
	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_smp_prepare_cpus,
	.smp_cpus_done = xen_smp_cpus_done,

	.cpu_up = xen_cpu_up,
	.cpu_die = xen_cpu_die,
	.cpu_disable = xen_cpu_disable,
	.play_dead = xen_play_dead,

	.stop_other_cpus = xen_stop_other_cpus,
	.smp_send_reschedule = xen_smp_send_reschedule,

	.send_call_func_ipi = xen_smp_send_call_function_ipi,
	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

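/* PV boot: install the paravirtualized smp_ops wholesale and build the
 * possible map from what the hypervisor reports. */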
void __init xen_smp_init(void)
{
	smp_ops = xen_smp_ops;
	xen_fill_possible_map();
}

static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	WARN_ON(xen_smp_intr_init(0));

	xen_init_lock_cpu(0);
}

static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc;
	/*
	 * xen_smp_intr_init() needs to run before native_cpu_up()
	 * so that IPI vectors are set up on the booting CPU before
	 * it is marked online in native_cpu_up().
	 */
	rc = xen_smp_intr_init(cpu);
	WARN_ON(rc);
	if (!rc)
		rc = native_cpu_up(cpu, tidle);

	/*
	 * We must initialize the slowpath CPU kicker _after_ the native
	 * path has executed. If we initialized it before, none of the
	 * unlocker IPI kicks would reach the booting CPU, as the booting
	 * CPU had not yet set itself 'online' in cpu_online_mask. That
	 * mask is checked when IPIs are sent (on HVM at least).
	 */
	xen_init_lock_cpu(cpu);
	return rc;
}

static void xen_hvm_cpu_die(unsigned int cpu)
{
	xen_cpu_die(cpu);
	native_cpu_die(cpu);
}

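/*
 * PVHVM keeps the native SMP machinery and only overrides the
 * operations that benefit from event channels.  Without the vector
 * callback there is no per-cpu event upcall, so everything stays
 * fully native.
 */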
void __init xen_hvm_smp_init(void)
{
	if (!xen_have_vector_callback)
		return;
	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
	smp_ops.cpu_up = xen_hvm_cpu_up;
	smp_ops.cpu_die = xen_hvm_cpu_die;
	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
	smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
}