xref: /openbmc/linux/arch/x86/xen/smp_hvm.c (revision 0b64ffb8db4e310f77a01079ca752d946a8526b5)
1a52482d9SVitaly Kuznetsov #include <asm/smp.h>
2a52482d9SVitaly Kuznetsov 
384d582d2SBoris Ostrovsky #include <xen/events.h>
484d582d2SBoris Ostrovsky 
5a52482d9SVitaly Kuznetsov #include "xen-ops.h"
6a52482d9SVitaly Kuznetsov #include "smp.h"
7a52482d9SVitaly Kuznetsov 
8a52482d9SVitaly Kuznetsov 
/*
 * Prepare the boot CPU (CPU 0) for SMP bring-up on a Xen HVM guest:
 * native preparation first, then Xen-specific vcpu_info registration
 * and paravirtual spinlock setup.
 *
 * Must run on CPU 0 only; runs early, before alternatives patching.
 */
static void __init xen_hvm_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/*
	 * Setup vcpu_info for boot CPU. Secondary CPUs get their vcpu_info
	 * in xen_cpu_up_prepare_hvm().
	 */
	xen_vcpu_setup(0);

	/*
	 * The alternative logic (which patches the unlock/lock) runs before
	 * the smp bootup code is activated. Hence we need to set this up
	 * before the core kernel is patched. Otherwise we will have only
	 * modules patched but not core code.
	 */
	xen_init_spinlocks();
}
28a52482d9SVitaly Kuznetsov 
/*
 * Prepare secondary CPUs for bring-up on a Xen HVM guest: do the native
 * preparation, install CPU 0's Xen IPI handlers and PV spinlock state,
 * then invalidate the cached Xen vcpu_id of every non-boot CPU.
 */
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu;

	native_smp_prepare_cpus(max_cpus);
	/* Interrupt setup for CPU 0 is not expected to fail here. */
	WARN_ON(xen_smp_intr_init(0));

	xen_init_lock_cpu(0);

	for_each_possible_cpu(cpu) {
		if (cpu == 0)
			continue;

		/* Set default vcpu_id to make sure that we don't use cpu-0's */
		per_cpu(xen_vcpu_id, cpu) = XEN_VCPU_ID_INVALID;
	}
}
46a52482d9SVitaly Kuznetsov 
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Tear down the Xen-side state of an offlined CPU once the common
 * hotplug path confirms it is really dead: release its IPI interrupts,
 * its PV spinlock event channel, and its Xen timer.
 */
static void xen_hvm_cpu_die(unsigned int cpu)
{
	if (common_cpu_die(cpu) != 0)
		return;

	xen_smp_intr_free(cpu);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);
}
#else
/* CPU hotplug is compiled out; dying CPUs must never reach this hook. */
static void xen_hvm_cpu_die(unsigned int cpu)
{
	BUG();
}
#endif
62a52482d9SVitaly Kuznetsov 
63a52482d9SVitaly Kuznetsov void __init xen_hvm_smp_init(void)
64a52482d9SVitaly Kuznetsov {
6584d582d2SBoris Ostrovsky 	if (!xen_have_vector_callback)
6684d582d2SBoris Ostrovsky 		return;
6784d582d2SBoris Ostrovsky 
68a52482d9SVitaly Kuznetsov 	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
69a52482d9SVitaly Kuznetsov 	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
70a52482d9SVitaly Kuznetsov 	smp_ops.cpu_die = xen_hvm_cpu_die;
71a52482d9SVitaly Kuznetsov 	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
72a52482d9SVitaly Kuznetsov 	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
73a52482d9SVitaly Kuznetsov 	smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
74a52482d9SVitaly Kuznetsov }
75