--- enlighten.c	(ad73fd595c2ab168fdd01a266cbe6e4df95f8db0)
+++ enlighten.c	(0b64ffb8db4e310f77a01079ca752d946a8526b5)
 #include <linux/cpu.h>
 #include <linux/kexec.h>
 
 #include <xen/features.h>
 #include <xen/page.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>

--- 92 unchanged lines hidden ---

 					       xen_cpu_up_online, NULL);
 		if (rc < 0)
 			cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
 	}
 
 	return rc >= 0 ? 0 : rc;
 }
 
+static void xen_vcpu_setup_restore(int cpu)
+{
+	/* Any per_cpu(xen_vcpu) is stale, so reset it */
+	xen_vcpu_info_reset(cpu);
+
+	/*
+	 * For PVH and PVHVM, setup online VCPUs only. The rest will
+	 * be handled by hotplug.
+	 */
+	if (xen_pv_domain() ||
+	    (xen_hvm_domain() && cpu_online(cpu))) {
+		xen_vcpu_setup(cpu);
+	}
+}
+
 /*
  * On restore, set the vcpu placement up again.
  * If it fails, then we're in a bad state, since
  * we can't back out from using it...
  */
 void xen_vcpu_restore(void)
 {
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
 		bool other_cpu = (cpu != smp_processor_id());
-		bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, xen_vcpu_nr(cpu),
-						NULL);
+		bool is_up;
+
+		if (xen_vcpu_nr(cpu) == XEN_VCPU_ID_INVALID)
+			continue;
+
+		/* Only Xen 4.5 and higher support this. */
+		is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up,
+					   xen_vcpu_nr(cpu), NULL) > 0;
 
 		if (other_cpu && is_up &&
 		    HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL))
 			BUG();
 
-		xen_setup_runstate_info(cpu);
+		if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
+			xen_setup_runstate_info(cpu);
 
-		if (xen_have_vcpu_info_placement)
-			xen_vcpu_setup(cpu);
+		xen_vcpu_setup_restore(cpu);
 
 		if (other_cpu && is_up &&
 		    HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL))
 			BUG();
 	}
 }
 
 static void clamp_max_cpus(void)

--- 19 unchanged lines hidden ---

 {
 	struct vcpu_register_vcpu_info info;
 	int err;
 	struct vcpu_info *vcpup;
 
 	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
 
 	/*
-	 * This path is called twice on PVHVM - first during bootup via
-	 * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
-	 * hotplugged: cpu_up -> xen_hvm_cpu_notify.
-	 * As we can only do the VCPUOP_register_vcpu_info once lets
-	 * not over-write its result.
+	 * This path is called on PVHVM at bootup (xen_hvm_smp_prepare_boot_cpu)
+	 * and at restore (xen_vcpu_restore). Also called for hotplugged
+	 * VCPUs (cpu_init -> xen_hvm_cpu_prepare_hvm).
+	 * However, the hypercall can only be done once (see below) so if a VCPU
+	 * is offlined and comes back online then let's not redo the hypercall.
 	 *
 	 * For PV it is called during restore (xen_vcpu_restore) and bootup
 	 * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
 	 * use this function.
 	 */
 	if (xen_hvm_domain()) {
 		if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
 			return;
 	}
 
-	xen_vcpu_info_reset(cpu);
-
 	if (xen_have_vcpu_info_placement) {
 		vcpup = &per_cpu(xen_vcpu_info, cpu);
 		info.mfn = arbitrary_virt_to_mfn(vcpup);
 		info.offset = offset_in_page(vcpup);
 
 		/*
 		 * Check to see if the hypervisor will put the vcpu_info
 		 * structure where we want it, which allows direct access via

--- 18 unchanged lines hidden ---

 			 */
 			per_cpu(xen_vcpu, cpu) = vcpup;
 		}
 	}
 
 	if (!xen_have_vcpu_info_placement) {
 		if (cpu >= MAX_VIRT_CPUS)
 			clamp_max_cpus();
-		return;
+		xen_vcpu_info_reset(cpu);
 	}
 }
 
 void xen_reboot(int reason)
 {
 	struct sched_shutdown r = { .reason = reason };
 	int cpu;
 

--- 83 unchanged lines hidden ---