// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"

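/*
 * Reflect a nested page fault into an SVM_EXIT_NPF vmexit for L1.  The
 * low 32 bits of exit_info_1 carry the page-fault error code; the high
 * bits are not tracked yet (see the TODO below).
 */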
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	nested_svm_vmexit(svm);
}

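/*
 * A #PF raised while running L2 either becomes an SVM_EXIT_EXCP_BASE +
 * PF_VECTOR vmexit to L1 (when L1 intercepts #PF) or is injected
 * directly into L2.  No vmexit is synthesized while nested_run_pending,
 * i.e. while VMRUN emulation is still in progress.
 */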
static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
	    !svm->nested.nested_run_pending) {
		svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = fault->error_code;
		svm->vmcb->control.exit_info_2 = fault->address;
		nested_svm_vmexit(svm);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}

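/*
 * Read one 8-byte PDPTE from the PAE page table referenced by L1's
 * nested_cr3.  A failed read yields 0, i.e. a not-present entry.
 */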
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *hsave = svm->nested.hsave;

	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, hsave->save.cr4, hsave->save.efer,
				svm->nested.ctl.nested_cr3);
	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h, *g;
	unsigned int i;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested.ctl;

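	/* Start from KVM's own (L0) intercepts; L1's are ORed in below. */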
	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] = h->intercepts[i];

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
		vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		vmcb_clr_intercept(c, INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] |= g->intercepts[i];
}

static void copy_vmcb_control_area(struct vmcb_control_area *dst,
				   struct vmcb_control_area *from)
{
	unsigned int i;

	for (i = 0; i < MAX_INTERCEPT; i++)
		dst->intercepts[i] = from->intercepts[i];

	dst->iopm_base_pa         = from->iopm_base_pa;
	dst->msrpm_base_pa        = from->msrpm_base_pa;
	dst->tsc_offset           = from->tsc_offset;
	/* asid not copied, it is handled manually for svm->vmcb.  */
	dst->tlb_ctl              = from->tlb_ctl;
	dst->int_ctl              = from->int_ctl;
	dst->int_vector           = from->int_vector;
	dst->int_state            = from->int_state;
	dst->exit_code            = from->exit_code;
	dst->exit_code_hi         = from->exit_code_hi;
	dst->exit_info_1          = from->exit_info_1;
	dst->exit_info_2          = from->exit_info_2;
	dst->exit_int_info        = from->exit_int_info;
	dst->exit_int_info_err    = from->exit_int_info_err;
	dst->nested_ctl           = from->nested_ctl;
	dst->event_inj            = from->event_inj;
	dst->event_inj_err        = from->event_inj_err;
	dst->nested_cr3           = from->nested_cr3;
	dst->virt_ext             = from->virt_ext;
	dst->pause_filter_count   = from->pause_filter_count;
	dst->pause_filter_thresh  = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the MSR permission bitmaps of KVM and the
	 * nested vmcb.  It is optimized in that it only merges the parts
	 * where the KVM MSR permission bitmap may contain zero bits.
	 */
	int i;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p      = msrpm_offsets[i];
		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

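		/* Intercept the MSR if either L0 or L1 wants it intercepted. */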
		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (WARN_ON(!is_guest_mode(vcpu)))
		return true;

	if (!nested_svm_vmrun_msrpm(svm)) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror =
			KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		return false;
	}

	return true;
}

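/*
 * Consistency checks on the VMCB12 control area, mirroring the checks
 * the CPU performs on VMRUN: the VMRUN intercept must be set, ASID 0 is
 * reserved for the host, and nested paging can only be requested when
 * L0 itself has NPT enabled.
 */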
static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
{
	if ((vmcb_is_intercept(control, INTERCEPT_VMRUN)) == 0)
		return false;

	if (control->asid == 0)
		return false;

	if ((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
	    !npt_enabled)
		return false;

	return true;
}

static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	bool vmcb12_lma;

	if ((vmcb12->save.efer & EFER_SVME) == 0)
		return false;

	if (((vmcb12->save.cr0 & X86_CR0_CD) == 0) && (vmcb12->save.cr0 & X86_CR0_NW))
		return false;

	if (!kvm_dr6_valid(vmcb12->save.dr6) || !kvm_dr7_valid(vmcb12->save.dr7))
		return false;

	vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);

	if (vmcb12_lma) {
		if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
		    !(vmcb12->save.cr0 & X86_CR0_PE) ||
		    kvm_vcpu_is_illegal_gpa(vcpu, vmcb12->save.cr3))
			return false;
	}
	if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
		return false;

	return nested_vmcb_check_controls(&vmcb12->control);
}

static void load_nested_vmcb_control(struct vcpu_svm *svm,
				     struct vmcb_control_area *control)
{
	copy_vmcb_control_area(&svm->nested.ctl, control);

	/* Copy it here because nested_vmcb_check_controls will check it.  */
	svm->nested.ctl.asid           = control->asid;
	svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
	svm->nested.ctl.iopm_base_pa  &= ~0x0fffULL;
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the nested_vmcb.
 */
void sync_nested_vmcb_control(struct vcpu_svm *svm)
{
	u32 mask;

	svm->nested.ctl.event_inj      = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err  = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor.  */
	mask = V_IRQ_MASK | V_TPR_MASK;
	if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
	    svm_is_intercept(svm, INTERCEPT_VINTR)) {
		/*
		 * In order to request an interrupt window, L0 is usurping
		 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
		 * even if it was clear in L1's VMCB.  Restoring it would be
		 * wrong.  However, in this case V_IRQ will remain true until
		 * interrupt_window_interception calls svm_clear_vintr and
		 * restores int_ctl.  We can just leave it aside.
		 */
		mask &= ~V_IRQ_MASK;
	}
	svm->nested.ctl.int_ctl        &= ~mask;
	svm->nested.ctl.int_ctl        |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_vmcb_save_pending_event(struct vcpu_svm *svm,
					   struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 exit_int_info = 0;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

		if (vcpu->arch.exception.has_error_code) {
			exit_int_info |= SVM_EVTINJ_VALID_ERR;
			vmcb12->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}

	} else if (vcpu->arch.nmi_injected) {
		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID;

		if (vcpu->arch.interrupt.soft)
			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
		else
			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
	}

	vmcb12->control.exit_int_info = exit_int_info;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt)
{
	if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
		return -EINVAL;

	if (!nested_npt && is_pae_paging(vcpu) &&
	    (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
		if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
			return -EINVAL;
	}

	/*
	 * TODO: optimize unconditional TLB flush/MMU sync here and in
	 * kvm_init_shadow_npt_mmu().
	 */
	if (!nested_npt)
		kvm_mmu_new_pgd(vcpu, cr3, false, false);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	kvm_init_mmu(vcpu, false);

	return 0;
}

static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	/* Load the nested guest state */
	svm->vmcb->save.es = vmcb12->save.es;
	svm->vmcb->save.cs = vmcb12->save.cs;
	svm->vmcb->save.ss = vmcb12->save.ss;
	svm->vmcb->save.ds = vmcb12->save.ds;
	svm->vmcb->save.gdtr = vmcb12->save.gdtr;
	svm->vmcb->save.idtr = vmcb12->save.idtr;
	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
	svm_set_efer(&svm->vcpu, vmcb12->save.efer);
	svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
	svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
	kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
	kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
	kvm_rip_write(&svm->vcpu, vmcb12->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = vmcb12->save.rax;
	svm->vmcb->save.rsp = vmcb12->save.rsp;
	svm->vmcb->save.rip = vmcb12->save.rip;
	svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
	svm->vcpu.arch.dr6  = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
	svm->vmcb->save.cpl = vmcb12->save.cpl;
}

static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
{
	const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;

	if (nested_npt_enabled(svm))
		nested_svm_init_mmu_context(&svm->vcpu);

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;

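	/*
	 * Bits in @mask (virtual interrupt masking and virtual GIF) are
	 * preserved from vmcb01 via hsave; everything else in int_ctl
	 * comes from VMCB12.
	 */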
	svm->vmcb->control.int_ctl             =
		(svm->nested.ctl.int_ctl & ~mask) |
		(svm->nested.hsave->control.int_ctl & mask);

	svm->vmcb->control.virt_ext            = svm->nested.ctl.virt_ext;
	svm->vmcb->control.int_vector          = svm->nested.ctl.int_vector;
	svm->vmcb->control.int_state           = svm->nested.ctl.int_state;
	svm->vmcb->control.event_inj           = svm->nested.ctl.event_inj;
	svm->vmcb->control.event_inj_err       = svm->nested.ctl.event_inj_err;

	svm->vmcb->control.pause_filter_count  = svm->nested.ctl.pause_filter_count;
	svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;

	/* Enter Guest-Mode */
	enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest mode to take effect here
	 */
	recalc_intercepts(svm);

	vmcb_mark_all_dirty(svm->vmcb);
}

int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
			 struct vmcb *vmcb12)
{
	int ret;

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
			       vmcb12->save.rip,
			       vmcb12->control.int_ctl,
			       vmcb12->control.event_inj,
			       vmcb12->control.nested_ctl);

	trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
				    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
				    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
				    vmcb12->control.intercepts[INTERCEPT_WORD3],
				    vmcb12->control.intercepts[INTERCEPT_WORD4],
				    vmcb12->control.intercepts[INTERCEPT_WORD5]);

	svm->nested.vmcb12_gpa = vmcb12_gpa;
	load_nested_vmcb_control(svm, &vmcb12->control);
	nested_prepare_vmcb_control(svm);
	nested_prepare_vmcb_save(svm, vmcb12);

	ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
				  nested_npt_enabled(svm));
	if (ret)
		return ret;

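	/*
	 * With shadow paging, a #PF inside L2 may need to be reflected to
	 * L1 as a vmexit; install the interceptor that makes that call.
	 */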
	if (!npt_enabled)
		svm->vcpu.arch.mmu->inject_page_fault = svm_inject_page_fault_nested;

	svm_set_gif(svm, true);

	return 0;
}

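/*
 * Handle VMRUN from L1.  A minimal guest-side sketch (not from this
 * file) of what leads here:
 *
 *	mov	rax, vmcb12_gpa	; rAX = guest-physical address of VMCB12
 *	vmrun			; intercepted by L0, handled below
 */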
int nested_svm_vmrun(struct vcpu_svm *svm)
{
	int ret;
	struct vmcb *vmcb12;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
	u64 vmcb12_gpa;

	if (is_smm(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	vmcb12_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb12_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(&svm->vcpu);
	}

	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	vmcb12 = map.hva;

	if (WARN_ON_ONCE(!svm->nested.initialized))
		return -EINVAL;

	if (!nested_vmcb_checks(svm, vmcb12)) {
		vmcb12->control.exit_code    = SVM_EXIT_ERR;
		vmcb12->control.exit_code_hi = 0;
		vmcb12->control.exit_info_1  = 0;
		vmcb12->control.exit_info_2  = 0;
		goto out;
	}

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/*
	 * Save the old vmcb, so we don't need to pick what we save, but can
	 * restore everything when a VMEXIT occurs
	 */
	hsave->save.es     = vmcb->save.es;
	hsave->save.cs     = vmcb->save.cs;
	hsave->save.ss     = vmcb->save.ss;
	hsave->save.ds     = vmcb->save.ds;
	hsave->save.gdtr   = vmcb->save.gdtr;
	hsave->save.idtr   = vmcb->save.idtr;
	hsave->save.efer   = svm->vcpu.arch.efer;
	hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
	hsave->save.cr4    = svm->vcpu.arch.cr4;
	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
	hsave->save.rip    = kvm_rip_read(&svm->vcpu);
	hsave->save.rsp    = vmcb->save.rsp;
	hsave->save.rax    = vmcb->save.rax;
	if (npt_enabled)
		hsave->save.cr3    = vmcb->save.cr3;
	else
		hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);

	copy_vmcb_control_area(&hsave->control, &vmcb->control);

	svm->nested.nested_run_pending = 1;

	if (enter_svm_guest_mode(svm, vmcb12_gpa, vmcb12))
		goto out_exit_err;

	if (nested_svm_vmrun_msrpm(svm))
		goto out;

out_exit_err:
	svm->nested.nested_run_pending = 0;

	svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1  = 0;
	svm->vmcb->control.exit_info_2  = 0;

	nested_svm_vmexit(svm);

out:
	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	return ret;
}

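/*
 * Copy the state touched by VMLOAD/VMSAVE: FS, GS, TR, LDTR and the
 * MSR-backed fields (KERNEL_GS_BASE, STAR/LSTAR/CSTAR/SFMASK and the
 * SYSENTER MSRs).
 */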
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

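/*
 * Emulate a #VMEXIT from L2 to L1: publish the L2 state the CPU wrote
 * into VMCB12, restore L1's saved state from hsave into the active
 * VMCB, and switch the MMU back to L1's paging mode.
 */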
int nested_svm_vmexit(struct vcpu_svm *svm)
{
	int rc;
	struct vmcb *vmcb12;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;

	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	vmcb12 = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(&svm->vcpu);
	svm->nested.vmcb12_gpa = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */

	vmcb12->save.es     = vmcb->save.es;
	vmcb12->save.cs     = vmcb->save.cs;
	vmcb12->save.ss     = vmcb->save.ss;
	vmcb12->save.ds     = vmcb->save.ds;
	vmcb12->save.gdtr   = vmcb->save.gdtr;
	vmcb12->save.idtr   = vmcb->save.idtr;
	vmcb12->save.efer   = svm->vcpu.arch.efer;
	vmcb12->save.cr0    = kvm_read_cr0(&svm->vcpu);
	vmcb12->save.cr3    = kvm_read_cr3(&svm->vcpu);
	vmcb12->save.cr2    = vmcb->save.cr2;
	vmcb12->save.cr4    = svm->vcpu.arch.cr4;
	vmcb12->save.rflags = kvm_get_rflags(&svm->vcpu);
	vmcb12->save.rip    = kvm_rip_read(&svm->vcpu);
	vmcb12->save.rsp    = kvm_rsp_read(&svm->vcpu);
	vmcb12->save.rax    = kvm_rax_read(&svm->vcpu);
	vmcb12->save.dr7    = vmcb->save.dr7;
	vmcb12->save.dr6    = svm->vcpu.arch.dr6;
	vmcb12->save.cpl    = vmcb->save.cpl;

	vmcb12->control.int_state         = vmcb->control.int_state;
	vmcb12->control.exit_code         = vmcb->control.exit_code;
	vmcb12->control.exit_code_hi      = vmcb->control.exit_code_hi;
	vmcb12->control.exit_info_1       = vmcb->control.exit_info_1;
	vmcb12->control.exit_info_2       = vmcb->control.exit_info_2;

	if (vmcb12->control.exit_code != SVM_EXIT_ERR)
		nested_vmcb_save_pending_event(svm, vmcb12);

	if (svm->nrips_enabled)
		vmcb12->control.next_rip  = vmcb->control.next_rip;

	vmcb12->control.int_ctl           = svm->nested.ctl.int_ctl;
	vmcb12->control.tlb_ctl           = svm->nested.ctl.tlb_ctl;
	vmcb12->control.event_inj         = svm->nested.ctl.event_inj;
	vmcb12->control.event_inj_err     = svm->nested.ctl.event_inj_err;

	vmcb12->control.pause_filter_count =
		svm->vmcb->control.pause_filter_count;
	vmcb12->control.pause_filter_thresh =
		svm->vmcb->control.pause_filter_thresh;

	/* Restore the original control entries */
	copy_vmcb_control_area(&vmcb->control, &hsave->control);

	/* On vmexit the GIF is set to false */
	svm_set_gif(svm, false);

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset;

	svm->nested.ctl.nested_cr3 = 0;

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	kvm_set_rflags(&svm->vcpu, hsave->save.rflags | X86_EFLAGS_FIXED);
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	kvm_rax_write(&svm->vcpu, hsave->save.rax);
	kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
	kvm_rip_write(&svm->vcpu, hsave->save.rip);
	svm->vmcb->save.dr7 = DR7_FIXED_1;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	vmcb_mark_all_dirty(svm->vmcb);

	trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
				       vmcb12->control.exit_info_1,
				       vmcb12->control.exit_info_2,
				       vmcb12->control.exit_int_info,
				       vmcb12->control.exit_int_info_err,
				       KVM_ISA_SVM);

	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	nested_svm_uninit_mmu_context(&svm->vcpu);

	rc = nested_svm_load_cr3(&svm->vcpu, hsave->save.cr3, false);
	if (rc)
		return 1;

	if (npt_enabled)
		svm->vmcb->save.cr3 = hsave->save.cr3;

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	return 0;
}

int svm_allocate_nested(struct vcpu_svm *svm)
{
	struct page *hsave_page;

	if (svm->nested.initialized)
		return 0;

	hsave_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!hsave_page)
		return -ENOMEM;
	svm->nested.hsave = page_address(hsave_page);

	svm->nested.msrpm = svm_vcpu_alloc_msrpm();
	if (!svm->nested.msrpm)
		goto err_free_hsave;
	svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);

	svm->nested.initialized = true;
	return 0;

err_free_hsave:
	__free_page(hsave_page);
	return -ENOMEM;
}

void svm_free_nested(struct vcpu_svm *svm)
{
	if (!svm->nested.initialized)
		return;

	svm_vcpu_free_msrpm(svm->nested.msrpm);
	svm->nested.msrpm = NULL;

	__free_page(virt_to_page(svm->nested.hsave));
	svm->nested.hsave = NULL;

	svm->nested.initialized = false;
}

/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void svm_leave_nested(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu)) {
		struct vmcb *hsave = svm->nested.hsave;
		struct vmcb *vmcb = svm->vmcb;

		svm->nested.nested_run_pending = 0;
		leave_guest_mode(&svm->vcpu);
		copy_vmcb_control_area(&vmcb->control, &hsave->control);
		nested_svm_uninit_mmu_context(&svm->vcpu);
		vmcb_mark_all_dirty(svm->vmcb);
	}

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
}

static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

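	/*
	 * The MSRPM uses two bits per MSR (even bit = read, odd bit =
	 * write), so one 32-bit word covers 16 MSRs; svm_msrpm_offset()
	 * locates the word for this MSR.
	 */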
	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write  = svm->vmcb->control.exit_info_1 & 1;
	mask   = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* The offset is in 32-bit words; we need it in bytes. */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

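	/*
	 * The IOPM holds one bit per port.  An access of @size bytes tests
	 * @size consecutive bits; when those straddle a byte boundary, two
	 * bytes of the bitmap have to be read.
	 */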
	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa  = svm->nested.ctl.iopm_base_pa + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		/*
		 * Host-intercepted exceptions have been checked already in
		 * nested_svm_exit_special.  There is nothing to do here,
		 * the vmexit is injected by svm_check_nested_events.
		 */
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
	    !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
}

static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;

	if (svm->vcpu.arch.exception.has_error_code)
		svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (nr == PF_VECTOR) {
		if (svm->vcpu.arch.exception.nested_apf)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
		else if (svm->vcpu.arch.exception.has_payload)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
		else
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
	} else if (nr == DB_VECTOR) {
		/* See inject_pending_event.  */
		kvm_deliver_exception_payload(&svm->vcpu);
		if (svm->vcpu.arch.dr7 & DR7_GD) {
			svm->vcpu.arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(&svm->vcpu);
		}
	} else
		WARN_ON(svm->vcpu.arch.exception.has_payload);

	nested_svm_vmexit(svm);
}

static void nested_svm_smi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_SMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_nmi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_intr(struct vcpu_svm *svm)
{
	trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);

	svm->vmcb->control.exit_code   = SVM_EXIT_INTR;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
}

static void nested_svm_init(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code   = SVM_EXIT_INIT;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

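	/*
	 * Events are evaluated in priority order: INIT, exception, SMI,
	 * NMI, external interrupt.  Each either triggers a vmexit to L1
	 * (when L1 intercepts it), is left for normal handling, or is
	 * deferred with -EBUSY while vmexits are blocked.
	 */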
	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_init(svm))
			return 0;
		nested_svm_init(svm);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_exception(svm))
			return 0;
		nested_svm_inject_exception_vmexit(svm);
		return 0;
	}

	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_smi(svm))
			return 0;
		nested_svm_smi(svm);
		return 0;
	}

	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(svm))
			return 0;
		nested_svm_nmi(svm);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(svm))
			return 0;
		nested_svm_intr(svm);
		return 0;
	}

	return 0;
}

int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (get_host_vmcb(svm)->control.intercepts[INTERCEPT_EXCEPTION] &
				excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_flags)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;
		break;
	}
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

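/*
 * KVM_GET_NESTED_STATE: a struct kvm_nested_state header followed by
 * one KVM_STATE_NESTED_SVM_VMCB_SIZE blob holding the VMCB12 control
 * area and the saved L1 state.
 */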
static int svm_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{
	struct vcpu_svm *svm;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
		.format = KVM_STATE_NESTED_FORMAT_SVM,
		.size = sizeof(kvm_state),
	};
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];

	if (!vcpu)
		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;

	svm = to_svm(vcpu);

	if (user_data_size < kvm_state.size)
		goto out;

	/* First fill in the header and copy it out.  */
	if (is_guest_mode(vcpu)) {
		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

		if (svm->nested.nested_run_pending)
			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
	}

	if (gif_set(svm))
		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;

	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;

	if (!is_guest_mode(vcpu))
		goto out;

	/*
	 * Copy over the full size of the VMCB rather than just the size
	 * of the structs.
	 */
	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
		return -EFAULT;
	if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
			 sizeof(user_vmcb->control)))
		return -EFAULT;
	if (copy_to_user(&user_vmcb->save, &svm->nested.hsave->save,
			 sizeof(user_vmcb->save)))
		return -EFAULT;

out:
	return kvm_state.size;
}

static int svm_set_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];
	struct vmcb_control_area *ctl;
	struct vmcb_save_area *save;
	int ret;
	u32 cr0;

	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
		     KVM_STATE_NESTED_SVM_VMCB_SIZE);

	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
		return -EINVAL;

	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
				 KVM_STATE_NESTED_RUN_PENDING |
				 KVM_STATE_NESTED_GIF_SET))
		return -EINVAL;

	/*
	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
	 */
	if (!(vcpu->arch.efer & EFER_SVME)) {
		/* GIF=1 and no guest mode are required if SVME=0.  */
		if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
			return -EINVAL;
	}

	/* SMM temporarily disables SVM, so we cannot be in guest mode.  */
	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
		svm_leave_nested(svm);
		svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
		return 0;
	}

	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
		return -EINVAL;
	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
		return -EINVAL;

	ret  = -ENOMEM;
	ctl  = kzalloc(sizeof(*ctl),  GFP_KERNEL);
	save = kzalloc(sizeof(*save), GFP_KERNEL);
	if (!ctl || !save)
		goto out_free;

	ret = -EFAULT;
	if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
		goto out_free;
	if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
		goto out_free;

	ret = -EINVAL;
	if (!nested_vmcb_check_controls(ctl))
		goto out_free;

	/*
	 * Processor state contains L2 state.  Check that it is
	 * valid for guest mode (see nested_vmcb_checks).
	 */
	cr0 = kvm_read_cr0(vcpu);
	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
		goto out_free;

	/*
	 * Validate host state saved from before VMRUN (see
	 * nested_svm_check_permissions).
	 * TODO: validate reserved bits for all saved state.
	 */
	if (!(save->cr0 & X86_CR0_PG))
		goto out_free;

	/*
	 * All checks done, we can enter guest mode.  L1 control fields
	 * come from the nested save state.  Guest state is already
	 * in the registers, the save area of the nested state instead
	 * contains saved L1 state.
	 */

	svm->nested.nested_run_pending =
		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);

	copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
	hsave->save = *save;

	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
	load_nested_vmcb_control(svm, ctl);
	nested_prepare_vmcb_control(svm);

	kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
	ret = 0;
out_free:
	kfree(save);
	kfree(ctl);

	return ret;
}

struct kvm_x86_nested_ops svm_nested_ops = {
	.check_events = svm_check_nested_events,
	.get_nested_state_pages = svm_get_nested_state_pages,
	.get_state = svm_get_nested_state,
	.set_state = svm_set_nested_state,
};