xref: /openbmc/linux/arch/x86/kvm/smm.c (revision 32e69f23)
/* SPDX-License-Identifier: GPL-2.0 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include "x86.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "smm.h"
#include "cpuid.h"
#include "trace.h"

#define CHECK_SMRAM32_OFFSET(field, offset) \
	ASSERT_STRUCT_OFFSET(struct kvm_smram_state_32, field, offset - 0xFE00)

#define CHECK_SMRAM64_OFFSET(field, offset) \
	ASSERT_STRUCT_OFFSET(struct kvm_smram_state_64, field, offset - 0xFE00)

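/*
 * Compile-time layout checks.  The offsets below are the architectural
 * locations of each field relative to SMBASE: the 512-byte state-save
 * image occupies SMBASE + 0xFE00 .. SMBASE + 0xFFFF, which is why the
 * macros above subtract 0xFE00 before comparing against the structs.
 */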
static void check_smram_offsets(void)
{
	/* 32 bit SMRAM image */
	CHECK_SMRAM32_OFFSET(reserved1,			0xFE00);
	CHECK_SMRAM32_OFFSET(smbase,			0xFEF8);
	CHECK_SMRAM32_OFFSET(smm_revision,		0xFEFC);
	CHECK_SMRAM32_OFFSET(io_inst_restart,		0xFF00);
	CHECK_SMRAM32_OFFSET(auto_hlt_restart,		0xFF02);
	CHECK_SMRAM32_OFFSET(io_restart_rdi,		0xFF04);
	CHECK_SMRAM32_OFFSET(io_restart_rcx,		0xFF08);
	CHECK_SMRAM32_OFFSET(io_restart_rsi,		0xFF0C);
	CHECK_SMRAM32_OFFSET(io_restart_rip,		0xFF10);
	CHECK_SMRAM32_OFFSET(cr4,			0xFF14);
	CHECK_SMRAM32_OFFSET(reserved2,			0xFF18);
	CHECK_SMRAM32_OFFSET(int_shadow,		0xFF1A);
	CHECK_SMRAM32_OFFSET(reserved3,			0xFF1B);
	CHECK_SMRAM32_OFFSET(ds,			0xFF2C);
	CHECK_SMRAM32_OFFSET(fs,			0xFF38);
	CHECK_SMRAM32_OFFSET(gs,			0xFF44);
	CHECK_SMRAM32_OFFSET(idtr,			0xFF50);
	CHECK_SMRAM32_OFFSET(tr,			0xFF5C);
	CHECK_SMRAM32_OFFSET(gdtr,			0xFF6C);
	CHECK_SMRAM32_OFFSET(ldtr,			0xFF78);
	CHECK_SMRAM32_OFFSET(es,			0xFF84);
	CHECK_SMRAM32_OFFSET(cs,			0xFF90);
	CHECK_SMRAM32_OFFSET(ss,			0xFF9C);
	CHECK_SMRAM32_OFFSET(es_sel,			0xFFA8);
	CHECK_SMRAM32_OFFSET(cs_sel,			0xFFAC);
	CHECK_SMRAM32_OFFSET(ss_sel,			0xFFB0);
	CHECK_SMRAM32_OFFSET(ds_sel,			0xFFB4);
	CHECK_SMRAM32_OFFSET(fs_sel,			0xFFB8);
	CHECK_SMRAM32_OFFSET(gs_sel,			0xFFBC);
	CHECK_SMRAM32_OFFSET(ldtr_sel,			0xFFC0);
	CHECK_SMRAM32_OFFSET(tr_sel,			0xFFC4);
	CHECK_SMRAM32_OFFSET(dr7,			0xFFC8);
	CHECK_SMRAM32_OFFSET(dr6,			0xFFCC);
	CHECK_SMRAM32_OFFSET(gprs,			0xFFD0);
	CHECK_SMRAM32_OFFSET(eip,			0xFFF0);
	CHECK_SMRAM32_OFFSET(eflags,			0xFFF4);
	CHECK_SMRAM32_OFFSET(cr3,			0xFFF8);
	CHECK_SMRAM32_OFFSET(cr0,			0xFFFC);

	/* 64 bit SMRAM image */
	CHECK_SMRAM64_OFFSET(es,			0xFE00);
	CHECK_SMRAM64_OFFSET(cs,			0xFE10);
	CHECK_SMRAM64_OFFSET(ss,			0xFE20);
	CHECK_SMRAM64_OFFSET(ds,			0xFE30);
	CHECK_SMRAM64_OFFSET(fs,			0xFE40);
	CHECK_SMRAM64_OFFSET(gs,			0xFE50);
	CHECK_SMRAM64_OFFSET(gdtr,			0xFE60);
	CHECK_SMRAM64_OFFSET(ldtr,			0xFE70);
	CHECK_SMRAM64_OFFSET(idtr,			0xFE80);
	CHECK_SMRAM64_OFFSET(tr,			0xFE90);
	CHECK_SMRAM64_OFFSET(io_restart_rip,		0xFEA0);
	CHECK_SMRAM64_OFFSET(io_restart_rcx,		0xFEA8);
	CHECK_SMRAM64_OFFSET(io_restart_rsi,		0xFEB0);
	CHECK_SMRAM64_OFFSET(io_restart_rdi,		0xFEB8);
	CHECK_SMRAM64_OFFSET(io_restart_dword,		0xFEC0);
	CHECK_SMRAM64_OFFSET(reserved1,			0xFEC4);
	CHECK_SMRAM64_OFFSET(io_inst_restart,		0xFEC8);
	CHECK_SMRAM64_OFFSET(auto_hlt_restart,		0xFEC9);
	CHECK_SMRAM64_OFFSET(amd_nmi_mask,		0xFECA);
	CHECK_SMRAM64_OFFSET(int_shadow,		0xFECB);
	CHECK_SMRAM64_OFFSET(reserved2,			0xFECC);
	CHECK_SMRAM64_OFFSET(efer,			0xFED0);
	CHECK_SMRAM64_OFFSET(svm_guest_flag,		0xFED8);
	CHECK_SMRAM64_OFFSET(svm_guest_vmcb_gpa,	0xFEE0);
	CHECK_SMRAM64_OFFSET(svm_guest_virtual_int,	0xFEE8);
	CHECK_SMRAM64_OFFSET(reserved3,			0xFEF0);
	CHECK_SMRAM64_OFFSET(smm_revison,		0xFEFC);
	CHECK_SMRAM64_OFFSET(smbase,			0xFF00);
	CHECK_SMRAM64_OFFSET(reserved4,			0xFF04);
	CHECK_SMRAM64_OFFSET(ssp,			0xFF18);
	CHECK_SMRAM64_OFFSET(svm_guest_pat,		0xFF20);
	CHECK_SMRAM64_OFFSET(svm_host_efer,		0xFF28);
	CHECK_SMRAM64_OFFSET(svm_host_cr4,		0xFF30);
	CHECK_SMRAM64_OFFSET(svm_host_cr3,		0xFF38);
	CHECK_SMRAM64_OFFSET(svm_host_cr0,		0xFF40);
	CHECK_SMRAM64_OFFSET(cr4,			0xFF48);
	CHECK_SMRAM64_OFFSET(cr3,			0xFF50);
	CHECK_SMRAM64_OFFSET(cr0,			0xFF58);
	CHECK_SMRAM64_OFFSET(dr7,			0xFF60);
	CHECK_SMRAM64_OFFSET(dr6,			0xFF68);
	CHECK_SMRAM64_OFFSET(rflags,			0xFF70);
	CHECK_SMRAM64_OFFSET(rip,			0xFF78);
	CHECK_SMRAM64_OFFSET(gprs,			0xFF80);

	BUILD_BUG_ON(sizeof(union kvm_smram) != 512);
}

#undef CHECK_SMRAM64_OFFSET
#undef CHECK_SMRAM32_OFFSET


void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
{
	trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm);

	if (entering_smm) {
		vcpu->arch.hflags |= HF_SMM_MASK;
	} else {
		vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);

		/* Process a latched INIT or SMI, if any.  */
		kvm_make_request(KVM_REQ_EVENT, vcpu);

		/*
		 * Even if KVM_SET_SREGS2 loaded PDPTRs out of band,
		 * on SMM exit we still need to reload them from
		 * guest memory
		 */
		vcpu->arch.pdptrs_from_userspace = false;
	}

	kvm_mmu_reset_context(vcpu);
}

void process_smi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.smi_pending = true;
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}

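/*
 * Pack a struct kvm_segment's attributes into the format used by the
 * SMRAM image: bits 8-23 mirror the attribute bits of a segment
 * descriptor (type, S, DPL, P, AVL, L, D/B, G).
 */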
static u32 enter_smm_get_segment_flags(struct kvm_segment *seg)
{
	u32 flags = 0;
	flags |= seg->g       << 23;
	flags |= seg->db      << 22;
	flags |= seg->l       << 21;
	flags |= seg->avl     << 20;
	flags |= seg->present << 15;
	flags |= seg->dpl     << 13;
	flags |= seg->s       << 12;
	flags |= seg->type    << 8;
	return flags;
}

static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu,
				  struct kvm_smm_seg_state_32 *state,
				  u32 *selector, int n)
{
	struct kvm_segment seg;

	kvm_get_segment(vcpu, &seg, n);
	*selector = seg.selector;
	state->base = seg.base;
	state->limit = seg.limit;
	state->flags = enter_smm_get_segment_flags(&seg);
}

#ifdef CONFIG_X86_64
static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu,
				  struct kvm_smm_seg_state_64 *state,
				  int n)
{
	struct kvm_segment seg;

	kvm_get_segment(vcpu, &seg, n);
	state->selector = seg.selector;
	state->attributes = enter_smm_get_segment_flags(&seg) >> 8;
	state->limit = seg.limit;
	state->base = seg.base;
}
#endif

static void enter_smm_save_state_32(struct kvm_vcpu *vcpu,
				    struct kvm_smram_state_32 *smram)
{
	struct desc_ptr dt;
	unsigned long val;
	int i;

	smram->cr0     = kvm_read_cr0(vcpu);
	smram->cr3     = kvm_read_cr3(vcpu);
	smram->eflags  = kvm_get_rflags(vcpu);
	smram->eip     = kvm_rip_read(vcpu);

	for (i = 0; i < 8; i++)
		smram->gprs[i] = kvm_register_read_raw(vcpu, i);

	kvm_get_dr(vcpu, 6, &val);
	smram->dr6     = (u32)val;
	kvm_get_dr(vcpu, 7, &val);
	smram->dr7     = (u32)val;

	enter_smm_save_seg_32(vcpu, &smram->tr, &smram->tr_sel, VCPU_SREG_TR);
	enter_smm_save_seg_32(vcpu, &smram->ldtr, &smram->ldtr_sel, VCPU_SREG_LDTR);

	static_call(kvm_x86_get_gdt)(vcpu, &dt);
	smram->gdtr.base = dt.address;
	smram->gdtr.limit = dt.size;

	static_call(kvm_x86_get_idt)(vcpu, &dt);
	smram->idtr.base = dt.address;
	smram->idtr.limit = dt.size;

	enter_smm_save_seg_32(vcpu, &smram->es, &smram->es_sel, VCPU_SREG_ES);
	enter_smm_save_seg_32(vcpu, &smram->cs, &smram->cs_sel, VCPU_SREG_CS);
	enter_smm_save_seg_32(vcpu, &smram->ss, &smram->ss_sel, VCPU_SREG_SS);

	enter_smm_save_seg_32(vcpu, &smram->ds, &smram->ds_sel, VCPU_SREG_DS);
	enter_smm_save_seg_32(vcpu, &smram->fs, &smram->fs_sel, VCPU_SREG_FS);
	enter_smm_save_seg_32(vcpu, &smram->gs, &smram->gs_sel, VCPU_SREG_GS);

	smram->cr4 = kvm_read_cr4(vcpu);
	smram->smm_revision = 0x00020000;
	smram->smbase = vcpu->arch.smbase;

	smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
}

#ifdef CONFIG_X86_64
static void enter_smm_save_state_64(struct kvm_vcpu *vcpu,
				    struct kvm_smram_state_64 *smram)
{
	struct desc_ptr dt;
	unsigned long val;
	int i;

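	/*
	 * GPRs are laid out in reverse order in the 64-bit state-save
	 * image: gprs[0] (offset 0xFF80) holds R15, gprs[15] holds RAX.
	 */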
	for (i = 0; i < 16; i++)
		smram->gprs[15 - i] = kvm_register_read_raw(vcpu, i);

	smram->rip    = kvm_rip_read(vcpu);
	smram->rflags = kvm_get_rflags(vcpu);

	kvm_get_dr(vcpu, 6, &val);
	smram->dr6 = val;
	kvm_get_dr(vcpu, 7, &val);
	smram->dr7 = val;

	smram->cr0 = kvm_read_cr0(vcpu);
	smram->cr3 = kvm_read_cr3(vcpu);
	smram->cr4 = kvm_read_cr4(vcpu);

	smram->smbase = vcpu->arch.smbase;
	smram->smm_revison = 0x00020064;

	smram->efer = vcpu->arch.efer;

	enter_smm_save_seg_64(vcpu, &smram->tr, VCPU_SREG_TR);

	static_call(kvm_x86_get_idt)(vcpu, &dt);
	smram->idtr.limit = dt.size;
	smram->idtr.base = dt.address;

	enter_smm_save_seg_64(vcpu, &smram->ldtr, VCPU_SREG_LDTR);

	static_call(kvm_x86_get_gdt)(vcpu, &dt);
	smram->gdtr.limit = dt.size;
	smram->gdtr.base = dt.address;

	enter_smm_save_seg_64(vcpu, &smram->es, VCPU_SREG_ES);
	enter_smm_save_seg_64(vcpu, &smram->cs, VCPU_SREG_CS);
	enter_smm_save_seg_64(vcpu, &smram->ss, VCPU_SREG_SS);
	enter_smm_save_seg_64(vcpu, &smram->ds, VCPU_SREG_DS);
	enter_smm_save_seg_64(vcpu, &smram->fs, VCPU_SREG_FS);
	enter_smm_save_seg_64(vcpu, &smram->gs, VCPU_SREG_GS);

	smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
}
#endif

void enter_smm(struct kvm_vcpu *vcpu)
{
	struct kvm_segment cs, ds;
	struct desc_ptr dt;
	unsigned long cr0;
	union kvm_smram smram;

	check_smram_offsets();

	memset(smram.bytes, 0, sizeof(smram.bytes));

#ifdef CONFIG_X86_64
	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
		enter_smm_save_state_64(vcpu, &smram.smram64);
	else
#endif
		enter_smm_save_state_32(vcpu, &smram.smram32);

	/*
	 * Give enter_smm() a chance to make ISA-specific changes to the vCPU
	 * state (e.g. leave guest mode) after we've saved the state into the
	 * SMM state-save area.
	 *
	 * Kill the VM in the unlikely case of failure, because the VM
	 * can be in an undefined state in this case.
	 */
	if (static_call(kvm_x86_enter_smm)(vcpu, &smram))
		goto error;

	kvm_smm_changed(vcpu, true);

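	/*
	 * Write the 512-byte state-save image to the top of SMRAM, at
	 * SMBASE + 0xFE00, matching the offsets validated by
	 * check_smram_offsets().
	 */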
	if (kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, &smram, sizeof(smram)))
		goto error;

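	/*
	 * Entering SMM masks NMIs.  If NMIs were already masked (the SMI
	 * was taken inside an NMI handler), record that fact so that RSM
	 * does not unmask them on exit.
	 */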
	if (static_call(kvm_x86_get_nmi_mask)(vcpu))
		vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
	else
		static_call(kvm_x86_set_nmi_mask)(vcpu, true);

	kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
	kvm_rip_write(vcpu, 0x8000);

	static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);

	cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
	static_call(kvm_x86_set_cr0)(vcpu, cr0);
	vcpu->arch.cr0 = cr0;

	static_call(kvm_x86_set_cr4)(vcpu, 0);

	/* Undocumented: IDT limit is set to zero on entry to SMM.  */
	dt.address = dt.size = 0;
	static_call(kvm_x86_set_idt)(vcpu, &dt);

	if (WARN_ON_ONCE(kvm_set_dr(vcpu, 7, DR7_FIXED_1)))
		goto error;

	cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
	cs.base = vcpu->arch.smbase;

	ds.selector = 0;
	ds.base = 0;

	cs.limit    = ds.limit = 0xffffffff;
	cs.type     = ds.type = 0x3;
	cs.dpl      = ds.dpl = 0;
	cs.db       = ds.db = 0;
	cs.s        = ds.s = 1;
	cs.l        = ds.l = 0;
	cs.g        = ds.g = 1;
	cs.avl      = ds.avl = 0;
	cs.present  = ds.present = 1;
	cs.unusable = ds.unusable = 0;
	cs.padding  = ds.padding = 0;

	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);

#ifdef CONFIG_X86_64
	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
		if (static_call(kvm_x86_set_efer)(vcpu, 0))
			goto error;
#endif

	kvm_update_cpuid_runtime(vcpu);
	kvm_mmu_reset_context(vcpu);
	return;
error:
	kvm_vm_dead(vcpu->kvm);
}

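/*
 * Inverse of enter_smm_get_segment_flags(): unpack the descriptor
 * attribute bits saved in SMRAM back into a struct kvm_segment.
 */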
static void rsm_set_desc_flags(struct kvm_segment *desc, u32 flags)
{
	desc->g    = (flags >> 23) & 1;
	desc->db   = (flags >> 22) & 1;
	desc->l    = (flags >> 21) & 1;
	desc->avl  = (flags >> 20) & 1;
	desc->present = (flags >> 15) & 1;
	desc->dpl  = (flags >> 13) & 3;
	desc->s    = (flags >> 12) & 1;
	desc->type = (flags >>  8) & 15;

	desc->unusable = !desc->present;
	desc->padding = 0;
}

static int rsm_load_seg_32(struct kvm_vcpu *vcpu,
			   const struct kvm_smm_seg_state_32 *state,
			   u16 selector, int n)
{
	struct kvm_segment desc;

	desc.selector =           selector;
	desc.base =               state->base;
	desc.limit =              state->limit;
	rsm_set_desc_flags(&desc, state->flags);
	kvm_set_segment(vcpu, &desc, n);
	return X86EMUL_CONTINUE;
}

#ifdef CONFIG_X86_64

static int rsm_load_seg_64(struct kvm_vcpu *vcpu,
			   const struct kvm_smm_seg_state_64 *state,
			   int n)
{
	struct kvm_segment desc;

	desc.selector =           state->selector;
	rsm_set_desc_flags(&desc, state->attributes << 8);
	desc.limit =              state->limit;
	desc.base =               state->base;
	kvm_set_segment(vcpu, &desc, n);
	return X86EMUL_CONTINUE;
}
#endif

static int rsm_enter_protected_mode(struct kvm_vcpu *vcpu,
				    u64 cr0, u64 cr3, u64 cr4)
{
	int bad;
	u64 pcid;

	/* In order to later set CR4.PCIDE, CR3[11:0] must be zero.  */
	pcid = 0;
	if (cr4 & X86_CR4_PCIDE) {
		pcid = cr3 & 0xfff;
		cr3 &= ~0xfff;
	}

	bad = kvm_set_cr3(vcpu, cr3);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	/*
	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
	 * Then enable protected mode.	However, PCID cannot be enabled
	 * if EFER.LMA=0, so set it separately.
	 */
	bad = kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	bad = kvm_set_cr0(vcpu, cr0);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	if (cr4 & X86_CR4_PCIDE) {
		bad = kvm_set_cr4(vcpu, cr4);
		if (bad)
			return X86EMUL_UNHANDLEABLE;
		if (pcid) {
			bad = kvm_set_cr3(vcpu, cr3 | pcid);
			if (bad)
				return X86EMUL_UNHANDLEABLE;
		}
	}

	return X86EMUL_CONTINUE;
}

static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
			     const struct kvm_smram_state_32 *smstate)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	struct desc_ptr dt;
	int i, r;

	ctxt->eflags =  smstate->eflags | X86_EFLAGS_FIXED;
	ctxt->_eip =  smstate->eip;

	for (i = 0; i < 8; i++)
		*reg_write(ctxt, i) = smstate->gprs[i];

	if (kvm_set_dr(vcpu, 6, smstate->dr6))
		return X86EMUL_UNHANDLEABLE;
	if (kvm_set_dr(vcpu, 7, smstate->dr7))
		return X86EMUL_UNHANDLEABLE;

	rsm_load_seg_32(vcpu, &smstate->tr, smstate->tr_sel, VCPU_SREG_TR);
	rsm_load_seg_32(vcpu, &smstate->ldtr, smstate->ldtr_sel, VCPU_SREG_LDTR);

	dt.address =               smstate->gdtr.base;
	dt.size =                  smstate->gdtr.limit;
	static_call(kvm_x86_set_gdt)(vcpu, &dt);

	dt.address =               smstate->idtr.base;
	dt.size =                  smstate->idtr.limit;
	static_call(kvm_x86_set_idt)(vcpu, &dt);

	rsm_load_seg_32(vcpu, &smstate->es, smstate->es_sel, VCPU_SREG_ES);
	rsm_load_seg_32(vcpu, &smstate->cs, smstate->cs_sel, VCPU_SREG_CS);
	rsm_load_seg_32(vcpu, &smstate->ss, smstate->ss_sel, VCPU_SREG_SS);

	rsm_load_seg_32(vcpu, &smstate->ds, smstate->ds_sel, VCPU_SREG_DS);
	rsm_load_seg_32(vcpu, &smstate->fs, smstate->fs_sel, VCPU_SREG_FS);
	rsm_load_seg_32(vcpu, &smstate->gs, smstate->gs_sel, VCPU_SREG_GS);

	vcpu->arch.smbase = smstate->smbase;

	r = rsm_enter_protected_mode(vcpu, smstate->cr0,
					smstate->cr3, smstate->cr4);

	if (r != X86EMUL_CONTINUE)
		return r;

	static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
	ctxt->interruptibility = (u8)smstate->int_shadow;

	return r;
}

#ifdef CONFIG_X86_64
static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
			     const struct kvm_smram_state_64 *smstate)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	struct desc_ptr dt;
	int i, r;

	for (i = 0; i < 16; i++)
		*reg_write(ctxt, i) = smstate->gprs[15 - i];

	ctxt->_eip   = smstate->rip;
	ctxt->eflags = smstate->rflags | X86_EFLAGS_FIXED;

	if (kvm_set_dr(vcpu, 6, smstate->dr6))
		return X86EMUL_UNHANDLEABLE;
	if (kvm_set_dr(vcpu, 7, smstate->dr7))
		return X86EMUL_UNHANDLEABLE;

	vcpu->arch.smbase =         smstate->smbase;

	if (kvm_set_msr(vcpu, MSR_EFER, smstate->efer & ~EFER_LMA))
		return X86EMUL_UNHANDLEABLE;

	rsm_load_seg_64(vcpu, &smstate->tr, VCPU_SREG_TR);

	dt.size =                   smstate->idtr.limit;
	dt.address =                smstate->idtr.base;
	static_call(kvm_x86_set_idt)(vcpu, &dt);

	rsm_load_seg_64(vcpu, &smstate->ldtr, VCPU_SREG_LDTR);

	dt.size =                   smstate->gdtr.limit;
	dt.address =                smstate->gdtr.base;
	static_call(kvm_x86_set_gdt)(vcpu, &dt);

	r = rsm_enter_protected_mode(vcpu, smstate->cr0, smstate->cr3, smstate->cr4);
	if (r != X86EMUL_CONTINUE)
		return r;

	rsm_load_seg_64(vcpu, &smstate->es, VCPU_SREG_ES);
	rsm_load_seg_64(vcpu, &smstate->cs, VCPU_SREG_CS);
	rsm_load_seg_64(vcpu, &smstate->ss, VCPU_SREG_SS);
	rsm_load_seg_64(vcpu, &smstate->ds, VCPU_SREG_DS);
	rsm_load_seg_64(vcpu, &smstate->fs, VCPU_SREG_FS);
	rsm_load_seg_64(vcpu, &smstate->gs, VCPU_SREG_GS);

	static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
	ctxt->interruptibility = (u8)smstate->int_shadow;

	return X86EMUL_CONTINUE;
}
#endif

int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	unsigned long cr0;
	union kvm_smram smram;
	u64 smbase;
	int ret;

	smbase = vcpu->arch.smbase;

	ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfe00, smram.bytes, sizeof(smram));
	if (ret < 0)
		return X86EMUL_UNHANDLEABLE;

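	/*
	 * RSM unmasks NMIs unless this SMI was taken while NMIs were
	 * already blocked (HF_SMM_INSIDE_NMI_MASK), in which case the
	 * original masking is preserved.
	 */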
	if ((vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK) == 0)
		static_call(kvm_x86_set_nmi_mask)(vcpu, false);

	kvm_smm_changed(vcpu, false);

	/*
	 * Get back to real mode, to prepare a safe state in which to load
	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
	 * supports long mode.
	 */
#ifdef CONFIG_X86_64
	if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
		struct kvm_segment cs_desc;
		unsigned long cr4;

		/* Zero CR4.PCIDE before CR0.PG.  */
		cr4 = kvm_read_cr4(vcpu);
		if (cr4 & X86_CR4_PCIDE)
			kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE);

		/* A 32-bit code segment is required to clear EFER.LMA.  */
		memset(&cs_desc, 0, sizeof(cs_desc));
		cs_desc.type = 0xb;
		cs_desc.s = cs_desc.g = cs_desc.present = 1;
		kvm_set_segment(vcpu, &cs_desc, VCPU_SREG_CS);
	}
#endif

	/* For the 64-bit case, this will clear EFER.LMA.  */
	cr0 = kvm_read_cr0(vcpu);
	if (cr0 & X86_CR0_PE)
		kvm_set_cr0(vcpu, cr0 & ~(X86_CR0_PG | X86_CR0_PE));

#ifdef CONFIG_X86_64
	if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
		unsigned long cr4, efer;

		/* Clear CR4.PAE before clearing EFER.LME. */
		cr4 = kvm_read_cr4(vcpu);
		if (cr4 & X86_CR4_PAE)
			kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PAE);

		/* And finally go back to 32-bit mode.  */
		efer = 0;
		kvm_set_msr(vcpu, MSR_EFER, efer);
	}
#endif

	/*
	 * Give leave_smm() a chance to make ISA-specific changes to the vCPU
	 * state (e.g. enter guest mode) before loading state from the SMM
	 * state-save area.
	 */
	if (static_call(kvm_x86_leave_smm)(vcpu, &smram))
		return X86EMUL_UNHANDLEABLE;

#ifdef CONFIG_X86_64
	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
		return rsm_load_state_64(ctxt, &smram.smram64);
	else
#endif
		return rsm_load_state_32(ctxt, &smram.smram32);
}