xref: /openbmc/linux/arch/x86/kvm/x86.c (revision 63dc02bd)
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * derived from drivers/kvm/kvm_main.c
5  *
6  * Copyright (C) 2006 Qumranet, Inc.
7  * Copyright (C) 2008 Qumranet, Inc.
8  * Copyright IBM Corporation, 2008
9  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10  *
11  * Authors:
12  *   Avi Kivity   <avi@qumranet.com>
13  *   Yaniv Kamay  <yaniv@qumranet.com>
14  *   Amit Shah    <amit.shah@qumranet.com>
15  *   Ben-Ami Yassour <benami@il.ibm.com>
16  *
17  * This work is licensed under the terms of the GNU GPL, version 2.  See
18  * the COPYING file in the top-level directory.
19  *
20  */
21 
22 #include <linux/kvm_host.h>
23 #include "irq.h"
24 #include "mmu.h"
25 #include "i8254.h"
26 #include "tss.h"
27 #include "kvm_cache_regs.h"
28 #include "x86.h"
29 #include "cpuid.h"
30 
31 #include <linux/clocksource.h>
32 #include <linux/interrupt.h>
33 #include <linux/kvm.h>
34 #include <linux/fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/module.h>
37 #include <linux/mman.h>
38 #include <linux/highmem.h>
39 #include <linux/iommu.h>
40 #include <linux/intel-iommu.h>
41 #include <linux/cpufreq.h>
42 #include <linux/user-return-notifier.h>
43 #include <linux/srcu.h>
44 #include <linux/slab.h>
45 #include <linux/perf_event.h>
46 #include <linux/uaccess.h>
47 #include <linux/hash.h>
48 #include <linux/pci.h>
49 #include <trace/events/kvm.h>
50 
51 #define CREATE_TRACE_POINTS
52 #include "trace.h"
53 
54 #include <asm/debugreg.h>
55 #include <asm/msr.h>
56 #include <asm/desc.h>
57 #include <asm/mtrr.h>
58 #include <asm/mce.h>
59 #include <asm/i387.h>
60 #include <asm/fpu-internal.h> /* Ugh! */
61 #include <asm/xcr.h>
62 #include <asm/pvclock.h>
63 #include <asm/div64.h>
64 
65 #define MAX_IO_MSRS 256
66 #define KVM_MAX_MCE_BANKS 32
67 #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
68 
69 #define emul_to_vcpu(ctxt) \
70 	container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
71 
72 /* EFER defaults:
73  * - enable syscall by default because it is emulated by KVM
74  * - enable LME and LMA by default on 64-bit KVM
75  */
76 #ifdef CONFIG_X86_64
77 static
78 u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
79 #else
80 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
81 #endif
82 
83 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
84 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
85 
86 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
87 static void process_nmi(struct kvm_vcpu *vcpu);
88 
89 struct kvm_x86_ops *kvm_x86_ops;
90 EXPORT_SYMBOL_GPL(kvm_x86_ops);
91 
92 static bool ignore_msrs = false;
93 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
94 
95 bool kvm_has_tsc_control;
96 EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
97 u32  kvm_max_guest_tsc_khz;
98 EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
99 
100 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
101 static u32 tsc_tolerance_ppm = 250;
102 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
103 
104 #define KVM_NR_SHARED_MSRS 16
105 
106 struct kvm_shared_msrs_global {
107 	int nr;
108 	u32 msrs[KVM_NR_SHARED_MSRS];
109 };
110 
111 struct kvm_shared_msrs {
112 	struct user_return_notifier urn;
113 	bool registered;
114 	struct kvm_shared_msr_values {
115 		u64 host;
116 		u64 curr;
117 	} values[KVM_NR_SHARED_MSRS];
118 };
119 
120 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
121 static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
122 
123 struct kvm_stats_debugfs_item debugfs_entries[] = {
124 	{ "pf_fixed", VCPU_STAT(pf_fixed) },
125 	{ "pf_guest", VCPU_STAT(pf_guest) },
126 	{ "tlb_flush", VCPU_STAT(tlb_flush) },
127 	{ "invlpg", VCPU_STAT(invlpg) },
128 	{ "exits", VCPU_STAT(exits) },
129 	{ "io_exits", VCPU_STAT(io_exits) },
130 	{ "mmio_exits", VCPU_STAT(mmio_exits) },
131 	{ "signal_exits", VCPU_STAT(signal_exits) },
132 	{ "irq_window", VCPU_STAT(irq_window_exits) },
133 	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
134 	{ "halt_exits", VCPU_STAT(halt_exits) },
135 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
136 	{ "hypercalls", VCPU_STAT(hypercalls) },
137 	{ "request_irq", VCPU_STAT(request_irq_exits) },
138 	{ "irq_exits", VCPU_STAT(irq_exits) },
139 	{ "host_state_reload", VCPU_STAT(host_state_reload) },
140 	{ "efer_reload", VCPU_STAT(efer_reload) },
141 	{ "fpu_reload", VCPU_STAT(fpu_reload) },
142 	{ "insn_emulation", VCPU_STAT(insn_emulation) },
143 	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
144 	{ "irq_injections", VCPU_STAT(irq_injections) },
145 	{ "nmi_injections", VCPU_STAT(nmi_injections) },
146 	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
147 	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
148 	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
149 	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
150 	{ "mmu_flooded", VM_STAT(mmu_flooded) },
151 	{ "mmu_recycled", VM_STAT(mmu_recycled) },
152 	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
153 	{ "mmu_unsync", VM_STAT(mmu_unsync) },
154 	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
155 	{ "largepages", VM_STAT(lpages) },
156 	{ NULL }
157 };
158 
159 u64 __read_mostly host_xcr0;
160 
161 int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
162 
163 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
164 {
165 	int i;
166 	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
167 		vcpu->arch.apf.gfns[i] = ~0;
168 }
169 
170 static void kvm_on_user_return(struct user_return_notifier *urn)
171 {
172 	unsigned slot;
173 	struct kvm_shared_msrs *locals
174 		= container_of(urn, struct kvm_shared_msrs, urn);
175 	struct kvm_shared_msr_values *values;
176 
177 	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
178 		values = &locals->values[slot];
179 		if (values->host != values->curr) {
180 			wrmsrl(shared_msrs_global.msrs[slot], values->host);
181 			values->curr = values->host;
182 		}
183 	}
184 	locals->registered = false;
185 	user_return_notifier_unregister(urn);
186 }
187 
188 static void shared_msr_update(unsigned slot, u32 msr)
189 {
190 	struct kvm_shared_msrs *smsr;
191 	u64 value;
192 
193 	smsr = &__get_cpu_var(shared_msrs);
194 	/* only read; nobody should modify it at this time,
195 	 * so no locking is needed */
196 	if (slot >= shared_msrs_global.nr) {
197 		printk(KERN_ERR "kvm: invalid MSR slot!");
198 		return;
199 	}
200 	rdmsrl_safe(msr, &value);
201 	smsr->values[slot].host = value;
202 	smsr->values[slot].curr = value;
203 }
204 
205 void kvm_define_shared_msr(unsigned slot, u32 msr)
206 {
207 	if (slot >= shared_msrs_global.nr)
208 		shared_msrs_global.nr = slot + 1;
209 	shared_msrs_global.msrs[slot] = msr;
210 	/* make sure updates to shared_msrs_global are visible before use */
211 	smp_wmb();
212 }
213 EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
214 
215 static void kvm_shared_msr_cpu_online(void)
216 {
217 	unsigned i;
218 
219 	for (i = 0; i < shared_msrs_global.nr; ++i)
220 		shared_msr_update(i, shared_msrs_global.msrs[i]);
221 }
222 
223 void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
224 {
225 	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
226 
227 	if (((value ^ smsr->values[slot].curr) & mask) == 0)
228 		return;
229 	smsr->values[slot].curr = value;
230 	wrmsrl(shared_msrs_global.msrs[slot], value);
231 	if (!smsr->registered) {
232 		smsr->urn.on_user_return = kvm_on_user_return;
233 		user_return_notifier_register(&smsr->urn);
234 		smsr->registered = true;
235 	}
236 }
237 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
238 
239 static void drop_user_return_notifiers(void *ignore)
240 {
241 	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
242 
243 	if (smsr->registered)
244 		kvm_on_user_return(&smsr->urn);
245 }
246 
247 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
248 {
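	/* Note: both branches below currently return the same cached value; the
	 * irqchip_in_kernel() check is presumably kept for symmetry with
	 * kvm_set_apic_base() below. */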
249 	if (irqchip_in_kernel(vcpu->kvm))
250 		return vcpu->arch.apic_base;
251 	else
252 		return vcpu->arch.apic_base;
253 }
254 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
255 
256 void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
257 {
258 	/* TODO: reserve bits check */
259 	if (irqchip_in_kernel(vcpu->kvm))
260 		kvm_lapic_set_base(vcpu, data);
261 	else
262 		vcpu->arch.apic_base = data;
263 }
264 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
265 
266 #define EXCPT_BENIGN		0
267 #define EXCPT_CONTRIBUTORY	1
268 #define EXCPT_PF		2
269 
270 static int exception_class(int vector)
271 {
272 	switch (vector) {
273 	case PF_VECTOR:
274 		return EXCPT_PF;
275 	case DE_VECTOR:
276 	case TS_VECTOR:
277 	case NP_VECTOR:
278 	case SS_VECTOR:
279 	case GP_VECTOR:
280 		return EXCPT_CONTRIBUTORY;
281 	default:
282 		break;
283 	}
284 	return EXCPT_BENIGN;
285 }
286 
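/*
 * Queue an exception for injection, merging it with any already-pending one:
 * two contributory exceptions, or a page fault followed by a non-benign
 * exception, escalate to #DF, and a pending #DF escalates to a triple fault
 * (VM shutdown), per the #DF rules in the SDM.
 */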
287 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
288 		unsigned nr, bool has_error, u32 error_code,
289 		bool reinject)
290 {
291 	u32 prev_nr;
292 	int class1, class2;
293 
294 	kvm_make_request(KVM_REQ_EVENT, vcpu);
295 
296 	if (!vcpu->arch.exception.pending) {
297 	queue:
298 		vcpu->arch.exception.pending = true;
299 		vcpu->arch.exception.has_error_code = has_error;
300 		vcpu->arch.exception.nr = nr;
301 		vcpu->arch.exception.error_code = error_code;
302 		vcpu->arch.exception.reinject = reinject;
303 		return;
304 	}
305 
306 	/* an exception is already pending; work out how it combines with the new one */
307 	prev_nr = vcpu->arch.exception.nr;
308 	if (prev_nr == DF_VECTOR) {
309 		/* triple fault -> shutdown */
310 		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
311 		return;
312 	}
313 	class1 = exception_class(prev_nr);
314 	class2 = exception_class(nr);
315 	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
316 		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
317 		/* generate double fault per SDM Table 5-5 */
318 		vcpu->arch.exception.pending = true;
319 		vcpu->arch.exception.has_error_code = true;
320 		vcpu->arch.exception.nr = DF_VECTOR;
321 		vcpu->arch.exception.error_code = 0;
322 	} else
323 		/* replace previous exception with a new one in the hope
324 		   that instruction re-execution will regenerate the lost
325 		   exception */
326 		goto queue;
327 }
328 
329 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
330 {
331 	kvm_multiple_exception(vcpu, nr, false, 0, false);
332 }
333 EXPORT_SYMBOL_GPL(kvm_queue_exception);
334 
335 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
336 {
337 	kvm_multiple_exception(vcpu, nr, false, 0, true);
338 }
339 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
340 
341 void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
342 {
343 	if (err)
344 		kvm_inject_gp(vcpu, 0);
345 	else
346 		kvm_x86_ops->skip_emulated_instruction(vcpu);
347 }
348 EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
349 
350 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
351 {
352 	++vcpu->stat.pf_guest;
353 	vcpu->arch.cr2 = fault->address;
354 	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
355 }
356 EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
357 
358 void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
359 {
360 	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
361 		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
362 	else
363 		vcpu->arch.mmu.inject_page_fault(vcpu, fault);
364 }
365 
366 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
367 {
368 	atomic_inc(&vcpu->arch.nmi_queued);
369 	kvm_make_request(KVM_REQ_NMI, vcpu);
370 }
371 EXPORT_SYMBOL_GPL(kvm_inject_nmi);
372 
373 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
374 {
375 	kvm_multiple_exception(vcpu, nr, true, error_code, false);
376 }
377 EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
378 
379 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
380 {
381 	kvm_multiple_exception(vcpu, nr, true, error_code, true);
382 }
383 EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
384 
385 /*
386  * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
387  * a #GP and return false.
388  */
389 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
390 {
391 	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
392 		return true;
393 	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
394 	return false;
395 }
396 EXPORT_SYMBOL_GPL(kvm_require_cpl);
397 
398 /*
399  * This function is used to read from the physical memory of the currently
400  * running guest. The difference from kvm_read_guest_page is that this function
401  * can read from guest physical memory or from a nested guest's guest physical memory.
402  */
403 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
404 			    gfn_t ngfn, void *data, int offset, int len,
405 			    u32 access)
406 {
407 	gfn_t real_gfn;
408 	gpa_t ngpa;
409 
410 	ngpa     = gfn_to_gpa(ngfn);
411 	real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
412 	if (real_gfn == UNMAPPED_GVA)
413 		return -EFAULT;
414 
415 	real_gfn = gpa_to_gfn(real_gfn);
416 
417 	return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
418 }
419 EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
420 
421 int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
422 			       void *data, int offset, int len, u32 access)
423 {
424 	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
425 				       data, offset, len, access);
426 }
427 
428 /*
429  * Load the PAE pdptrs.  Return true if they are all valid.
430  */
431 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
432 {
433 	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
434 	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
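	/* In PAE mode CR3 is 32-byte aligned: bits 5-11 locate the four-entry
	 * PDPT within the page, and 'offset' above is counted in u64 entries. */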
435 	int i;
436 	int ret;
437 	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
438 
439 	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
440 				      offset * sizeof(u64), sizeof(pdpte),
441 				      PFERR_USER_MASK|PFERR_WRITE_MASK);
442 	if (ret < 0) {
443 		ret = 0;
444 		goto out;
445 	}
446 	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
447 		if (is_present_gpte(pdpte[i]) &&
448 		    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
449 			ret = 0;
450 			goto out;
451 		}
452 	}
453 	ret = 1;
454 
455 	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
456 	__set_bit(VCPU_EXREG_PDPTR,
457 		  (unsigned long *)&vcpu->arch.regs_avail);
458 	__set_bit(VCPU_EXREG_PDPTR,
459 		  (unsigned long *)&vcpu->arch.regs_dirty);
460 out:
461 
462 	return ret;
463 }
464 EXPORT_SYMBOL_GPL(load_pdptrs);
465 
466 static bool pdptrs_changed(struct kvm_vcpu *vcpu)
467 {
468 	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
469 	bool changed = true;
470 	int offset;
471 	gfn_t gfn;
472 	int r;
473 
474 	if (is_long_mode(vcpu) || !is_pae(vcpu))
475 		return false;
476 
477 	if (!test_bit(VCPU_EXREG_PDPTR,
478 		      (unsigned long *)&vcpu->arch.regs_avail))
479 		return true;
480 
481 	gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
482 	offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
483 	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
484 				       PFERR_USER_MASK | PFERR_WRITE_MASK);
485 	if (r < 0)
486 		goto out;
487 	changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
488 out:
489 
490 	return changed;
491 }
492 
493 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
494 {
495 	unsigned long old_cr0 = kvm_read_cr0(vcpu);
496 	unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
497 				    X86_CR0_CD | X86_CR0_NW;
498 
499 	cr0 |= X86_CR0_ET;
500 
501 #ifdef CONFIG_X86_64
502 	if (cr0 & 0xffffffff00000000UL)
503 		return 1;
504 #endif
505 
506 	cr0 &= ~CR0_RESERVED_BITS;
507 
508 	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
509 		return 1;
510 
511 	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
512 		return 1;
513 
514 	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
515 #ifdef CONFIG_X86_64
516 		if ((vcpu->arch.efer & EFER_LME)) {
517 			int cs_db, cs_l;
518 
519 			if (!is_pae(vcpu))
520 				return 1;
521 			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
522 			if (cs_l)
523 				return 1;
524 		} else
525 #endif
526 		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
527 						 kvm_read_cr3(vcpu)))
528 			return 1;
529 	}
530 
531 	kvm_x86_ops->set_cr0(vcpu, cr0);
532 
533 	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
534 		kvm_clear_async_pf_completion_queue(vcpu);
535 		kvm_async_pf_hash_reset(vcpu);
536 	}
537 
538 	if ((cr0 ^ old_cr0) & update_bits)
539 		kvm_mmu_reset_context(vcpu);
540 	return 0;
541 }
542 EXPORT_SYMBOL_GPL(kvm_set_cr0);
543 
544 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
545 {
546 	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
547 }
548 EXPORT_SYMBOL_GPL(kvm_lmsw);
549 
550 int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
551 {
552 	u64 xcr0;
553 
554 	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
555 	if (index != XCR_XFEATURE_ENABLED_MASK)
556 		return 1;
557 	xcr0 = xcr;
558 	if (kvm_x86_ops->get_cpl(vcpu) != 0)
559 		return 1;
560 	if (!(xcr0 & XSTATE_FP))
561 		return 1;
562 	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
563 		return 1;
564 	if (xcr0 & ~host_xcr0)
565 		return 1;
566 	vcpu->arch.xcr0 = xcr0;
567 	vcpu->guest_xcr0_loaded = 0;
568 	return 0;
569 }
570 
571 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
572 {
573 	if (__kvm_set_xcr(vcpu, index, xcr)) {
574 		kvm_inject_gp(vcpu, 0);
575 		return 1;
576 	}
577 	return 0;
578 }
579 EXPORT_SYMBOL_GPL(kvm_set_xcr);
580 
581 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
582 {
583 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
584 	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
585 				   X86_CR4_PAE | X86_CR4_SMEP;
586 	if (cr4 & CR4_RESERVED_BITS)
587 		return 1;
588 
589 	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
590 		return 1;
591 
592 	if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
593 		return 1;
594 
595 	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_RDWRGSFS))
596 		return 1;
597 
598 	if (is_long_mode(vcpu)) {
599 		if (!(cr4 & X86_CR4_PAE))
600 			return 1;
601 	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
602 		   && ((cr4 ^ old_cr4) & pdptr_bits)
603 		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
604 				   kvm_read_cr3(vcpu)))
605 		return 1;
606 
607 	if (kvm_x86_ops->set_cr4(vcpu, cr4))
608 		return 1;
609 
610 	if ((cr4 ^ old_cr4) & pdptr_bits)
611 		kvm_mmu_reset_context(vcpu);
612 
613 	if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
614 		kvm_update_cpuid(vcpu);
615 
616 	return 0;
617 }
618 EXPORT_SYMBOL_GPL(kvm_set_cr4);
619 
620 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
621 {
622 	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
623 		kvm_mmu_sync_roots(vcpu);
624 		kvm_mmu_flush_tlb(vcpu);
625 		return 0;
626 	}
627 
628 	if (is_long_mode(vcpu)) {
629 		if (cr3 & CR3_L_MODE_RESERVED_BITS)
630 			return 1;
631 	} else {
632 		if (is_pae(vcpu)) {
633 			if (cr3 & CR3_PAE_RESERVED_BITS)
634 				return 1;
635 			if (is_paging(vcpu) &&
636 			    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
637 				return 1;
638 		}
639 		/*
640 		 * We don't check reserved bits in nonpae mode, because
641 		 * this isn't enforced, and VMware depends on this.
642 		 */
643 	}
644 
645 	/*
646 	 * Does the new cr3 value map to physical memory? (Note, we
647 	 * catch an invalid cr3 even in real-mode, because it would
648 	 * cause trouble later on when we turn on paging anyway.)
649 	 *
650 	 * A real CPU would silently accept an invalid cr3 and would
651 	 * attempt to use it - with largely undefined (and often hard
652 	 * to debug) behavior on the guest side.
653 	 */
654 	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
655 		return 1;
656 	vcpu->arch.cr3 = cr3;
657 	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
658 	vcpu->arch.mmu.new_cr3(vcpu);
659 	return 0;
660 }
661 EXPORT_SYMBOL_GPL(kvm_set_cr3);
662 
663 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
664 {
665 	if (cr8 & CR8_RESERVED_BITS)
666 		return 1;
667 	if (irqchip_in_kernel(vcpu->kvm))
668 		kvm_lapic_set_tpr(vcpu, cr8);
669 	else
670 		vcpu->arch.cr8 = cr8;
671 	return 0;
672 }
673 EXPORT_SYMBOL_GPL(kvm_set_cr8);
674 
675 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
676 {
677 	if (irqchip_in_kernel(vcpu->kvm))
678 		return kvm_lapic_get_cr8(vcpu);
679 	else
680 		return vcpu->arch.cr8;
681 }
682 EXPORT_SYMBOL_GPL(kvm_get_cr8);
683 
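/*
 * Returns 0 on success, 1 if the access should raise #UD (DR4/DR5 with
 * CR4.DE set) or -1 if it should raise #GP (reserved upper bits set);
 * kvm_set_dr() below maps these onto the injected exceptions.
 */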
684 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
685 {
686 	switch (dr) {
687 	case 0 ... 3:
688 		vcpu->arch.db[dr] = val;
689 		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
690 			vcpu->arch.eff_db[dr] = val;
691 		break;
692 	case 4:
693 		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
694 			return 1; /* #UD */
695 		/* fall through */
696 	case 6:
697 		if (val & 0xffffffff00000000ULL)
698 			return -1; /* #GP */
699 		vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
700 		break;
701 	case 5:
702 		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
703 			return 1; /* #UD */
704 		/* fall through */
705 	default: /* 7 */
706 		if (val & 0xffffffff00000000ULL)
707 			return -1; /* #GP */
708 		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
709 		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
710 			kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
711 			vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
712 		}
713 		break;
714 	}
715 
716 	return 0;
717 }
718 
719 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
720 {
721 	int res;
722 
723 	res = __kvm_set_dr(vcpu, dr, val);
724 	if (res > 0)
725 		kvm_queue_exception(vcpu, UD_VECTOR);
726 	else if (res < 0)
727 		kvm_inject_gp(vcpu, 0);
728 
729 	return res;
730 }
731 EXPORT_SYMBOL_GPL(kvm_set_dr);
732 
733 static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
734 {
735 	switch (dr) {
736 	case 0 ... 3:
737 		*val = vcpu->arch.db[dr];
738 		break;
739 	case 4:
740 		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
741 			return 1;
742 		/* fall through */
743 	case 6:
744 		*val = vcpu->arch.dr6;
745 		break;
746 	case 5:
747 		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
748 			return 1;
749 		/* fall through */
750 	default: /* 7 */
751 		*val = vcpu->arch.dr7;
752 		break;
753 	}
754 
755 	return 0;
756 }
757 
758 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
759 {
760 	if (_kvm_get_dr(vcpu, dr, val)) {
761 		kvm_queue_exception(vcpu, UD_VECTOR);
762 		return 1;
763 	}
764 	return 0;
765 }
766 EXPORT_SYMBOL_GPL(kvm_get_dr);
767 
768 bool kvm_rdpmc(struct kvm_vcpu *vcpu)
769 {
770 	u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
771 	u64 data;
772 	int err;
773 
774 	err = kvm_pmu_read_pmc(vcpu, ecx, &data);
775 	if (err)
776 		return err;
777 	kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
778 	kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
779 	return err;
780 }
781 EXPORT_SYMBOL_GPL(kvm_rdpmc);
782 
783 /*
784  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
785  * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
786  *
787  * This list is modified at module load time to reflect the
788  * capabilities of the host cpu. This capabilities test skips MSRs that are
789  * kvm-specific. Those are put in the beginning of the list.
790  */
791 
792 #define KVM_SAVE_MSRS_BEGIN	9
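/* The first KVM_SAVE_MSRS_BEGIN entries below are the kvm- and Hyper-V-specific
 * MSRs mentioned above; they are not probed against the host. */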
793 static u32 msrs_to_save[] = {
794 	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
795 	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
796 	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
797 	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
798 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
799 	MSR_STAR,
800 #ifdef CONFIG_X86_64
801 	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
802 #endif
803 	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
804 };
805 
806 static unsigned num_msrs_to_save;
807 
808 static u32 emulated_msrs[] = {
809 	MSR_IA32_TSCDEADLINE,
810 	MSR_IA32_MISC_ENABLE,
811 	MSR_IA32_MCG_STATUS,
812 	MSR_IA32_MCG_CTL,
813 };
814 
815 static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
816 {
817 	u64 old_efer = vcpu->arch.efer;
818 
819 	if (efer & efer_reserved_bits)
820 		return 1;
821 
822 	if (is_paging(vcpu)
823 	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
824 		return 1;
825 
826 	if (efer & EFER_FFXSR) {
827 		struct kvm_cpuid_entry2 *feat;
828 
829 		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
830 		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
831 			return 1;
832 	}
833 
834 	if (efer & EFER_SVME) {
835 		struct kvm_cpuid_entry2 *feat;
836 
837 		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
838 		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
839 			return 1;
840 	}
841 
842 	efer &= ~EFER_LMA;
843 	efer |= vcpu->arch.efer & EFER_LMA;
844 
845 	kvm_x86_ops->set_efer(vcpu, efer);
846 
847 	vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
848 
849 	/* Update reserved bits */
850 	if ((efer ^ old_efer) & EFER_NX)
851 		kvm_mmu_reset_context(vcpu);
852 
853 	return 0;
854 }
855 
856 void kvm_enable_efer_bits(u64 mask)
857 {
858 	efer_reserved_bits &= ~mask;
859 }
860 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
861 
862 
863 /*
864  * Writes msr value into the appropriate "register".
865  * Returns 0 on success, non-0 otherwise.
866  * Assumes vcpu_load() was already called.
867  */
868 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
869 {
870 	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
871 }
872 
873 /*
874  * Adapt set_msr() to msr_io()'s calling convention
875  */
876 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
877 {
878 	return kvm_set_msr(vcpu, index, *data);
879 }
880 
881 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
882 {
883 	int version;
884 	int r;
885 	struct pvclock_wall_clock wc;
886 	struct timespec boot;
887 
888 	if (!wall_clock)
889 		return;
890 
891 	r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
892 	if (r)
893 		return;
894 
895 	if (version & 1)
896 		++version;  /* first time write, random junk */
897 
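	/* Seqlock-style update: bump the version to an odd value before writing
	 * the wall clock data and back to an even value afterwards, so the guest
	 * can detect and retry a torn read. */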
898 	++version;
899 
900 	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
901 
902 	/*
903 	 * The guest calculates current wall clock time by adding
904 	 * system time (updated by kvm_guest_time_update below) to the
905 	 * wall clock specified here.  guest system time equals host
906 	 * system time for us, thus we must fill in host boot time here.
907 	 */
908 	getboottime(&boot);
909 
910 	wc.sec = boot.tv_sec;
911 	wc.nsec = boot.tv_nsec;
912 	wc.version = version;
913 
914 	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
915 
916 	version++;
917 	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
918 }
919 
920 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
921 {
922 	uint32_t quotient, remainder;
923 
924 	/* Don't try to replace with do_div(); this one calculates
925 	 * "(dividend << 32) / divisor" */
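	/* For example, div_frac(1, 3) = 0x55555555, i.e. roughly 1/3 as a
	 * 0.32 fixed-point fraction (illustrative only). */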
926 	__asm__ ( "divl %4"
927 		  : "=a" (quotient), "=d" (remainder)
928 		  : "0" (0), "1" (dividend), "r" (divisor) );
929 	return quotient;
930 }
931 
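/*
 * Derive a (shift, multiplier) pair for converting base_khz ticks into
 * scaled_khz ticks with pvclock_scale_delta(): the value is shifted by
 * *pshift and then multiplied by *pmultiplier as a 32.32 fixed-point
 * fraction (a summary of the convention, not a formal specification).
 */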
932 static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
933 			       s8 *pshift, u32 *pmultiplier)
934 {
935 	uint64_t scaled64;
936 	int32_t  shift = 0;
937 	uint64_t tps64;
938 	uint32_t tps32;
939 
940 	tps64 = base_khz * 1000LL;
941 	scaled64 = scaled_khz * 1000LL;
942 	while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
943 		tps64 >>= 1;
944 		shift--;
945 	}
946 
947 	tps32 = (uint32_t)tps64;
948 	while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
949 		if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
950 			scaled64 >>= 1;
951 		else
952 			tps32 <<= 1;
953 		shift++;
954 	}
955 
956 	*pshift = shift;
957 	*pmultiplier = div_frac(scaled64, tps32);
958 
959 	pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
960 		 __func__, base_khz, scaled_khz, shift, *pmultiplier);
961 }
962 
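/* Boot-based monotonic time in nanoseconds; kvmclock's system_time is
 * derived from this value. */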
963 static inline u64 get_kernel_ns(void)
964 {
965 	struct timespec ts;
966 
967 	WARN_ON(preemptible());
968 	ktime_get_ts(&ts);
969 	monotonic_to_bootbased(&ts);
970 	return timespec_to_ns(&ts);
971 }
972 
973 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
974 unsigned long max_tsc_khz;
975 
976 static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
977 {
978 	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
979 				   vcpu->arch.virtual_tsc_shift);
980 }
981 
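/*
 * Scale a frequency by a signed parts-per-million adjustment; for example,
 * adjust_tsc_khz(2000000, -250) = 1999500 and adjust_tsc_khz(2000000, 250)
 * = 2000500, the bounds implied by the default tsc_tolerance_ppm of 250.
 */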
982 static u32 adjust_tsc_khz(u32 khz, s32 ppm)
983 {
984 	u64 v = (u64)khz * (1000000 + ppm);
985 	do_div(v, 1000000);
986 	return v;
987 }
988 
989 static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
990 {
991 	u32 thresh_lo, thresh_hi;
992 	int use_scaling = 0;
993 
994 	/* Compute a scale to convert nanoseconds in TSC cycles */
995 	kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
996 			   &vcpu->arch.virtual_tsc_shift,
997 			   &vcpu->arch.virtual_tsc_mult);
998 	vcpu->arch.virtual_tsc_khz = this_tsc_khz;
999 
1000 	/*
1001 	 * Compute the variation in TSC rate which is acceptable
1002 	 * within the range of tolerance and decide if the
1003 	 * rate being applied is within those bounds of the hardware
1004 	 * rate.  If so, no scaling or compensation needs to be done.
1005 	 */
1006 	thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
1007 	thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
1008 	if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) {
1009 		pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
1010 		use_scaling = 1;
1011 	}
1012 	kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
1013 }
1014 
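/*
 * Reconstruct the guest TSC at time kernel_ns: scale the nanoseconds elapsed
 * since the last synchronization point back into guest TSC cycles and add the
 * TSC value that was written at that point.
 */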
1015 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
1016 {
1017 	u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
1018 				      vcpu->arch.virtual_tsc_mult,
1019 				      vcpu->arch.virtual_tsc_shift);
1020 	tsc += vcpu->arch.this_tsc_write;
1021 	return tsc;
1022 }
1023 
1024 void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
1025 {
1026 	struct kvm *kvm = vcpu->kvm;
1027 	u64 offset, ns, elapsed;
1028 	unsigned long flags;
1029 	s64 usdiff;
1030 
1031 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
1032 	offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
1033 	ns = get_kernel_ns();
1034 	elapsed = ns - kvm->arch.last_tsc_nsec;
1035 
1036 	/* n.b - signed multiplication and division required */
1037 	usdiff = data - kvm->arch.last_tsc_write;
1038 #ifdef CONFIG_X86_64
1039 	usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
1040 #else
1041 	/* do_div() only does unsigned */
1042 	asm("idivl %2; xor %%edx, %%edx"
1043 	    : "=A"(usdiff)
1044 	    : "A"(usdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
1045 #endif
1046 	do_div(elapsed, 1000);
1047 	usdiff -= elapsed;
1048 	if (usdiff < 0)
1049 		usdiff = -usdiff;
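	/* usdiff now holds |guest TSC delta - elapsed host time| in microseconds. */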
1050 
1051 	/*
1052 	 * Special case: TSC write with a small delta (1 second) of virtual
1053 	 * cycle time against real time is interpreted as an attempt to
1054 	 * synchronize the CPU.
1055          *
1056 	 * For a reliable TSC, we can match TSC offsets, and for an unstable
1057 	 * TSC, we add elapsed time in this computation.  We could let the
1058 	 * compensation code attempt to catch up if we fall behind, but
1059 	 * it's better to try to match offsets from the beginning.
1060          */
1061 	if (usdiff < USEC_PER_SEC &&
1062 	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
1063 		if (!check_tsc_unstable()) {
1064 			offset = kvm->arch.cur_tsc_offset;
1065 			pr_debug("kvm: matched tsc offset for %llu\n", data);
1066 		} else {
1067 			u64 delta = nsec_to_cycles(vcpu, elapsed);
1068 			data += delta;
1069 			offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
1070 			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
1071 		}
1072 	} else {
1073 		/*
1074 		 * We split periods of matched TSC writes into generations.
1075 		 * For each generation, we track the original measured
1076 		 * nanosecond time, offset, and write, so if TSCs are in
1077 		 * sync, we can match exact offset, and if not, we can match
1078 		 * exact software computation in compute_guest_tsc()
1079 		 *
1080 		 * These values are tracked in kvm->arch.cur_xxx variables.
1081 		 */
1082 		kvm->arch.cur_tsc_generation++;
1083 		kvm->arch.cur_tsc_nsec = ns;
1084 		kvm->arch.cur_tsc_write = data;
1085 		kvm->arch.cur_tsc_offset = offset;
1086 		pr_debug("kvm: new tsc generation %u, clock %llu\n",
1087 			 kvm->arch.cur_tsc_generation, data);
1088 	}
1089 
1090 	/*
1091 	 * We also track the most recent recorded KHz, write and time to
1092 	 * allow the matching interval to be extended at each write.
1093 	 */
1094 	kvm->arch.last_tsc_nsec = ns;
1095 	kvm->arch.last_tsc_write = data;
1096 	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
1097 
1098 	/* Reset of TSC must disable overshoot protection below */
1099 	vcpu->arch.hv_clock.tsc_timestamp = 0;
1100 	vcpu->arch.last_guest_tsc = data;
1101 
1102 	/* Keep track of which generation this VCPU has synchronized to */
1103 	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
1104 	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
1105 	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
1106 
1107 	kvm_x86_ops->write_tsc_offset(vcpu, offset);
1108 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
1109 }
1110 
1111 EXPORT_SYMBOL_GPL(kvm_write_tsc);
1112 
1113 static int kvm_guest_time_update(struct kvm_vcpu *v)
1114 {
1115 	unsigned long flags;
1116 	struct kvm_vcpu_arch *vcpu = &v->arch;
1117 	void *shared_kaddr;
1118 	unsigned long this_tsc_khz;
1119 	s64 kernel_ns, max_kernel_ns;
1120 	u64 tsc_timestamp;
1121 
1122 	/* Keep irq disabled to prevent changes to the clock */
1123 	local_irq_save(flags);
1124 	tsc_timestamp = kvm_x86_ops->read_l1_tsc(v);
1125 	kernel_ns = get_kernel_ns();
1126 	this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
1127 	if (unlikely(this_tsc_khz == 0)) {
1128 		local_irq_restore(flags);
1129 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1130 		return 1;
1131 	}
1132 
1133 	/*
1134 	 * We may have to catch up the TSC to match elapsed wall clock
1135 	 * time for two reasons, even if kvmclock is used.
1136 	 *   1) CPU could have been running below the maximum TSC rate
1137 	 *   2) Broken TSC compensation resets the base at each VCPU
1138 	 *      entry to avoid unknown leaps of TSC even when running
1139 	 *      again on the same CPU.  This may cause apparent elapsed
1140 	 *      time to disappear, and the guest to stand still or run
1141 	 *	very slowly.
1142 	 */
1143 	if (vcpu->tsc_catchup) {
1144 		u64 tsc = compute_guest_tsc(v, kernel_ns);
1145 		if (tsc > tsc_timestamp) {
1146 			adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
1147 			tsc_timestamp = tsc;
1148 		}
1149 	}
1150 
1151 	local_irq_restore(flags);
1152 
1153 	if (!vcpu->time_page)
1154 		return 0;
1155 
1156 	/*
1157 	 * Time as measured by the TSC may go backwards when resetting the base
1158 	 * tsc_timestamp.  The reason for this is that the TSC resolution is
1159 	 * higher than the resolution of the other clock scales.  Thus, many
1160 	 * possible measurments of the TSC correspond to one measurement of any
1161 	 * possible measurements of the TSC correspond to one measurement of any
1162 	 * problem for the computation of the nanosecond clock; with TSC rates
1163 	 * around 1GHZ, there can only be a few cycles which correspond to one
1164 	 * around 1GHz, there can only be a few cycles which correspond to one
1165 	 * take longer than that.  However, with the kernel_ns value itself,
1166 	 * the precision may be much lower, down to HZ granularity.  If the
1167 	 * first sampling of TSC against kernel_ns ends in the low part of the
1168 	 * range, and the second in the high end of the range, we can get:
1169 	 *
1170 	 * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
1171 	 *
1172 	 * As the sampling errors potentially range in the thousands of cycles,
1173 	 * it is possible such a time value has already been observed by the
1174 	 * guest.  To protect against this, we must compute the system time as
1175 	 * observed by the guest and ensure the new system time is greater.
1176 	 */
1177 	max_kernel_ns = 0;
1178 	if (vcpu->hv_clock.tsc_timestamp) {
1179 		max_kernel_ns = vcpu->last_guest_tsc -
1180 				vcpu->hv_clock.tsc_timestamp;
1181 		max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
1182 				    vcpu->hv_clock.tsc_to_system_mul,
1183 				    vcpu->hv_clock.tsc_shift);
1184 		max_kernel_ns += vcpu->last_kernel_ns;
1185 	}
1186 
1187 	if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
1188 		kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
1189 				   &vcpu->hv_clock.tsc_shift,
1190 				   &vcpu->hv_clock.tsc_to_system_mul);
1191 		vcpu->hw_tsc_khz = this_tsc_khz;
1192 	}
1193 
1194 	if (max_kernel_ns > kernel_ns)
1195 		kernel_ns = max_kernel_ns;
1196 
1197 	/* With all the info we got, fill in the values */
1198 	vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
1199 	vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
1200 	vcpu->last_kernel_ns = kernel_ns;
1201 	vcpu->last_guest_tsc = tsc_timestamp;
1202 	vcpu->hv_clock.flags = 0;
1203 
1204 	/*
1205 	 * The interface expects us to write an even number signaling that the
1206 	 * update is finished. Since the guest won't see the intermediate
1207 	 * state, we just increase by 2 at the end.
1208 	 */
1209 	vcpu->hv_clock.version += 2;
1210 
1211 	shared_kaddr = kmap_atomic(vcpu->time_page);
1212 
1213 	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
1214 	       sizeof(vcpu->hv_clock));
1215 
1216 	kunmap_atomic(shared_kaddr);
1217 
1218 	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
1219 	return 0;
1220 }
1221 
1222 static bool msr_mtrr_valid(unsigned msr)
1223 {
1224 	switch (msr) {
1225 	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
1226 	case MSR_MTRRfix64K_00000:
1227 	case MSR_MTRRfix16K_80000:
1228 	case MSR_MTRRfix16K_A0000:
1229 	case MSR_MTRRfix4K_C0000:
1230 	case MSR_MTRRfix4K_C8000:
1231 	case MSR_MTRRfix4K_D0000:
1232 	case MSR_MTRRfix4K_D8000:
1233 	case MSR_MTRRfix4K_E0000:
1234 	case MSR_MTRRfix4K_E8000:
1235 	case MSR_MTRRfix4K_F0000:
1236 	case MSR_MTRRfix4K_F8000:
1237 	case MSR_MTRRdefType:
1238 	case MSR_IA32_CR_PAT:
1239 		return true;
1240 	case 0x2f8:
1241 		return true;
1242 	}
1243 	return false;
1244 }
1245 
1246 static bool valid_pat_type(unsigned t)
1247 {
1248 	return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
1249 }
1250 
1251 static bool valid_mtrr_type(unsigned t)
1252 {
1253 	return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
1254 }
1255 
1256 static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1257 {
1258 	int i;
1259 
1260 	if (!msr_mtrr_valid(msr))
1261 		return false;
1262 
1263 	if (msr == MSR_IA32_CR_PAT) {
1264 		for (i = 0; i < 8; i++)
1265 			if (!valid_pat_type((data >> (i * 8)) & 0xff))
1266 				return false;
1267 		return true;
1268 	} else if (msr == MSR_MTRRdefType) {
1269 		if (data & ~0xcff)
1270 			return false;
1271 		return valid_mtrr_type(data & 0xff);
1272 	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
1273 		for (i = 0; i < 8 ; i++)
1274 			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
1275 				return false;
1276 		return true;
1277 	}
1278 
1279 	/* variable MTRRs */
1280 	return valid_mtrr_type(data & 0xff);
1281 }
1282 
1283 static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1284 {
1285 	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
1286 
1287 	if (!mtrr_valid(vcpu, msr, data))
1288 		return 1;
1289 
1290 	if (msr == MSR_MTRRdefType) {
1291 		vcpu->arch.mtrr_state.def_type = data;
1292 		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
1293 	} else if (msr == MSR_MTRRfix64K_00000)
1294 		p[0] = data;
1295 	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
1296 		p[1 + msr - MSR_MTRRfix16K_80000] = data;
1297 	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
1298 		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
1299 	else if (msr == MSR_IA32_CR_PAT)
1300 		vcpu->arch.pat = data;
1301 	else {	/* Variable MTRRs */
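		/* Variable-range MTRRs come in PHYSBASEn/PHYSMASKn pairs starting at
		 * MSR 0x200: even MSRs hold the base, odd MSRs the mask. */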
1302 		int idx, is_mtrr_mask;
1303 		u64 *pt;
1304 
1305 		idx = (msr - 0x200) / 2;
1306 		is_mtrr_mask = msr - 0x200 - 2 * idx;
1307 		if (!is_mtrr_mask)
1308 			pt =
1309 			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
1310 		else
1311 			pt =
1312 			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
1313 		*pt = data;
1314 	}
1315 
1316 	kvm_mmu_reset_context(vcpu);
1317 	return 0;
1318 }
1319 
1320 static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1321 {
1322 	u64 mcg_cap = vcpu->arch.mcg_cap;
1323 	unsigned bank_num = mcg_cap & 0xff;
1324 
1325 	switch (msr) {
1326 	case MSR_IA32_MCG_STATUS:
1327 		vcpu->arch.mcg_status = data;
1328 		break;
1329 	case MSR_IA32_MCG_CTL:
1330 		if (!(mcg_cap & MCG_CTL_P))
1331 			return 1;
1332 		if (data != 0 && data != ~(u64)0)
1333 			return -1;
1334 		vcpu->arch.mcg_ctl = data;
1335 		break;
1336 	default:
1337 		if (msr >= MSR_IA32_MC0_CTL &&
1338 		    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
1339 			u32 offset = msr - MSR_IA32_MC0_CTL;
1340 			/* only 0 or all 1s can be written to IA32_MCi_CTL;
1341 			 * some Linux kernels, though, clear bit 10 in bank 4 to
1342 			 * work around a BIOS/GART TBL issue on AMD K8s; ignore
1343 			 * this to avoid an uncaught #GP in the guest
1344 			 */
1345 			if ((offset & 0x3) == 0 &&
1346 			    data != 0 && (data | (1 << 10)) != ~(u64)0)
1347 				return -1;
1348 			vcpu->arch.mce_banks[offset] = data;
1349 			break;
1350 		}
1351 		return 1;
1352 	}
1353 	return 0;
1354 }
1355 
1356 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
1357 {
1358 	struct kvm *kvm = vcpu->kvm;
1359 	int lm = is_long_mode(vcpu);
1360 	u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
1361 		: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
1362 	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
1363 		: kvm->arch.xen_hvm_config.blob_size_32;
1364 	u32 page_num = data & ~PAGE_MASK;
1365 	u64 page_addr = data & PAGE_MASK;
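	/* The written value encodes the destination guest page in its upper bits
	 * and the index into the hypercall blob in its low 12 bits. */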
1366 	u8 *page;
1367 	int r;
1368 
1369 	r = -E2BIG;
1370 	if (page_num >= blob_size)
1371 		goto out;
1372 	r = -ENOMEM;
1373 	page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
1374 	if (IS_ERR(page)) {
1375 		r = PTR_ERR(page);
1376 		goto out;
1377 	}
1378 	if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
1379 		goto out_free;
1380 	r = 0;
1381 out_free:
1382 	kfree(page);
1383 out:
1384 	return r;
1385 }
1386 
1387 static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
1388 {
1389 	return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
1390 }
1391 
1392 static bool kvm_hv_msr_partition_wide(u32 msr)
1393 {
1394 	bool r = false;
1395 	switch (msr) {
1396 	case HV_X64_MSR_GUEST_OS_ID:
1397 	case HV_X64_MSR_HYPERCALL:
1398 		r = true;
1399 		break;
1400 	}
1401 
1402 	return r;
1403 }
1404 
1405 static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1406 {
1407 	struct kvm *kvm = vcpu->kvm;
1408 
1409 	switch (msr) {
1410 	case HV_X64_MSR_GUEST_OS_ID:
1411 		kvm->arch.hv_guest_os_id = data;
1412 		/* setting guest os id to zero disables hypercall page */
1413 		if (!kvm->arch.hv_guest_os_id)
1414 			kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
1415 		break;
1416 	case HV_X64_MSR_HYPERCALL: {
1417 		u64 gfn;
1418 		unsigned long addr;
1419 		u8 instructions[4];
1420 
1421 		/* if guest os id is not set, the hypercall should remain disabled */
1422 		if (!kvm->arch.hv_guest_os_id)
1423 			break;
1424 		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
1425 			kvm->arch.hv_hypercall = data;
1426 			break;
1427 		}
1428 		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
1429 		addr = gfn_to_hva(kvm, gfn);
1430 		if (kvm_is_error_hva(addr))
1431 			return 1;
1432 		kvm_x86_ops->patch_hypercall(vcpu, instructions);
1433 		((unsigned char *)instructions)[3] = 0xc3; /* ret */
1434 		if (__copy_to_user((void __user *)addr, instructions, 4))
1435 			return 1;
1436 		kvm->arch.hv_hypercall = data;
1437 		break;
1438 	}
1439 	default:
1440 		pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
1441 			  "data 0x%llx\n", msr, data);
1442 		return 1;
1443 	}
1444 	return 0;
1445 }
1446 
1447 static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1448 {
1449 	switch (msr) {
1450 	case HV_X64_MSR_APIC_ASSIST_PAGE: {
1451 		unsigned long addr;
1452 
1453 		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
1454 			vcpu->arch.hv_vapic = data;
1455 			break;
1456 		}
1457 		addr = gfn_to_hva(vcpu->kvm, data >>
1458 				  HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
1459 		if (kvm_is_error_hva(addr))
1460 			return 1;
1461 		if (__clear_user((void __user *)addr, PAGE_SIZE))
1462 			return 1;
1463 		vcpu->arch.hv_vapic = data;
1464 		break;
1465 	}
1466 	case HV_X64_MSR_EOI:
1467 		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
1468 	case HV_X64_MSR_ICR:
1469 		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
1470 	case HV_X64_MSR_TPR:
1471 		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
1472 	default:
1473 		pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
1474 			  "data 0x%llx\n", msr, data);
1475 		return 1;
1476 	}
1477 
1478 	return 0;
1479 }
1480 
1481 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
1482 {
1483 	gpa_t gpa = data & ~0x3f;
1484 
1485 	/* Bits 2:5 are reserved and should be zero */
1486 	if (data & 0x3c)
1487 		return 1;
1488 
1489 	vcpu->arch.apf.msr_val = data;
1490 
1491 	if (!(data & KVM_ASYNC_PF_ENABLED)) {
1492 		kvm_clear_async_pf_completion_queue(vcpu);
1493 		kvm_async_pf_hash_reset(vcpu);
1494 		return 0;
1495 	}
1496 
1497 	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
1498 		return 1;
1499 
1500 	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
1501 	kvm_async_pf_wakeup_all(vcpu);
1502 	return 0;
1503 }
1504 
1505 static void kvmclock_reset(struct kvm_vcpu *vcpu)
1506 {
1507 	if (vcpu->arch.time_page) {
1508 		kvm_release_page_dirty(vcpu->arch.time_page);
1509 		vcpu->arch.time_page = NULL;
1510 	}
1511 }
1512 
1513 static void accumulate_steal_time(struct kvm_vcpu *vcpu)
1514 {
1515 	u64 delta;
1516 
1517 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
1518 		return;
1519 
1520 	delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
1521 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
1522 	vcpu->arch.st.accum_steal = delta;
1523 }
1524 
1525 static void record_steal_time(struct kvm_vcpu *vcpu)
1526 {
1527 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
1528 		return;
1529 
1530 	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
1531 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
1532 		return;
1533 
1534 	vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
1535 	vcpu->arch.st.steal.version += 2;
1536 	vcpu->arch.st.accum_steal = 0;
1537 
1538 	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
1539 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
1540 }
1541 
1542 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1543 {
1544 	bool pr = false;
1545 
1546 	switch (msr) {
1547 	case MSR_EFER:
1548 		return set_efer(vcpu, data);
1549 	case MSR_K7_HWCR:
1550 		data &= ~(u64)0x40;	/* ignore flush filter disable */
1551 		data &= ~(u64)0x100;	/* ignore ignne emulation enable */
1552 		data &= ~(u64)0x8;	/* ignore TLB cache disable */
1553 		if (data != 0) {
1554 			pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
1555 				data);
1556 			return 1;
1557 		}
1558 		break;
1559 	case MSR_FAM10H_MMIO_CONF_BASE:
1560 		if (data != 0) {
1561 			pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
1562 				"0x%llx\n", data);
1563 			return 1;
1564 		}
1565 		break;
1566 	case MSR_AMD64_NB_CFG:
1567 		break;
1568 	case MSR_IA32_DEBUGCTLMSR:
1569 		if (!data) {
1570 			/* We support the non-activated case already */
1571 			break;
1572 		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
1573 			/* Values other than LBR and BTF are vendor-specific,
1574 			   thus reserved and should throw a #GP */
1575 			return 1;
1576 		}
1577 		pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
1578 			__func__, data);
1579 		break;
1580 	case MSR_IA32_UCODE_REV:
1581 	case MSR_IA32_UCODE_WRITE:
1582 	case MSR_VM_HSAVE_PA:
1583 	case MSR_AMD64_PATCH_LOADER:
1584 		break;
1585 	case 0x200 ... 0x2ff:
1586 		return set_msr_mtrr(vcpu, msr, data);
1587 	case MSR_IA32_APICBASE:
1588 		kvm_set_apic_base(vcpu, data);
1589 		break;
1590 	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1591 		return kvm_x2apic_msr_write(vcpu, msr, data);
1592 	case MSR_IA32_TSCDEADLINE:
1593 		kvm_set_lapic_tscdeadline_msr(vcpu, data);
1594 		break;
1595 	case MSR_IA32_MISC_ENABLE:
1596 		vcpu->arch.ia32_misc_enable_msr = data;
1597 		break;
1598 	case MSR_KVM_WALL_CLOCK_NEW:
1599 	case MSR_KVM_WALL_CLOCK:
1600 		vcpu->kvm->arch.wall_clock = data;
1601 		kvm_write_wall_clock(vcpu->kvm, data);
1602 		break;
1603 	case MSR_KVM_SYSTEM_TIME_NEW:
1604 	case MSR_KVM_SYSTEM_TIME: {
1605 		kvmclock_reset(vcpu);
1606 
1607 		vcpu->arch.time = data;
1608 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
1609 
1610 		/* we verify if the enable bit is set... */
1611 		if (!(data & 1))
1612 			break;
1613 
1614 		/* ...but clean it before doing the actual write */
1615 		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
1616 
1617 		vcpu->arch.time_page =
1618 				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
1619 
1620 		if (is_error_page(vcpu->arch.time_page)) {
1621 			kvm_release_page_clean(vcpu->arch.time_page);
1622 			vcpu->arch.time_page = NULL;
1623 		}
1624 		break;
1625 	}
1626 	case MSR_KVM_ASYNC_PF_EN:
1627 		if (kvm_pv_enable_async_pf(vcpu, data))
1628 			return 1;
1629 		break;
1630 	case MSR_KVM_STEAL_TIME:
1631 
1632 		if (unlikely(!sched_info_on()))
1633 			return 1;
1634 
1635 		if (data & KVM_STEAL_RESERVED_MASK)
1636 			return 1;
1637 
1638 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
1639 							data & KVM_STEAL_VALID_BITS))
1640 			return 1;
1641 
1642 		vcpu->arch.st.msr_val = data;
1643 
1644 		if (!(data & KVM_MSR_ENABLED))
1645 			break;
1646 
1647 		vcpu->arch.st.last_steal = current->sched_info.run_delay;
1648 
1649 		preempt_disable();
1650 		accumulate_steal_time(vcpu);
1651 		preempt_enable();
1652 
1653 		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1654 
1655 		break;
1656 
1657 	case MSR_IA32_MCG_CTL:
1658 	case MSR_IA32_MCG_STATUS:
1659 	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1660 		return set_msr_mce(vcpu, msr, data);
1661 
1662 	/* Performance counters are not protected by a CPUID bit,
1663 	 * so we should check all of them in the generic path for the sake of
1664 	 * cross-vendor migration.
1665 	 * Writing a zero into the event select MSRs disables them,
1666 	 * which we perfectly emulate ;-). Any other value should at least
1667 	 * be reported; some guests depend on them.
1668 	 */
1669 	case MSR_K7_EVNTSEL0:
1670 	case MSR_K7_EVNTSEL1:
1671 	case MSR_K7_EVNTSEL2:
1672 	case MSR_K7_EVNTSEL3:
1673 		if (data != 0)
1674 			pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1675 				"0x%x data 0x%llx\n", msr, data);
1676 		break;
1677 	/* at least RHEL 4 unconditionally writes to the perfctr registers,
1678 	 * so we ignore writes to make it happy.
1679 	 */
1680 	case MSR_K7_PERFCTR0:
1681 	case MSR_K7_PERFCTR1:
1682 	case MSR_K7_PERFCTR2:
1683 	case MSR_K7_PERFCTR3:
1684 		pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1685 			"0x%x data 0x%llx\n", msr, data);
1686 		break;
1687 	case MSR_P6_PERFCTR0:
1688 	case MSR_P6_PERFCTR1:
1689 		pr = true;
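		/* fall through */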
1690 	case MSR_P6_EVNTSEL0:
1691 	case MSR_P6_EVNTSEL1:
1692 		if (kvm_pmu_msr(vcpu, msr))
1693 			return kvm_pmu_set_msr(vcpu, msr, data);
1694 
1695 		if (pr || data != 0)
1696 			pr_unimpl(vcpu, "disabled perfctr wrmsr: "
1697 				"0x%x data 0x%llx\n", msr, data);
1698 		break;
1699 	case MSR_K7_CLK_CTL:
1700 		/*
1701 		 * Ignore all writes to this no longer documented MSR.
1702 		 * Writes are only relevant for old K7 processors,
1703 		 * all pre-dating SVM, but are a recommended workaround from
1704 		 * AMD for these chips. It is possible to specify the
1705 		 * affected processor models on the command line, hence
1706 		 * the need to ignore the workaround.
1707 		 */
1708 		break;
1709 	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
1710 		if (kvm_hv_msr_partition_wide(msr)) {
1711 			int r;
1712 			mutex_lock(&vcpu->kvm->lock);
1713 			r = set_msr_hyperv_pw(vcpu, msr, data);
1714 			mutex_unlock(&vcpu->kvm->lock);
1715 			return r;
1716 		} else
1717 			return set_msr_hyperv(vcpu, msr, data);
1718 		break;
1719 	case MSR_IA32_BBL_CR_CTL3:
1720 		/* Drop writes to this legacy MSR -- see rdmsr
1721 		 * counterpart for further detail.
1722 		 */
1723 		pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
1724 		break;
1725 	case MSR_AMD64_OSVW_ID_LENGTH:
1726 		if (!guest_cpuid_has_osvw(vcpu))
1727 			return 1;
1728 		vcpu->arch.osvw.length = data;
1729 		break;
1730 	case MSR_AMD64_OSVW_STATUS:
1731 		if (!guest_cpuid_has_osvw(vcpu))
1732 			return 1;
1733 		vcpu->arch.osvw.status = data;
1734 		break;
1735 	default:
1736 		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
1737 			return xen_hvm_config(vcpu, data);
1738 		if (kvm_pmu_msr(vcpu, msr))
1739 			return kvm_pmu_set_msr(vcpu, msr, data);
1740 		if (!ignore_msrs) {
1741 			pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
1742 				msr, data);
1743 			return 1;
1744 		} else {
1745 			pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
1746 				msr, data);
1747 			break;
1748 		}
1749 	}
1750 	return 0;
1751 }
1752 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
1753 
1754 
1755 /*
1756  * Reads an msr value (of 'msr_index') into 'pdata'.
1757  * Returns 0 on success, non-0 otherwise.
1758  * Assumes vcpu_load() was already called.
1759  */
1760 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
1761 {
1762 	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
1763 }
1764 
1765 static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1766 {
1767 	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
1768 
1769 	if (!msr_mtrr_valid(msr))
1770 		return 1;
1771 
1772 	if (msr == MSR_MTRRdefType)
1773 		*pdata = vcpu->arch.mtrr_state.def_type +
1774 			 (vcpu->arch.mtrr_state.enabled << 10);
1775 	else if (msr == MSR_MTRRfix64K_00000)
1776 		*pdata = p[0];
1777 	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
1778 		*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
1779 	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
1780 		*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
1781 	else if (msr == MSR_IA32_CR_PAT)
1782 		*pdata = vcpu->arch.pat;
1783 	else {	/* Variable MTRRs */
1784 		int idx, is_mtrr_mask;
1785 		u64 *pt;
1786 
1787 		idx = (msr - 0x200) / 2;
1788 		is_mtrr_mask = msr - 0x200 - 2 * idx;
1789 		if (!is_mtrr_mask)
1790 			pt =
1791 			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
1792 		else
1793 			pt =
1794 			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
1795 		*pdata = *pt;
1796 	}
1797 
1798 	return 0;
1799 }
1800 
1801 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1802 {
1803 	u64 data;
1804 	u64 mcg_cap = vcpu->arch.mcg_cap;
1805 	unsigned bank_num = mcg_cap & 0xff;
1806 
1807 	switch (msr) {
1808 	case MSR_IA32_P5_MC_ADDR:
1809 	case MSR_IA32_P5_MC_TYPE:
1810 		data = 0;
1811 		break;
1812 	case MSR_IA32_MCG_CAP:
1813 		data = vcpu->arch.mcg_cap;
1814 		break;
1815 	case MSR_IA32_MCG_CTL:
1816 		if (!(mcg_cap & MCG_CTL_P))
1817 			return 1;
1818 		data = vcpu->arch.mcg_ctl;
1819 		break;
1820 	case MSR_IA32_MCG_STATUS:
1821 		data = vcpu->arch.mcg_status;
1822 		break;
1823 	default:
1824 		if (msr >= MSR_IA32_MC0_CTL &&
1825 		    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
1826 			u32 offset = msr - MSR_IA32_MC0_CTL;
1827 			data = vcpu->arch.mce_banks[offset];
1828 			break;
1829 		}
1830 		return 1;
1831 	}
1832 	*pdata = data;
1833 	return 0;
1834 }
1835 
1836 static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1837 {
1838 	u64 data = 0;
1839 	struct kvm *kvm = vcpu->kvm;
1840 
1841 	switch (msr) {
1842 	case HV_X64_MSR_GUEST_OS_ID:
1843 		data = kvm->arch.hv_guest_os_id;
1844 		break;
1845 	case HV_X64_MSR_HYPERCALL:
1846 		data = kvm->arch.hv_hypercall;
1847 		break;
1848 	default:
1849 		pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1850 		return 1;
1851 	}
1852 
1853 	*pdata = data;
1854 	return 0;
1855 }
1856 
1857 static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1858 {
1859 	u64 data = 0;
1860 
1861 	switch (msr) {
1862 	case HV_X64_MSR_VP_INDEX: {
1863 		int r;
1864 		struct kvm_vcpu *v;
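		/* HV_X64_MSR_VP_INDEX reports this vcpu's slot in kvm->vcpus */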
1865 		kvm_for_each_vcpu(r, v, vcpu->kvm)
1866 			if (v == vcpu)
1867 				data = r;
1868 		break;
1869 	}
1870 	case HV_X64_MSR_EOI:
1871 		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1872 	case HV_X64_MSR_ICR:
1873 		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
1874 	case HV_X64_MSR_TPR:
1875 		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
1876 	case HV_X64_MSR_APIC_ASSIST_PAGE:
1877 		data = vcpu->arch.hv_vapic;
1878 		break;
1879 	default:
1880 		pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1881 		return 1;
1882 	}
1883 	*pdata = data;
1884 	return 0;
1885 }
1886 
1887 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1888 {
1889 	u64 data;
1890 
1891 	switch (msr) {
1892 	case MSR_IA32_PLATFORM_ID:
1893 	case MSR_IA32_EBL_CR_POWERON:
1894 	case MSR_IA32_DEBUGCTLMSR:
1895 	case MSR_IA32_LASTBRANCHFROMIP:
1896 	case MSR_IA32_LASTBRANCHTOIP:
1897 	case MSR_IA32_LASTINTFROMIP:
1898 	case MSR_IA32_LASTINTTOIP:
1899 	case MSR_K8_SYSCFG:
1900 	case MSR_K7_HWCR:
1901 	case MSR_VM_HSAVE_PA:
1902 	case MSR_K7_EVNTSEL0:
1903 	case MSR_K7_PERFCTR0:
1904 	case MSR_K8_INT_PENDING_MSG:
1905 	case MSR_AMD64_NB_CFG:
1906 	case MSR_FAM10H_MMIO_CONF_BASE:
1907 		data = 0;
1908 		break;
1909 	case MSR_P6_PERFCTR0:
1910 	case MSR_P6_PERFCTR1:
1911 	case MSR_P6_EVNTSEL0:
1912 	case MSR_P6_EVNTSEL1:
1913 		if (kvm_pmu_msr(vcpu, msr))
1914 			return kvm_pmu_get_msr(vcpu, msr, pdata);
1915 		data = 0;
1916 		break;
1917 	case MSR_IA32_UCODE_REV:
1918 		data = 0x100000000ULL;
1919 		break;
1920 	case MSR_MTRRcap:
1921 		data = 0x500 | KVM_NR_VAR_MTRR;
1922 		break;
1923 	case 0x200 ... 0x2ff:
1924 		return get_msr_mtrr(vcpu, msr, pdata);
1925 	case 0xcd: /* fsb frequency */
1926 		data = 3;
1927 		break;
1928 		/*
1929 		 * MSR_EBC_FREQUENCY_ID
1930 		 * Conservative value valid for even the basic CPU models.
1931 		 * Models 0 and 1: 000 in bits 23:21 indicates a bus speed of
1932 		 * 100MHz; model 2: 000 in bits 18:16 indicates 100MHz; and
1933 		 * 266MHz for models 3 and 4. Set Core Clock
1934 		 * Frequency to System Bus Frequency Ratio to 1 (bits
1935 		 * 31:24) even though these are only valid for CPU
1936 		 * models > 2, since guests may otherwise end up dividing
1937 		 * or multiplying by zero.
1938 		 */
1939 	case MSR_EBC_FREQUENCY_ID:
1940 		data = 1 << 24;
1941 		break;
1942 	case MSR_IA32_APICBASE:
1943 		data = kvm_get_apic_base(vcpu);
1944 		break;
1945 	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1946 		return kvm_x2apic_msr_read(vcpu, msr, pdata);
1947 		break;
1948 	case MSR_IA32_TSCDEADLINE:
1949 		data = kvm_get_lapic_tscdeadline_msr(vcpu);
1950 		break;
1951 	case MSR_IA32_MISC_ENABLE:
1952 		data = vcpu->arch.ia32_misc_enable_msr;
1953 		break;
1954 	case MSR_IA32_PERF_STATUS:
1955 		/* TSC increment by tick */
1956 		data = 1000ULL;
1957 		/* CPU multiplier */
1958 		data |= (((uint64_t)4ULL) << 40);
1959 		break;
1960 	case MSR_EFER:
1961 		data = vcpu->arch.efer;
1962 		break;
1963 	case MSR_KVM_WALL_CLOCK:
1964 	case MSR_KVM_WALL_CLOCK_NEW:
1965 		data = vcpu->kvm->arch.wall_clock;
1966 		break;
1967 	case MSR_KVM_SYSTEM_TIME:
1968 	case MSR_KVM_SYSTEM_TIME_NEW:
1969 		data = vcpu->arch.time;
1970 		break;
1971 	case MSR_KVM_ASYNC_PF_EN:
1972 		data = vcpu->arch.apf.msr_val;
1973 		break;
1974 	case MSR_KVM_STEAL_TIME:
1975 		data = vcpu->arch.st.msr_val;
1976 		break;
1977 	case MSR_IA32_P5_MC_ADDR:
1978 	case MSR_IA32_P5_MC_TYPE:
1979 	case MSR_IA32_MCG_CAP:
1980 	case MSR_IA32_MCG_CTL:
1981 	case MSR_IA32_MCG_STATUS:
1982 	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1983 		return get_msr_mce(vcpu, msr, pdata);
1984 	case MSR_K7_CLK_CTL:
1985 		/*
1986 		 * Provide expected ramp-up count for K7. All other bits
1987 		 * are set to zero, indicating minimum divisors for
1988 		 * every field.
1989 		 *
1990 		 * This prevents guest kernels on AMD host with CPU
1991 		 * type 6, model 8 and higher from exploding due to
1992 		 * the rdmsr failing.
1993 		 */
1994 		data = 0x20000000;
1995 		break;
1996 	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
1997 		if (kvm_hv_msr_partition_wide(msr)) {
1998 			int r;
1999 			mutex_lock(&vcpu->kvm->lock);
2000 			r = get_msr_hyperv_pw(vcpu, msr, pdata);
2001 			mutex_unlock(&vcpu->kvm->lock);
2002 			return r;
2003 		} else
2004 			return get_msr_hyperv(vcpu, msr, pdata);
2005 		break;
2006 	case MSR_IA32_BBL_CR_CTL3:
2007 		/* This legacy MSR exists but isn't fully documented in current
2008 		 * silicon.  It is however accessed by winxp in very narrow
2009 		 * scenarios where it sets bit #19, itself documented as
2010 		 * a "reserved" bit.  Best effort attempt to source coherent
2011 		 * read data here should the balance of the register be
2012 		 * interpreted by the guest:
2013 		 *
2014 		 * L2 cache control register 3: 64GB range, 256KB size,
2015 		 * enabled, latency 0x1, configured
2016 		 */
2017 		data = 0xbe702111;
2018 		break;
2019 	case MSR_AMD64_OSVW_ID_LENGTH:
2020 		if (!guest_cpuid_has_osvw(vcpu))
2021 			return 1;
2022 		data = vcpu->arch.osvw.length;
2023 		break;
2024 	case MSR_AMD64_OSVW_STATUS:
2025 		if (!guest_cpuid_has_osvw(vcpu))
2026 			return 1;
2027 		data = vcpu->arch.osvw.status;
2028 		break;
2029 	default:
2030 		if (kvm_pmu_msr(vcpu, msr))
2031 			return kvm_pmu_get_msr(vcpu, msr, pdata);
2032 		if (!ignore_msrs) {
2033 			pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
2034 			return 1;
2035 		} else {
2036 			pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
2037 			data = 0;
2038 		}
2039 		break;
2040 	}
2041 	*pdata = data;
2042 	return 0;
2043 }
2044 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
2045 
2046 /*
2047  * Read or write a bunch of msrs. All parameters are kernel addresses.
2048  *
2049  * @return number of msrs processed successfully.
2050  */
2051 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
2052 		    struct kvm_msr_entry *entries,
2053 		    int (*do_msr)(struct kvm_vcpu *vcpu,
2054 				  unsigned index, u64 *data))
2055 {
2056 	int i, idx;
2057 
2058 	idx = srcu_read_lock(&vcpu->kvm->srcu);
2059 	for (i = 0; i < msrs->nmsrs; ++i)
2060 		if (do_msr(vcpu, entries[i].index, &entries[i].data))
2061 			break;
2062 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
2063 
2064 	return i;
2065 }
2066 
2067 /*
2068  * Read or write a bunch of msrs. Parameters are user addresses.
2069  *
2070  * @return number of msrs processed successfully.
2071  */
2072 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
2073 		  int (*do_msr)(struct kvm_vcpu *vcpu,
2074 				unsigned index, u64 *data),
2075 		  int writeback)
2076 {
2077 	struct kvm_msrs msrs;
2078 	struct kvm_msr_entry *entries;
2079 	int r, n;
2080 	unsigned size;
2081 
2082 	r = -EFAULT;
2083 	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
2084 		goto out;
2085 
2086 	r = -E2BIG;
2087 	if (msrs.nmsrs >= MAX_IO_MSRS)
2088 		goto out;
2089 
2090 	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
2091 	entries = memdup_user(user_msrs->entries, size);
2092 	if (IS_ERR(entries)) {
2093 		r = PTR_ERR(entries);
2094 		goto out;
2095 	}
2096 
2097 	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
2098 	if (r < 0)
2099 		goto out_free;
2100 
2101 	r = -EFAULT;
2102 	if (writeback && copy_to_user(user_msrs->entries, entries, size))
2103 		goto out_free;
2104 
2105 	r = n;
2106 
2107 out_free:
2108 	kfree(entries);
2109 out:
2110 	return r;
2111 }
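
/*
 * Illustrative userspace sketch (not part of this file): reading a single
 * MSR through the KVM_GET_MSRS vcpu ioctl.  vcpu_fd and the MSR index are
 * assumptions made for the example; the ioctl's return value is the number
 * of entries processed, as computed by __msr_io() above.
 *
 *	struct kvm_msrs *req = calloc(1, sizeof(*req) +
 *				      sizeof(struct kvm_msr_entry));
 *	req->nmsrs = 1;
 *	req->entries[0].index = 0xc0000080;		// MSR_EFER
 *	if (ioctl(vcpu_fd, KVM_GET_MSRS, req) == 1)
 *		printf("EFER = 0x%llx\n",
 *		       (unsigned long long)req->entries[0].data);
 */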
2112 
2113 int kvm_dev_ioctl_check_extension(long ext)
2114 {
2115 	int r;
2116 
2117 	switch (ext) {
2118 	case KVM_CAP_IRQCHIP:
2119 	case KVM_CAP_HLT:
2120 	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
2121 	case KVM_CAP_SET_TSS_ADDR:
2122 	case KVM_CAP_EXT_CPUID:
2123 	case KVM_CAP_CLOCKSOURCE:
2124 	case KVM_CAP_PIT:
2125 	case KVM_CAP_NOP_IO_DELAY:
2126 	case KVM_CAP_MP_STATE:
2127 	case KVM_CAP_SYNC_MMU:
2128 	case KVM_CAP_USER_NMI:
2129 	case KVM_CAP_REINJECT_CONTROL:
2130 	case KVM_CAP_IRQ_INJECT_STATUS:
2131 	case KVM_CAP_ASSIGN_DEV_IRQ:
2132 	case KVM_CAP_IRQFD:
2133 	case KVM_CAP_IOEVENTFD:
2134 	case KVM_CAP_PIT2:
2135 	case KVM_CAP_PIT_STATE2:
2136 	case KVM_CAP_SET_IDENTITY_MAP_ADDR:
2137 	case KVM_CAP_XEN_HVM:
2138 	case KVM_CAP_ADJUST_CLOCK:
2139 	case KVM_CAP_VCPU_EVENTS:
2140 	case KVM_CAP_HYPERV:
2141 	case KVM_CAP_HYPERV_VAPIC:
2142 	case KVM_CAP_HYPERV_SPIN:
2143 	case KVM_CAP_PCI_SEGMENT:
2144 	case KVM_CAP_DEBUGREGS:
2145 	case KVM_CAP_X86_ROBUST_SINGLESTEP:
2146 	case KVM_CAP_XSAVE:
2147 	case KVM_CAP_ASYNC_PF:
2148 	case KVM_CAP_GET_TSC_KHZ:
2149 	case KVM_CAP_PCI_2_3:
2150 		r = 1;
2151 		break;
2152 	case KVM_CAP_COALESCED_MMIO:
2153 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
2154 		break;
2155 	case KVM_CAP_VAPIC:
2156 		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
2157 		break;
2158 	case KVM_CAP_NR_VCPUS:
2159 		r = KVM_SOFT_MAX_VCPUS;
2160 		break;
2161 	case KVM_CAP_MAX_VCPUS:
2162 		r = KVM_MAX_VCPUS;
2163 		break;
2164 	case KVM_CAP_NR_MEMSLOTS:
2165 		r = KVM_MEMORY_SLOTS;
2166 		break;
2167 	case KVM_CAP_PV_MMU:	/* obsolete */
2168 		r = 0;
2169 		break;
2170 	case KVM_CAP_IOMMU:
2171 		r = iommu_present(&pci_bus_type);
2172 		break;
2173 	case KVM_CAP_MCE:
2174 		r = KVM_MAX_MCE_BANKS;
2175 		break;
2176 	case KVM_CAP_XCRS:
2177 		r = cpu_has_xsave;
2178 		break;
2179 	case KVM_CAP_TSC_CONTROL:
2180 		r = kvm_has_tsc_control;
2181 		break;
2182 	case KVM_CAP_TSC_DEADLINE_TIMER:
2183 		r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
2184 		break;
2185 	default:
2186 		r = 0;
2187 		break;
2188 	}
2189 	return r;
2190 
2191 }
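
/*
 * Illustrative userspace sketch (not part of this file): the capabilities
 * reported above are queried with the KVM_CHECK_EXTENSION ioctl, e.g.:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int has_irqchip = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP);
 *	// has_irqchip > 0 means the in-kernel irqchip is available
 */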
2192 
2193 long kvm_arch_dev_ioctl(struct file *filp,
2194 			unsigned int ioctl, unsigned long arg)
2195 {
2196 	void __user *argp = (void __user *)arg;
2197 	long r;
2198 
2199 	switch (ioctl) {
2200 	case KVM_GET_MSR_INDEX_LIST: {
2201 		struct kvm_msr_list __user *user_msr_list = argp;
2202 		struct kvm_msr_list msr_list;
2203 		unsigned n;
2204 
2205 		r = -EFAULT;
2206 		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
2207 			goto out;
2208 		n = msr_list.nmsrs;
2209 		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
2210 		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
2211 			goto out;
2212 		r = -E2BIG;
2213 		if (n < msr_list.nmsrs)
2214 			goto out;
2215 		r = -EFAULT;
2216 		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
2217 				 num_msrs_to_save * sizeof(u32)))
2218 			goto out;
2219 		if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
2220 				 &emulated_msrs,
2221 				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
2222 			goto out;
2223 		r = 0;
2224 		break;
2225 	}
2226 	case KVM_GET_SUPPORTED_CPUID: {
2227 		struct kvm_cpuid2 __user *cpuid_arg = argp;
2228 		struct kvm_cpuid2 cpuid;
2229 
2230 		r = -EFAULT;
2231 		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2232 			goto out;
2233 		r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
2234 						      cpuid_arg->entries);
2235 		if (r)
2236 			goto out;
2237 
2238 		r = -EFAULT;
2239 		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2240 			goto out;
2241 		r = 0;
2242 		break;
2243 	}
2244 	case KVM_X86_GET_MCE_CAP_SUPPORTED: {
2245 		u64 mce_cap;
2246 
2247 		mce_cap = KVM_MCE_CAP_SUPPORTED;
2248 		r = -EFAULT;
2249 		if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
2250 			goto out;
2251 		r = 0;
2252 		break;
2253 	}
2254 	default:
2255 		r = -EINVAL;
2256 	}
2257 out:
2258 	return r;
2259 }
2260 
2261 static void wbinvd_ipi(void *garbage)
2262 {
2263 	wbinvd();
2264 }
2265 
2266 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
2267 {
2268 	return vcpu->kvm->arch.iommu_domain &&
2269 		!(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY);
2270 }
2271 
2272 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2273 {
2274 	/* Address the case where WBINVD may be executed by the guest */
2275 	if (need_emulate_wbinvd(vcpu)) {
2276 		if (kvm_x86_ops->has_wbinvd_exit())
2277 			cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
2278 		else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
2279 			smp_call_function_single(vcpu->cpu,
2280 					wbinvd_ipi, NULL, 1);
2281 	}
2282 
2283 	kvm_x86_ops->vcpu_load(vcpu, cpu);
2284 
2285 	/* Apply any externally detected TSC adjustments (due to suspend) */
2286 	if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
2287 		adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
2288 		vcpu->arch.tsc_offset_adjustment = 0;
2289 		set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
2290 	}
2291 
2292 	if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
2293 		s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
2294 				native_read_tsc() - vcpu->arch.last_host_tsc;
2295 		if (tsc_delta < 0)
2296 			mark_tsc_unstable("KVM discovered backwards TSC");
2297 		if (check_tsc_unstable()) {
2298 			u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
2299 						vcpu->arch.last_guest_tsc);
2300 			kvm_x86_ops->write_tsc_offset(vcpu, offset);
2301 			vcpu->arch.tsc_catchup = 1;
2302 		}
2303 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2304 		if (vcpu->cpu != cpu)
2305 			kvm_migrate_timers(vcpu);
2306 		vcpu->cpu = cpu;
2307 	}
2308 
2309 	accumulate_steal_time(vcpu);
2310 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
2311 }
2312 
2313 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2314 {
2315 	kvm_x86_ops->vcpu_put(vcpu);
2316 	kvm_put_guest_fpu(vcpu);
2317 	vcpu->arch.last_host_tsc = native_read_tsc();
2318 }
2319 
2320 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
2321 				    struct kvm_lapic_state *s)
2322 {
2323 	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
2324 
2325 	return 0;
2326 }
2327 
2328 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
2329 				    struct kvm_lapic_state *s)
2330 {
2331 	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
2332 	kvm_apic_post_state_restore(vcpu);
2333 	update_cr8_intercept(vcpu);
2334 
2335 	return 0;
2336 }
2337 
2338 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2339 				    struct kvm_interrupt *irq)
2340 {
2341 	if (irq->irq < 0 || irq->irq >= 256)
2342 		return -EINVAL;
2343 	if (irqchip_in_kernel(vcpu->kvm))
2344 		return -ENXIO;
2345 
2346 	kvm_queue_interrupt(vcpu, irq->irq, false);
2347 	kvm_make_request(KVM_REQ_EVENT, vcpu);
2348 
2349 	return 0;
2350 }
2351 
2352 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
2353 {
2354 	kvm_inject_nmi(vcpu);
2355 
2356 	return 0;
2357 }
2358 
2359 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
2360 					   struct kvm_tpr_access_ctl *tac)
2361 {
2362 	if (tac->flags)
2363 		return -EINVAL;
2364 	vcpu->arch.tpr_access_reporting = !!tac->enabled;
2365 	return 0;
2366 }
2367 
2368 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
2369 					u64 mcg_cap)
2370 {
2371 	int r;
2372 	unsigned bank_num = mcg_cap & 0xff, bank;
2373 
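	/*
	 * mcg_cap layout as consumed here: bits 7:0 give the MCE bank count,
	 * MCG_CTL_P (bit 8) advertises IA32_MCG_CTL, and the 0xff0000 mask
	 * (bits 23:16) plus KVM_MCE_CAP_SUPPORTED bound which other
	 * capability bits userspace may set.
	 */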
2374 	r = -EINVAL;
2375 	if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
2376 		goto out;
2377 	if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
2378 		goto out;
2379 	r = 0;
2380 	vcpu->arch.mcg_cap = mcg_cap;
2381 	/* Init IA32_MCG_CTL to all 1s */
2382 	if (mcg_cap & MCG_CTL_P)
2383 		vcpu->arch.mcg_ctl = ~(u64)0;
2384 	/* Init IA32_MCi_CTL to all 1s */
2385 	for (bank = 0; bank < bank_num; bank++)
2386 		vcpu->arch.mce_banks[bank*4] = ~(u64)0;
2387 out:
2388 	return r;
2389 }
2390 
2391 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
2392 				      struct kvm_x86_mce *mce)
2393 {
2394 	u64 mcg_cap = vcpu->arch.mcg_cap;
2395 	unsigned bank_num = mcg_cap & 0xff;
2396 	u64 *banks = vcpu->arch.mce_banks;
2397 
2398 	if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
2399 		return -EINVAL;
2400 	/*
2401 	 * if IA32_MCG_CTL is not all 1s, the uncorrected error
2402 	 * reporting is disabled
2403 	 */
2404 	if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
2405 	    vcpu->arch.mcg_ctl != ~(u64)0)
2406 		return 0;
2407 	banks += 4 * mce->bank;
2408 	/*
2409 	 * if IA32_MCi_CTL is not all 1s, the uncorrected error
2410 	 * reporting is disabled for the bank
2411 	 */
2412 	if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
2413 		return 0;
2414 	if (mce->status & MCI_STATUS_UC) {
2415 		if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
2416 		    !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
2417 			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2418 			return 0;
2419 		}
2420 		if (banks[1] & MCI_STATUS_VAL)
2421 			mce->status |= MCI_STATUS_OVER;
2422 		banks[2] = mce->addr;
2423 		banks[3] = mce->misc;
2424 		vcpu->arch.mcg_status = mce->mcg_status;
2425 		banks[1] = mce->status;
2426 		kvm_queue_exception(vcpu, MC_VECTOR);
2427 	} else if (!(banks[1] & MCI_STATUS_VAL)
2428 		   || !(banks[1] & MCI_STATUS_UC)) {
2429 		if (banks[1] & MCI_STATUS_VAL)
2430 			mce->status |= MCI_STATUS_OVER;
2431 		banks[2] = mce->addr;
2432 		banks[3] = mce->misc;
2433 		banks[1] = mce->status;
2434 	} else
2435 		banks[1] |= MCI_STATUS_OVER;
2436 	return 0;
2437 }
2438 
2439 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
2440 					       struct kvm_vcpu_events *events)
2441 {
2442 	process_nmi(vcpu);
2443 	events->exception.injected =
2444 		vcpu->arch.exception.pending &&
2445 		!kvm_exception_is_soft(vcpu->arch.exception.nr);
2446 	events->exception.nr = vcpu->arch.exception.nr;
2447 	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
2448 	events->exception.pad = 0;
2449 	events->exception.error_code = vcpu->arch.exception.error_code;
2450 
2451 	events->interrupt.injected =
2452 		vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
2453 	events->interrupt.nr = vcpu->arch.interrupt.nr;
2454 	events->interrupt.soft = 0;
2455 	events->interrupt.shadow =
2456 		kvm_x86_ops->get_interrupt_shadow(vcpu,
2457 			KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
2458 
2459 	events->nmi.injected = vcpu->arch.nmi_injected;
2460 	events->nmi.pending = vcpu->arch.nmi_pending != 0;
2461 	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
2462 	events->nmi.pad = 0;
2463 
2464 	events->sipi_vector = vcpu->arch.sipi_vector;
2465 
2466 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
2467 			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2468 			 | KVM_VCPUEVENT_VALID_SHADOW);
2469 	memset(&events->reserved, 0, sizeof(events->reserved));
2470 }
2471 
2472 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
2473 					      struct kvm_vcpu_events *events)
2474 {
2475 	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
2476 			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2477 			      | KVM_VCPUEVENT_VALID_SHADOW))
2478 		return -EINVAL;
2479 
2480 	process_nmi(vcpu);
2481 	vcpu->arch.exception.pending = events->exception.injected;
2482 	vcpu->arch.exception.nr = events->exception.nr;
2483 	vcpu->arch.exception.has_error_code = events->exception.has_error_code;
2484 	vcpu->arch.exception.error_code = events->exception.error_code;
2485 
2486 	vcpu->arch.interrupt.pending = events->interrupt.injected;
2487 	vcpu->arch.interrupt.nr = events->interrupt.nr;
2488 	vcpu->arch.interrupt.soft = events->interrupt.soft;
2489 	if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
2490 		kvm_x86_ops->set_interrupt_shadow(vcpu,
2491 						  events->interrupt.shadow);
2492 
2493 	vcpu->arch.nmi_injected = events->nmi.injected;
2494 	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
2495 		vcpu->arch.nmi_pending = events->nmi.pending;
2496 	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
2497 
2498 	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
2499 		vcpu->arch.sipi_vector = events->sipi_vector;
2500 
2501 	kvm_make_request(KVM_REQ_EVENT, vcpu);
2502 
2503 	return 0;
2504 }
2505 
2506 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
2507 					     struct kvm_debugregs *dbgregs)
2508 {
2509 	memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
2510 	dbgregs->dr6 = vcpu->arch.dr6;
2511 	dbgregs->dr7 = vcpu->arch.dr7;
2512 	dbgregs->flags = 0;
2513 	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
2514 }
2515 
2516 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
2517 					    struct kvm_debugregs *dbgregs)
2518 {
2519 	if (dbgregs->flags)
2520 		return -EINVAL;
2521 
2522 	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
2523 	vcpu->arch.dr6 = dbgregs->dr6;
2524 	vcpu->arch.dr7 = dbgregs->dr7;
2525 
2526 	return 0;
2527 }
2528 
2529 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
2530 					 struct kvm_xsave *guest_xsave)
2531 {
2532 	if (cpu_has_xsave)
2533 		memcpy(guest_xsave->region,
2534 			&vcpu->arch.guest_fpu.state->xsave,
2535 			xstate_size);
2536 	else {
2537 		memcpy(guest_xsave->region,
2538 			&vcpu->arch.guest_fpu.state->fxsave,
2539 			sizeof(struct i387_fxsave_struct));
2540 		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
2541 			XSTATE_FPSSE;
2542 	}
2543 }
2544 
2545 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
2546 					struct kvm_xsave *guest_xsave)
2547 {
2548 	u64 xstate_bv =
2549 		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
2550 
2551 	if (cpu_has_xsave)
2552 		memcpy(&vcpu->arch.guest_fpu.state->xsave,
2553 			guest_xsave->region, xstate_size);
2554 	else {
2555 		if (xstate_bv & ~XSTATE_FPSSE)
2556 			return -EINVAL;
2557 		memcpy(&vcpu->arch.guest_fpu.state->fxsave,
2558 			guest_xsave->region, sizeof(struct i387_fxsave_struct));
2559 	}
2560 	return 0;
2561 }
2562 
2563 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
2564 					struct kvm_xcrs *guest_xcrs)
2565 {
2566 	if (!cpu_has_xsave) {
2567 		guest_xcrs->nr_xcrs = 0;
2568 		return;
2569 	}
2570 
2571 	guest_xcrs->nr_xcrs = 1;
2572 	guest_xcrs->flags = 0;
2573 	guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
2574 	guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
2575 }
2576 
2577 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
2578 				       struct kvm_xcrs *guest_xcrs)
2579 {
2580 	int i, r = 0;
2581 
2582 	if (!cpu_has_xsave)
2583 		return -EINVAL;
2584 
2585 	if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
2586 		return -EINVAL;
2587 
2588 	for (i = 0; i < guest_xcrs->nr_xcrs; i++)
2589 		/* Only support XCR0 currently */
2590 		if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) {
2591 			r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
2592 				guest_xcrs->xcrs[0].value);
2593 			break;
2594 		}
2595 	if (r)
2596 		r = -EINVAL;
2597 	return r;
2598 }
2599 
2600 long kvm_arch_vcpu_ioctl(struct file *filp,
2601 			 unsigned int ioctl, unsigned long arg)
2602 {
2603 	struct kvm_vcpu *vcpu = filp->private_data;
2604 	void __user *argp = (void __user *)arg;
2605 	int r;
2606 	union {
2607 		struct kvm_lapic_state *lapic;
2608 		struct kvm_xsave *xsave;
2609 		struct kvm_xcrs *xcrs;
2610 		void *buffer;
2611 	} u;
2612 
2613 	u.buffer = NULL;
2614 	switch (ioctl) {
2615 	case KVM_GET_LAPIC: {
2616 		r = -EINVAL;
2617 		if (!vcpu->arch.apic)
2618 			goto out;
2619 		u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
2620 
2621 		r = -ENOMEM;
2622 		if (!u.lapic)
2623 			goto out;
2624 		r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
2625 		if (r)
2626 			goto out;
2627 		r = -EFAULT;
2628 		if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
2629 			goto out;
2630 		r = 0;
2631 		break;
2632 	}
2633 	case KVM_SET_LAPIC: {
2634 		r = -EINVAL;
2635 		if (!vcpu->arch.apic)
2636 			goto out;
2637 		u.lapic = memdup_user(argp, sizeof(*u.lapic));
2638 		if (IS_ERR(u.lapic)) {
2639 			r = PTR_ERR(u.lapic);
2640 			goto out;
2641 		}
2642 
2643 		r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
2644 		if (r)
2645 			goto out;
2646 		r = 0;
2647 		break;
2648 	}
2649 	case KVM_INTERRUPT: {
2650 		struct kvm_interrupt irq;
2651 
2652 		r = -EFAULT;
2653 		if (copy_from_user(&irq, argp, sizeof irq))
2654 			goto out;
2655 		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2656 		if (r)
2657 			goto out;
2658 		r = 0;
2659 		break;
2660 	}
2661 	case KVM_NMI: {
2662 		r = kvm_vcpu_ioctl_nmi(vcpu);
2663 		if (r)
2664 			goto out;
2665 		r = 0;
2666 		break;
2667 	}
2668 	case KVM_SET_CPUID: {
2669 		struct kvm_cpuid __user *cpuid_arg = argp;
2670 		struct kvm_cpuid cpuid;
2671 
2672 		r = -EFAULT;
2673 		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2674 			goto out;
2675 		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
2676 		if (r)
2677 			goto out;
2678 		break;
2679 	}
2680 	case KVM_SET_CPUID2: {
2681 		struct kvm_cpuid2 __user *cpuid_arg = argp;
2682 		struct kvm_cpuid2 cpuid;
2683 
2684 		r = -EFAULT;
2685 		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2686 			goto out;
2687 		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
2688 					      cpuid_arg->entries);
2689 		if (r)
2690 			goto out;
2691 		break;
2692 	}
2693 	case KVM_GET_CPUID2: {
2694 		struct kvm_cpuid2 __user *cpuid_arg = argp;
2695 		struct kvm_cpuid2 cpuid;
2696 
2697 		r = -EFAULT;
2698 		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2699 			goto out;
2700 		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
2701 					      cpuid_arg->entries);
2702 		if (r)
2703 			goto out;
2704 		r = -EFAULT;
2705 		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2706 			goto out;
2707 		r = 0;
2708 		break;
2709 	}
2710 	case KVM_GET_MSRS:
2711 		r = msr_io(vcpu, argp, kvm_get_msr, 1);
2712 		break;
2713 	case KVM_SET_MSRS:
2714 		r = msr_io(vcpu, argp, do_set_msr, 0);
2715 		break;
2716 	case KVM_TPR_ACCESS_REPORTING: {
2717 		struct kvm_tpr_access_ctl tac;
2718 
2719 		r = -EFAULT;
2720 		if (copy_from_user(&tac, argp, sizeof tac))
2721 			goto out;
2722 		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
2723 		if (r)
2724 			goto out;
2725 		r = -EFAULT;
2726 		if (copy_to_user(argp, &tac, sizeof tac))
2727 			goto out;
2728 		r = 0;
2729 		break;
2730 	}
2731 	case KVM_SET_VAPIC_ADDR: {
2732 		struct kvm_vapic_addr va;
2733 
2734 		r = -EINVAL;
2735 		if (!irqchip_in_kernel(vcpu->kvm))
2736 			goto out;
2737 		r = -EFAULT;
2738 		if (copy_from_user(&va, argp, sizeof va))
2739 			goto out;
2740 		r = 0;
2741 		kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
2742 		break;
2743 	}
2744 	case KVM_X86_SETUP_MCE: {
2745 		u64 mcg_cap;
2746 
2747 		r = -EFAULT;
2748 		if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
2749 			goto out;
2750 		r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
2751 		break;
2752 	}
2753 	case KVM_X86_SET_MCE: {
2754 		struct kvm_x86_mce mce;
2755 
2756 		r = -EFAULT;
2757 		if (copy_from_user(&mce, argp, sizeof mce))
2758 			goto out;
2759 		r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
2760 		break;
2761 	}
2762 	case KVM_GET_VCPU_EVENTS: {
2763 		struct kvm_vcpu_events events;
2764 
2765 		kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
2766 
2767 		r = -EFAULT;
2768 		if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
2769 			break;
2770 		r = 0;
2771 		break;
2772 	}
2773 	case KVM_SET_VCPU_EVENTS: {
2774 		struct kvm_vcpu_events events;
2775 
2776 		r = -EFAULT;
2777 		if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
2778 			break;
2779 
2780 		r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
2781 		break;
2782 	}
2783 	case KVM_GET_DEBUGREGS: {
2784 		struct kvm_debugregs dbgregs;
2785 
2786 		kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
2787 
2788 		r = -EFAULT;
2789 		if (copy_to_user(argp, &dbgregs,
2790 				 sizeof(struct kvm_debugregs)))
2791 			break;
2792 		r = 0;
2793 		break;
2794 	}
2795 	case KVM_SET_DEBUGREGS: {
2796 		struct kvm_debugregs dbgregs;
2797 
2798 		r = -EFAULT;
2799 		if (copy_from_user(&dbgregs, argp,
2800 				   sizeof(struct kvm_debugregs)))
2801 			break;
2802 
2803 		r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
2804 		break;
2805 	}
2806 	case KVM_GET_XSAVE: {
2807 		u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
2808 		r = -ENOMEM;
2809 		if (!u.xsave)
2810 			break;
2811 
2812 		kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
2813 
2814 		r = -EFAULT;
2815 		if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
2816 			break;
2817 		r = 0;
2818 		break;
2819 	}
2820 	case KVM_SET_XSAVE: {
2821 		u.xsave = memdup_user(argp, sizeof(*u.xsave));
2822 		if (IS_ERR(u.xsave)) {
2823 			r = PTR_ERR(u.xsave);
2824 			goto out;
2825 		}
2826 
2827 		r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
2828 		break;
2829 	}
2830 	case KVM_GET_XCRS: {
2831 		u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
2832 		r = -ENOMEM;
2833 		if (!u.xcrs)
2834 			break;
2835 
2836 		kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
2837 
2838 		r = -EFAULT;
2839 		if (copy_to_user(argp, u.xcrs,
2840 				 sizeof(struct kvm_xcrs)))
2841 			break;
2842 		r = 0;
2843 		break;
2844 	}
2845 	case KVM_SET_XCRS: {
2846 		u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
2847 		if (IS_ERR(u.xcrs)) {
2848 			r = PTR_ERR(u.xcrs);
2849 			goto out;
2850 		}
2851 
2852 		r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
2853 		break;
2854 	}
2855 	case KVM_SET_TSC_KHZ: {
2856 		u32 user_tsc_khz;
2857 
2858 		r = -EINVAL;
2859 		user_tsc_khz = (u32)arg;
2860 
2861 		if (user_tsc_khz >= kvm_max_guest_tsc_khz)
2862 			goto out;
2863 
2864 		if (user_tsc_khz == 0)
2865 			user_tsc_khz = tsc_khz;
2866 
2867 		kvm_set_tsc_khz(vcpu, user_tsc_khz);
2868 
2869 		r = 0;
2870 		goto out;
2871 	}
2872 	case KVM_GET_TSC_KHZ: {
2873 		r = vcpu->arch.virtual_tsc_khz;
2874 		goto out;
2875 	}
2876 	default:
2877 		r = -EINVAL;
2878 	}
2879 out:
2880 	kfree(u.buffer);
2881 	return r;
2882 }
2883 
2884 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2885 {
2886 	return VM_FAULT_SIGBUS;
2887 }
2888 
2889 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
2890 {
2891 	int ret;
2892 
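	/* leave room for the three consecutive TSS pages below 4GB */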
2893 	if (addr > (unsigned int)(-3 * PAGE_SIZE))
2894 		return -1;
2895 	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
2896 	return ret;
2897 }
2898 
2899 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
2900 					      u64 ident_addr)
2901 {
2902 	kvm->arch.ept_identity_map_addr = ident_addr;
2903 	return 0;
2904 }
2905 
2906 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
2907 					  u32 kvm_nr_mmu_pages)
2908 {
2909 	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
2910 		return -EINVAL;
2911 
2912 	mutex_lock(&kvm->slots_lock);
2913 	spin_lock(&kvm->mmu_lock);
2914 
2915 	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
2916 	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
2917 
2918 	spin_unlock(&kvm->mmu_lock);
2919 	mutex_unlock(&kvm->slots_lock);
2920 	return 0;
2921 }
2922 
2923 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
2924 {
2925 	return kvm->arch.n_max_mmu_pages;
2926 }
2927 
2928 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2929 {
2930 	int r;
2931 
2932 	r = 0;
2933 	switch (chip->chip_id) {
2934 	case KVM_IRQCHIP_PIC_MASTER:
2935 		memcpy(&chip->chip.pic,
2936 			&pic_irqchip(kvm)->pics[0],
2937 			sizeof(struct kvm_pic_state));
2938 		break;
2939 	case KVM_IRQCHIP_PIC_SLAVE:
2940 		memcpy(&chip->chip.pic,
2941 			&pic_irqchip(kvm)->pics[1],
2942 			sizeof(struct kvm_pic_state));
2943 		break;
2944 	case KVM_IRQCHIP_IOAPIC:
2945 		r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
2946 		break;
2947 	default:
2948 		r = -EINVAL;
2949 		break;
2950 	}
2951 	return r;
2952 }
2953 
2954 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2955 {
2956 	int r;
2957 
2958 	r = 0;
2959 	switch (chip->chip_id) {
2960 	case KVM_IRQCHIP_PIC_MASTER:
2961 		spin_lock(&pic_irqchip(kvm)->lock);
2962 		memcpy(&pic_irqchip(kvm)->pics[0],
2963 			&chip->chip.pic,
2964 			sizeof(struct kvm_pic_state));
2965 		spin_unlock(&pic_irqchip(kvm)->lock);
2966 		break;
2967 	case KVM_IRQCHIP_PIC_SLAVE:
2968 		spin_lock(&pic_irqchip(kvm)->lock);
2969 		memcpy(&pic_irqchip(kvm)->pics[1],
2970 			&chip->chip.pic,
2971 			sizeof(struct kvm_pic_state));
2972 		spin_unlock(&pic_irqchip(kvm)->lock);
2973 		break;
2974 	case KVM_IRQCHIP_IOAPIC:
2975 		r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
2976 		break;
2977 	default:
2978 		r = -EINVAL;
2979 		break;
2980 	}
2981 	kvm_pic_update_irq(pic_irqchip(kvm));
2982 	return r;
2983 }
2984 
2985 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2986 {
2987 	int r = 0;
2988 
2989 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
2990 	memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
2991 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2992 	return r;
2993 }
2994 
2995 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2996 {
2997 	int r = 0;
2998 
2999 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
3000 	memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
3001 	kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
3002 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3003 	return r;
3004 }
3005 
3006 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3007 {
3008 	int r = 0;
3009 
3010 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
3011 	memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
3012 		sizeof(ps->channels));
3013 	ps->flags = kvm->arch.vpit->pit_state.flags;
3014 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3015 	memset(&ps->reserved, 0, sizeof(ps->reserved));
3016 	return r;
3017 }
3018 
3019 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3020 {
3021 	int r = 0, start = 0;
3022 	u32 prev_legacy, cur_legacy;
3023 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
3024 	prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
3025 	cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
3026 	if (!prev_legacy && cur_legacy)
3027 		start = 1;
3028 	memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
3029 	       sizeof(kvm->arch.vpit->pit_state.channels));
3030 	kvm->arch.vpit->pit_state.flags = ps->flags;
3031 	kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
3032 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3033 	return r;
3034 }
3035 
3036 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
3037 				 struct kvm_reinject_control *control)
3038 {
3039 	if (!kvm->arch.vpit)
3040 		return -ENXIO;
3041 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
3042 	kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
3043 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3044 	return 0;
3045 }
3046 
3047 /**
3048  * write_protect_slot - write protect a slot for dirty logging
3049  * @kvm: the kvm instance
3050  * @memslot: the slot we protect
3051  * @dirty_bitmap: the bitmap indicating which pages are dirty
3052  * @nr_dirty_pages: the number of dirty pages
3053  *
3054  * We have two ways to find all sptes to protect:
3055  * 1. Use kvm_mmu_slot_remove_write_access() which walks all shadow pages and
3056  *    checks ones that have a spte mapping a page in the slot.
3057  * 2. Use kvm_mmu_rmap_write_protect() for each gfn found in the bitmap.
3058  *
3059  * Generally speaking, if there are not so many dirty pages compared to the
3060  * number of shadow pages, we should use the latter.
3061  *
3062  * Note that letting others write into a page marked dirty in the old bitmap
3063  * through a stale TLB entry is not a problem.  That page will become
3064  * write protected again when we flush the TLB and will then be reported
3065  * dirty to user space by copying the old bitmap.
3066  */
3067 static void write_protect_slot(struct kvm *kvm,
3068 			       struct kvm_memory_slot *memslot,
3069 			       unsigned long *dirty_bitmap,
3070 			       unsigned long nr_dirty_pages)
3071 {
3072 	spin_lock(&kvm->mmu_lock);
3073 
3074 	/* Not many dirty pages compared to # of shadow pages. */
3075 	if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) {
3076 		unsigned long gfn_offset;
3077 
3078 		for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) {
3079 			unsigned long gfn = memslot->base_gfn + gfn_offset;
3080 
3081 			kvm_mmu_rmap_write_protect(kvm, gfn, memslot);
3082 		}
3083 		kvm_flush_remote_tlbs(kvm);
3084 	} else
3085 		kvm_mmu_slot_remove_write_access(kvm, memslot->id);
3086 
3087 	spin_unlock(&kvm->mmu_lock);
3088 }
3089 
3090 /*
3091  * Get (and clear) the dirty memory log for a memory slot.
3092  */
3093 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
3094 				      struct kvm_dirty_log *log)
3095 {
3096 	int r;
3097 	struct kvm_memory_slot *memslot;
3098 	unsigned long n, nr_dirty_pages;
3099 
3100 	mutex_lock(&kvm->slots_lock);
3101 
3102 	r = -EINVAL;
3103 	if (log->slot >= KVM_MEMORY_SLOTS)
3104 		goto out;
3105 
3106 	memslot = id_to_memslot(kvm->memslots, log->slot);
3107 	r = -ENOENT;
3108 	if (!memslot->dirty_bitmap)
3109 		goto out;
3110 
3111 	n = kvm_dirty_bitmap_bytes(memslot);
3112 	nr_dirty_pages = memslot->nr_dirty_pages;
3113 
3114 	/* If nothing is dirty, don't bother messing with page tables. */
3115 	if (nr_dirty_pages) {
3116 		struct kvm_memslots *slots, *old_slots;
3117 		unsigned long *dirty_bitmap, *dirty_bitmap_head;
3118 
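		/*
		 * The dirty bitmap is double-buffered: dirty_bitmap_head is
		 * the start of a two-halves allocation.  Point the slot at
		 * the freshly zeroed half, publish the new memslots via SRCU,
		 * and only then write-protect and copy the old half out.
		 */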
3119 		dirty_bitmap = memslot->dirty_bitmap;
3120 		dirty_bitmap_head = memslot->dirty_bitmap_head;
3121 		if (dirty_bitmap == dirty_bitmap_head)
3122 			dirty_bitmap_head += n / sizeof(long);
3123 		memset(dirty_bitmap_head, 0, n);
3124 
3125 		r = -ENOMEM;
3126 		slots = kmemdup(kvm->memslots, sizeof(*kvm->memslots), GFP_KERNEL);
3127 		if (!slots)
3128 			goto out;
3129 
3130 		memslot = id_to_memslot(slots, log->slot);
3131 		memslot->nr_dirty_pages = 0;
3132 		memslot->dirty_bitmap = dirty_bitmap_head;
3133 		update_memslots(slots, NULL);
3134 
3135 		old_slots = kvm->memslots;
3136 		rcu_assign_pointer(kvm->memslots, slots);
3137 		synchronize_srcu_expedited(&kvm->srcu);
3138 		kfree(old_slots);
3139 
3140 		write_protect_slot(kvm, memslot, dirty_bitmap, nr_dirty_pages);
3141 
3142 		r = -EFAULT;
3143 		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
3144 			goto out;
3145 	} else {
3146 		r = -EFAULT;
3147 		if (clear_user(log->dirty_bitmap, n))
3148 			goto out;
3149 	}
3150 
3151 	r = 0;
3152 out:
3153 	mutex_unlock(&kvm->slots_lock);
3154 	return r;
3155 }
3156 
3157 long kvm_arch_vm_ioctl(struct file *filp,
3158 		       unsigned int ioctl, unsigned long arg)
3159 {
3160 	struct kvm *kvm = filp->private_data;
3161 	void __user *argp = (void __user *)arg;
3162 	int r = -ENOTTY;
3163 	/*
3164 	 * This union makes it completely explicit to gcc-3.x
3165 	 * that these two variables' stack usage should be
3166 	 * combined, not added together.
3167 	 */
3168 	union {
3169 		struct kvm_pit_state ps;
3170 		struct kvm_pit_state2 ps2;
3171 		struct kvm_pit_config pit_config;
3172 	} u;
3173 
3174 	switch (ioctl) {
3175 	case KVM_SET_TSS_ADDR:
3176 		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
3177 		if (r < 0)
3178 			goto out;
3179 		break;
3180 	case KVM_SET_IDENTITY_MAP_ADDR: {
3181 		u64 ident_addr;
3182 
3183 		r = -EFAULT;
3184 		if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
3185 			goto out;
3186 		r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
3187 		if (r < 0)
3188 			goto out;
3189 		break;
3190 	}
3191 	case KVM_SET_NR_MMU_PAGES:
3192 		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
3193 		if (r)
3194 			goto out;
3195 		break;
3196 	case KVM_GET_NR_MMU_PAGES:
3197 		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
3198 		break;
3199 	case KVM_CREATE_IRQCHIP: {
3200 		struct kvm_pic *vpic;
3201 
3202 		mutex_lock(&kvm->lock);
3203 		r = -EEXIST;
3204 		if (kvm->arch.vpic)
3205 			goto create_irqchip_unlock;
3206 		r = -EINVAL;
3207 		if (atomic_read(&kvm->online_vcpus))
3208 			goto create_irqchip_unlock;
3209 		r = -ENOMEM;
3210 		vpic = kvm_create_pic(kvm);
3211 		if (vpic) {
3212 			r = kvm_ioapic_init(kvm);
3213 			if (r) {
3214 				mutex_lock(&kvm->slots_lock);
3215 				kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3216 							  &vpic->dev_master);
3217 				kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3218 							  &vpic->dev_slave);
3219 				kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3220 							  &vpic->dev_eclr);
3221 				mutex_unlock(&kvm->slots_lock);
3222 				kfree(vpic);
3223 				goto create_irqchip_unlock;
3224 			}
3225 		} else
3226 			goto create_irqchip_unlock;
3227 		smp_wmb();
3228 		kvm->arch.vpic = vpic;
3229 		smp_wmb();
3230 		r = kvm_setup_default_irq_routing(kvm);
3231 		if (r) {
3232 			mutex_lock(&kvm->slots_lock);
3233 			mutex_lock(&kvm->irq_lock);
3234 			kvm_ioapic_destroy(kvm);
3235 			kvm_destroy_pic(kvm);
3236 			mutex_unlock(&kvm->irq_lock);
3237 			mutex_unlock(&kvm->slots_lock);
3238 		}
3239 	create_irqchip_unlock:
3240 		mutex_unlock(&kvm->lock);
3241 		break;
3242 	}
3243 	case KVM_CREATE_PIT:
3244 		u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
3245 		goto create_pit;
3246 	case KVM_CREATE_PIT2:
3247 		r = -EFAULT;
3248 		if (copy_from_user(&u.pit_config, argp,
3249 				   sizeof(struct kvm_pit_config)))
3250 			goto out;
3251 	create_pit:
3252 		mutex_lock(&kvm->slots_lock);
3253 		r = -EEXIST;
3254 		if (kvm->arch.vpit)
3255 			goto create_pit_unlock;
3256 		r = -ENOMEM;
3257 		kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
3258 		if (kvm->arch.vpit)
3259 			r = 0;
3260 	create_pit_unlock:
3261 		mutex_unlock(&kvm->slots_lock);
3262 		break;
3263 	case KVM_IRQ_LINE_STATUS:
3264 	case KVM_IRQ_LINE: {
3265 		struct kvm_irq_level irq_event;
3266 
3267 		r = -EFAULT;
3268 		if (copy_from_user(&irq_event, argp, sizeof irq_event))
3269 			goto out;
3270 		r = -ENXIO;
3271 		if (irqchip_in_kernel(kvm)) {
3272 			__s32 status;
3273 			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
3274 					irq_event.irq, irq_event.level);
3275 			if (ioctl == KVM_IRQ_LINE_STATUS) {
3276 				r = -EFAULT;
3277 				irq_event.status = status;
3278 				if (copy_to_user(argp, &irq_event,
3279 							sizeof irq_event))
3280 					goto out;
3281 			}
3282 			r = 0;
3283 		}
3284 		break;
3285 	}
3286 	case KVM_GET_IRQCHIP: {
3287 		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3288 		struct kvm_irqchip *chip;
3289 
3290 		chip = memdup_user(argp, sizeof(*chip));
3291 		if (IS_ERR(chip)) {
3292 			r = PTR_ERR(chip);
3293 			goto out;
3294 		}
3295 
3296 		r = -ENXIO;
3297 		if (!irqchip_in_kernel(kvm))
3298 			goto get_irqchip_out;
3299 		r = kvm_vm_ioctl_get_irqchip(kvm, chip);
3300 		if (r)
3301 			goto get_irqchip_out;
3302 		r = -EFAULT;
3303 		if (copy_to_user(argp, chip, sizeof *chip))
3304 			goto get_irqchip_out;
3305 		r = 0;
3306 	get_irqchip_out:
3307 		kfree(chip);
3308 		if (r)
3309 			goto out;
3310 		break;
3311 	}
3312 	case KVM_SET_IRQCHIP: {
3313 		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3314 		struct kvm_irqchip *chip;
3315 
3316 		chip = memdup_user(argp, sizeof(*chip));
3317 		if (IS_ERR(chip)) {
3318 			r = PTR_ERR(chip);
3319 			goto out;
3320 		}
3321 
3322 		r = -ENXIO;
3323 		if (!irqchip_in_kernel(kvm))
3324 			goto set_irqchip_out;
3325 		r = kvm_vm_ioctl_set_irqchip(kvm, chip);
3326 		if (r)
3327 			goto set_irqchip_out;
3328 		r = 0;
3329 	set_irqchip_out:
3330 		kfree(chip);
3331 		if (r)
3332 			goto out;
3333 		break;
3334 	}
3335 	case KVM_GET_PIT: {
3336 		r = -EFAULT;
3337 		if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
3338 			goto out;
3339 		r = -ENXIO;
3340 		if (!kvm->arch.vpit)
3341 			goto out;
3342 		r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
3343 		if (r)
3344 			goto out;
3345 		r = -EFAULT;
3346 		if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
3347 			goto out;
3348 		r = 0;
3349 		break;
3350 	}
3351 	case KVM_SET_PIT: {
3352 		r = -EFAULT;
3353 		if (copy_from_user(&u.ps, argp, sizeof u.ps))
3354 			goto out;
3355 		r = -ENXIO;
3356 		if (!kvm->arch.vpit)
3357 			goto out;
3358 		r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
3359 		if (r)
3360 			goto out;
3361 		r = 0;
3362 		break;
3363 	}
3364 	case KVM_GET_PIT2: {
3365 		r = -ENXIO;
3366 		if (!kvm->arch.vpit)
3367 			goto out;
3368 		r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
3369 		if (r)
3370 			goto out;
3371 		r = -EFAULT;
3372 		if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
3373 			goto out;
3374 		r = 0;
3375 		break;
3376 	}
3377 	case KVM_SET_PIT2: {
3378 		r = -EFAULT;
3379 		if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
3380 			goto out;
3381 		r = -ENXIO;
3382 		if (!kvm->arch.vpit)
3383 			goto out;
3384 		r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
3385 		if (r)
3386 			goto out;
3387 		r = 0;
3388 		break;
3389 	}
3390 	case KVM_REINJECT_CONTROL: {
3391 		struct kvm_reinject_control control;
3392 		r =  -EFAULT;
3393 		if (copy_from_user(&control, argp, sizeof(control)))
3394 			goto out;
3395 		r = kvm_vm_ioctl_reinject(kvm, &control);
3396 		if (r)
3397 			goto out;
3398 		r = 0;
3399 		break;
3400 	}
3401 	case KVM_XEN_HVM_CONFIG: {
3402 		r = -EFAULT;
3403 		if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
3404 				   sizeof(struct kvm_xen_hvm_config)))
3405 			goto out;
3406 		r = -EINVAL;
3407 		if (kvm->arch.xen_hvm_config.flags)
3408 			goto out;
3409 		r = 0;
3410 		break;
3411 	}
3412 	case KVM_SET_CLOCK: {
3413 		struct kvm_clock_data user_ns;
3414 		u64 now_ns;
3415 		s64 delta;
3416 
3417 		r = -EFAULT;
3418 		if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
3419 			goto out;
3420 
3421 		r = -EINVAL;
3422 		if (user_ns.flags)
3423 			goto out;
3424 
3425 		r = 0;
3426 		local_irq_disable();
3427 		now_ns = get_kernel_ns();
3428 		delta = user_ns.clock - now_ns;
3429 		local_irq_enable();
3430 		kvm->arch.kvmclock_offset = delta;
3431 		break;
3432 	}
3433 	case KVM_GET_CLOCK: {
3434 		struct kvm_clock_data user_ns;
3435 		u64 now_ns;
3436 
3437 		local_irq_disable();
3438 		now_ns = get_kernel_ns();
3439 		user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
3440 		local_irq_enable();
3441 		user_ns.flags = 0;
3442 		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
3443 
3444 		r = -EFAULT;
3445 		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
3446 			goto out;
3447 		r = 0;
3448 		break;
3449 	}
3450 
3451 	default:
3452 		;
3453 	}
3454 out:
3455 	return r;
3456 }
3457 
3458 static void kvm_init_msr_list(void)
3459 {
3460 	u32 dummy[2];
3461 	unsigned i, j;
3462 
3463 	/* Skip the first KVM_SAVE_MSRS_BEGIN msrs in the list; they are KVM-specific. */
3464 	for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
3465 		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
3466 			continue;
3467 		if (j < i)
3468 			msrs_to_save[j] = msrs_to_save[i];
3469 		j++;
3470 	}
3471 	num_msrs_to_save = j;
3472 }
3473 
3474 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
3475 			   const void *v)
3476 {
3477 	int handled = 0;
3478 	int n;
3479 
3480 	do {
3481 		n = min(len, 8);
3482 		if (!(vcpu->arch.apic &&
3483 		      !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v))
3484 		    && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
3485 			break;
3486 		handled += n;
3487 		addr += n;
3488 		len -= n;
3489 		v += n;
3490 	} while (len);
3491 
3492 	return handled;
3493 }
3494 
3495 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
3496 {
3497 	int handled = 0;
3498 	int n;
3499 
3500 	do {
3501 		n = min(len, 8);
3502 		if (!(vcpu->arch.apic &&
3503 		      !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
3504 		    && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
3505 			break;
3506 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
3507 		handled += n;
3508 		addr += n;
3509 		len -= n;
3510 		v += n;
3511 	} while (len);
3512 
3513 	return handled;
3514 }
3515 
3516 static void kvm_set_segment(struct kvm_vcpu *vcpu,
3517 			struct kvm_segment *var, int seg)
3518 {
3519 	kvm_x86_ops->set_segment(vcpu, var, seg);
3520 }
3521 
3522 void kvm_get_segment(struct kvm_vcpu *vcpu,
3523 		     struct kvm_segment *var, int seg)
3524 {
3525 	kvm_x86_ops->get_segment(vcpu, var, seg);
3526 }
3527 
3528 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
3529 {
3530 	gpa_t t_gpa;
3531 	struct x86_exception exception;
3532 
3533 	BUG_ON(!mmu_is_nested(vcpu));
3534 
3535 	/* NPT walks are always user-walks */
3536 	access |= PFERR_USER_MASK;
3537 	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
3538 
3539 	return t_gpa;
3540 }
3541 
3542 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
3543 			      struct x86_exception *exception)
3544 {
3545 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3546 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3547 }
3548 
3549  gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
3550 				struct x86_exception *exception)
3551 {
3552 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3553 	access |= PFERR_FETCH_MASK;
3554 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3555 }
3556 
3557 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
3558 			       struct x86_exception *exception)
3559 {
3560 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3561 	access |= PFERR_WRITE_MASK;
3562 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3563 }
3564 
3565 /* used to access any guest's mapped memory without checking CPL */
3566 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
3567 				struct x86_exception *exception)
3568 {
3569 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
3570 }
3571 
3572 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
3573 				      struct kvm_vcpu *vcpu, u32 access,
3574 				      struct x86_exception *exception)
3575 {
3576 	void *data = val;
3577 	int r = X86EMUL_CONTINUE;
3578 
3579 	while (bytes) {
3580 		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
3581 							    exception);
3582 		unsigned offset = addr & (PAGE_SIZE-1);
3583 		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
3584 		int ret;
3585 
3586 		if (gpa == UNMAPPED_GVA)
3587 			return X86EMUL_PROPAGATE_FAULT;
3588 		ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
3589 		if (ret < 0) {
3590 			r = X86EMUL_IO_NEEDED;
3591 			goto out;
3592 		}
3593 
3594 		bytes -= toread;
3595 		data += toread;
3596 		addr += toread;
3597 	}
3598 out:
3599 	return r;
3600 }
3601 
3602 /* used for instruction fetching */
3603 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
3604 				gva_t addr, void *val, unsigned int bytes,
3605 				struct x86_exception *exception)
3606 {
3607 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3608 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3609 
3610 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
3611 					  access | PFERR_FETCH_MASK,
3612 					  exception);
3613 }
3614 
3615 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
3616 			       gva_t addr, void *val, unsigned int bytes,
3617 			       struct x86_exception *exception)
3618 {
3619 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3620 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3621 
3622 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
3623 					  exception);
3624 }
3625 EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
3626 
3627 static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
3628 				      gva_t addr, void *val, unsigned int bytes,
3629 				      struct x86_exception *exception)
3630 {
3631 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3632 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
3633 }
3634 
3635 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
3636 				       gva_t addr, void *val,
3637 				       unsigned int bytes,
3638 				       struct x86_exception *exception)
3639 {
3640 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3641 	void *data = val;
3642 	int r = X86EMUL_CONTINUE;
3643 
3644 	while (bytes) {
3645 		gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
3646 							     PFERR_WRITE_MASK,
3647 							     exception);
3648 		unsigned offset = addr & (PAGE_SIZE-1);
3649 		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
3650 		int ret;
3651 
3652 		if (gpa == UNMAPPED_GVA)
3653 			return X86EMUL_PROPAGATE_FAULT;
3654 		ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
3655 		if (ret < 0) {
3656 			r = X86EMUL_IO_NEEDED;
3657 			goto out;
3658 		}
3659 
3660 		bytes -= towrite;
3661 		data += towrite;
3662 		addr += towrite;
3663 	}
3664 out:
3665 	return r;
3666 }
3667 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
3668 
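/*
 * vcpu_mmio_gva_to_gpa() returns -1 if the translation faulted (*exception
 * is filled in), 1 if the resulting gpa must be treated as MMIO (cached
 * MMIO match or the APIC access page), and 0 for ordinary guest memory.
 */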
3669 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
3670 				gpa_t *gpa, struct x86_exception *exception,
3671 				bool write)
3672 {
3673 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3674 
3675 	if (vcpu_match_mmio_gva(vcpu, gva) &&
3676 		  check_write_user_access(vcpu, write, access,
3677 		  vcpu->arch.access)) {
3678 		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
3679 					(gva & (PAGE_SIZE - 1));
3680 		trace_vcpu_match_mmio(gva, *gpa, write, false);
3681 		return 1;
3682 	}
3683 
3684 	if (write)
3685 		access |= PFERR_WRITE_MASK;
3686 
3687 	*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3688 
3689 	if (*gpa == UNMAPPED_GVA)
3690 		return -1;
3691 
3692 	/* For APIC access vmexit */
3693 	if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3694 		return 1;
3695 
3696 	if (vcpu_match_mmio_gpa(vcpu, *gpa)) {
3697 		trace_vcpu_match_mmio(gva, *gpa, write, true);
3698 		return 1;
3699 	}
3700 
3701 	return 0;
3702 }
3703 
3704 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
3705 			const void *val, int bytes)
3706 {
3707 	int ret;
3708 
3709 	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
3710 	if (ret < 0)
3711 		return 0;
3712 	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
3713 	return 1;
3714 }
3715 
3716 struct read_write_emulator_ops {
3717 	int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
3718 				  int bytes);
3719 	int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
3720 				  void *val, int bytes);
3721 	int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
3722 			       int bytes, void *val);
3723 	int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
3724 				    void *val, int bytes);
3725 	bool write;
3726 };
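
/*
 * read_emultor/write_emultor below plug into emulator_read_write_onepage(),
 * which first tries to satisfy the access against guest memory via
 * ->read_write_emulate() and falls back to the in-kernel MMIO bus
 * (->read_write_mmio()) or to a KVM_EXIT_MMIO exit to userspace
 * (->read_write_exit_mmio()) when that fails.
 */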
3727 
3728 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
3729 {
3730 	if (vcpu->mmio_read_completed) {
3731 		memcpy(val, vcpu->mmio_data, bytes);
3732 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
3733 			       vcpu->mmio_phys_addr, *(u64 *)val);
3734 		vcpu->mmio_read_completed = 0;
3735 		return 1;
3736 	}
3737 
3738 	return 0;
3739 }
3740 
3741 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
3742 			void *val, int bytes)
3743 {
3744 	return !kvm_read_guest(vcpu->kvm, gpa, val, bytes);
3745 }
3746 
3747 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
3748 			 void *val, int bytes)
3749 {
3750 	return emulator_write_phys(vcpu, gpa, val, bytes);
3751 }
3752 
3753 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
3754 {
3755 	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
3756 	return vcpu_mmio_write(vcpu, gpa, bytes, val);
3757 }
3758 
3759 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
3760 			  void *val, int bytes)
3761 {
3762 	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
3763 	return X86EMUL_IO_NEEDED;
3764 }
3765 
3766 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
3767 			   void *val, int bytes)
3768 {
3769 	memcpy(vcpu->mmio_data, val, bytes);
3770 	memcpy(vcpu->run->mmio.data, vcpu->mmio_data, 8);
3771 	return X86EMUL_CONTINUE;
3772 }
3773 
3774 static struct read_write_emulator_ops read_emultor = {
3775 	.read_write_prepare = read_prepare,
3776 	.read_write_emulate = read_emulate,
3777 	.read_write_mmio = vcpu_mmio_read,
3778 	.read_write_exit_mmio = read_exit_mmio,
3779 };
3780 
3781 static struct read_write_emulator_ops write_emultor = {
3782 	.read_write_emulate = write_emulate,
3783 	.read_write_mmio = write_mmio,
3784 	.read_write_exit_mmio = write_exit_mmio,
3785 	.write = true,
3786 };
3787 
3788 static int emulator_read_write_onepage(unsigned long addr, void *val,
3789 				       unsigned int bytes,
3790 				       struct x86_exception *exception,
3791 				       struct kvm_vcpu *vcpu,
3792 				       struct read_write_emulator_ops *ops)
3793 {
3794 	gpa_t gpa;
3795 	int handled, ret;
3796 	bool write = ops->write;
3797 
3798 	if (ops->read_write_prepare &&
3799 		  ops->read_write_prepare(vcpu, val, bytes))
3800 		return X86EMUL_CONTINUE;
3801 
3802 	ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
3803 
3804 	if (ret < 0)
3805 		return X86EMUL_PROPAGATE_FAULT;
3806 
3807 	/* For APIC access vmexit */
3808 	if (ret)
3809 		goto mmio;
3810 
3811 	if (ops->read_write_emulate(vcpu, gpa, val, bytes))
3812 		return X86EMUL_CONTINUE;
3813 
3814 mmio:
3815 	/*
3816 	 * Is this MMIO handled locally?
3817 	 */
3818 	handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
3819 	if (handled == bytes)
3820 		return X86EMUL_CONTINUE;
3821 
3822 	gpa += handled;
3823 	bytes -= handled;
3824 	val += handled;
3825 
3826 	vcpu->mmio_needed = 1;
3827 	vcpu->run->exit_reason = KVM_EXIT_MMIO;
3828 	vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
3829 	vcpu->mmio_size = bytes;
3830 	vcpu->run->mmio.len = min(vcpu->mmio_size, 8);
3831 	vcpu->run->mmio.is_write = vcpu->mmio_is_write = write;
3832 	vcpu->mmio_index = 0;
3833 
3834 	return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
3835 }
3836 
3837 int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
3838 			void *val, unsigned int bytes,
3839 			struct x86_exception *exception,
3840 			struct read_write_emulator_ops *ops)
3841 {
3842 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3843 
3844 	/* Crossing a page boundary? */
3845 	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
3846 		int rc, now;
3847 
3848 		now = -addr & ~PAGE_MASK;
3849 		rc = emulator_read_write_onepage(addr, val, now, exception,
3850 						 vcpu, ops);
3851 
3852 		if (rc != X86EMUL_CONTINUE)
3853 			return rc;
3854 		addr += now;
3855 		val += now;
3856 		bytes -= now;
3857 	}
3858 
3859 	return emulator_read_write_onepage(addr, val, bytes, exception,
3860 					   vcpu, ops);
3861 }
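
/*
 * Worked example of the page-boundary split above, assuming a 4 KiB page
 * size and a hypothetical 4-byte access at addr == 0x1ffe:
 * "now = -addr & ~PAGE_MASK" evaluates to 2, the number of bytes left in
 * the first page, so emulator_read_write_onepage() is called once for
 * 2 bytes at 0x1ffe and once more for the remaining 2 bytes at 0x2000.
 * Each call translates its own gva and may independently fall back to
 * MMIO handling.
 */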
3862 
3863 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
3864 				  unsigned long addr,
3865 				  void *val,
3866 				  unsigned int bytes,
3867 				  struct x86_exception *exception)
3868 {
3869 	return emulator_read_write(ctxt, addr, val, bytes,
3870 				   exception, &read_emultor);
3871 }
3872 
3873 int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
3874 			    unsigned long addr,
3875 			    const void *val,
3876 			    unsigned int bytes,
3877 			    struct x86_exception *exception)
3878 {
3879 	return emulator_read_write(ctxt, addr, (void *)val, bytes,
3880 				   exception, &write_emultor);
3881 }
3882 
3883 #define CMPXCHG_TYPE(t, ptr, old, new) \
3884 	(cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
3885 
3886 #ifdef CONFIG_X86_64
3887 #  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
3888 #else
3889 #  define CMPXCHG64(ptr, old, new) \
3890 	(cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
3891 #endif
3892 
3893 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
3894 				     unsigned long addr,
3895 				     const void *old,
3896 				     const void *new,
3897 				     unsigned int bytes,
3898 				     struct x86_exception *exception)
3899 {
3900 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3901 	gpa_t gpa;
3902 	struct page *page;
3903 	char *kaddr;
3904 	bool exchanged;
3905 
3906 	/* a guest's cmpxchg8b has to be emulated atomically */
3907 	if (bytes > 8 || (bytes & (bytes - 1)))
3908 		goto emul_write;
3909 
3910 	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
3911 
3912 	if (gpa == UNMAPPED_GVA ||
3913 	    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3914 		goto emul_write;
3915 
3916 	if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
3917 		goto emul_write;
3918 
3919 	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
3920 	if (is_error_page(page)) {
3921 		kvm_release_page_clean(page);
3922 		goto emul_write;
3923 	}
3924 
3925 	kaddr = kmap_atomic(page);
3926 	kaddr += offset_in_page(gpa);
3927 	switch (bytes) {
3928 	case 1:
3929 		exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
3930 		break;
3931 	case 2:
3932 		exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
3933 		break;
3934 	case 4:
3935 		exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
3936 		break;
3937 	case 8:
3938 		exchanged = CMPXCHG64(kaddr, old, new);
3939 		break;
3940 	default:
3941 		BUG();
3942 	}
3943 	kunmap_atomic(kaddr);
3944 	kvm_release_page_dirty(page);
3945 
3946 	if (!exchanged)
3947 		return X86EMUL_CMPXCHG_FAILED;
3948 
3949 	kvm_mmu_pte_write(vcpu, gpa, new, bytes);
3950 
3951 	return X86EMUL_CONTINUE;
3952 
3953 emul_write:
3954 	printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
3955 
3956 	return emulator_write_emulated(ctxt, addr, new, bytes, exception);
3957 }
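
/*
 * Illustrative expansion of CMPXCHG_TYPE() above for the 4-byte case:
 * CMPXCHG_TYPE(u32, kaddr, old, new) performs one atomic cmpxchg that
 * stores *(u32 *)new only if *(u32 *)kaddr still equals *(u32 *)old, and
 * reports whether the swap happened by comparing the returned value with
 * the expected old value.  The checks before the kmap restrict this path
 * to power-of-two sizes of at most 8 bytes that do not cross a page
 * boundary, which is what lets a single host cmpxchg faithfully emulate
 * the guest's locked access; anything else takes the emul_write fallback.
 */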
3958 
3959 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
3960 {
3961 	/* TODO: String I/O for in-kernel devices */
3962 	int r;
3963 
3964 	if (vcpu->arch.pio.in)
3965 		r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
3966 				    vcpu->arch.pio.size, pd);
3967 	else
3968 		r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
3969 				     vcpu->arch.pio.port, vcpu->arch.pio.size,
3970 				     pd);
3971 	return r;
3972 }
3973 
3974 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
3975 			       unsigned short port, void *val,
3976 			       unsigned int count, bool in)
3977 {
3978 	trace_kvm_pio(!in, port, size, count);
3979 
3980 	vcpu->arch.pio.port = port;
3981 	vcpu->arch.pio.in = in;
3982 	vcpu->arch.pio.count  = count;
3983 	vcpu->arch.pio.size = size;
3984 
3985 	if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
3986 		vcpu->arch.pio.count = 0;
3987 		return 1;
3988 	}
3989 
3990 	vcpu->run->exit_reason = KVM_EXIT_IO;
3991 	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
3992 	vcpu->run->io.size = size;
3993 	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
3994 	vcpu->run->io.count = count;
3995 	vcpu->run->io.port = port;
3996 
3997 	return 0;
3998 }
3999 
4000 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
4001 				    int size, unsigned short port, void *val,
4002 				    unsigned int count)
4003 {
4004 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4005 	int ret;
4006 
4007 	if (vcpu->arch.pio.count)
4008 		goto data_avail;
4009 
4010 	ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
4011 	if (ret) {
4012 data_avail:
4013 		memcpy(val, vcpu->arch.pio_data, size * count);
4014 		vcpu->arch.pio.count = 0;
4015 		return 1;
4016 	}
4017 
4018 	return 0;
4019 }
4020 
4021 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
4022 				     int size, unsigned short port,
4023 				     const void *val, unsigned int count)
4024 {
4025 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4026 
4027 	memcpy(vcpu->arch.pio_data, val, size * count);
4028 	return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
4029 }
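
/*
 * Sketch of the PIO flow implemented above (a summary of the code, not
 * new behaviour): emulator_pio_in_out() first tries kernel_pio(); if an
 * in-kernel device on KVM_PIO_BUS satisfies the access (return value 0),
 * it completes immediately and pio.count is cleared.  Otherwise the vcpu
 * exits to userspace with KVM_EXIT_IO and the bytes staged in the
 * pio_data page.  For an OUT the data was already copied there by
 * emulator_pio_out_emulated(); for an IN, the next call to
 * emulator_pio_in_emulated() finds pio.count still set, takes the
 * data_avail path, and copies userspace's reply back to the emulator.
 */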
4030 
4031 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
4032 {
4033 	return kvm_x86_ops->get_segment_base(vcpu, seg);
4034 }
4035 
4036 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
4037 {
4038 	kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
4039 }
4040 
4041 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
4042 {
4043 	if (!need_emulate_wbinvd(vcpu))
4044 		return X86EMUL_CONTINUE;
4045 
4046 	if (kvm_x86_ops->has_wbinvd_exit()) {
4047 		int cpu = get_cpu();
4048 
4049 		cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
4050 		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
4051 				wbinvd_ipi, NULL, 1);
4052 		put_cpu();
4053 		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
4054 	} else
4055 		wbinvd();
4056 	return X86EMUL_CONTINUE;
4057 }
4058 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
4059 
4060 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
4061 {
4062 	kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
4063 }
4064 
4065 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
4066 {
4067 	return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
4068 }
4069 
4070 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
4071 {
4072 
4073 	return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
4074 }
4075 
4076 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
4077 {
4078 	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
4079 }
4080 
4081 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
4082 {
4083 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4084 	unsigned long value;
4085 
4086 	switch (cr) {
4087 	case 0:
4088 		value = kvm_read_cr0(vcpu);
4089 		break;
4090 	case 2:
4091 		value = vcpu->arch.cr2;
4092 		break;
4093 	case 3:
4094 		value = kvm_read_cr3(vcpu);
4095 		break;
4096 	case 4:
4097 		value = kvm_read_cr4(vcpu);
4098 		break;
4099 	case 8:
4100 		value = kvm_get_cr8(vcpu);
4101 		break;
4102 	default:
4103 		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
4104 		return 0;
4105 	}
4106 
4107 	return value;
4108 }
4109 
4110 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
4111 {
4112 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4113 	int res = 0;
4114 
4115 	switch (cr) {
4116 	case 0:
4117 		res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
4118 		break;
4119 	case 2:
4120 		vcpu->arch.cr2 = val;
4121 		break;
4122 	case 3:
4123 		res = kvm_set_cr3(vcpu, val);
4124 		break;
4125 	case 4:
4126 		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
4127 		break;
4128 	case 8:
4129 		res = kvm_set_cr8(vcpu, val);
4130 		break;
4131 	default:
4132 		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
4133 		res = -1;
4134 	}
4135 
4136 	return res;
4137 }
4138 
4139 static void emulator_set_rflags(struct x86_emulate_ctxt *ctxt, ulong val)
4140 {
4141 	kvm_set_rflags(emul_to_vcpu(ctxt), val);
4142 }
4143 
4144 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
4145 {
4146 	return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
4147 }
4148 
4149 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4150 {
4151 	kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
4152 }
4153 
4154 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4155 {
4156 	kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
4157 }
4158 
4159 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4160 {
4161 	kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
4162 }
4163 
4164 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4165 {
4166 	kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
4167 }
4168 
4169 static unsigned long emulator_get_cached_segment_base(
4170 	struct x86_emulate_ctxt *ctxt, int seg)
4171 {
4172 	return get_segment_base(emul_to_vcpu(ctxt), seg);
4173 }
4174 
4175 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
4176 				 struct desc_struct *desc, u32 *base3,
4177 				 int seg)
4178 {
4179 	struct kvm_segment var;
4180 
4181 	kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
4182 	*selector = var.selector;
4183 
4184 	if (var.unusable)
4185 		return false;
4186 
4187 	if (var.g)
4188 		var.limit >>= 12;
4189 	set_desc_limit(desc, var.limit);
4190 	set_desc_base(desc, (unsigned long)var.base);
4191 #ifdef CONFIG_X86_64
4192 	if (base3)
4193 		*base3 = var.base >> 32;
4194 #endif
4195 	desc->type = var.type;
4196 	desc->s = var.s;
4197 	desc->dpl = var.dpl;
4198 	desc->p = var.present;
4199 	desc->avl = var.avl;
4200 	desc->l = var.l;
4201 	desc->d = var.db;
4202 	desc->g = var.g;
4203 
4204 	return true;
4205 }
4206 
4207 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
4208 				 struct desc_struct *desc, u32 base3,
4209 				 int seg)
4210 {
4211 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4212 	struct kvm_segment var;
4213 
4214 	var.selector = selector;
4215 	var.base = get_desc_base(desc);
4216 #ifdef CONFIG_X86_64
4217 	var.base |= ((u64)base3) << 32;
4218 #endif
4219 	var.limit = get_desc_limit(desc);
4220 	if (desc->g)
4221 		var.limit = (var.limit << 12) | 0xfff;
4222 	var.type = desc->type;
4223 	var.present = desc->p;
4224 	var.dpl = desc->dpl;
4225 	var.db = desc->d;
4226 	var.s = desc->s;
4227 	var.l = desc->l;
4228 	var.g = desc->g;
4229 	var.avl = desc->avl;
4230 	var.present = desc->p;
4231 	var.unusable = !var.present;
4232 	var.padding = 0;
4233 
4234 	kvm_set_segment(vcpu, &var, seg);
4235 	return;
4236 }
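
/*
 * Example of the granularity handling in the two helpers above, with
 * hypothetical values: a descriptor with g=1 and a stored limit of
 * 0xfffff expands to a byte limit of (0xfffff << 12) | 0xfff ==
 * 0xffffffff in emulator_set_segment(), while emulator_get_segment()
 * reverses this by shifting the cached byte limit right by 12 before
 * handing the descriptor back to the emulator.
 */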
4237 
4238 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
4239 			    u32 msr_index, u64 *pdata)
4240 {
4241 	return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
4242 }
4243 
4244 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
4245 			    u32 msr_index, u64 data)
4246 {
4247 	return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
4248 }
4249 
4250 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
4251 			     u32 pmc, u64 *pdata)
4252 {
4253 	return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata);
4254 }
4255 
4256 static void emulator_halt(struct x86_emulate_ctxt *ctxt)
4257 {
4258 	emul_to_vcpu(ctxt)->arch.halt_request = 1;
4259 }
4260 
4261 static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
4262 {
4263 	preempt_disable();
4264 	kvm_load_guest_fpu(emul_to_vcpu(ctxt));
4265 	/*
4266 	 * CR0.TS may reference the host fpu state, not the guest fpu state,
4267 	 * so it may be clear at this point.
4268 	 */
4269 	clts();
4270 }
4271 
4272 static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
4273 {
4274 	preempt_enable();
4275 }
4276 
4277 static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
4278 			      struct x86_instruction_info *info,
4279 			      enum x86_intercept_stage stage)
4280 {
4281 	return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
4282 }
4283 
4284 static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
4285 			       u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
4286 {
4287 	struct kvm_cpuid_entry2 *cpuid = NULL;
4288 
4289 	if (eax && ecx)
4290 		cpuid = kvm_find_cpuid_entry(emul_to_vcpu(ctxt),
4291 					    *eax, *ecx);
4292 
4293 	if (cpuid) {
4294 		*eax = cpuid->eax;
4295 		*ecx = cpuid->ecx;
4296 		if (ebx)
4297 			*ebx = cpuid->ebx;
4298 		if (edx)
4299 			*edx = cpuid->edx;
4300 		return true;
4301 	}
4302 
4303 	return false;
4304 }
4305 
4306 static struct x86_emulate_ops emulate_ops = {
4307 	.read_std            = kvm_read_guest_virt_system,
4308 	.write_std           = kvm_write_guest_virt_system,
4309 	.fetch               = kvm_fetch_guest_virt,
4310 	.read_emulated       = emulator_read_emulated,
4311 	.write_emulated      = emulator_write_emulated,
4312 	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
4313 	.invlpg              = emulator_invlpg,
4314 	.pio_in_emulated     = emulator_pio_in_emulated,
4315 	.pio_out_emulated    = emulator_pio_out_emulated,
4316 	.get_segment         = emulator_get_segment,
4317 	.set_segment         = emulator_set_segment,
4318 	.get_cached_segment_base = emulator_get_cached_segment_base,
4319 	.get_gdt             = emulator_get_gdt,
4320 	.get_idt	     = emulator_get_idt,
4321 	.set_gdt             = emulator_set_gdt,
4322 	.set_idt	     = emulator_set_idt,
4323 	.get_cr              = emulator_get_cr,
4324 	.set_cr              = emulator_set_cr,
4325 	.set_rflags          = emulator_set_rflags,
4326 	.cpl                 = emulator_get_cpl,
4327 	.get_dr              = emulator_get_dr,
4328 	.set_dr              = emulator_set_dr,
4329 	.set_msr             = emulator_set_msr,
4330 	.get_msr             = emulator_get_msr,
4331 	.read_pmc            = emulator_read_pmc,
4332 	.halt                = emulator_halt,
4333 	.wbinvd              = emulator_wbinvd,
4334 	.fix_hypercall       = emulator_fix_hypercall,
4335 	.get_fpu             = emulator_get_fpu,
4336 	.put_fpu             = emulator_put_fpu,
4337 	.intercept           = emulator_intercept,
4338 	.get_cpuid           = emulator_get_cpuid,
4339 };
4340 
4341 static void cache_all_regs(struct kvm_vcpu *vcpu)
4342 {
4343 	kvm_register_read(vcpu, VCPU_REGS_RAX);
4344 	kvm_register_read(vcpu, VCPU_REGS_RSP);
4345 	kvm_register_read(vcpu, VCPU_REGS_RIP);
4346 	vcpu->arch.regs_dirty = ~0;
4347 }
4348 
4349 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
4350 {
4351 	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
4352 	/*
4353 	 * An "sti; sti" sequence only disables interrupts for the first
4354 	 * instruction. So, if the last instruction, be it emulated or
4355 	 * not, left the system with the INT_STI flag enabled, it
4356 	 * means that the last instruction was an sti. We should not
4357 	 * leave the flag on in this case. The same goes for mov ss.
4358 	 */
4359 	if (!(int_shadow & mask))
4360 		kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
4361 }
4362 
4363 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
4364 {
4365 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4366 	if (ctxt->exception.vector == PF_VECTOR)
4367 		kvm_propagate_fault(vcpu, &ctxt->exception);
4368 	else if (ctxt->exception.error_code_valid)
4369 		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
4370 				      ctxt->exception.error_code);
4371 	else
4372 		kvm_queue_exception(vcpu, ctxt->exception.vector);
4373 }
4374 
4375 static void init_decode_cache(struct x86_emulate_ctxt *ctxt,
4376 			      const unsigned long *regs)
4377 {
4378 	memset(&ctxt->twobyte, 0,
4379 	       (void *)&ctxt->regs - (void *)&ctxt->twobyte);
4380 	memcpy(ctxt->regs, regs, sizeof(ctxt->regs));
4381 
4382 	ctxt->fetch.start = 0;
4383 	ctxt->fetch.end = 0;
4384 	ctxt->io_read.pos = 0;
4385 	ctxt->io_read.end = 0;
4386 	ctxt->mem_read.pos = 0;
4387 	ctxt->mem_read.end = 0;
4388 }
4389 
4390 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
4391 {
4392 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4393 	int cs_db, cs_l;
4394 
4395 	/*
4396 	 * TODO: fix emulate.c to use guest_read/write_register
4397 	 * instead of direct ->regs accesses; this can save a hundred
4398 	 * cycles on Intel for instructions that don't read/change RSP,
4399 	 * for example.
4400 	 */
4401 	cache_all_regs(vcpu);
4402 
4403 	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
4404 
4405 	ctxt->eflags = kvm_get_rflags(vcpu);
4406 	ctxt->eip = kvm_rip_read(vcpu);
4407 	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
4408 		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
4409 		     cs_l				? X86EMUL_MODE_PROT64 :
4410 		     cs_db				? X86EMUL_MODE_PROT32 :
4411 							  X86EMUL_MODE_PROT16;
4412 	ctxt->guest_mode = is_guest_mode(vcpu);
4413 
4414 	init_decode_cache(ctxt, vcpu->arch.regs);
4415 	vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
4416 }
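
/*
 * The mode selection above, spelled out with hypothetical register state:
 * CR0.PE clear gives X86EMUL_MODE_REAL; EFLAGS.VM set gives
 * X86EMUL_MODE_VM86; otherwise a 64-bit code segment (CS.L = 1) selects
 * X86EMUL_MODE_PROT64, CS.D = 1 selects X86EMUL_MODE_PROT32, and
 * CS.D = 0 selects X86EMUL_MODE_PROT16.
 */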
4417 
4418 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
4419 {
4420 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4421 	int ret;
4422 
4423 	init_emulate_ctxt(vcpu);
4424 
4425 	ctxt->op_bytes = 2;
4426 	ctxt->ad_bytes = 2;
4427 	ctxt->_eip = ctxt->eip + inc_eip;
4428 	ret = emulate_int_real(ctxt, irq);
4429 
4430 	if (ret != X86EMUL_CONTINUE)
4431 		return EMULATE_FAIL;
4432 
4433 	ctxt->eip = ctxt->_eip;
4434 	memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
4435 	kvm_rip_write(vcpu, ctxt->eip);
4436 	kvm_set_rflags(vcpu, ctxt->eflags);
4437 
4438 	if (irq == NMI_VECTOR)
4439 		vcpu->arch.nmi_pending = 0;
4440 	else
4441 		vcpu->arch.interrupt.pending = false;
4442 
4443 	return EMULATE_DONE;
4444 }
4445 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
4446 
4447 static int handle_emulation_failure(struct kvm_vcpu *vcpu)
4448 {
4449 	int r = EMULATE_DONE;
4450 
4451 	++vcpu->stat.insn_emulation_fail;
4452 	trace_kvm_emulate_insn_failed(vcpu);
4453 	if (!is_guest_mode(vcpu)) {
4454 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4455 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
4456 		vcpu->run->internal.ndata = 0;
4457 		r = EMULATE_FAIL;
4458 	}
4459 	kvm_queue_exception(vcpu, UD_VECTOR);
4460 
4461 	return r;
4462 }
4463 
4464 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
4465 {
4466 	gpa_t gpa;
4467 
4468 	if (tdp_enabled)
4469 		return false;
4470 
4471 	/*
4472 	 * If emulation was due to an access to a shadowed page table
4473 	 * and it failed, try to unshadow the page and re-enter the
4474 	 * guest to let the CPU execute the instruction.
4475 	 */
4476 	if (kvm_mmu_unprotect_page_virt(vcpu, gva))
4477 		return true;
4478 
4479 	gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL);
4480 
4481 	if (gpa == UNMAPPED_GVA)
4482 		return true; /* let cpu generate fault */
4483 
4484 	if (!kvm_is_error_hva(gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT)))
4485 		return true;
4486 
4487 	return false;
4488 }
4489 
4490 static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
4491 			      unsigned long cr2,  int emulation_type)
4492 {
4493 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4494 	unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
4495 
4496 	last_retry_eip = vcpu->arch.last_retry_eip;
4497 	last_retry_addr = vcpu->arch.last_retry_addr;
4498 
4499 	/*
4500 	 * If the emulation is caused by #PF and it is a non-page-table
4501 	 * writing instruction, it means the VM-EXIT was caused by shadow
4502 	 * page protection; we can zap the shadow page and retry this
4503 	 * instruction directly.
4504 	 *
4505 	 * Note: if the guest uses a non-page-table modifying instruction
4506 	 * on the PDE that points to the instruction, then we will unmap
4507 	 * the instruction and go to an infinite loop. So, we cache the
4508 	 * last retried eip and the last fault address; if we hit the same
4509 	 * eip and address again, we can break out of the potential infinite
4510 	 * loop.
4511 	 */
4512 	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
4513 
4514 	if (!(emulation_type & EMULTYPE_RETRY))
4515 		return false;
4516 
4517 	if (x86_page_table_writing_insn(ctxt))
4518 		return false;
4519 
4520 	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
4521 		return false;
4522 
4523 	vcpu->arch.last_retry_eip = ctxt->eip;
4524 	vcpu->arch.last_retry_addr = cr2;
4525 
4526 	if (!vcpu->arch.mmu.direct_map)
4527 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
4528 
4529 	kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
4530 
4531 	return true;
4532 }
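
/*
 * Scenario the eip/address cache in retry_instruction() guards against,
 * sketched with a hypothetical guest: an instruction that is not a
 * page-table-writing instruction stores to the PDE that maps its own
 * code page.  Zapping that shadow page to allow the retry also unmaps
 * the instruction, so re-executing it faults again at the same eip with
 * the same cr2; without the cached (eip, address) pair the zap/refault
 * cycle could repeat forever.
 */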
4533 
4534 int x86_emulate_instruction(struct kvm_vcpu *vcpu,
4535 			    unsigned long cr2,
4536 			    int emulation_type,
4537 			    void *insn,
4538 			    int insn_len)
4539 {
4540 	int r;
4541 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4542 	bool writeback = true;
4543 
4544 	kvm_clear_exception_queue(vcpu);
4545 
4546 	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
4547 		init_emulate_ctxt(vcpu);
4548 		ctxt->interruptibility = 0;
4549 		ctxt->have_exception = false;
4550 		ctxt->perm_ok = false;
4551 
4552 		ctxt->only_vendor_specific_insn
4553 			= emulation_type & EMULTYPE_TRAP_UD;
4554 
4555 		r = x86_decode_insn(ctxt, insn, insn_len);
4556 
4557 		trace_kvm_emulate_insn_start(vcpu);
4558 		++vcpu->stat.insn_emulation;
4559 		if (r != EMULATION_OK)  {
4560 			if (emulation_type & EMULTYPE_TRAP_UD)
4561 				return EMULATE_FAIL;
4562 			if (reexecute_instruction(vcpu, cr2))
4563 				return EMULATE_DONE;
4564 			if (emulation_type & EMULTYPE_SKIP)
4565 				return EMULATE_FAIL;
4566 			return handle_emulation_failure(vcpu);
4567 		}
4568 	}
4569 
4570 	if (emulation_type & EMULTYPE_SKIP) {
4571 		kvm_rip_write(vcpu, ctxt->_eip);
4572 		return EMULATE_DONE;
4573 	}
4574 
4575 	if (retry_instruction(ctxt, cr2, emulation_type))
4576 		return EMULATE_DONE;
4577 
4578 	/* this is needed for the vmware backdoor interface to work since it
4579 	   changes register values during IO operations */
4580 	if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
4581 		vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
4582 		memcpy(ctxt->regs, vcpu->arch.regs, sizeof ctxt->regs);
4583 	}
4584 
4585 restart:
4586 	r = x86_emulate_insn(ctxt);
4587 
4588 	if (r == EMULATION_INTERCEPTED)
4589 		return EMULATE_DONE;
4590 
4591 	if (r == EMULATION_FAILED) {
4592 		if (reexecute_instruction(vcpu, cr2))
4593 			return EMULATE_DONE;
4594 
4595 		return handle_emulation_failure(vcpu);
4596 	}
4597 
4598 	if (ctxt->have_exception) {
4599 		inject_emulated_exception(vcpu);
4600 		r = EMULATE_DONE;
4601 	} else if (vcpu->arch.pio.count) {
4602 		if (!vcpu->arch.pio.in)
4603 			vcpu->arch.pio.count = 0;
4604 		else
4605 			writeback = false;
4606 		r = EMULATE_DO_MMIO;
4607 	} else if (vcpu->mmio_needed) {
4608 		if (!vcpu->mmio_is_write)
4609 			writeback = false;
4610 		r = EMULATE_DO_MMIO;
4611 	} else if (r == EMULATION_RESTART)
4612 		goto restart;
4613 	else
4614 		r = EMULATE_DONE;
4615 
4616 	if (writeback) {
4617 		toggle_interruptibility(vcpu, ctxt->interruptibility);
4618 		kvm_set_rflags(vcpu, ctxt->eflags);
4619 		kvm_make_request(KVM_REQ_EVENT, vcpu);
4620 		memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
4621 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
4622 		kvm_rip_write(vcpu, ctxt->eip);
4623 	} else
4624 		vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
4625 
4626 	return r;
4627 }
4628 EXPORT_SYMBOL_GPL(x86_emulate_instruction);
4629 
4630 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
4631 {
4632 	unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
4633 	int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
4634 					    size, port, &val, 1);
4635 	/* do not return to emulator after return from userspace */
4636 	vcpu->arch.pio.count = 0;
4637 	return ret;
4638 }
4639 EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
4640 
4641 static void tsc_bad(void *info)
4642 {
4643 	__this_cpu_write(cpu_tsc_khz, 0);
4644 }
4645 
4646 static void tsc_khz_changed(void *data)
4647 {
4648 	struct cpufreq_freqs *freq = data;
4649 	unsigned long khz = 0;
4650 
4651 	if (data)
4652 		khz = freq->new;
4653 	else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
4654 		khz = cpufreq_quick_get(raw_smp_processor_id());
4655 	if (!khz)
4656 		khz = tsc_khz;
4657 	__this_cpu_write(cpu_tsc_khz, khz);
4658 }
4659 
4660 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
4661 				     void *data)
4662 {
4663 	struct cpufreq_freqs *freq = data;
4664 	struct kvm *kvm;
4665 	struct kvm_vcpu *vcpu;
4666 	int i, send_ipi = 0;
4667 
4668 	/*
4669 	 * We allow guests to temporarily run on slowing clocks,
4670 	 * provided we notify them after, or to run on accelerating
4671 	 * clocks, provided we notify them before.  Thus time never
4672 	 * goes backwards.
4673 	 *
4674 	 * However, we have a problem.  We can't atomically update
4675 	 * the frequency of a given CPU from this function; it is
4676 	 * merely a notifier, which can be called from any CPU.
4677 	 * Changing the TSC frequency at arbitrary points in time
4678 	 * requires a recomputation of local variables related to
4679 	 * the TSC for each VCPU.  We must flag these local variables
4680 	 * to be updated and be sure the update takes place with the
4681 	 * new frequency before any guests proceed.
4682 	 *
4683 	 * Unfortunately, the combination of hotplug CPU and frequency
4684 	 * change creates an intractable locking scenario; the order
4685 	 * of when these callouts happen is undefined with respect to
4686 	 * CPU hotplug, and they can race with each other.  As such,
4687 	 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
4688 	 * undefined; you can actually have a CPU frequency change take
4689 	 * place in between the computation of X and the setting of the
4690 	 * variable.  To protect against this problem, all updates of
4691 	 * the per_cpu tsc_khz variable are done in an interrupt
4692 	 * protected IPI, and all callers wishing to update the value
4693 	 * must wait for a synchronous IPI to complete (which is trivial
4694 	 * if the caller is on the CPU already).  This establishes the
4695 	 * necessary total order on variable updates.
4696 	 *
4697 	 * Note that because a guest time update may take place
4698 	 * anytime after the setting of the VCPU's request bit, the
4699 	 * correct TSC value must be set before the request.  However,
4700 	 * to ensure the update actually makes it to any guest which
4701 	 * starts running in hardware virtualization between the set
4702 	 * and the acquisition of the spinlock, we must also ping the
4703 	 * CPU after setting the request bit.
4704 	 *
4705 	 */
4706 
4707 	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
4708 		return 0;
4709 	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
4710 		return 0;
4711 
4712 	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
4713 
4714 	raw_spin_lock(&kvm_lock);
4715 	list_for_each_entry(kvm, &vm_list, vm_list) {
4716 		kvm_for_each_vcpu(i, vcpu, kvm) {
4717 			if (vcpu->cpu != freq->cpu)
4718 				continue;
4719 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
4720 			if (vcpu->cpu != smp_processor_id())
4721 				send_ipi = 1;
4722 		}
4723 	}
4724 	raw_spin_unlock(&kvm_lock);
4725 
4726 	if (freq->old < freq->new && send_ipi) {
4727 		/*
4728 		 * We upscale the frequency.  We must make sure the guest
4729 		 * doesn't see old kvmclock values while running with
4730 		 * the new frequency, otherwise we risk the guest seeing
4731 		 * time go backwards.
4732 		 *
4733 		 * In case we update the frequency for another cpu
4734 		 * (which might be in guest context) send an interrupt
4735 		 * to kick the cpu out of guest context.  Next time
4736 		 * guest context is entered kvmclock will be updated,
4737 		 * so the guest will not see stale values.
4738 		 */
4739 		smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
4740 	}
4741 	return 0;
4742 }
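
/*
 * Concrete reading of the PRECHANGE/POSTCHANGE filtering at the top of
 * kvmclock_cpufreq_notifier(): on a slowdown (freq->old > freq->new) the
 * PRECHANGE call returns early and only the POSTCHANGE call updates
 * cpu_tsc_khz and kicks the vcpus, i.e. guests are notified after the
 * change; on a speedup (freq->old < freq->new) it is the other way
 * around, so guests are notified before.  Either way kvmclock never has
 * to present time running backwards.
 */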
4743 
4744 static struct notifier_block kvmclock_cpufreq_notifier_block = {
4745 	.notifier_call  = kvmclock_cpufreq_notifier
4746 };
4747 
4748 static int kvmclock_cpu_notifier(struct notifier_block *nfb,
4749 					unsigned long action, void *hcpu)
4750 {
4751 	unsigned int cpu = (unsigned long)hcpu;
4752 
4753 	switch (action) {
4754 		case CPU_ONLINE:
4755 		case CPU_DOWN_FAILED:
4756 			smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
4757 			break;
4758 		case CPU_DOWN_PREPARE:
4759 			smp_call_function_single(cpu, tsc_bad, NULL, 1);
4760 			break;
4761 	}
4762 	return NOTIFY_OK;
4763 }
4764 
4765 static struct notifier_block kvmclock_cpu_notifier_block = {
4766 	.notifier_call  = kvmclock_cpu_notifier,
4767 	.priority = -INT_MAX
4768 };
4769 
4770 static void kvm_timer_init(void)
4771 {
4772 	int cpu;
4773 
4774 	max_tsc_khz = tsc_khz;
4775 	register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
4776 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
4777 #ifdef CONFIG_CPU_FREQ
4778 		struct cpufreq_policy policy;
4779 		memset(&policy, 0, sizeof(policy));
4780 		cpu = get_cpu();
4781 		cpufreq_get_policy(&policy, cpu);
4782 		if (policy.cpuinfo.max_freq)
4783 			max_tsc_khz = policy.cpuinfo.max_freq;
4784 		put_cpu();
4785 #endif
4786 		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
4787 					  CPUFREQ_TRANSITION_NOTIFIER);
4788 	}
4789 	pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
4790 	for_each_online_cpu(cpu)
4791 		smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
4792 }
4793 
4794 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
4795 
4796 int kvm_is_in_guest(void)
4797 {
4798 	return __this_cpu_read(current_vcpu) != NULL;
4799 }
4800 
4801 static int kvm_is_user_mode(void)
4802 {
4803 	int user_mode = 3;
4804 
4805 	if (__this_cpu_read(current_vcpu))
4806 		user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));
4807 
4808 	return user_mode != 0;
4809 }
4810 
4811 static unsigned long kvm_get_guest_ip(void)
4812 {
4813 	unsigned long ip = 0;
4814 
4815 	if (__this_cpu_read(current_vcpu))
4816 		ip = kvm_rip_read(__this_cpu_read(current_vcpu));
4817 
4818 	return ip;
4819 }
4820 
4821 static struct perf_guest_info_callbacks kvm_guest_cbs = {
4822 	.is_in_guest		= kvm_is_in_guest,
4823 	.is_user_mode		= kvm_is_user_mode,
4824 	.get_guest_ip		= kvm_get_guest_ip,
4825 };
4826 
4827 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
4828 {
4829 	__this_cpu_write(current_vcpu, vcpu);
4830 }
4831 EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
4832 
4833 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
4834 {
4835 	__this_cpu_write(current_vcpu, NULL);
4836 }
4837 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
4838 
4839 static void kvm_set_mmio_spte_mask(void)
4840 {
4841 	u64 mask;
4842 	int maxphyaddr = boot_cpu_data.x86_phys_bits;
4843 
4844 	/*
4845 	 * Set the reserved bits and the present bit of a paging-structure
4846 	 * entry to generate a page fault with PFEC.RSVD = 1.
4847 	 */
4848 	mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr;
4849 	mask |= 1ull;
4850 
4851 #ifdef CONFIG_X86_64
4852 	/*
4853 	 * If there are no reserved bits available, clear the present bit to
4854 	 * disable the mmio page fault.
4855 	 */
4856 	if (maxphyaddr == 52)
4857 		mask &= ~1ull;
4858 #endif
4859 
4860 	kvm_mmu_set_mmio_spte_mask(mask);
4861 }
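
/*
 * Worked example of the mask computation above for a hypothetical CPU
 * with boot_cpu_data.x86_phys_bits == 40: (1ull << (62 - 40 + 1)) - 1 is
 * 23 consecutive ones; shifted left by 40 it covers the reserved physical
 * address bits 40..62, and OR-ing in 1ull adds the present bit, so an
 * MMIO spte built with this mask always faults with a reserved-bit error
 * code.  Only when maxphyaddr is 52 are there no reserved bits left to
 * use, and the present bit is cleared instead.
 */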
4862 
4863 int kvm_arch_init(void *opaque)
4864 {
4865 	int r;
4866 	struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
4867 
4868 	if (kvm_x86_ops) {
4869 		printk(KERN_ERR "kvm: already loaded the other module\n");
4870 		r = -EEXIST;
4871 		goto out;
4872 	}
4873 
4874 	if (!ops->cpu_has_kvm_support()) {
4875 		printk(KERN_ERR "kvm: no hardware support\n");
4876 		r = -EOPNOTSUPP;
4877 		goto out;
4878 	}
4879 	if (ops->disabled_by_bios()) {
4880 		printk(KERN_ERR "kvm: disabled by bios\n");
4881 		r = -EOPNOTSUPP;
4882 		goto out;
4883 	}
4884 
4885 	r = kvm_mmu_module_init();
4886 	if (r)
4887 		goto out;
4888 
4889 	kvm_set_mmio_spte_mask();
4890 	kvm_init_msr_list();
4891 
4892 	kvm_x86_ops = ops;
4893 	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
4894 			PT_DIRTY_MASK, PT64_NX_MASK, 0);
4895 
4896 	kvm_timer_init();
4897 
4898 	perf_register_guest_info_callbacks(&kvm_guest_cbs);
4899 
4900 	if (cpu_has_xsave)
4901 		host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
4902 
4903 	return 0;
4904 
4905 out:
4906 	return r;
4907 }
4908 
4909 void kvm_arch_exit(void)
4910 {
4911 	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
4912 
4913 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
4914 		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
4915 					    CPUFREQ_TRANSITION_NOTIFIER);
4916 	unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
4917 	kvm_x86_ops = NULL;
4918 	kvm_mmu_module_exit();
4919 }
4920 
4921 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
4922 {
4923 	++vcpu->stat.halt_exits;
4924 	if (irqchip_in_kernel(vcpu->kvm)) {
4925 		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
4926 		return 1;
4927 	} else {
4928 		vcpu->run->exit_reason = KVM_EXIT_HLT;
4929 		return 0;
4930 	}
4931 }
4932 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
4933 
4934 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
4935 {
4936 	u64 param, ingpa, outgpa, ret;
4937 	uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
4938 	bool fast, longmode;
4939 	int cs_db, cs_l;
4940 
4941 	/*
4942 	 * A hypercall generates #UD from non-zero CPL and from real mode,
4943 	 * per the Hyper-V spec.
4944 	 */
4945 	if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
4946 		kvm_queue_exception(vcpu, UD_VECTOR);
4947 		return 0;
4948 	}
4949 
4950 	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
4951 	longmode = is_long_mode(vcpu) && cs_l == 1;
4952 
4953 	if (!longmode) {
4954 		param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
4955 			(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
4956 		ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
4957 			(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
4958 		outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
4959 			(kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
4960 	}
4961 #ifdef CONFIG_X86_64
4962 	else {
4963 		param = kvm_register_read(vcpu, VCPU_REGS_RCX);
4964 		ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
4965 		outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
4966 	}
4967 #endif
4968 
4969 	code = param & 0xffff;
4970 	fast = (param >> 16) & 0x1;
4971 	rep_cnt = (param >> 32) & 0xfff;
4972 	rep_idx = (param >> 48) & 0xfff;
4973 
4974 	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
4975 
4976 	switch (code) {
4977 	case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
4978 		kvm_vcpu_on_spin(vcpu);
4979 		break;
4980 	default:
4981 		res = HV_STATUS_INVALID_HYPERCALL_CODE;
4982 		break;
4983 	}
4984 
4985 	ret = res | (((u64)rep_done & 0xfff) << 32);
4986 	if (longmode) {
4987 		kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
4988 	} else {
4989 		kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
4990 		kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
4991 	}
4992 
4993 	return 1;
4994 }
4995 
4996 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
4997 {
4998 	unsigned long nr, a0, a1, a2, a3, ret;
4999 	int r = 1;
5000 
5001 	if (kvm_hv_hypercall_enabled(vcpu->kvm))
5002 		return kvm_hv_hypercall(vcpu);
5003 
5004 	nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
5005 	a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
5006 	a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
5007 	a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
5008 	a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
5009 
5010 	trace_kvm_hypercall(nr, a0, a1, a2, a3);
5011 
5012 	if (!is_long_mode(vcpu)) {
5013 		nr &= 0xFFFFFFFF;
5014 		a0 &= 0xFFFFFFFF;
5015 		a1 &= 0xFFFFFFFF;
5016 		a2 &= 0xFFFFFFFF;
5017 		a3 &= 0xFFFFFFFF;
5018 	}
5019 
5020 	if (kvm_x86_ops->get_cpl(vcpu) != 0) {
5021 		ret = -KVM_EPERM;
5022 		goto out;
5023 	}
5024 
5025 	switch (nr) {
5026 	case KVM_HC_VAPIC_POLL_IRQ:
5027 		ret = 0;
5028 		break;
5029 	default:
5030 		ret = -KVM_ENOSYS;
5031 		break;
5032 	}
5033 out:
5034 	kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
5035 	++vcpu->stat.hypercalls;
5036 	return r;
5037 }
5038 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
5039 
5040 int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
5041 {
5042 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5043 	char instruction[3];
5044 	unsigned long rip = kvm_rip_read(vcpu);
5045 
5046 	/*
5047 	 * Blow out the MMU to ensure that no other VCPU has an active mapping,
5048 	 * so that the updated hypercall appears atomically across all
5049 	 * VCPUs.
5050 	 */
5051 	kvm_mmu_zap_all(vcpu->kvm);
5052 
5053 	kvm_x86_ops->patch_hypercall(vcpu, instruction);
5054 
5055 	return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
5056 }
5057 
5058 /*
5059  * Check if userspace requested an interrupt window, and that the
5060  * interrupt window is open.
5061  *
5062  * No need to exit to userspace if we already have an interrupt queued.
5063  */
5064 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
5065 {
5066 	return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
5067 		vcpu->run->request_interrupt_window &&
5068 		kvm_arch_interrupt_allowed(vcpu));
5069 }
5070 
5071 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
5072 {
5073 	struct kvm_run *kvm_run = vcpu->run;
5074 
5075 	kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
5076 	kvm_run->cr8 = kvm_get_cr8(vcpu);
5077 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
5078 	if (irqchip_in_kernel(vcpu->kvm))
5079 		kvm_run->ready_for_interrupt_injection = 1;
5080 	else
5081 		kvm_run->ready_for_interrupt_injection =
5082 			kvm_arch_interrupt_allowed(vcpu) &&
5083 			!kvm_cpu_has_interrupt(vcpu) &&
5084 			!kvm_event_needs_reinjection(vcpu);
5085 }
5086 
5087 static void vapic_enter(struct kvm_vcpu *vcpu)
5088 {
5089 	struct kvm_lapic *apic = vcpu->arch.apic;
5090 	struct page *page;
5091 
5092 	if (!apic || !apic->vapic_addr)
5093 		return;
5094 
5095 	page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
5096 
5097 	vcpu->arch.apic->vapic_page = page;
5098 }
5099 
5100 static void vapic_exit(struct kvm_vcpu *vcpu)
5101 {
5102 	struct kvm_lapic *apic = vcpu->arch.apic;
5103 	int idx;
5104 
5105 	if (!apic || !apic->vapic_addr)
5106 		return;
5107 
5108 	idx = srcu_read_lock(&vcpu->kvm->srcu);
5109 	kvm_release_page_dirty(apic->vapic_page);
5110 	mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
5111 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
5112 }
5113 
5114 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
5115 {
5116 	int max_irr, tpr;
5117 
5118 	if (!kvm_x86_ops->update_cr8_intercept)
5119 		return;
5120 
5121 	if (!vcpu->arch.apic)
5122 		return;
5123 
5124 	if (!vcpu->arch.apic->vapic_addr)
5125 		max_irr = kvm_lapic_find_highest_irr(vcpu);
5126 	else
5127 		max_irr = -1;
5128 
5129 	if (max_irr != -1)
5130 		max_irr >>= 4;
5131 
5132 	tpr = kvm_lapic_get_cr8(vcpu);
5133 
5134 	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
5135 }
5136 
5137 static void inject_pending_event(struct kvm_vcpu *vcpu)
5138 {
5139 	/* try to reinject previous events if any */
5140 	if (vcpu->arch.exception.pending) {
5141 		trace_kvm_inj_exception(vcpu->arch.exception.nr,
5142 					vcpu->arch.exception.has_error_code,
5143 					vcpu->arch.exception.error_code);
5144 		kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
5145 					  vcpu->arch.exception.has_error_code,
5146 					  vcpu->arch.exception.error_code,
5147 					  vcpu->arch.exception.reinject);
5148 		return;
5149 	}
5150 
5151 	if (vcpu->arch.nmi_injected) {
5152 		kvm_x86_ops->set_nmi(vcpu);
5153 		return;
5154 	}
5155 
5156 	if (vcpu->arch.interrupt.pending) {
5157 		kvm_x86_ops->set_irq(vcpu);
5158 		return;
5159 	}
5160 
5161 	/* try to inject new event if pending */
5162 	if (vcpu->arch.nmi_pending) {
5163 		if (kvm_x86_ops->nmi_allowed(vcpu)) {
5164 			--vcpu->arch.nmi_pending;
5165 			vcpu->arch.nmi_injected = true;
5166 			kvm_x86_ops->set_nmi(vcpu);
5167 		}
5168 	} else if (kvm_cpu_has_interrupt(vcpu)) {
5169 		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
5170 			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
5171 					    false);
5172 			kvm_x86_ops->set_irq(vcpu);
5173 		}
5174 	}
5175 }
5176 
5177 static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
5178 {
5179 	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
5180 			!vcpu->guest_xcr0_loaded) {
5181 		/* kvm_set_xcr() also depends on this */
5182 		xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
5183 		vcpu->guest_xcr0_loaded = 1;
5184 	}
5185 }
5186 
5187 static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
5188 {
5189 	if (vcpu->guest_xcr0_loaded) {
5190 		if (vcpu->arch.xcr0 != host_xcr0)
5191 			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
5192 		vcpu->guest_xcr0_loaded = 0;
5193 	}
5194 }
5195 
5196 static void process_nmi(struct kvm_vcpu *vcpu)
5197 {
5198 	unsigned limit = 2;
5199 
5200 	/*
5201 	 * x86 is limited to one NMI running, and one NMI pending after it.
5202 	 * If an NMI is already in progress, limit further NMIs to just one.
5203 	 * Otherwise, allow two (and we'll inject the first one immediately).
5204 	 */
5205 	if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
5206 		limit = 1;
5207 
5208 	vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
5209 	vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
5210 	kvm_make_request(KVM_REQ_EVENT, vcpu);
5211 }
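
/*
 * Example of the limit above, with hypothetical queue contents: if three
 * NMIs were queued while another NMI is still being handled
 * (nmi_injected set), nmi_pending is clamped to 1, matching hardware
 * behaviour where at most one NMI can be latched while one is in
 * service; with no NMI in flight the clamp is 2, so one can be injected
 * immediately and one more stays pending.
 */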
5212 
5213 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
5214 {
5215 	int r;
5216 	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
5217 		vcpu->run->request_interrupt_window;
5218 	bool req_immediate_exit = 0;
5219 
5220 	if (vcpu->requests) {
5221 		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
5222 			kvm_mmu_unload(vcpu);
5223 		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
5224 			__kvm_migrate_timers(vcpu);
5225 		if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
5226 			r = kvm_guest_time_update(vcpu);
5227 			if (unlikely(r))
5228 				goto out;
5229 		}
5230 		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
5231 			kvm_mmu_sync_roots(vcpu);
5232 		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
5233 			kvm_x86_ops->tlb_flush(vcpu);
5234 		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
5235 			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
5236 			r = 0;
5237 			goto out;
5238 		}
5239 		if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
5240 			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
5241 			r = 0;
5242 			goto out;
5243 		}
5244 		if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
5245 			vcpu->fpu_active = 0;
5246 			kvm_x86_ops->fpu_deactivate(vcpu);
5247 		}
5248 		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
5249 			/* Page is swapped out. Do synthetic halt */
5250 			vcpu->arch.apf.halted = true;
5251 			r = 1;
5252 			goto out;
5253 		}
5254 		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
5255 			record_steal_time(vcpu);
5256 		if (kvm_check_request(KVM_REQ_NMI, vcpu))
5257 			process_nmi(vcpu);
5258 		req_immediate_exit =
5259 			kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
5260 		if (kvm_check_request(KVM_REQ_PMU, vcpu))
5261 			kvm_handle_pmu_event(vcpu);
5262 		if (kvm_check_request(KVM_REQ_PMI, vcpu))
5263 			kvm_deliver_pmi(vcpu);
5264 	}
5265 
5266 	r = kvm_mmu_reload(vcpu);
5267 	if (unlikely(r))
5268 		goto out;
5269 
5270 	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
5271 		inject_pending_event(vcpu);
5272 
5273 		/* enable NMI/IRQ window open exits if needed */
5274 		if (vcpu->arch.nmi_pending)
5275 			kvm_x86_ops->enable_nmi_window(vcpu);
5276 		else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
5277 			kvm_x86_ops->enable_irq_window(vcpu);
5278 
5279 		if (kvm_lapic_enabled(vcpu)) {
5280 			update_cr8_intercept(vcpu);
5281 			kvm_lapic_sync_to_vapic(vcpu);
5282 		}
5283 	}
5284 
5285 	preempt_disable();
5286 
5287 	kvm_x86_ops->prepare_guest_switch(vcpu);
5288 	if (vcpu->fpu_active)
5289 		kvm_load_guest_fpu(vcpu);
5290 	kvm_load_guest_xcr0(vcpu);
5291 
5292 	vcpu->mode = IN_GUEST_MODE;
5293 
5294 	/* We should set ->mode before checking ->requests,
5295 	 * see the comment in make_all_cpus_request.
5296 	 */
5297 	smp_mb();
5298 
5299 	local_irq_disable();
5300 
5301 	if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
5302 	    || need_resched() || signal_pending(current)) {
5303 		vcpu->mode = OUTSIDE_GUEST_MODE;
5304 		smp_wmb();
5305 		local_irq_enable();
5306 		preempt_enable();
5307 		kvm_x86_ops->cancel_injection(vcpu);
5308 		r = 1;
5309 		goto out;
5310 	}
5311 
5312 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
5313 
5314 	if (req_immediate_exit)
5315 		smp_send_reschedule(vcpu->cpu);
5316 
5317 	kvm_guest_enter();
5318 
5319 	if (unlikely(vcpu->arch.switch_db_regs)) {
5320 		set_debugreg(0, 7);
5321 		set_debugreg(vcpu->arch.eff_db[0], 0);
5322 		set_debugreg(vcpu->arch.eff_db[1], 1);
5323 		set_debugreg(vcpu->arch.eff_db[2], 2);
5324 		set_debugreg(vcpu->arch.eff_db[3], 3);
5325 	}
5326 
5327 	trace_kvm_entry(vcpu->vcpu_id);
5328 	kvm_x86_ops->run(vcpu);
5329 
5330 	/*
5331 	 * If the guest has used debug registers, at least dr7
5332 	 * will be disabled while returning to the host.
5333 	 * If we don't have active breakpoints in the host, we don't
5334 	 * care about the messed up debug address registers. But if
5335 	 * we have some of them active, restore the old state.
5336 	 */
5337 	if (hw_breakpoint_active())
5338 		hw_breakpoint_restore();
5339 
5340 	vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu);
5341 
5342 	vcpu->mode = OUTSIDE_GUEST_MODE;
5343 	smp_wmb();
5344 	local_irq_enable();
5345 
5346 	++vcpu->stat.exits;
5347 
5348 	/*
5349 	 * We must have an instruction between local_irq_enable() and
5350 	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
5351 	 * the interrupt shadow.  The stat.exits increment will do nicely.
5352 	 * But we need to prevent reordering, hence this barrier():
5353 	 */
5354 	barrier();
5355 
5356 	kvm_guest_exit();
5357 
5358 	preempt_enable();
5359 
5360 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5361 
5362 	/*
5363 	 * Profile KVM exit RIPs:
5364 	 */
5365 	if (unlikely(prof_on == KVM_PROFILING)) {
5366 		unsigned long rip = kvm_rip_read(vcpu);
5367 		profile_hit(KVM_PROFILING, (void *)rip);
5368 	}
5369 
5370 	if (unlikely(vcpu->arch.tsc_always_catchup))
5371 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
5372 
5373 	kvm_lapic_sync_from_vapic(vcpu);
5374 
5375 	r = kvm_x86_ops->handle_exit(vcpu);
5376 out:
5377 	return r;
5378 }
5379 
5380 
5381 static int __vcpu_run(struct kvm_vcpu *vcpu)
5382 {
5383 	int r;
5384 	struct kvm *kvm = vcpu->kvm;
5385 
5386 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
5387 		pr_debug("vcpu %d received sipi with vector # %x\n",
5388 			 vcpu->vcpu_id, vcpu->arch.sipi_vector);
5389 		kvm_lapic_reset(vcpu);
5390 		r = kvm_arch_vcpu_reset(vcpu);
5391 		if (r)
5392 			return r;
5393 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5394 	}
5395 
5396 	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
5397 	vapic_enter(vcpu);
5398 
5399 	r = 1;
5400 	while (r > 0) {
5401 		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
5402 		    !vcpu->arch.apf.halted)
5403 			r = vcpu_enter_guest(vcpu);
5404 		else {
5405 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
5406 			kvm_vcpu_block(vcpu);
5407 			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
5408 			if (kvm_check_request(KVM_REQ_UNHALT, vcpu))
5409 			{
5410 				switch(vcpu->arch.mp_state) {
5411 				case KVM_MP_STATE_HALTED:
5412 					vcpu->arch.mp_state =
5413 						KVM_MP_STATE_RUNNABLE;
5414 				case KVM_MP_STATE_RUNNABLE:
5415 					vcpu->arch.apf.halted = false;
5416 					break;
5417 				case KVM_MP_STATE_SIPI_RECEIVED:
5418 				default:
5419 					r = -EINTR;
5420 					break;
5421 				}
5422 			}
5423 		}
5424 
5425 		if (r <= 0)
5426 			break;
5427 
5428 		clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
5429 		if (kvm_cpu_has_pending_timer(vcpu))
5430 			kvm_inject_pending_timer_irqs(vcpu);
5431 
5432 		if (dm_request_for_irq_injection(vcpu)) {
5433 			r = -EINTR;
5434 			vcpu->run->exit_reason = KVM_EXIT_INTR;
5435 			++vcpu->stat.request_irq_exits;
5436 		}
5437 
5438 		kvm_check_async_pf_completion(vcpu);
5439 
5440 		if (signal_pending(current)) {
5441 			r = -EINTR;
5442 			vcpu->run->exit_reason = KVM_EXIT_INTR;
5443 			++vcpu->stat.signal_exits;
5444 		}
5445 		if (need_resched()) {
5446 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
5447 			kvm_resched(vcpu);
5448 			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
5449 		}
5450 	}
5451 
5452 	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
5453 
5454 	vapic_exit(vcpu);
5455 
5456 	return r;
5457 }
5458 
5459 static int complete_mmio(struct kvm_vcpu *vcpu)
5460 {
5461 	struct kvm_run *run = vcpu->run;
5462 	int r;
5463 
5464 	if (!(vcpu->arch.pio.count || vcpu->mmio_needed))
5465 		return 1;
5466 
5467 	if (vcpu->mmio_needed) {
5468 		vcpu->mmio_needed = 0;
5469 		if (!vcpu->mmio_is_write)
5470 			memcpy(vcpu->mmio_data + vcpu->mmio_index,
5471 			       run->mmio.data, 8);
5472 		vcpu->mmio_index += 8;
5473 		if (vcpu->mmio_index < vcpu->mmio_size) {
5474 			run->exit_reason = KVM_EXIT_MMIO;
5475 			run->mmio.phys_addr = vcpu->mmio_phys_addr + vcpu->mmio_index;
5476 			memcpy(run->mmio.data, vcpu->mmio_data + vcpu->mmio_index, 8);
5477 			run->mmio.len = min(vcpu->mmio_size - vcpu->mmio_index, 8);
5478 			run->mmio.is_write = vcpu->mmio_is_write;
5479 			vcpu->mmio_needed = 1;
5480 			return 0;
5481 		}
5482 		if (vcpu->mmio_is_write)
5483 			return 1;
5484 		vcpu->mmio_read_completed = 1;
5485 	}
5486 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5487 	r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
5488 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
5489 	if (r != EMULATE_DONE)
5490 		return 0;
5491 	return 1;
5492 }
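
/*
 * Example of how complete_mmio() above resumes a long access, using a
 * hypothetical 16-byte emulated MMIO read: the first KVM_EXIT_MMIO asks
 * userspace for bytes 0-7; on re-entry run->mmio.data is copied into
 * mmio_data, mmio_index advances to 8, and since 8 < mmio_size another
 * KVM_EXIT_MMIO is requested for bytes 8-15.  Only when mmio_index
 * reaches mmio_size is mmio_read_completed set and the emulator
 * restarted with EMULTYPE_NO_DECODE to consume the data.
 */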
5493 
5494 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
5495 {
5496 	int r;
5497 	sigset_t sigsaved;
5498 
5499 	if (!tsk_used_math(current) && init_fpu(current))
5500 		return -ENOMEM;
5501 
5502 	if (vcpu->sigset_active)
5503 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
5504 
5505 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
5506 		kvm_vcpu_block(vcpu);
5507 		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
5508 		r = -EAGAIN;
5509 		goto out;
5510 	}
5511 
5512 	/* re-sync apic's tpr */
5513 	if (!irqchip_in_kernel(vcpu->kvm)) {
5514 		if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
5515 			r = -EINVAL;
5516 			goto out;
5517 		}
5518 	}
5519 
5520 	r = complete_mmio(vcpu);
5521 	if (r <= 0)
5522 		goto out;
5523 
5524 	r = __vcpu_run(vcpu);
5525 
5526 out:
5527 	post_kvm_run_save(vcpu);
5528 	if (vcpu->sigset_active)
5529 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
5530 
5531 	return r;
5532 }
5533 
5534 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
5535 {
5536 	if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
5537 		/*
5538 		 * We are here if userspace calls get_regs() in the middle of
5539 		 * instruction emulation. Register state needs to be copied
5540 		 * back from the emulation context to the vcpu. Userspace
5541 		 * shouldn't usually do that, but some badly designed PV devices
5542 		 * (the vmware backdoor interface) need this to work.
5543 		 */
5544 		struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5545 		memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
5546 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
5547 	}
5548 	regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
5549 	regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
5550 	regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
5551 	regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
5552 	regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
5553 	regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
5554 	regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
5555 	regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
5556 #ifdef CONFIG_X86_64
5557 	regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
5558 	regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
5559 	regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
5560 	regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
5561 	regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
5562 	regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
5563 	regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
5564 	regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
5565 #endif
5566 
5567 	regs->rip = kvm_rip_read(vcpu);
5568 	regs->rflags = kvm_get_rflags(vcpu);
5569 
5570 	return 0;
5571 }
5572 
5573 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
5574 {
5575 	vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
5576 	vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
5577 
5578 	kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
5579 	kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
5580 	kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
5581 	kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
5582 	kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
5583 	kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
5584 	kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
5585 	kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
5586 #ifdef CONFIG_X86_64
5587 	kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
5588 	kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
5589 	kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
5590 	kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
5591 	kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
5592 	kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
5593 	kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
5594 	kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
5595 #endif
5596 
5597 	kvm_rip_write(vcpu, regs->rip);
5598 	kvm_set_rflags(vcpu, regs->rflags);
5599 
5600 	vcpu->arch.exception.pending = false;
5601 
5602 	kvm_make_request(KVM_REQ_EVENT, vcpu);
5603 
5604 	return 0;
5605 }
5606 
5607 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
5608 {
5609 	struct kvm_segment cs;
5610 
5611 	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
5612 	*db = cs.db;
5613 	*l = cs.l;
5614 }
5615 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
5616 
5617 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
5618 				  struct kvm_sregs *sregs)
5619 {
5620 	struct desc_ptr dt;
5621 
5622 	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
5623 	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
5624 	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
5625 	kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
5626 	kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
5627 	kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
5628 
5629 	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
5630 	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
5631 
5632 	kvm_x86_ops->get_idt(vcpu, &dt);
5633 	sregs->idt.limit = dt.size;
5634 	sregs->idt.base = dt.address;
5635 	kvm_x86_ops->get_gdt(vcpu, &dt);
5636 	sregs->gdt.limit = dt.size;
5637 	sregs->gdt.base = dt.address;
5638 
5639 	sregs->cr0 = kvm_read_cr0(vcpu);
5640 	sregs->cr2 = vcpu->arch.cr2;
5641 	sregs->cr3 = kvm_read_cr3(vcpu);
5642 	sregs->cr4 = kvm_read_cr4(vcpu);
5643 	sregs->cr8 = kvm_get_cr8(vcpu);
5644 	sregs->efer = vcpu->arch.efer;
5645 	sregs->apic_base = kvm_get_apic_base(vcpu);
5646 
5647 	memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
5648 
5649 	if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
5650 		set_bit(vcpu->arch.interrupt.nr,
5651 			(unsigned long *)sregs->interrupt_bitmap);
5652 
5653 	return 0;
5654 }
5655 
5656 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
5657 				    struct kvm_mp_state *mp_state)
5658 {
5659 	mp_state->mp_state = vcpu->arch.mp_state;
5660 	return 0;
5661 }
5662 
5663 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
5664 				    struct kvm_mp_state *mp_state)
5665 {
5666 	vcpu->arch.mp_state = mp_state->mp_state;
5667 	kvm_make_request(KVM_REQ_EVENT, vcpu);
5668 	return 0;
5669 }
5670 
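/*
 * Emulate a task switch (e.g. a CALL/JMP/IRET through a TSS descriptor or
 * an exception delivered via a task gate) with the instruction emulator,
 * then propagate the resulting registers, RIP and RFLAGS back into the vcpu.
 */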
5671 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
5672 		    int reason, bool has_error_code, u32 error_code)
5673 {
5674 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5675 	int ret;
5676 
5677 	init_emulate_ctxt(vcpu);
5678 
5679 	ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
5680 				   has_error_code, error_code);
5681 
5682 	if (ret)
5683 		return EMULATE_FAIL;
5684 
5685 	memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
5686 	kvm_rip_write(vcpu, ctxt->eip);
5687 	kvm_set_rflags(vcpu, ctxt->eflags);
5688 	kvm_make_request(KVM_REQ_EVENT, vcpu);
5689 	return EMULATE_DONE;
5690 }
5691 EXPORT_SYMBOL_GPL(kvm_task_switch);
5692 
5693 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
5694 				  struct kvm_sregs *sregs)
5695 {
5696 	int mmu_reset_needed = 0;
5697 	int pending_vec, max_bits, idx;
5698 	struct desc_ptr dt;
5699 
5700 	dt.size = sregs->idt.limit;
5701 	dt.address = sregs->idt.base;
5702 	kvm_x86_ops->set_idt(vcpu, &dt);
5703 	dt.size = sregs->gdt.limit;
5704 	dt.address = sregs->gdt.base;
5705 	kvm_x86_ops->set_gdt(vcpu, &dt);
5706 
5707 	vcpu->arch.cr2 = sregs->cr2;
5708 	mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
5709 	vcpu->arch.cr3 = sregs->cr3;
5710 	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
5711 
5712 	kvm_set_cr8(vcpu, sregs->cr8);
5713 
5714 	mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
5715 	kvm_x86_ops->set_efer(vcpu, sregs->efer);
5716 	kvm_set_apic_base(vcpu, sregs->apic_base);
5717 
5718 	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
5719 	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
5720 	vcpu->arch.cr0 = sregs->cr0;
5721 
5722 	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
5723 	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
5724 	if (sregs->cr4 & X86_CR4_OSXSAVE)
5725 		kvm_update_cpuid(vcpu);
5726 
5727 	idx = srcu_read_lock(&vcpu->kvm->srcu);
5728 	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
5729 		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
5730 		mmu_reset_needed = 1;
5731 	}
5732 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
5733 
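	/*
	 * CR0, CR3, CR4 and EFER together determine the paging mode and the
	 * active root, so rebuild the MMU context if any of them changed.
	 */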
5734 	if (mmu_reset_needed)
5735 		kvm_mmu_reset_context(vcpu);
5736 
5737 	max_bits = (sizeof sregs->interrupt_bitmap) << 3;
5738 	pending_vec = find_first_bit(
5739 		(const unsigned long *)sregs->interrupt_bitmap, max_bits);
5740 	if (pending_vec < max_bits) {
5741 		kvm_queue_interrupt(vcpu, pending_vec, false);
5742 		pr_debug("Set back pending irq %d\n", pending_vec);
5743 	}
5744 
5745 	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
5746 	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
5747 	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
5748 	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
5749 	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
5750 	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
5751 
5752 	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
5753 	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
5754 
5755 	update_cr8_intercept(vcpu);
5756 
5757 	/* Older userspace won't unhalt the vcpu on reset. */
5758 	if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
5759 	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
5760 	    !is_protmode(vcpu))
5761 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5762 
5763 	kvm_make_request(KVM_REQ_EVENT, vcpu);
5764 
5765 	return 0;
5766 }
5767 
5768 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
5769 					struct kvm_guest_debug *dbg)
5770 {
5771 	unsigned long rflags;
5772 	int i, r;
5773 
5774 	if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
5775 		r = -EBUSY;
5776 		if (vcpu->arch.exception.pending)
5777 			goto out;
5778 		if (dbg->control & KVM_GUESTDBG_INJECT_DB)
5779 			kvm_queue_exception(vcpu, DB_VECTOR);
5780 		else
5781 			kvm_queue_exception(vcpu, BP_VECTOR);
5782 	}
5783 
5784 	/*
5785 	 * Read rflags now, while potentially injected trace flags are still
5786 	 * filtered out by kvm_get_rflags().
5787 	 */
5788 	rflags = kvm_get_rflags(vcpu);
5789 
5790 	vcpu->guest_debug = dbg->control;
5791 	if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
5792 		vcpu->guest_debug = 0;
5793 
5794 	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
5795 		for (i = 0; i < KVM_NR_DB_REGS; ++i)
5796 			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
5797 		vcpu->arch.switch_db_regs =
5798 			(dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
5799 	} else {
5800 		for (i = 0; i < KVM_NR_DB_REGS; i++)
5801 			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
5802 		vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
5803 	}
5804 
5805 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
5806 		vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
5807 			get_segment_base(vcpu, VCPU_SREG_CS);
5808 
5809 	/*
5810 	 * Trigger an rflags update that will inject or remove the trace
5811 	 * flags.
5812 	 */
5813 	kvm_set_rflags(vcpu, rflags);
5814 
5815 	kvm_x86_ops->set_guest_debug(vcpu, dbg);
5816 
5817 	r = 0;
5818 
5819 out:
5820 
5821 	return r;
5822 }
5823 
5824 /*
5825  * Translate a guest virtual address to a guest physical address.
5826  */
5827 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
5828 				    struct kvm_translation *tr)
5829 {
5830 	unsigned long vaddr = tr->linear_address;
5831 	gpa_t gpa;
5832 	int idx;
5833 
5834 	idx = srcu_read_lock(&vcpu->kvm->srcu);
5835 	gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
5836 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
5837 	tr->physical_address = gpa;
5838 	tr->valid = gpa != UNMAPPED_GVA;
5839 	tr->writeable = 1;
5840 	tr->usermode = 0;
5841 
5842 	return 0;
5843 }
5844 
5845 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
5846 {
5847 	struct i387_fxsave_struct *fxsave =
5848 			&vcpu->arch.guest_fpu.state->fxsave;
5849 
5850 	memcpy(fpu->fpr, fxsave->st_space, 128);
5851 	fpu->fcw = fxsave->cwd;
5852 	fpu->fsw = fxsave->swd;
5853 	fpu->ftwx = fxsave->twd;
5854 	fpu->last_opcode = fxsave->fop;
5855 	fpu->last_ip = fxsave->rip;
5856 	fpu->last_dp = fxsave->rdp;
5857 	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
5858 
5859 	return 0;
5860 }
5861 
5862 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
5863 {
5864 	struct i387_fxsave_struct *fxsave =
5865 			&vcpu->arch.guest_fpu.state->fxsave;
5866 
5867 	memcpy(fxsave->st_space, fpu->fpr, 128);
5868 	fxsave->cwd = fpu->fcw;
5869 	fxsave->swd = fpu->fsw;
5870 	fxsave->twd = fpu->ftwx;
5871 	fxsave->fop = fpu->last_opcode;
5872 	fxsave->rip = fpu->last_ip;
5873 	fxsave->rdp = fpu->last_dp;
5874 	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
5875 
5876 	return 0;
5877 }
5878 
5879 int fx_init(struct kvm_vcpu *vcpu)
5880 {
5881 	int err;
5882 
5883 	err = fpu_alloc(&vcpu->arch.guest_fpu);
5884 	if (err)
5885 		return err;
5886 
5887 	fpu_finit(&vcpu->arch.guest_fpu);
5888 
5889 	/*
5890 	 * Ensure guest xcr0 is valid for loading
5891 	 */
5892 	vcpu->arch.xcr0 = XSTATE_FP;
5893 
5894 	vcpu->arch.cr0 |= X86_CR0_ET;
5895 
5896 	return 0;
5897 }
5898 EXPORT_SYMBOL_GPL(fx_init);
5899 
5900 static void fx_free(struct kvm_vcpu *vcpu)
5901 {
5902 	fpu_free(&vcpu->arch.guest_fpu);
5903 }
5904 
5905 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
5906 {
5907 	if (vcpu->guest_fpu_loaded)
5908 		return;
5909 
5910 	/*
5911 	 * Restore all possible states in the guest, and assume the host
5912 	 * would use all available bits.
5913 	 * The guest xcr0 will be loaded later.
5914 	 */
5915 	kvm_put_guest_xcr0(vcpu);
5916 	vcpu->guest_fpu_loaded = 1;
5917 	unlazy_fpu(current);
5918 	fpu_restore_checking(&vcpu->arch.guest_fpu);
5919 	trace_kvm_fpu(1);
5920 }
5921 
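/*
 * Save the guest FPU/XSAVE state back into the vcpu and give the FPU back
 * to the host; KVM_REQ_DEACTIVATE_FPU lets the backend trap the guest's
 * next FPU use so the state can be reloaded lazily.
 */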
5922 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
5923 {
5924 	kvm_put_guest_xcr0(vcpu);
5925 
5926 	if (!vcpu->guest_fpu_loaded)
5927 		return;
5928 
5929 	vcpu->guest_fpu_loaded = 0;
5930 	fpu_save_init(&vcpu->arch.guest_fpu);
5931 	++vcpu->stat.fpu_reload;
5932 	kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
5933 	trace_kvm_fpu(0);
5934 }
5935 
5936 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
5937 {
5938 	kvmclock_reset(vcpu);
5939 
5940 	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
5941 	fx_free(vcpu);
5942 	kvm_x86_ops->vcpu_free(vcpu);
5943 }
5944 
5945 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
5946 						unsigned int id)
5947 {
5948 	if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
5949 		printk_once(KERN_WARNING
5950 		"kvm: SMP vm created on host with unstable TSC; "
5951 		"guest TSC will not be reliable\n");
5952 	return kvm_x86_ops->vcpu_create(kvm, id);
5953 }
5954 
5955 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
5956 {
5957 	int r;
5958 
5959 	vcpu->arch.mtrr_state.have_fixed = 1;
5960 	vcpu_load(vcpu);
5961 	r = kvm_arch_vcpu_reset(vcpu);
5962 	if (r == 0)
5963 		r = kvm_mmu_setup(vcpu);
5964 	vcpu_put(vcpu);
5965 
5966 	return r;
5967 }
5968 
5969 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
5970 {
5971 	vcpu->arch.apf.msr_val = 0;
5972 
5973 	vcpu_load(vcpu);
5974 	kvm_mmu_unload(vcpu);
5975 	vcpu_put(vcpu);
5976 
5977 	fx_free(vcpu);
5978 	kvm_x86_ops->vcpu_free(vcpu);
5979 }
5980 
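/*
 * Architecture-level vcpu reset: clear pending/queued NMIs, the debug
 * registers, the async-PF and steal-time MSR state, kvmclock and the PMU,
 * then let the vendor backend reset its own vcpu state.
 */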
5981 int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
5982 {
5983 	atomic_set(&vcpu->arch.nmi_queued, 0);
5984 	vcpu->arch.nmi_pending = 0;
5985 	vcpu->arch.nmi_injected = false;
5986 
5987 	vcpu->arch.switch_db_regs = 0;
5988 	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
5989 	vcpu->arch.dr6 = DR6_FIXED_1;
5990 	vcpu->arch.dr7 = DR7_FIXED_1;
5991 
5992 	kvm_make_request(KVM_REQ_EVENT, vcpu);
5993 	vcpu->arch.apf.msr_val = 0;
5994 	vcpu->arch.st.msr_val = 0;
5995 
5996 	kvmclock_reset(vcpu);
5997 
5998 	kvm_clear_async_pf_completion_queue(vcpu);
5999 	kvm_async_pf_hash_reset(vcpu);
6000 	vcpu->arch.apf.halted = false;
6001 
6002 	kvm_pmu_reset(vcpu);
6003 
6004 	return kvm_x86_ops->vcpu_reset(vcpu);
6005 }
6006 
6007 int kvm_arch_hardware_enable(void *garbage)
6008 {
6009 	struct kvm *kvm;
6010 	struct kvm_vcpu *vcpu;
6011 	int i;
6012 	int ret;
6013 	u64 local_tsc;
6014 	u64 max_tsc = 0;
6015 	bool stable, backwards_tsc = false;
6016 
6017 	kvm_shared_msr_cpu_online();
6018 	ret = kvm_x86_ops->hardware_enable(garbage);
6019 	if (ret != 0)
6020 		return ret;
6021 
6022 	local_tsc = native_read_tsc();
6023 	stable = !check_tsc_unstable();
6024 	list_for_each_entry(kvm, &vm_list, vm_list) {
6025 		kvm_for_each_vcpu(i, vcpu, kvm) {
6026 			if (!stable && vcpu->cpu == smp_processor_id())
6027 				set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
6028 			if (stable && vcpu->arch.last_host_tsc > local_tsc) {
6029 				backwards_tsc = true;
6030 				if (vcpu->arch.last_host_tsc > max_tsc)
6031 					max_tsc = vcpu->arch.last_host_tsc;
6032 			}
6033 		}
6034 	}
6035 
6036 	/*
6037 	 * Sometimes, even reliable TSCs go backwards.  This happens on
6038 	 * platforms that reset TSC during suspend or hibernate actions, but
6039 	 * maintain synchronization.  We must compensate.  Fortunately, we can
6040 	 * detect that condition here, which happens early in CPU bringup,
6041 	 * before any KVM threads can be running.  Unfortunately, we can't
6042 	 * bring the TSCs fully up to date with real time, as we aren't yet far
6043 	 * enough into CPU bringup that we know how much real time has actually
6044 	 * elapsed; our helper function, get_kernel_ns() will be using boot
6045 	 * variables that haven't been updated yet.
6046 	 *
6047 	 * So we simply find the maximum observed TSC above, then record the
6048 	 * adjustment to TSC in each VCPU.  When the VCPU later gets loaded,
6049 	 * the adjustment will be applied.  Note that we accumulate
6050 	 * adjustments, in case multiple suspend cycles happen before some VCPU
6051 	 * gets a chance to run again.  In the event that no KVM threads get a
6052 	 * chance to run, we will miss the entire elapsed period, as we'll have
6053 	 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
6054 	 * lose cycle time.  This isn't too big a deal, since the loss will be
6055 	 * uniform across all VCPUs (not to mention the scenario is extremely
6056 	 * unlikely). It is possible that a second hibernate recovery happens
6057 	 * much faster than a first, causing the observed TSC here to be
6058 	 * smaller; this would require additional padding adjustment, which is
6059 	 * why we set last_host_tsc to the local tsc observed here.
6060 	 *
6061 	 * N.B. - this code below runs only on platforms with reliable TSC,
6062 	 * as that is the only way backwards_tsc is set above.  Also note
6063 	 * that this runs for ALL vcpus, which is not a bug; all VCPUs should
6064 	 * have the same delta_cyc adjustment applied if backwards_tsc
6065 	 * is detected.  Note further, this adjustment is only done once,
6066 	 * as we reset last_host_tsc on all VCPUs to stop this from being
6067 	 * called multiple times (one for each physical CPU bringup).
6068 	 *
6069 	 * Platforms with unreliable TSCs don't have to deal with this; they
6070 	 * will be compensated by the logic in vcpu_load, which sets the TSC to
6071 	 * catchup mode.  This will catchup all VCPUs to real time, but cannot
6072 	 * guarantee that they stay in perfect synchronization.
6073 	 */
6074 	if (backwards_tsc) {
6075 		u64 delta_cyc = max_tsc - local_tsc;
6076 		list_for_each_entry(kvm, &vm_list, vm_list) {
6077 			kvm_for_each_vcpu(i, vcpu, kvm) {
6078 				vcpu->arch.tsc_offset_adjustment += delta_cyc;
6079 				vcpu->arch.last_host_tsc = local_tsc;
6080 			}
6081 
6082 			/*
6083 			 * We have to disable TSC offset matching here: if a VM
6084 			 * was being booted while the host was entering S4
6085 			 * suspend, its TSC state may end up inconsistent.
6086 			 * Solving that properly is left as an exercise to the reader.
6087 			 */
6088 			kvm->arch.last_tsc_nsec = 0;
6089 			kvm->arch.last_tsc_write = 0;
6090 		}
6091 
6092 	}
6093 	return 0;
6094 }
6095 
6096 void kvm_arch_hardware_disable(void *garbage)
6097 {
6098 	kvm_x86_ops->hardware_disable(garbage);
6099 	drop_user_return_notifiers(garbage);
6100 }
6101 
6102 int kvm_arch_hardware_setup(void)
6103 {
6104 	return kvm_x86_ops->hardware_setup();
6105 }
6106 
6107 void kvm_arch_hardware_unsetup(void)
6108 {
6109 	kvm_x86_ops->hardware_unsetup();
6110 }
6111 
6112 void kvm_arch_check_processor_compat(void *rtn)
6113 {
6114 	kvm_x86_ops->check_processor_compatibility(rtn);
6115 }
6116 
6117 bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
6118 {
6119 	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
6120 }
6121 
6122 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
6123 {
6124 	struct page *page;
6125 	struct kvm *kvm;
6126 	int r;
6127 
6128 	BUG_ON(vcpu->kvm == NULL);
6129 	kvm = vcpu->kvm;
6130 
6131 	vcpu->arch.emulate_ctxt.ops = &emulate_ops;
6132 	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
6133 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
6134 	else
6135 		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
6136 
6137 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
6138 	if (!page) {
6139 		r = -ENOMEM;
6140 		goto fail;
6141 	}
6142 	vcpu->arch.pio_data = page_address(page);
6143 
6144 	kvm_set_tsc_khz(vcpu, max_tsc_khz);
6145 
6146 	r = kvm_mmu_create(vcpu);
6147 	if (r < 0)
6148 		goto fail_free_pio_data;
6149 
6150 	if (irqchip_in_kernel(kvm)) {
6151 		r = kvm_create_lapic(vcpu);
6152 		if (r < 0)
6153 			goto fail_mmu_destroy;
6154 	}
6155 
6156 	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
6157 				       GFP_KERNEL);
6158 	if (!vcpu->arch.mce_banks) {
6159 		r = -ENOMEM;
6160 		goto fail_free_lapic;
6161 	}
6162 	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
6163 
6164 	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
6165 		goto fail_free_mce_banks;
6166 
6167 	kvm_async_pf_hash_reset(vcpu);
6168 	kvm_pmu_init(vcpu);
6169 
6170 	return 0;
6171 fail_free_mce_banks:
6172 	kfree(vcpu->arch.mce_banks);
6173 fail_free_lapic:
6174 	kvm_free_lapic(vcpu);
6175 fail_mmu_destroy:
6176 	kvm_mmu_destroy(vcpu);
6177 fail_free_pio_data:
6178 	free_page((unsigned long)vcpu->arch.pio_data);
6179 fail:
6180 	return r;
6181 }
6182 
6183 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
6184 {
6185 	int idx;
6186 
6187 	kvm_pmu_destroy(vcpu);
6188 	kfree(vcpu->arch.mce_banks);
6189 	kvm_free_lapic(vcpu);
6190 	idx = srcu_read_lock(&vcpu->kvm->srcu);
6191 	kvm_mmu_destroy(vcpu);
6192 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
6193 	free_page((unsigned long)vcpu->arch.pio_data);
6194 }
6195 
6196 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
6197 {
6198 	if (type)
6199 		return -EINVAL;
6200 
6201 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
6202 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
6203 
6204 	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
6205 	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
6206 
6207 	raw_spin_lock_init(&kvm->arch.tsc_write_lock);
6208 
6209 	return 0;
6210 }
6211 
6212 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
6213 {
6214 	vcpu_load(vcpu);
6215 	kvm_mmu_unload(vcpu);
6216 	vcpu_put(vcpu);
6217 }
6218 
6219 static void kvm_free_vcpus(struct kvm *kvm)
6220 {
6221 	unsigned int i;
6222 	struct kvm_vcpu *vcpu;
6223 
6224 	/*
6225 	 * Unpin any mmu pages first.
6226 	 */
6227 	kvm_for_each_vcpu(i, vcpu, kvm) {
6228 		kvm_clear_async_pf_completion_queue(vcpu);
6229 		kvm_unload_vcpu_mmu(vcpu);
6230 	}
6231 	kvm_for_each_vcpu(i, vcpu, kvm)
6232 		kvm_arch_vcpu_free(vcpu);
6233 
6234 	mutex_lock(&kvm->lock);
6235 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
6236 		kvm->vcpus[i] = NULL;
6237 
6238 	atomic_set(&kvm->online_vcpus, 0);
6239 	mutex_unlock(&kvm->lock);
6240 }
6241 
6242 void kvm_arch_sync_events(struct kvm *kvm)
6243 {
6244 	kvm_free_all_assigned_devices(kvm);
6245 	kvm_free_pit(kvm);
6246 }
6247 
6248 void kvm_arch_destroy_vm(struct kvm *kvm)
6249 {
6250 	kvm_iommu_unmap_guest(kvm);
6251 	kfree(kvm->arch.vpic);
6252 	kfree(kvm->arch.vioapic);
6253 	kvm_free_vcpus(kvm);
6254 	if (kvm->arch.apic_access_page)
6255 		put_page(kvm->arch.apic_access_page);
6256 	if (kvm->arch.ept_identity_pagetable)
6257 		put_page(kvm->arch.ept_identity_pagetable);
6258 }
6259 
6260 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
6261 			   struct kvm_memory_slot *dont)
6262 {
6263 	int i;
6264 
6265 	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
6266 		if (!dont || free->arch.lpage_info[i] != dont->arch.lpage_info[i]) {
6267 			vfree(free->arch.lpage_info[i]);
6268 			free->arch.lpage_info[i] = NULL;
6269 		}
6270 	}
6271 }
6272 
6273 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
6274 {
6275 	int i;
6276 
6277 	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
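	/*
	 * For each large-page level, allocate the lpage_info array for the
	 * slot and pre-disable huge mappings (write_count != 0) wherever
	 * they cannot be used: the unaligned head and tail of the slot, and
	 * the whole slot when the gfn and userspace address are misaligned
	 * with respect to each other or large pages are disabled.
	 */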
6278 		unsigned long ugfn;
6279 		int lpages;
6280 		int level = i + 2;
6281 
6282 		lpages = gfn_to_index(slot->base_gfn + npages - 1,
6283 				      slot->base_gfn, level) + 1;
6284 
6285 		slot->arch.lpage_info[i] =
6286 			vzalloc(lpages * sizeof(*slot->arch.lpage_info[i]));
6287 		if (!slot->arch.lpage_info[i])
6288 			goto out_free;
6289 
6290 		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
6291 			slot->arch.lpage_info[i][0].write_count = 1;
6292 		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
6293 			slot->arch.lpage_info[i][lpages - 1].write_count = 1;
6294 		ugfn = slot->userspace_addr >> PAGE_SHIFT;
6295 		/*
6296 		 * If the gfn and userspace address are not aligned wrt each
6297 		 * other, or if explicitly asked to, disable large page
6298 		 * support for this slot
6299 		 */
6300 		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
6301 		    !kvm_largepages_enabled()) {
6302 			unsigned long j;
6303 
6304 			for (j = 0; j < lpages; ++j)
6305 				slot->arch.lpage_info[i][j].write_count = 1;
6306 		}
6307 	}
6308 
6309 	return 0;
6310 
6311 out_free:
6312 	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
6313 		vfree(slot->arch.lpage_info[i]);
6314 		slot->arch.lpage_info[i] = NULL;
6315 	}
6316 	return -ENOMEM;
6317 }
6318 
6319 int kvm_arch_prepare_memory_region(struct kvm *kvm,
6320 				struct kvm_memory_slot *memslot,
6321 				struct kvm_memory_slot old,
6322 				struct kvm_userspace_memory_region *mem,
6323 				int user_alloc)
6324 {
6325 	int npages = memslot->npages;
6326 	int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
6327 
6328 	/* Prevent internal slot pages from being moved by fork()/COW. */
6329 	if (memslot->id >= KVM_MEMORY_SLOTS)
6330 		map_flags = MAP_SHARED | MAP_ANONYMOUS;
6331 
6332 	/* To keep backward compatibility with older userspace,
6333 	 * x86 needs to handle the !user_alloc case.
6334 	 */
6335 	if (!user_alloc) {
6336 		if (npages && !old.rmap) {
6337 			unsigned long userspace_addr;
6338 
6339 			userspace_addr = vm_mmap(NULL, 0,
6340 						 npages * PAGE_SIZE,
6341 						 PROT_READ | PROT_WRITE,
6342 						 map_flags,
6343 						 0);
6344 
6345 			if (IS_ERR((void *)userspace_addr))
6346 				return PTR_ERR((void *)userspace_addr);
6347 
6348 			memslot->userspace_addr = userspace_addr;
6349 		}
6350 	}
6351 
6352 
6353 	return 0;
6354 }
6355 
6356 void kvm_arch_commit_memory_region(struct kvm *kvm,
6357 				struct kvm_userspace_memory_region *mem,
6358 				struct kvm_memory_slot old,
6359 				int user_alloc)
6360 {
6361 
6362 	int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
6363 
6364 	if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
6365 		int ret;
6366 
6367 		ret = vm_munmap(old.userspace_addr,
6368 				old.npages * PAGE_SIZE);
6369 		if (ret < 0)
6370 			printk(KERN_WARNING
6371 			       "kvm_vm_ioctl_set_memory_region: "
6372 			       "failed to munmap memory\n");
6373 	}
6374 
6375 	if (!kvm->arch.n_requested_mmu_pages)
6376 		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
6377 
6378 	spin_lock(&kvm->mmu_lock);
6379 	if (nr_mmu_pages)
6380 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
6381 	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
6382 	spin_unlock(&kvm->mmu_lock);
6383 }
6384 
6385 void kvm_arch_flush_shadow(struct kvm *kvm)
6386 {
6387 	kvm_mmu_zap_all(kvm);
6388 	kvm_reload_remote_mmus(kvm);
6389 }
6390 
6391 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
6392 {
6393 	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
6394 		!vcpu->arch.apf.halted)
6395 		|| !list_empty_careful(&vcpu->async_pf.done)
6396 		|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
6397 		|| atomic_read(&vcpu->arch.nmi_queued) ||
6398 		(kvm_arch_interrupt_allowed(vcpu) &&
6399 		 kvm_cpu_has_interrupt(vcpu));
6400 }
6401 
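/*
 * Wake the vcpu if it is blocked on its waitqueue, and if it is currently
 * running guest code on another physical CPU, send a reschedule IPI to
 * force a VM exit so that pending requests are noticed.
 */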
6402 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
6403 {
6404 	int me;
6405 	int cpu = vcpu->cpu;
6406 
6407 	if (waitqueue_active(&vcpu->wq)) {
6408 		wake_up_interruptible(&vcpu->wq);
6409 		++vcpu->stat.halt_wakeup;
6410 	}
6411 
6412 	me = get_cpu();
6413 	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
6414 		if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
6415 			smp_send_reschedule(cpu);
6416 	put_cpu();
6417 }
6418 
6419 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
6420 {
6421 	return kvm_x86_ops->interrupt_allowed(vcpu);
6422 }
6423 
6424 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
6425 {
6426 	unsigned long current_rip = kvm_rip_read(vcpu) +
6427 		get_segment_base(vcpu, VCPU_SREG_CS);
6428 
6429 	return current_rip == linear_rip;
6430 }
6431 EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
6432 
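/*
 * While userspace single-steps the guest, KVM injects X86_EFLAGS_TF behind
 * the guest's back: kvm_get_rflags() hides it from readers, and
 * kvm_set_rflags() re-injects it as long as the single-step window
 * (singlestep_rip) is still active.
 */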
6433 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
6434 {
6435 	unsigned long rflags;
6436 
6437 	rflags = kvm_x86_ops->get_rflags(vcpu);
6438 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
6439 		rflags &= ~X86_EFLAGS_TF;
6440 	return rflags;
6441 }
6442 EXPORT_SYMBOL_GPL(kvm_get_rflags);
6443 
6444 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
6445 {
6446 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
6447 	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
6448 		rflags |= X86_EFLAGS_TF;
6449 	kvm_x86_ops->set_rflags(vcpu, rflags);
6450 	kvm_make_request(KVM_REQ_EVENT, vcpu);
6451 }
6452 EXPORT_SYMBOL_GPL(kvm_set_rflags);
6453 
6454 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
6455 {
6456 	int r;
6457 
6458 	if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
6459 	      is_error_page(work->page))
6460 		return;
6461 
6462 	r = kvm_mmu_reload(vcpu);
6463 	if (unlikely(r))
6464 		return;
6465 
6466 	if (!vcpu->arch.mmu.direct_map &&
6467 	      work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
6468 		return;
6469 
6470 	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
6471 }
6472 
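/*
 * vcpu->arch.apf.gfns[] is a small open-addressed hash table with linear
 * probing (power-of-two size, ~0 marks an empty slot) that tracks guest
 * frames with an outstanding async page fault.  Deletion shifts displaced
 * entries back into place rather than leaving tombstones, so probing never
 * has to skip over deleted slots.
 */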
6473 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
6474 {
6475 	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
6476 }
6477 
6478 static inline u32 kvm_async_pf_next_probe(u32 key)
6479 {
6480 	return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
6481 }
6482 
6483 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
6484 {
6485 	u32 key = kvm_async_pf_hash_fn(gfn);
6486 
6487 	while (vcpu->arch.apf.gfns[key] != ~0)
6488 		key = kvm_async_pf_next_probe(key);
6489 
6490 	vcpu->arch.apf.gfns[key] = gfn;
6491 }
6492 
6493 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
6494 {
6495 	int i;
6496 	u32 key = kvm_async_pf_hash_fn(gfn);
6497 
6498 	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
6499 		     (vcpu->arch.apf.gfns[key] != gfn &&
6500 		      vcpu->arch.apf.gfns[key] != ~0); i++)
6501 		key = kvm_async_pf_next_probe(key);
6502 
6503 	return key;
6504 }
6505 
6506 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
6507 {
6508 	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
6509 }
6510 
6511 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
6512 {
6513 	u32 i, j, k;
6514 
6515 	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
6516 	while (true) {
6517 		vcpu->arch.apf.gfns[i] = ~0;
6518 		do {
6519 			j = kvm_async_pf_next_probe(j);
6520 			if (vcpu->arch.apf.gfns[j] == ~0)
6521 				return;
6522 			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
6523 			/*
6524 			 * k lies cyclically in ]i,j]
6525 			 * |    i.k.j |
6526 			 * |....j i.k.| or  |.k..j i...|
6527 			 */
6528 		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
6529 		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
6530 		i = j;
6531 	}
6532 }
6533 
6534 static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
6535 {
6536 
6537 	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
6538 				      sizeof(val));
6539 }
6540 
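/*
 * A page the guest needs is not yet resident.  If the guest cannot take an
 * async-PF notification right now (the feature is disabled, or delivery is
 * restricted to user mode while the vcpu is at CPL0), halt the vcpu until
 * the page arrives; otherwise report "page not present" and inject a #PF
 * carrying the token in CR2 so the guest can schedule other work.
 */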
6541 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
6542 				     struct kvm_async_pf *work)
6543 {
6544 	struct x86_exception fault;
6545 
6546 	trace_kvm_async_pf_not_present(work->arch.token, work->gva);
6547 	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
6548 
6549 	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
6550 	    (vcpu->arch.apf.send_user_only &&
6551 	     kvm_x86_ops->get_cpl(vcpu) == 0))
6552 		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
6553 	else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
6554 		fault.vector = PF_VECTOR;
6555 		fault.error_code_valid = true;
6556 		fault.error_code = 0;
6557 		fault.nested_page_fault = false;
6558 		fault.address = work->arch.token;
6559 		kvm_inject_page_fault(vcpu, &fault);
6560 	}
6561 }
6562 
6563 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
6564 				 struct kvm_async_pf *work)
6565 {
6566 	struct x86_exception fault;
6567 
6568 	trace_kvm_async_pf_ready(work->arch.token, work->gva);
6569 	if (is_error_page(work->page))
6570 		work->arch.token = ~0; /* broadcast wakeup */
6571 	else
6572 		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
6573 
6574 	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
6575 	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
6576 		fault.vector = PF_VECTOR;
6577 		fault.error_code_valid = true;
6578 		fault.error_code = 0;
6579 		fault.nested_page_fault = false;
6580 		fault.address = work->arch.token;
6581 		kvm_inject_page_fault(vcpu, &fault);
6582 	}
6583 	vcpu->arch.apf.halted = false;
6584 	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
6585 }
6586 
6587 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
6588 {
6589 	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
6590 		return true;
6591 	else
6592 		return !kvm_event_needs_reinjection(vcpu) &&
6593 			kvm_x86_ops->interrupt_allowed(vcpu);
6594 }
6595 
6596 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
6597 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
6598 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
6599 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
6600 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
6601 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
6602 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
6603 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
6604 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
6605 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
6606 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
6607 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
6608