xref: /openbmc/linux/arch/x86/kvm/x86.c (revision ae3473231e77a3f1909d48cd144cebe5e1d049b3)
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * derived from drivers/kvm/kvm_main.c
5  *
6  * Copyright (C) 2006 Qumranet, Inc.
7  * Copyright (C) 2008 Qumranet, Inc.
8  * Copyright IBM Corporation, 2008
9  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10  *
11  * Authors:
12  *   Avi Kivity   <avi@qumranet.com>
13  *   Yaniv Kamay  <yaniv@qumranet.com>
14  *   Amit Shah    <amit.shah@qumranet.com>
15  *   Ben-Ami Yassour <benami@il.ibm.com>
16  *
17  * This work is licensed under the terms of the GNU GPL, version 2.  See
18  * the COPYING file in the top-level directory.
19  *
20  */
21 
22 #include <linux/kvm_host.h>
23 #include "irq.h"
24 #include "mmu.h"
25 #include "i8254.h"
26 #include "tss.h"
27 #include "kvm_cache_regs.h"
28 #include "x86.h"
29 #include "cpuid.h"
30 #include "assigned-dev.h"
31 #include "pmu.h"
32 #include "hyperv.h"
33 
34 #include <linux/clocksource.h>
35 #include <linux/interrupt.h>
36 #include <linux/kvm.h>
37 #include <linux/fs.h>
38 #include <linux/vmalloc.h>
39 #include <linux/export.h>
40 #include <linux/moduleparam.h>
41 #include <linux/mman.h>
42 #include <linux/highmem.h>
43 #include <linux/iommu.h>
44 #include <linux/intel-iommu.h>
45 #include <linux/cpufreq.h>
46 #include <linux/user-return-notifier.h>
47 #include <linux/srcu.h>
48 #include <linux/slab.h>
49 #include <linux/perf_event.h>
50 #include <linux/uaccess.h>
51 #include <linux/hash.h>
52 #include <linux/pci.h>
53 #include <linux/timekeeper_internal.h>
54 #include <linux/pvclock_gtod.h>
55 #include <linux/kvm_irqfd.h>
56 #include <linux/irqbypass.h>
57 #include <trace/events/kvm.h>
58 
59 #include <asm/debugreg.h>
60 #include <asm/msr.h>
61 #include <asm/desc.h>
62 #include <asm/mce.h>
63 #include <linux/kernel_stat.h>
64 #include <asm/fpu/internal.h> /* Ugh! */
65 #include <asm/pvclock.h>
66 #include <asm/div64.h>
67 #include <asm/irq_remapping.h>
68 
69 #define CREATE_TRACE_POINTS
70 #include "trace.h"
71 
72 #define MAX_IO_MSRS 256
73 #define KVM_MAX_MCE_BANKS 32
74 u64 __read_mostly kvm_mce_cap_supported = MCG_CTL_P | MCG_SER_P;
75 EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);
76 
77 #define emul_to_vcpu(ctxt) \
78 	container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
79 
80 /* EFER defaults:
81  * - enable syscall per default because its emulated by KVM
82  * - enable LME and LMA per default on 64 bit KVM
83  */
84 #ifdef CONFIG_X86_64
85 static
86 u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
87 #else
88 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
89 #endif
90 
91 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
92 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
93 
94 #define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
95                                     KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
96 
97 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
98 static void process_nmi(struct kvm_vcpu *vcpu);
99 static void enter_smm(struct kvm_vcpu *vcpu);
100 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
101 
102 struct kvm_x86_ops *kvm_x86_ops __read_mostly;
103 EXPORT_SYMBOL_GPL(kvm_x86_ops);
104 
105 static bool __read_mostly ignore_msrs = 0;
106 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
107 
108 unsigned int min_timer_period_us = 500;
109 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
110 
111 static bool __read_mostly kvmclock_periodic_sync = true;
112 module_param(kvmclock_periodic_sync, bool, S_IRUGO);
113 
114 bool __read_mostly kvm_has_tsc_control;
115 EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
116 u32  __read_mostly kvm_max_guest_tsc_khz;
117 EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
118 u8   __read_mostly kvm_tsc_scaling_ratio_frac_bits;
119 EXPORT_SYMBOL_GPL(kvm_tsc_scaling_ratio_frac_bits);
120 u64  __read_mostly kvm_max_tsc_scaling_ratio;
121 EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio);
122 u64 __read_mostly kvm_default_tsc_scaling_ratio;
123 EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio);
124 
125 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
126 static u32 __read_mostly tsc_tolerance_ppm = 250;
127 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
128 
129 /* lapic timer advance (tscdeadline mode only) in nanoseconds */
130 unsigned int __read_mostly lapic_timer_advance_ns = 0;
131 module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
132 
133 static bool __read_mostly vector_hashing = true;
134 module_param(vector_hashing, bool, S_IRUGO);
135 
136 static bool __read_mostly backwards_tsc_observed = false;
137 
138 #define KVM_NR_SHARED_MSRS 16
139 
140 struct kvm_shared_msrs_global {
141 	int nr;
142 	u32 msrs[KVM_NR_SHARED_MSRS];
143 };
144 
145 struct kvm_shared_msrs {
146 	struct user_return_notifier urn;
147 	bool registered;
148 	struct kvm_shared_msr_values {
149 		u64 host;
150 		u64 curr;
151 	} values[KVM_NR_SHARED_MSRS];
152 };
153 
154 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
155 static struct kvm_shared_msrs __percpu *shared_msrs;
156 
157 struct kvm_stats_debugfs_item debugfs_entries[] = {
158 	{ "pf_fixed", VCPU_STAT(pf_fixed) },
159 	{ "pf_guest", VCPU_STAT(pf_guest) },
160 	{ "tlb_flush", VCPU_STAT(tlb_flush) },
161 	{ "invlpg", VCPU_STAT(invlpg) },
162 	{ "exits", VCPU_STAT(exits) },
163 	{ "io_exits", VCPU_STAT(io_exits) },
164 	{ "mmio_exits", VCPU_STAT(mmio_exits) },
165 	{ "signal_exits", VCPU_STAT(signal_exits) },
166 	{ "irq_window", VCPU_STAT(irq_window_exits) },
167 	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
168 	{ "halt_exits", VCPU_STAT(halt_exits) },
169 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
170 	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
171 	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
172 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
173 	{ "hypercalls", VCPU_STAT(hypercalls) },
174 	{ "request_irq", VCPU_STAT(request_irq_exits) },
175 	{ "irq_exits", VCPU_STAT(irq_exits) },
176 	{ "host_state_reload", VCPU_STAT(host_state_reload) },
177 	{ "efer_reload", VCPU_STAT(efer_reload) },
178 	{ "fpu_reload", VCPU_STAT(fpu_reload) },
179 	{ "insn_emulation", VCPU_STAT(insn_emulation) },
180 	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
181 	{ "irq_injections", VCPU_STAT(irq_injections) },
182 	{ "nmi_injections", VCPU_STAT(nmi_injections) },
183 	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
184 	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
185 	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
186 	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
187 	{ "mmu_flooded", VM_STAT(mmu_flooded) },
188 	{ "mmu_recycled", VM_STAT(mmu_recycled) },
189 	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
190 	{ "mmu_unsync", VM_STAT(mmu_unsync) },
191 	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
192 	{ "largepages", VM_STAT(lpages) },
193 	{ NULL }
194 };
195 
196 u64 __read_mostly host_xcr0;
197 
198 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
199 
200 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
201 {
202 	int i;
203 	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
204 		vcpu->arch.apf.gfns[i] = ~0;
205 }
206 
207 static void kvm_on_user_return(struct user_return_notifier *urn)
208 {
209 	unsigned slot;
210 	struct kvm_shared_msrs *locals
211 		= container_of(urn, struct kvm_shared_msrs, urn);
212 	struct kvm_shared_msr_values *values;
213 	unsigned long flags;
214 
215 	/*
216 	 * Disabling irqs at this point since the following code could be
217 	 * interrupted and executed through kvm_arch_hardware_disable()
218 	 */
219 	local_irq_save(flags);
220 	if (locals->registered) {
221 		locals->registered = false;
222 		user_return_notifier_unregister(urn);
223 	}
224 	local_irq_restore(flags);
225 	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
226 		values = &locals->values[slot];
227 		if (values->host != values->curr) {
228 			wrmsrl(shared_msrs_global.msrs[slot], values->host);
229 			values->curr = values->host;
230 		}
231 	}
232 }
233 
234 static void shared_msr_update(unsigned slot, u32 msr)
235 {
236 	u64 value;
237 	unsigned int cpu = smp_processor_id();
238 	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
239 
240 	/* only read, and nobody should modify it at this time,
241 	 * so don't need lock */
242 	if (slot >= shared_msrs_global.nr) {
243 		printk(KERN_ERR "kvm: invalid MSR slot!");
244 		return;
245 	}
246 	rdmsrl_safe(msr, &value);
247 	smsr->values[slot].host = value;
248 	smsr->values[slot].curr = value;
249 }
250 
251 void kvm_define_shared_msr(unsigned slot, u32 msr)
252 {
253 	BUG_ON(slot >= KVM_NR_SHARED_MSRS);
254 	shared_msrs_global.msrs[slot] = msr;
255 	if (slot >= shared_msrs_global.nr)
256 		shared_msrs_global.nr = slot + 1;
257 }
258 EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
259 
260 static void kvm_shared_msr_cpu_online(void)
261 {
262 	unsigned i;
263 
264 	for (i = 0; i < shared_msrs_global.nr; ++i)
265 		shared_msr_update(i, shared_msrs_global.msrs[i]);
266 }
267 
268 int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
269 {
270 	unsigned int cpu = smp_processor_id();
271 	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
272 	int err;
273 
274 	if (((value ^ smsr->values[slot].curr) & mask) == 0)
275 		return 0;
276 	smsr->values[slot].curr = value;
277 	err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
278 	if (err)
279 		return 1;
280 
281 	if (!smsr->registered) {
282 		smsr->urn.on_user_return = kvm_on_user_return;
283 		user_return_notifier_register(&smsr->urn);
284 		smsr->registered = true;
285 	}
286 	return 0;
287 }
288 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
289 
290 static void drop_user_return_notifiers(void)
291 {
292 	unsigned int cpu = smp_processor_id();
293 	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
294 
295 	if (smsr->registered)
296 		kvm_on_user_return(&smsr->urn);
297 }
298 
299 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
300 {
301 	return vcpu->arch.apic_base;
302 }
303 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
304 
305 int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
306 {
307 	u64 old_state = vcpu->arch.apic_base &
308 		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
309 	u64 new_state = msr_info->data &
310 		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
311 	u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) |
312 		0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE);
313 
314 	if (!msr_info->host_initiated &&
315 	    ((msr_info->data & reserved_bits) != 0 ||
316 	     new_state == X2APIC_ENABLE ||
317 	     (new_state == MSR_IA32_APICBASE_ENABLE &&
318 	      old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) ||
319 	     (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
320 	      old_state == 0)))
321 		return 1;
322 
323 	kvm_lapic_set_base(vcpu, msr_info->data);
324 	return 0;
325 }
326 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
327 
328 asmlinkage __visible void kvm_spurious_fault(void)
329 {
330 	/* Fault while not rebooting.  We want the trace. */
331 	BUG();
332 }
333 EXPORT_SYMBOL_GPL(kvm_spurious_fault);
334 
335 #define EXCPT_BENIGN		0
336 #define EXCPT_CONTRIBUTORY	1
337 #define EXCPT_PF		2
338 
339 static int exception_class(int vector)
340 {
341 	switch (vector) {
342 	case PF_VECTOR:
343 		return EXCPT_PF;
344 	case DE_VECTOR:
345 	case TS_VECTOR:
346 	case NP_VECTOR:
347 	case SS_VECTOR:
348 	case GP_VECTOR:
349 		return EXCPT_CONTRIBUTORY;
350 	default:
351 		break;
352 	}
353 	return EXCPT_BENIGN;
354 }
355 
356 #define EXCPT_FAULT		0
357 #define EXCPT_TRAP		1
358 #define EXCPT_ABORT		2
359 #define EXCPT_INTERRUPT		3
360 
361 static int exception_type(int vector)
362 {
363 	unsigned int mask;
364 
365 	if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
366 		return EXCPT_INTERRUPT;
367 
368 	mask = 1 << vector;
369 
370 	/* #DB is trap, as instruction watchpoints are handled elsewhere */
371 	if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
372 		return EXCPT_TRAP;
373 
374 	if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
375 		return EXCPT_ABORT;
376 
377 	/* Reserved exceptions will result in fault */
378 	return EXCPT_FAULT;
379 }
380 
381 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
382 		unsigned nr, bool has_error, u32 error_code,
383 		bool reinject)
384 {
385 	u32 prev_nr;
386 	int class1, class2;
387 
388 	kvm_make_request(KVM_REQ_EVENT, vcpu);
389 
390 	if (!vcpu->arch.exception.pending) {
391 	queue:
392 		if (has_error && !is_protmode(vcpu))
393 			has_error = false;
394 		vcpu->arch.exception.pending = true;
395 		vcpu->arch.exception.has_error_code = has_error;
396 		vcpu->arch.exception.nr = nr;
397 		vcpu->arch.exception.error_code = error_code;
398 		vcpu->arch.exception.reinject = reinject;
399 		return;
400 	}
401 
402 	/* to check exception */
403 	prev_nr = vcpu->arch.exception.nr;
404 	if (prev_nr == DF_VECTOR) {
405 		/* triple fault -> shutdown */
406 		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
407 		return;
408 	}
409 	class1 = exception_class(prev_nr);
410 	class2 = exception_class(nr);
411 	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
412 		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
413 		/* generate double fault per SDM Table 5-5 */
414 		vcpu->arch.exception.pending = true;
415 		vcpu->arch.exception.has_error_code = true;
416 		vcpu->arch.exception.nr = DF_VECTOR;
417 		vcpu->arch.exception.error_code = 0;
418 	} else
419 		/* replace previous exception with a new one in a hope
420 		   that instruction re-execution will regenerate lost
421 		   exception */
422 		goto queue;
423 }
424 
425 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
426 {
427 	kvm_multiple_exception(vcpu, nr, false, 0, false);
428 }
429 EXPORT_SYMBOL_GPL(kvm_queue_exception);
430 
431 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
432 {
433 	kvm_multiple_exception(vcpu, nr, false, 0, true);
434 }
435 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
436 
437 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
438 {
439 	if (err)
440 		kvm_inject_gp(vcpu, 0);
441 	else
442 		return kvm_skip_emulated_instruction(vcpu);
443 
444 	return 1;
445 }
446 EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
447 
448 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
449 {
450 	++vcpu->stat.pf_guest;
451 	vcpu->arch.cr2 = fault->address;
452 	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
453 }
454 EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
455 
456 static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
457 {
458 	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
459 		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
460 	else
461 		vcpu->arch.mmu.inject_page_fault(vcpu, fault);
462 
463 	return fault->nested_page_fault;
464 }
465 
466 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
467 {
468 	atomic_inc(&vcpu->arch.nmi_queued);
469 	kvm_make_request(KVM_REQ_NMI, vcpu);
470 }
471 EXPORT_SYMBOL_GPL(kvm_inject_nmi);
472 
473 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
474 {
475 	kvm_multiple_exception(vcpu, nr, true, error_code, false);
476 }
477 EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
478 
479 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
480 {
481 	kvm_multiple_exception(vcpu, nr, true, error_code, true);
482 }
483 EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
484 
485 /*
486  * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
487  * a #GP and return false.
488  */
489 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
490 {
491 	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
492 		return true;
493 	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
494 	return false;
495 }
496 EXPORT_SYMBOL_GPL(kvm_require_cpl);
497 
498 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
499 {
500 	if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
501 		return true;
502 
503 	kvm_queue_exception(vcpu, UD_VECTOR);
504 	return false;
505 }
506 EXPORT_SYMBOL_GPL(kvm_require_dr);
507 
508 /*
509  * This function will be used to read from the physical memory of the currently
510  * running guest. The difference to kvm_vcpu_read_guest_page is that this function
511  * can read from guest physical or from the guest's guest physical memory.
512  */
513 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
514 			    gfn_t ngfn, void *data, int offset, int len,
515 			    u32 access)
516 {
517 	struct x86_exception exception;
518 	gfn_t real_gfn;
519 	gpa_t ngpa;
520 
521 	ngpa     = gfn_to_gpa(ngfn);
522 	real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
523 	if (real_gfn == UNMAPPED_GVA)
524 		return -EFAULT;
525 
526 	real_gfn = gpa_to_gfn(real_gfn);
527 
528 	return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len);
529 }
530 EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
531 
532 static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
533 			       void *data, int offset, int len, u32 access)
534 {
535 	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
536 				       data, offset, len, access);
537 }
538 
539 /*
540  * Load the pae pdptrs.  Return true is they are all valid.
541  */
542 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
543 {
544 	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
545 	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
546 	int i;
547 	int ret;
548 	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
549 
550 	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
551 				      offset * sizeof(u64), sizeof(pdpte),
552 				      PFERR_USER_MASK|PFERR_WRITE_MASK);
553 	if (ret < 0) {
554 		ret = 0;
555 		goto out;
556 	}
557 	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
558 		if ((pdpte[i] & PT_PRESENT_MASK) &&
559 		    (pdpte[i] &
560 		     vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
561 			ret = 0;
562 			goto out;
563 		}
564 	}
565 	ret = 1;
566 
567 	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
568 	__set_bit(VCPU_EXREG_PDPTR,
569 		  (unsigned long *)&vcpu->arch.regs_avail);
570 	__set_bit(VCPU_EXREG_PDPTR,
571 		  (unsigned long *)&vcpu->arch.regs_dirty);
572 out:
573 
574 	return ret;
575 }
576 EXPORT_SYMBOL_GPL(load_pdptrs);
577 
578 bool pdptrs_changed(struct kvm_vcpu *vcpu)
579 {
580 	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
581 	bool changed = true;
582 	int offset;
583 	gfn_t gfn;
584 	int r;
585 
586 	if (is_long_mode(vcpu) || !is_pae(vcpu))
587 		return false;
588 
589 	if (!test_bit(VCPU_EXREG_PDPTR,
590 		      (unsigned long *)&vcpu->arch.regs_avail))
591 		return true;
592 
593 	gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
594 	offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
595 	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
596 				       PFERR_USER_MASK | PFERR_WRITE_MASK);
597 	if (r < 0)
598 		goto out;
599 	changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
600 out:
601 
602 	return changed;
603 }
604 EXPORT_SYMBOL_GPL(pdptrs_changed);
605 
606 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
607 {
608 	unsigned long old_cr0 = kvm_read_cr0(vcpu);
609 	unsigned long update_bits = X86_CR0_PG | X86_CR0_WP;
610 
611 	cr0 |= X86_CR0_ET;
612 
613 #ifdef CONFIG_X86_64
614 	if (cr0 & 0xffffffff00000000UL)
615 		return 1;
616 #endif
617 
618 	cr0 &= ~CR0_RESERVED_BITS;
619 
620 	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
621 		return 1;
622 
623 	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
624 		return 1;
625 
626 	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
627 #ifdef CONFIG_X86_64
628 		if ((vcpu->arch.efer & EFER_LME)) {
629 			int cs_db, cs_l;
630 
631 			if (!is_pae(vcpu))
632 				return 1;
633 			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
634 			if (cs_l)
635 				return 1;
636 		} else
637 #endif
638 		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
639 						 kvm_read_cr3(vcpu)))
640 			return 1;
641 	}
642 
643 	if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
644 		return 1;
645 
646 	kvm_x86_ops->set_cr0(vcpu, cr0);
647 
648 	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
649 		kvm_clear_async_pf_completion_queue(vcpu);
650 		kvm_async_pf_hash_reset(vcpu);
651 	}
652 
653 	if ((cr0 ^ old_cr0) & update_bits)
654 		kvm_mmu_reset_context(vcpu);
655 
656 	if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
657 	    kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
658 	    !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
659 		kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
660 
661 	return 0;
662 }
663 EXPORT_SYMBOL_GPL(kvm_set_cr0);
664 
665 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
666 {
667 	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
668 }
669 EXPORT_SYMBOL_GPL(kvm_lmsw);
670 
671 static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
672 {
673 	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
674 			!vcpu->guest_xcr0_loaded) {
675 		/* kvm_set_xcr() also depends on this */
676 		xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
677 		vcpu->guest_xcr0_loaded = 1;
678 	}
679 }
680 
681 static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
682 {
683 	if (vcpu->guest_xcr0_loaded) {
684 		if (vcpu->arch.xcr0 != host_xcr0)
685 			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
686 		vcpu->guest_xcr0_loaded = 0;
687 	}
688 }
689 
690 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
691 {
692 	u64 xcr0 = xcr;
693 	u64 old_xcr0 = vcpu->arch.xcr0;
694 	u64 valid_bits;
695 
696 	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
697 	if (index != XCR_XFEATURE_ENABLED_MASK)
698 		return 1;
699 	if (!(xcr0 & XFEATURE_MASK_FP))
700 		return 1;
701 	if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
702 		return 1;
703 
704 	/*
705 	 * Do not allow the guest to set bits that we do not support
706 	 * saving.  However, xcr0 bit 0 is always set, even if the
707 	 * emulated CPU does not support XSAVE (see fx_init).
708 	 */
709 	valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
710 	if (xcr0 & ~valid_bits)
711 		return 1;
712 
713 	if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
714 	    (!(xcr0 & XFEATURE_MASK_BNDCSR)))
715 		return 1;
716 
717 	if (xcr0 & XFEATURE_MASK_AVX512) {
718 		if (!(xcr0 & XFEATURE_MASK_YMM))
719 			return 1;
720 		if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
721 			return 1;
722 	}
723 	vcpu->arch.xcr0 = xcr0;
724 
725 	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
726 		kvm_update_cpuid(vcpu);
727 	return 0;
728 }
729 
730 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
731 {
732 	if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
733 	    __kvm_set_xcr(vcpu, index, xcr)) {
734 		kvm_inject_gp(vcpu, 0);
735 		return 1;
736 	}
737 	return 0;
738 }
739 EXPORT_SYMBOL_GPL(kvm_set_xcr);
740 
741 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
742 {
743 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
744 	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
745 				   X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
746 
747 	if (cr4 & CR4_RESERVED_BITS)
748 		return 1;
749 
750 	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
751 		return 1;
752 
753 	if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
754 		return 1;
755 
756 	if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP))
757 		return 1;
758 
759 	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
760 		return 1;
761 
762 	if (!guest_cpuid_has_pku(vcpu) && (cr4 & X86_CR4_PKE))
763 		return 1;
764 
765 	if (is_long_mode(vcpu)) {
766 		if (!(cr4 & X86_CR4_PAE))
767 			return 1;
768 	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
769 		   && ((cr4 ^ old_cr4) & pdptr_bits)
770 		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
771 				   kvm_read_cr3(vcpu)))
772 		return 1;
773 
774 	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
775 		if (!guest_cpuid_has_pcid(vcpu))
776 			return 1;
777 
778 		/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
779 		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
780 			return 1;
781 	}
782 
783 	if (kvm_x86_ops->set_cr4(vcpu, cr4))
784 		return 1;
785 
786 	if (((cr4 ^ old_cr4) & pdptr_bits) ||
787 	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
788 		kvm_mmu_reset_context(vcpu);
789 
790 	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
791 		kvm_update_cpuid(vcpu);
792 
793 	return 0;
794 }
795 EXPORT_SYMBOL_GPL(kvm_set_cr4);
796 
797 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
798 {
799 #ifdef CONFIG_X86_64
800 	cr3 &= ~CR3_PCID_INVD;
801 #endif
802 
803 	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
804 		kvm_mmu_sync_roots(vcpu);
805 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
806 		return 0;
807 	}
808 
809 	if (is_long_mode(vcpu)) {
810 		if (cr3 & CR3_L_MODE_RESERVED_BITS)
811 			return 1;
812 	} else if (is_pae(vcpu) && is_paging(vcpu) &&
813 		   !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
814 		return 1;
815 
816 	vcpu->arch.cr3 = cr3;
817 	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
818 	kvm_mmu_new_cr3(vcpu);
819 	return 0;
820 }
821 EXPORT_SYMBOL_GPL(kvm_set_cr3);
822 
823 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
824 {
825 	if (cr8 & CR8_RESERVED_BITS)
826 		return 1;
827 	if (lapic_in_kernel(vcpu))
828 		kvm_lapic_set_tpr(vcpu, cr8);
829 	else
830 		vcpu->arch.cr8 = cr8;
831 	return 0;
832 }
833 EXPORT_SYMBOL_GPL(kvm_set_cr8);
834 
835 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
836 {
837 	if (lapic_in_kernel(vcpu))
838 		return kvm_lapic_get_cr8(vcpu);
839 	else
840 		return vcpu->arch.cr8;
841 }
842 EXPORT_SYMBOL_GPL(kvm_get_cr8);
843 
844 static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
845 {
846 	int i;
847 
848 	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
849 		for (i = 0; i < KVM_NR_DB_REGS; i++)
850 			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
851 		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
852 	}
853 }
854 
855 static void kvm_update_dr6(struct kvm_vcpu *vcpu)
856 {
857 	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
858 		kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6);
859 }
860 
861 static void kvm_update_dr7(struct kvm_vcpu *vcpu)
862 {
863 	unsigned long dr7;
864 
865 	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
866 		dr7 = vcpu->arch.guest_debug_dr7;
867 	else
868 		dr7 = vcpu->arch.dr7;
869 	kvm_x86_ops->set_dr7(vcpu, dr7);
870 	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
871 	if (dr7 & DR7_BP_EN_MASK)
872 		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
873 }
874 
875 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
876 {
877 	u64 fixed = DR6_FIXED_1;
878 
879 	if (!guest_cpuid_has_rtm(vcpu))
880 		fixed |= DR6_RTM;
881 	return fixed;
882 }
883 
884 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
885 {
886 	switch (dr) {
887 	case 0 ... 3:
888 		vcpu->arch.db[dr] = val;
889 		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
890 			vcpu->arch.eff_db[dr] = val;
891 		break;
892 	case 4:
893 		/* fall through */
894 	case 6:
895 		if (val & 0xffffffff00000000ULL)
896 			return -1; /* #GP */
897 		vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
898 		kvm_update_dr6(vcpu);
899 		break;
900 	case 5:
901 		/* fall through */
902 	default: /* 7 */
903 		if (val & 0xffffffff00000000ULL)
904 			return -1; /* #GP */
905 		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
906 		kvm_update_dr7(vcpu);
907 		break;
908 	}
909 
910 	return 0;
911 }
912 
913 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
914 {
915 	if (__kvm_set_dr(vcpu, dr, val)) {
916 		kvm_inject_gp(vcpu, 0);
917 		return 1;
918 	}
919 	return 0;
920 }
921 EXPORT_SYMBOL_GPL(kvm_set_dr);
922 
923 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
924 {
925 	switch (dr) {
926 	case 0 ... 3:
927 		*val = vcpu->arch.db[dr];
928 		break;
929 	case 4:
930 		/* fall through */
931 	case 6:
932 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
933 			*val = vcpu->arch.dr6;
934 		else
935 			*val = kvm_x86_ops->get_dr6(vcpu);
936 		break;
937 	case 5:
938 		/* fall through */
939 	default: /* 7 */
940 		*val = vcpu->arch.dr7;
941 		break;
942 	}
943 	return 0;
944 }
945 EXPORT_SYMBOL_GPL(kvm_get_dr);
946 
947 bool kvm_rdpmc(struct kvm_vcpu *vcpu)
948 {
949 	u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
950 	u64 data;
951 	int err;
952 
953 	err = kvm_pmu_rdpmc(vcpu, ecx, &data);
954 	if (err)
955 		return err;
956 	kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
957 	kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
958 	return err;
959 }
960 EXPORT_SYMBOL_GPL(kvm_rdpmc);
961 
962 /*
963  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
964  * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
965  *
966  * This list is modified at module load time to reflect the
967  * capabilities of the host cpu. This capabilities test skips MSRs that are
968  * kvm-specific. Those are put in emulated_msrs; filtering of emulated_msrs
969  * may depend on host virtualization features rather than host cpu features.
970  */
971 
972 static u32 msrs_to_save[] = {
973 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
974 	MSR_STAR,
975 #ifdef CONFIG_X86_64
976 	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
977 #endif
978 	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
979 	MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
980 };
981 
982 static unsigned num_msrs_to_save;
983 
984 static u32 emulated_msrs[] = {
985 	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
986 	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
987 	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
988 	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
989 	HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
990 	HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
991 	HV_X64_MSR_RESET,
992 	HV_X64_MSR_VP_INDEX,
993 	HV_X64_MSR_VP_RUNTIME,
994 	HV_X64_MSR_SCONTROL,
995 	HV_X64_MSR_STIMER0_CONFIG,
996 	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
997 	MSR_KVM_PV_EOI_EN,
998 
999 	MSR_IA32_TSC_ADJUST,
1000 	MSR_IA32_TSCDEADLINE,
1001 	MSR_IA32_MISC_ENABLE,
1002 	MSR_IA32_MCG_STATUS,
1003 	MSR_IA32_MCG_CTL,
1004 	MSR_IA32_MCG_EXT_CTL,
1005 	MSR_IA32_SMBASE,
1006 };
1007 
1008 static unsigned num_emulated_msrs;
1009 
1010 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
1011 {
1012 	if (efer & efer_reserved_bits)
1013 		return false;
1014 
1015 	if (efer & EFER_FFXSR) {
1016 		struct kvm_cpuid_entry2 *feat;
1017 
1018 		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
1019 		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
1020 			return false;
1021 	}
1022 
1023 	if (efer & EFER_SVME) {
1024 		struct kvm_cpuid_entry2 *feat;
1025 
1026 		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
1027 		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
1028 			return false;
1029 	}
1030 
1031 	return true;
1032 }
1033 EXPORT_SYMBOL_GPL(kvm_valid_efer);
1034 
1035 static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
1036 {
1037 	u64 old_efer = vcpu->arch.efer;
1038 
1039 	if (!kvm_valid_efer(vcpu, efer))
1040 		return 1;
1041 
1042 	if (is_paging(vcpu)
1043 	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
1044 		return 1;
1045 
1046 	efer &= ~EFER_LMA;
1047 	efer |= vcpu->arch.efer & EFER_LMA;
1048 
1049 	kvm_x86_ops->set_efer(vcpu, efer);
1050 
1051 	/* Update reserved bits */
1052 	if ((efer ^ old_efer) & EFER_NX)
1053 		kvm_mmu_reset_context(vcpu);
1054 
1055 	return 0;
1056 }
1057 
1058 void kvm_enable_efer_bits(u64 mask)
1059 {
1060        efer_reserved_bits &= ~mask;
1061 }
1062 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
1063 
1064 /*
1065  * Writes msr value into into the appropriate "register".
1066  * Returns 0 on success, non-0 otherwise.
1067  * Assumes vcpu_load() was already called.
1068  */
1069 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
1070 {
1071 	switch (msr->index) {
1072 	case MSR_FS_BASE:
1073 	case MSR_GS_BASE:
1074 	case MSR_KERNEL_GS_BASE:
1075 	case MSR_CSTAR:
1076 	case MSR_LSTAR:
1077 		if (is_noncanonical_address(msr->data))
1078 			return 1;
1079 		break;
1080 	case MSR_IA32_SYSENTER_EIP:
1081 	case MSR_IA32_SYSENTER_ESP:
1082 		/*
1083 		 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
1084 		 * non-canonical address is written on Intel but not on
1085 		 * AMD (which ignores the top 32-bits, because it does
1086 		 * not implement 64-bit SYSENTER).
1087 		 *
1088 		 * 64-bit code should hence be able to write a non-canonical
1089 		 * value on AMD.  Making the address canonical ensures that
1090 		 * vmentry does not fail on Intel after writing a non-canonical
1091 		 * value, and that something deterministic happens if the guest
1092 		 * invokes 64-bit SYSENTER.
1093 		 */
1094 		msr->data = get_canonical(msr->data);
1095 	}
1096 	return kvm_x86_ops->set_msr(vcpu, msr);
1097 }
1098 EXPORT_SYMBOL_GPL(kvm_set_msr);
1099 
1100 /*
1101  * Adapt set_msr() to msr_io()'s calling convention
1102  */
1103 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1104 {
1105 	struct msr_data msr;
1106 	int r;
1107 
1108 	msr.index = index;
1109 	msr.host_initiated = true;
1110 	r = kvm_get_msr(vcpu, &msr);
1111 	if (r)
1112 		return r;
1113 
1114 	*data = msr.data;
1115 	return 0;
1116 }
1117 
1118 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1119 {
1120 	struct msr_data msr;
1121 
1122 	msr.data = *data;
1123 	msr.index = index;
1124 	msr.host_initiated = true;
1125 	return kvm_set_msr(vcpu, &msr);
1126 }
1127 
1128 #ifdef CONFIG_X86_64
1129 struct pvclock_gtod_data {
1130 	seqcount_t	seq;
1131 
1132 	struct { /* extract of a clocksource struct */
1133 		int vclock_mode;
1134 		u64	cycle_last;
1135 		u64	mask;
1136 		u32	mult;
1137 		u32	shift;
1138 	} clock;
1139 
1140 	u64		boot_ns;
1141 	u64		nsec_base;
1142 };
1143 
1144 static struct pvclock_gtod_data pvclock_gtod_data;
1145 
1146 static void update_pvclock_gtod(struct timekeeper *tk)
1147 {
1148 	struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
1149 	u64 boot_ns;
1150 
1151 	boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot));
1152 
1153 	write_seqcount_begin(&vdata->seq);
1154 
1155 	/* copy pvclock gtod data */
1156 	vdata->clock.vclock_mode	= tk->tkr_mono.clock->archdata.vclock_mode;
1157 	vdata->clock.cycle_last		= tk->tkr_mono.cycle_last;
1158 	vdata->clock.mask		= tk->tkr_mono.mask;
1159 	vdata->clock.mult		= tk->tkr_mono.mult;
1160 	vdata->clock.shift		= tk->tkr_mono.shift;
1161 
1162 	vdata->boot_ns			= boot_ns;
1163 	vdata->nsec_base		= tk->tkr_mono.xtime_nsec;
1164 
1165 	write_seqcount_end(&vdata->seq);
1166 }
1167 #endif
1168 
1169 void kvm_set_pending_timer(struct kvm_vcpu *vcpu)
1170 {
1171 	/*
1172 	 * Note: KVM_REQ_PENDING_TIMER is implicitly checked in
1173 	 * vcpu_enter_guest.  This function is only called from
1174 	 * the physical CPU that is running vcpu.
1175 	 */
1176 	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
1177 }
1178 
1179 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
1180 {
1181 	int version;
1182 	int r;
1183 	struct pvclock_wall_clock wc;
1184 	struct timespec64 boot;
1185 
1186 	if (!wall_clock)
1187 		return;
1188 
1189 	r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
1190 	if (r)
1191 		return;
1192 
1193 	if (version & 1)
1194 		++version;  /* first time write, random junk */
1195 
1196 	++version;
1197 
1198 	if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version)))
1199 		return;
1200 
1201 	/*
1202 	 * The guest calculates current wall clock time by adding
1203 	 * system time (updated by kvm_guest_time_update below) to the
1204 	 * wall clock specified here.  guest system time equals host
1205 	 * system time for us, thus we must fill in host boot time here.
1206 	 */
1207 	getboottime64(&boot);
1208 
1209 	if (kvm->arch.kvmclock_offset) {
1210 		struct timespec64 ts = ns_to_timespec64(kvm->arch.kvmclock_offset);
1211 		boot = timespec64_sub(boot, ts);
1212 	}
1213 	wc.sec = (u32)boot.tv_sec; /* overflow in 2106 guest time */
1214 	wc.nsec = boot.tv_nsec;
1215 	wc.version = version;
1216 
1217 	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
1218 
1219 	version++;
1220 	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
1221 }
1222 
1223 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
1224 {
1225 	do_shl32_div32(dividend, divisor);
1226 	return dividend;
1227 }
1228 
1229 static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
1230 			       s8 *pshift, u32 *pmultiplier)
1231 {
1232 	uint64_t scaled64;
1233 	int32_t  shift = 0;
1234 	uint64_t tps64;
1235 	uint32_t tps32;
1236 
1237 	tps64 = base_hz;
1238 	scaled64 = scaled_hz;
1239 	while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
1240 		tps64 >>= 1;
1241 		shift--;
1242 	}
1243 
1244 	tps32 = (uint32_t)tps64;
1245 	while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
1246 		if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
1247 			scaled64 >>= 1;
1248 		else
1249 			tps32 <<= 1;
1250 		shift++;
1251 	}
1252 
1253 	*pshift = shift;
1254 	*pmultiplier = div_frac(scaled64, tps32);
1255 
1256 	pr_debug("%s: base_hz %llu => %llu, shift %d, mul %u\n",
1257 		 __func__, base_hz, scaled_hz, shift, *pmultiplier);
1258 }
1259 
1260 #ifdef CONFIG_X86_64
1261 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
1262 #endif
1263 
1264 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
1265 static unsigned long max_tsc_khz;
1266 
1267 static u32 adjust_tsc_khz(u32 khz, s32 ppm)
1268 {
1269 	u64 v = (u64)khz * (1000000 + ppm);
1270 	do_div(v, 1000000);
1271 	return v;
1272 }
1273 
1274 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
1275 {
1276 	u64 ratio;
1277 
1278 	/* Guest TSC same frequency as host TSC? */
1279 	if (!scale) {
1280 		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
1281 		return 0;
1282 	}
1283 
1284 	/* TSC scaling supported? */
1285 	if (!kvm_has_tsc_control) {
1286 		if (user_tsc_khz > tsc_khz) {
1287 			vcpu->arch.tsc_catchup = 1;
1288 			vcpu->arch.tsc_always_catchup = 1;
1289 			return 0;
1290 		} else {
1291 			WARN(1, "user requested TSC rate below hardware speed\n");
1292 			return -1;
1293 		}
1294 	}
1295 
1296 	/* TSC scaling required  - calculate ratio */
1297 	ratio = mul_u64_u32_div(1ULL << kvm_tsc_scaling_ratio_frac_bits,
1298 				user_tsc_khz, tsc_khz);
1299 
1300 	if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) {
1301 		WARN_ONCE(1, "Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
1302 			  user_tsc_khz);
1303 		return -1;
1304 	}
1305 
1306 	vcpu->arch.tsc_scaling_ratio = ratio;
1307 	return 0;
1308 }
1309 
1310 static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
1311 {
1312 	u32 thresh_lo, thresh_hi;
1313 	int use_scaling = 0;
1314 
1315 	/* tsc_khz can be zero if TSC calibration fails */
1316 	if (user_tsc_khz == 0) {
1317 		/* set tsc_scaling_ratio to a safe value */
1318 		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
1319 		return -1;
1320 	}
1321 
1322 	/* Compute a scale to convert nanoseconds in TSC cycles */
1323 	kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC,
1324 			   &vcpu->arch.virtual_tsc_shift,
1325 			   &vcpu->arch.virtual_tsc_mult);
1326 	vcpu->arch.virtual_tsc_khz = user_tsc_khz;
1327 
1328 	/*
1329 	 * Compute the variation in TSC rate which is acceptable
1330 	 * within the range of tolerance and decide if the
1331 	 * rate being applied is within that bounds of the hardware
1332 	 * rate.  If so, no scaling or compensation need be done.
1333 	 */
1334 	thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
1335 	thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
1336 	if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) {
1337 		pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", user_tsc_khz, thresh_lo, thresh_hi);
1338 		use_scaling = 1;
1339 	}
1340 	return set_tsc_khz(vcpu, user_tsc_khz, use_scaling);
1341 }
1342 
1343 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
1344 {
1345 	u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
1346 				      vcpu->arch.virtual_tsc_mult,
1347 				      vcpu->arch.virtual_tsc_shift);
1348 	tsc += vcpu->arch.this_tsc_write;
1349 	return tsc;
1350 }
1351 
1352 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
1353 {
1354 #ifdef CONFIG_X86_64
1355 	bool vcpus_matched;
1356 	struct kvm_arch *ka = &vcpu->kvm->arch;
1357 	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1358 
1359 	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
1360 			 atomic_read(&vcpu->kvm->online_vcpus));
1361 
1362 	/*
1363 	 * Once the masterclock is enabled, always perform request in
1364 	 * order to update it.
1365 	 *
1366 	 * In order to enable masterclock, the host clocksource must be TSC
1367 	 * and the vcpus need to have matched TSCs.  When that happens,
1368 	 * perform request to enable masterclock.
1369 	 */
1370 	if (ka->use_master_clock ||
1371 	    (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched))
1372 		kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
1373 
1374 	trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
1375 			    atomic_read(&vcpu->kvm->online_vcpus),
1376 		            ka->use_master_clock, gtod->clock.vclock_mode);
1377 #endif
1378 }
1379 
1380 static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
1381 {
1382 	u64 curr_offset = vcpu->arch.tsc_offset;
1383 	vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
1384 }
1385 
1386 /*
1387  * Multiply tsc by a fixed point number represented by ratio.
1388  *
1389  * The most significant 64-N bits (mult) of ratio represent the
1390  * integral part of the fixed point number; the remaining N bits
1391  * (frac) represent the fractional part, ie. ratio represents a fixed
1392  * point number (mult + frac * 2^(-N)).
1393  *
1394  * N equals to kvm_tsc_scaling_ratio_frac_bits.
1395  */
1396 static inline u64 __scale_tsc(u64 ratio, u64 tsc)
1397 {
1398 	return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
1399 }
1400 
1401 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
1402 {
1403 	u64 _tsc = tsc;
1404 	u64 ratio = vcpu->arch.tsc_scaling_ratio;
1405 
1406 	if (ratio != kvm_default_tsc_scaling_ratio)
1407 		_tsc = __scale_tsc(ratio, tsc);
1408 
1409 	return _tsc;
1410 }
1411 EXPORT_SYMBOL_GPL(kvm_scale_tsc);
1412 
1413 static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
1414 {
1415 	u64 tsc;
1416 
1417 	tsc = kvm_scale_tsc(vcpu, rdtsc());
1418 
1419 	return target_tsc - tsc;
1420 }
1421 
1422 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
1423 {
1424 	return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
1425 }
1426 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
1427 
1428 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1429 {
1430 	kvm_x86_ops->write_tsc_offset(vcpu, offset);
1431 	vcpu->arch.tsc_offset = offset;
1432 }
1433 
1434 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
1435 {
1436 	struct kvm *kvm = vcpu->kvm;
1437 	u64 offset, ns, elapsed;
1438 	unsigned long flags;
1439 	s64 usdiff;
1440 	bool matched;
1441 	bool already_matched;
1442 	u64 data = msr->data;
1443 
1444 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
1445 	offset = kvm_compute_tsc_offset(vcpu, data);
1446 	ns = ktime_get_boot_ns();
1447 	elapsed = ns - kvm->arch.last_tsc_nsec;
1448 
1449 	if (vcpu->arch.virtual_tsc_khz) {
1450 		int faulted = 0;
1451 
1452 		/* n.b - signed multiplication and division required */
1453 		usdiff = data - kvm->arch.last_tsc_write;
1454 #ifdef CONFIG_X86_64
1455 		usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
1456 #else
1457 		/* do_div() only does unsigned */
1458 		asm("1: idivl %[divisor]\n"
1459 		    "2: xor %%edx, %%edx\n"
1460 		    "   movl $0, %[faulted]\n"
1461 		    "3:\n"
1462 		    ".section .fixup,\"ax\"\n"
1463 		    "4: movl $1, %[faulted]\n"
1464 		    "   jmp  3b\n"
1465 		    ".previous\n"
1466 
1467 		_ASM_EXTABLE(1b, 4b)
1468 
1469 		: "=A"(usdiff), [faulted] "=r" (faulted)
1470 		: "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz));
1471 
1472 #endif
1473 		do_div(elapsed, 1000);
1474 		usdiff -= elapsed;
1475 		if (usdiff < 0)
1476 			usdiff = -usdiff;
1477 
1478 		/* idivl overflow => difference is larger than USEC_PER_SEC */
1479 		if (faulted)
1480 			usdiff = USEC_PER_SEC;
1481 	} else
1482 		usdiff = USEC_PER_SEC; /* disable TSC match window below */
1483 
1484 	/*
1485 	 * Special case: TSC write with a small delta (1 second) of virtual
1486 	 * cycle time against real time is interpreted as an attempt to
1487 	 * synchronize the CPU.
1488          *
1489 	 * For a reliable TSC, we can match TSC offsets, and for an unstable
1490 	 * TSC, we add elapsed time in this computation.  We could let the
1491 	 * compensation code attempt to catch up if we fall behind, but
1492 	 * it's better to try to match offsets from the beginning.
1493          */
1494 	if (usdiff < USEC_PER_SEC &&
1495 	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
1496 		if (!check_tsc_unstable()) {
1497 			offset = kvm->arch.cur_tsc_offset;
1498 			pr_debug("kvm: matched tsc offset for %llu\n", data);
1499 		} else {
1500 			u64 delta = nsec_to_cycles(vcpu, elapsed);
1501 			data += delta;
1502 			offset = kvm_compute_tsc_offset(vcpu, data);
1503 			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
1504 		}
1505 		matched = true;
1506 		already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
1507 	} else {
1508 		/*
1509 		 * We split periods of matched TSC writes into generations.
1510 		 * For each generation, we track the original measured
1511 		 * nanosecond time, offset, and write, so if TSCs are in
1512 		 * sync, we can match exact offset, and if not, we can match
1513 		 * exact software computation in compute_guest_tsc()
1514 		 *
1515 		 * These values are tracked in kvm->arch.cur_xxx variables.
1516 		 */
1517 		kvm->arch.cur_tsc_generation++;
1518 		kvm->arch.cur_tsc_nsec = ns;
1519 		kvm->arch.cur_tsc_write = data;
1520 		kvm->arch.cur_tsc_offset = offset;
1521 		matched = false;
1522 		pr_debug("kvm: new tsc generation %llu, clock %llu\n",
1523 			 kvm->arch.cur_tsc_generation, data);
1524 	}
1525 
1526 	/*
1527 	 * We also track th most recent recorded KHZ, write and time to
1528 	 * allow the matching interval to be extended at each write.
1529 	 */
1530 	kvm->arch.last_tsc_nsec = ns;
1531 	kvm->arch.last_tsc_write = data;
1532 	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
1533 
1534 	vcpu->arch.last_guest_tsc = data;
1535 
1536 	/* Keep track of which generation this VCPU has synchronized to */
1537 	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
1538 	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
1539 	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
1540 
1541 	if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
1542 		update_ia32_tsc_adjust_msr(vcpu, offset);
1543 	kvm_vcpu_write_tsc_offset(vcpu, offset);
1544 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
1545 
1546 	spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
1547 	if (!matched) {
1548 		kvm->arch.nr_vcpus_matched_tsc = 0;
1549 	} else if (!already_matched) {
1550 		kvm->arch.nr_vcpus_matched_tsc++;
1551 	}
1552 
1553 	kvm_track_tsc_matching(vcpu);
1554 	spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
1555 }
1556 
1557 EXPORT_SYMBOL_GPL(kvm_write_tsc);
1558 
1559 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
1560 					   s64 adjustment)
1561 {
1562 	kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment);
1563 }
1564 
1565 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
1566 {
1567 	if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
1568 		WARN_ON(adjustment < 0);
1569 	adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
1570 	adjust_tsc_offset_guest(vcpu, adjustment);
1571 }
1572 
1573 #ifdef CONFIG_X86_64
1574 
1575 static u64 read_tsc(void)
1576 {
1577 	u64 ret = (u64)rdtsc_ordered();
1578 	u64 last = pvclock_gtod_data.clock.cycle_last;
1579 
1580 	if (likely(ret >= last))
1581 		return ret;
1582 
1583 	/*
1584 	 * GCC likes to generate cmov here, but this branch is extremely
1585 	 * predictable (it's just a function of time and the likely is
1586 	 * very likely) and there's a data dependence, so force GCC
1587 	 * to generate a branch instead.  I don't barrier() because
1588 	 * we don't actually need a barrier, and if this function
1589 	 * ever gets inlined it will generate worse code.
1590 	 */
1591 	asm volatile ("");
1592 	return last;
1593 }
1594 
1595 static inline u64 vgettsc(u64 *cycle_now)
1596 {
1597 	long v;
1598 	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1599 
1600 	*cycle_now = read_tsc();
1601 
1602 	v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask;
1603 	return v * gtod->clock.mult;
1604 }
1605 
1606 static int do_monotonic_boot(s64 *t, u64 *cycle_now)
1607 {
1608 	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1609 	unsigned long seq;
1610 	int mode;
1611 	u64 ns;
1612 
1613 	do {
1614 		seq = read_seqcount_begin(&gtod->seq);
1615 		mode = gtod->clock.vclock_mode;
1616 		ns = gtod->nsec_base;
1617 		ns += vgettsc(cycle_now);
1618 		ns >>= gtod->clock.shift;
1619 		ns += gtod->boot_ns;
1620 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
1621 	*t = ns;
1622 
1623 	return mode;
1624 }
1625 
1626 /* returns true if host is using tsc clocksource */
1627 static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *cycle_now)
1628 {
1629 	/* checked again under seqlock below */
1630 	if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
1631 		return false;
1632 
1633 	return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC;
1634 }
1635 #endif
1636 
1637 /*
1638  *
1639  * Assuming a stable TSC across physical CPUS, and a stable TSC
1640  * across virtual CPUs, the following condition is possible.
1641  * Each numbered line represents an event visible to both
1642  * CPUs at the next numbered event.
1643  *
1644  * "timespecX" represents host monotonic time. "tscX" represents
1645  * RDTSC value.
1646  *
1647  * 		VCPU0 on CPU0		|	VCPU1 on CPU1
1648  *
1649  * 1.  read timespec0,tsc0
1650  * 2.					| timespec1 = timespec0 + N
1651  * 					| tsc1 = tsc0 + M
1652  * 3. transition to guest		| transition to guest
1653  * 4. ret0 = timespec0 + (rdtsc - tsc0) |
1654  * 5.				        | ret1 = timespec1 + (rdtsc - tsc1)
1655  * 				        | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
1656  *
1657  * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
1658  *
1659  * 	- ret0 < ret1
1660  *	- timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
1661  *		...
1662  *	- 0 < N - M => M < N
1663  *
1664  * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
1665  * always the case (the difference between two distinct xtime instances
1666  * might be smaller then the difference between corresponding TSC reads,
1667  * when updating guest vcpus pvclock areas).
1668  *
1669  * To avoid that problem, do not allow visibility of distinct
1670  * system_timestamp/tsc_timestamp values simultaneously: use a master
1671  * copy of host monotonic time values. Update that master copy
1672  * in lockstep.
1673  *
1674  * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
1675  *
1676  */
1677 
1678 static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
1679 {
1680 #ifdef CONFIG_X86_64
1681 	struct kvm_arch *ka = &kvm->arch;
1682 	int vclock_mode;
1683 	bool host_tsc_clocksource, vcpus_matched;
1684 
1685 	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
1686 			atomic_read(&kvm->online_vcpus));
1687 
1688 	/*
1689 	 * If the host uses TSC clock, then passthrough TSC as stable
1690 	 * to the guest.
1691 	 */
1692 	host_tsc_clocksource = kvm_get_time_and_clockread(
1693 					&ka->master_kernel_ns,
1694 					&ka->master_cycle_now);
1695 
1696 	ka->use_master_clock = host_tsc_clocksource && vcpus_matched
1697 				&& !backwards_tsc_observed
1698 				&& !ka->boot_vcpu_runs_old_kvmclock;
1699 
1700 	if (ka->use_master_clock)
1701 		atomic_set(&kvm_guest_has_master_clock, 1);
1702 
1703 	vclock_mode = pvclock_gtod_data.clock.vclock_mode;
1704 	trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
1705 					vcpus_matched);
1706 #endif
1707 }
1708 
1709 void kvm_make_mclock_inprogress_request(struct kvm *kvm)
1710 {
1711 	kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
1712 }
1713 
1714 static void kvm_gen_update_masterclock(struct kvm *kvm)
1715 {
1716 #ifdef CONFIG_X86_64
1717 	int i;
1718 	struct kvm_vcpu *vcpu;
1719 	struct kvm_arch *ka = &kvm->arch;
1720 
1721 	spin_lock(&ka->pvclock_gtod_sync_lock);
1722 	kvm_make_mclock_inprogress_request(kvm);
1723 	/* no guest entries from this point */
1724 	pvclock_update_vm_gtod_copy(kvm);
1725 
1726 	kvm_for_each_vcpu(i, vcpu, kvm)
1727 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
1728 
1729 	/* guest entries allowed */
1730 	kvm_for_each_vcpu(i, vcpu, kvm)
1731 		clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests);
1732 
1733 	spin_unlock(&ka->pvclock_gtod_sync_lock);
1734 #endif
1735 }
1736 
1737 static u64 __get_kvmclock_ns(struct kvm *kvm)
1738 {
1739 	struct kvm_arch *ka = &kvm->arch;
1740 	struct pvclock_vcpu_time_info hv_clock;
1741 
1742 	spin_lock(&ka->pvclock_gtod_sync_lock);
1743 	if (!ka->use_master_clock) {
1744 		spin_unlock(&ka->pvclock_gtod_sync_lock);
1745 		return ktime_get_boot_ns() + ka->kvmclock_offset;
1746 	}
1747 
1748 	hv_clock.tsc_timestamp = ka->master_cycle_now;
1749 	hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
1750 	spin_unlock(&ka->pvclock_gtod_sync_lock);
1751 
1752 	kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
1753 			   &hv_clock.tsc_shift,
1754 			   &hv_clock.tsc_to_system_mul);
1755 	return __pvclock_read_cycles(&hv_clock, rdtsc());
1756 }
1757 
1758 u64 get_kvmclock_ns(struct kvm *kvm)
1759 {
1760 	unsigned long flags;
1761 	s64 ns;
1762 
1763 	local_irq_save(flags);
1764 	ns = __get_kvmclock_ns(kvm);
1765 	local_irq_restore(flags);
1766 
1767 	return ns;
1768 }
1769 
1770 static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
1771 {
1772 	struct kvm_vcpu_arch *vcpu = &v->arch;
1773 	struct pvclock_vcpu_time_info guest_hv_clock;
1774 
1775 	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
1776 		&guest_hv_clock, sizeof(guest_hv_clock))))
1777 		return;
1778 
1779 	/* This VCPU is paused, but it's legal for a guest to read another
1780 	 * VCPU's kvmclock, so we really have to follow the specification where
1781 	 * it says that version is odd if data is being modified, and even after
1782 	 * it is consistent.
1783 	 *
1784 	 * Version field updates must be kept separate.  This is because
1785 	 * kvm_write_guest_cached might use a "rep movs" instruction, and
1786 	 * writes within a string instruction are weakly ordered.  So there
1787 	 * are three writes overall.
1788 	 *
1789 	 * As a small optimization, only write the version field in the first
1790 	 * and third write.  The vcpu->pv_time cache is still valid, because the
1791 	 * version field is the first in the struct.
1792 	 */
1793 	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
1794 
1795 	vcpu->hv_clock.version = guest_hv_clock.version + 1;
1796 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1797 				&vcpu->hv_clock,
1798 				sizeof(vcpu->hv_clock.version));
1799 
1800 	smp_wmb();
1801 
1802 	/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
1803 	vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
1804 
1805 	if (vcpu->pvclock_set_guest_stopped_request) {
1806 		vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
1807 		vcpu->pvclock_set_guest_stopped_request = false;
1808 	}
1809 
1810 	trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
1811 
1812 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1813 				&vcpu->hv_clock,
1814 				sizeof(vcpu->hv_clock));
1815 
1816 	smp_wmb();
1817 
1818 	vcpu->hv_clock.version++;
1819 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1820 				&vcpu->hv_clock,
1821 				sizeof(vcpu->hv_clock.version));
1822 }
1823 
1824 static int kvm_guest_time_update(struct kvm_vcpu *v)
1825 {
1826 	unsigned long flags, tgt_tsc_khz;
1827 	struct kvm_vcpu_arch *vcpu = &v->arch;
1828 	struct kvm_arch *ka = &v->kvm->arch;
1829 	s64 kernel_ns;
1830 	u64 tsc_timestamp, host_tsc;
1831 	u8 pvclock_flags;
1832 	bool use_master_clock;
1833 
1834 	kernel_ns = 0;
1835 	host_tsc = 0;
1836 
1837 	/*
1838 	 * If the host uses TSC clock, then passthrough TSC as stable
1839 	 * to the guest.
1840 	 */
1841 	spin_lock(&ka->pvclock_gtod_sync_lock);
1842 	use_master_clock = ka->use_master_clock;
1843 	if (use_master_clock) {
1844 		host_tsc = ka->master_cycle_now;
1845 		kernel_ns = ka->master_kernel_ns;
1846 	}
1847 	spin_unlock(&ka->pvclock_gtod_sync_lock);
1848 
1849 	/* Keep irq disabled to prevent changes to the clock */
1850 	local_irq_save(flags);
1851 	tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz);
1852 	if (unlikely(tgt_tsc_khz == 0)) {
1853 		local_irq_restore(flags);
1854 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1855 		return 1;
1856 	}
1857 	if (!use_master_clock) {
1858 		host_tsc = rdtsc();
1859 		kernel_ns = ktime_get_boot_ns();
1860 	}
1861 
1862 	tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
1863 
1864 	/*
1865 	 * We may have to catch up the TSC to match elapsed wall clock
1866 	 * time for two reasons, even if kvmclock is used.
1867 	 *   1) CPU could have been running below the maximum TSC rate
1868 	 *   2) Broken TSC compensation resets the base at each VCPU
1869 	 *      entry to avoid unknown leaps of TSC even when running
1870 	 *      again on the same CPU.  This may cause apparent elapsed
1871 	 *      time to disappear, and the guest to stand still or run
1872 	 *	very slowly.
1873 	 */
1874 	if (vcpu->tsc_catchup) {
1875 		u64 tsc = compute_guest_tsc(v, kernel_ns);
1876 		if (tsc > tsc_timestamp) {
1877 			adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
1878 			tsc_timestamp = tsc;
1879 		}
1880 	}
1881 
1882 	local_irq_restore(flags);
1883 
1884 	/* With all the info we got, fill in the values */
1885 
1886 	if (kvm_has_tsc_control)
1887 		tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz);
1888 
1889 	if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
1890 		kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
1891 				   &vcpu->hv_clock.tsc_shift,
1892 				   &vcpu->hv_clock.tsc_to_system_mul);
1893 		vcpu->hw_tsc_khz = tgt_tsc_khz;
1894 	}
1895 
1896 	vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
1897 	vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
1898 	vcpu->last_guest_tsc = tsc_timestamp;
1899 
1900 	/* If the host uses TSC clocksource, then it is stable */
1901 	pvclock_flags = 0;
1902 	if (use_master_clock)
1903 		pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
1904 
1905 	vcpu->hv_clock.flags = pvclock_flags;
1906 
1907 	if (vcpu->pv_time_enabled)
1908 		kvm_setup_pvclock_page(v);
1909 	if (v == kvm_get_vcpu(v->kvm, 0))
1910 		kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
1911 	return 0;
1912 }
1913 
1914 /*
1915  * kvmclock updates which are isolated to a given vcpu, such as
1916  * vcpu->cpu migration, should not allow system_timestamp from
1917  * the rest of the vcpus to remain static. Otherwise ntp frequency
1918  * correction applies to one vcpu's system_timestamp but not
1919  * the others.
1920  *
1921  * So in those cases, request a kvmclock update for all vcpus.
1922  * We need to rate-limit these requests though, as they can
1923  * considerably slow guests that have a large number of vcpus.
1924  * The time for a remote vcpu to update its kvmclock is bound
1925  * by the delay we use to rate-limit the updates.
1926  */
1927 
1928 #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)
1929 
1930 static void kvmclock_update_fn(struct work_struct *work)
1931 {
1932 	int i;
1933 	struct delayed_work *dwork = to_delayed_work(work);
1934 	struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
1935 					   kvmclock_update_work);
1936 	struct kvm *kvm = container_of(ka, struct kvm, arch);
1937 	struct kvm_vcpu *vcpu;
1938 
1939 	kvm_for_each_vcpu(i, vcpu, kvm) {
1940 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
1941 		kvm_vcpu_kick(vcpu);
1942 	}
1943 }
1944 
1945 static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
1946 {
1947 	struct kvm *kvm = v->kvm;
1948 
1949 	kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1950 	schedule_delayed_work(&kvm->arch.kvmclock_update_work,
1951 					KVMCLOCK_UPDATE_DELAY);
1952 }
1953 
1954 #define KVMCLOCK_SYNC_PERIOD (300 * HZ)
1955 
1956 static void kvmclock_sync_fn(struct work_struct *work)
1957 {
1958 	struct delayed_work *dwork = to_delayed_work(work);
1959 	struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
1960 					   kvmclock_sync_work);
1961 	struct kvm *kvm = container_of(ka, struct kvm, arch);
1962 
1963 	if (!kvmclock_periodic_sync)
1964 		return;
1965 
1966 	schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
1967 	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
1968 					KVMCLOCK_SYNC_PERIOD);
1969 }
1970 
1971 static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1972 {
1973 	u64 mcg_cap = vcpu->arch.mcg_cap;
1974 	unsigned bank_num = mcg_cap & 0xff;
1975 
1976 	switch (msr) {
1977 	case MSR_IA32_MCG_STATUS:
1978 		vcpu->arch.mcg_status = data;
1979 		break;
1980 	case MSR_IA32_MCG_CTL:
1981 		if (!(mcg_cap & MCG_CTL_P))
1982 			return 1;
1983 		if (data != 0 && data != ~(u64)0)
1984 			return -1;
1985 		vcpu->arch.mcg_ctl = data;
1986 		break;
1987 	default:
1988 		if (msr >= MSR_IA32_MC0_CTL &&
1989 		    msr < MSR_IA32_MCx_CTL(bank_num)) {
1990 			u32 offset = msr - MSR_IA32_MC0_CTL;
1991 			/* only 0 or all 1s can be written to IA32_MCi_CTL
1992 			 * some Linux kernels though clear bit 10 in bank 4 to
1993 			 * workaround a BIOS/GART TBL issue on AMD K8s, ignore
1994 			 * this to avoid an uncatched #GP in the guest
1995 			 */
1996 			if ((offset & 0x3) == 0 &&
1997 			    data != 0 && (data | (1 << 10)) != ~(u64)0)
1998 				return -1;
1999 			vcpu->arch.mce_banks[offset] = data;
2000 			break;
2001 		}
2002 		return 1;
2003 	}
2004 	return 0;
2005 }
2006 
2007 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
2008 {
2009 	struct kvm *kvm = vcpu->kvm;
2010 	int lm = is_long_mode(vcpu);
2011 	u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
2012 		: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
2013 	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
2014 		: kvm->arch.xen_hvm_config.blob_size_32;
2015 	u32 page_num = data & ~PAGE_MASK;
2016 	u64 page_addr = data & PAGE_MASK;
2017 	u8 *page;
2018 	int r;
2019 
2020 	r = -E2BIG;
2021 	if (page_num >= blob_size)
2022 		goto out;
2023 	r = -ENOMEM;
2024 	page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
2025 	if (IS_ERR(page)) {
2026 		r = PTR_ERR(page);
2027 		goto out;
2028 	}
2029 	if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE))
2030 		goto out_free;
2031 	r = 0;
2032 out_free:
2033 	kfree(page);
2034 out:
2035 	return r;
2036 }
2037 
2038 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
2039 {
2040 	gpa_t gpa = data & ~0x3f;
2041 
2042 	/* Bits 2:5 are reserved, Should be zero */
2043 	if (data & 0x3c)
2044 		return 1;
2045 
2046 	vcpu->arch.apf.msr_val = data;
2047 
2048 	if (!(data & KVM_ASYNC_PF_ENABLED)) {
2049 		kvm_clear_async_pf_completion_queue(vcpu);
2050 		kvm_async_pf_hash_reset(vcpu);
2051 		return 0;
2052 	}
2053 
2054 	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
2055 					sizeof(u32)))
2056 		return 1;
2057 
2058 	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
2059 	kvm_async_pf_wakeup_all(vcpu);
2060 	return 0;
2061 }
2062 
2063 static void kvmclock_reset(struct kvm_vcpu *vcpu)
2064 {
2065 	vcpu->arch.pv_time_enabled = false;
2066 }
2067 
2068 static void record_steal_time(struct kvm_vcpu *vcpu)
2069 {
2070 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
2071 		return;
2072 
2073 	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
2074 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
2075 		return;
2076 
2077 	vcpu->arch.st.steal.preempted = 0;
2078 
2079 	if (vcpu->arch.st.steal.version & 1)
2080 		vcpu->arch.st.steal.version += 1;  /* first time write, random junk */
2081 
2082 	vcpu->arch.st.steal.version += 1;
2083 
2084 	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
2085 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
2086 
2087 	smp_wmb();
2088 
2089 	vcpu->arch.st.steal.steal += current->sched_info.run_delay -
2090 		vcpu->arch.st.last_steal;
2091 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
2092 
2093 	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
2094 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
2095 
2096 	smp_wmb();
2097 
2098 	vcpu->arch.st.steal.version += 1;
2099 
2100 	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
2101 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
2102 }
2103 
2104 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2105 {
2106 	bool pr = false;
2107 	u32 msr = msr_info->index;
2108 	u64 data = msr_info->data;
2109 
2110 	switch (msr) {
2111 	case MSR_AMD64_NB_CFG:
2112 	case MSR_IA32_UCODE_REV:
2113 	case MSR_IA32_UCODE_WRITE:
2114 	case MSR_VM_HSAVE_PA:
2115 	case MSR_AMD64_PATCH_LOADER:
2116 	case MSR_AMD64_BU_CFG2:
2117 		break;
2118 
2119 	case MSR_EFER:
2120 		return set_efer(vcpu, data);
2121 	case MSR_K7_HWCR:
2122 		data &= ~(u64)0x40;	/* ignore flush filter disable */
2123 		data &= ~(u64)0x100;	/* ignore ignne emulation enable */
2124 		data &= ~(u64)0x8;	/* ignore TLB cache disable */
2125 		data &= ~(u64)0x40000;  /* ignore Mc status write enable */
2126 		if (data != 0) {
2127 			vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
2128 				    data);
2129 			return 1;
2130 		}
2131 		break;
2132 	case MSR_FAM10H_MMIO_CONF_BASE:
2133 		if (data != 0) {
2134 			vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
2135 				    "0x%llx\n", data);
2136 			return 1;
2137 		}
2138 		break;
2139 	case MSR_IA32_DEBUGCTLMSR:
2140 		if (!data) {
2141 			/* We support the non-activated case already */
2142 			break;
2143 		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
2144 			/* Values other than LBR and BTF are vendor-specific,
2145 			   thus reserved and should throw a #GP */
2146 			return 1;
2147 		}
2148 		vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
2149 			    __func__, data);
2150 		break;
2151 	case 0x200 ... 0x2ff:
2152 		return kvm_mtrr_set_msr(vcpu, msr, data);
2153 	case MSR_IA32_APICBASE:
2154 		return kvm_set_apic_base(vcpu, msr_info);
2155 	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
2156 		return kvm_x2apic_msr_write(vcpu, msr, data);
2157 	case MSR_IA32_TSCDEADLINE:
2158 		kvm_set_lapic_tscdeadline_msr(vcpu, data);
2159 		break;
2160 	case MSR_IA32_TSC_ADJUST:
2161 		if (guest_cpuid_has_tsc_adjust(vcpu)) {
2162 			if (!msr_info->host_initiated) {
2163 				s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
2164 				adjust_tsc_offset_guest(vcpu, adj);
2165 			}
2166 			vcpu->arch.ia32_tsc_adjust_msr = data;
2167 		}
2168 		break;
2169 	case MSR_IA32_MISC_ENABLE:
2170 		vcpu->arch.ia32_misc_enable_msr = data;
2171 		break;
2172 	case MSR_IA32_SMBASE:
2173 		if (!msr_info->host_initiated)
2174 			return 1;
2175 		vcpu->arch.smbase = data;
2176 		break;
2177 	case MSR_KVM_WALL_CLOCK_NEW:
2178 	case MSR_KVM_WALL_CLOCK:
2179 		vcpu->kvm->arch.wall_clock = data;
2180 		kvm_write_wall_clock(vcpu->kvm, data);
2181 		break;
2182 	case MSR_KVM_SYSTEM_TIME_NEW:
2183 	case MSR_KVM_SYSTEM_TIME: {
2184 		struct kvm_arch *ka = &vcpu->kvm->arch;
2185 
2186 		kvmclock_reset(vcpu);
2187 
2188 		if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
2189 			bool tmp = (msr == MSR_KVM_SYSTEM_TIME);
2190 
2191 			if (ka->boot_vcpu_runs_old_kvmclock != tmp)
2192 				set_bit(KVM_REQ_MASTERCLOCK_UPDATE,
2193 					&vcpu->requests);
2194 
2195 			ka->boot_vcpu_runs_old_kvmclock = tmp;
2196 		}
2197 
2198 		vcpu->arch.time = data;
2199 		kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
2200 
2201 		/* we verify if the enable bit is set... */
2202 		if (!(data & 1))
2203 			break;
2204 
2205 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
2206 		     &vcpu->arch.pv_time, data & ~1ULL,
2207 		     sizeof(struct pvclock_vcpu_time_info)))
2208 			vcpu->arch.pv_time_enabled = false;
2209 		else
2210 			vcpu->arch.pv_time_enabled = true;
2211 
2212 		break;
2213 	}
2214 	case MSR_KVM_ASYNC_PF_EN:
2215 		if (kvm_pv_enable_async_pf(vcpu, data))
2216 			return 1;
2217 		break;
2218 	case MSR_KVM_STEAL_TIME:
2219 
2220 		if (unlikely(!sched_info_on()))
2221 			return 1;
2222 
2223 		if (data & KVM_STEAL_RESERVED_MASK)
2224 			return 1;
2225 
2226 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
2227 						data & KVM_STEAL_VALID_BITS,
2228 						sizeof(struct kvm_steal_time)))
2229 			return 1;
2230 
2231 		vcpu->arch.st.msr_val = data;
2232 
2233 		if (!(data & KVM_MSR_ENABLED))
2234 			break;
2235 
2236 		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
2237 
2238 		break;
2239 	case MSR_KVM_PV_EOI_EN:
2240 		if (kvm_lapic_enable_pv_eoi(vcpu, data))
2241 			return 1;
2242 		break;
2243 
2244 	case MSR_IA32_MCG_CTL:
2245 	case MSR_IA32_MCG_STATUS:
2246 	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
2247 		return set_msr_mce(vcpu, msr, data);
2248 
2249 	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
2250 	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
2251 		pr = true; /* fall through */
2252 	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
2253 	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
2254 		if (kvm_pmu_is_valid_msr(vcpu, msr))
2255 			return kvm_pmu_set_msr(vcpu, msr_info);
2256 
2257 		if (pr || data != 0)
2258 			vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
2259 				    "0x%x data 0x%llx\n", msr, data);
2260 		break;
2261 	case MSR_K7_CLK_CTL:
2262 		/*
2263 		 * Ignore all writes to this no longer documented MSR.
2264 		 * Writes are only relevant for old K7 processors,
2265 		 * all pre-dating SVM, but a recommended workaround from
2266 		 * AMD for these chips. It is possible to specify the
2267 		 * affected processor models on the command line, hence
2268 		 * the need to ignore the workaround.
2269 		 */
2270 		break;
2271 	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
2272 	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
2273 	case HV_X64_MSR_CRASH_CTL:
2274 	case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
2275 		return kvm_hv_set_msr_common(vcpu, msr, data,
2276 					     msr_info->host_initiated);
2277 	case MSR_IA32_BBL_CR_CTL3:
2278 		/* Drop writes to this legacy MSR -- see rdmsr
2279 		 * counterpart for further detail.
2280 		 */
2281 		vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data);
2282 		break;
2283 	case MSR_AMD64_OSVW_ID_LENGTH:
2284 		if (!guest_cpuid_has_osvw(vcpu))
2285 			return 1;
2286 		vcpu->arch.osvw.length = data;
2287 		break;
2288 	case MSR_AMD64_OSVW_STATUS:
2289 		if (!guest_cpuid_has_osvw(vcpu))
2290 			return 1;
2291 		vcpu->arch.osvw.status = data;
2292 		break;
2293 	default:
2294 		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
2295 			return xen_hvm_config(vcpu, data);
2296 		if (kvm_pmu_is_valid_msr(vcpu, msr))
2297 			return kvm_pmu_set_msr(vcpu, msr_info);
2298 		if (!ignore_msrs) {
2299 			vcpu_debug_ratelimited(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
2300 				    msr, data);
2301 			return 1;
2302 		} else {
2303 			vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
2304 				    msr, data);
2305 			break;
2306 		}
2307 	}
2308 	return 0;
2309 }
2310 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
2311 
2312 
2313 /*
2314  * Reads an msr value (of 'msr_index') into 'pdata'.
2315  * Returns 0 on success, non-0 otherwise.
2316  * Assumes vcpu_load() was already called.
2317  */
2318 int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
2319 {
2320 	return kvm_x86_ops->get_msr(vcpu, msr);
2321 }
2322 EXPORT_SYMBOL_GPL(kvm_get_msr);
2323 
2324 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
2325 {
2326 	u64 data;
2327 	u64 mcg_cap = vcpu->arch.mcg_cap;
2328 	unsigned bank_num = mcg_cap & 0xff;
2329 
2330 	switch (msr) {
2331 	case MSR_IA32_P5_MC_ADDR:
2332 	case MSR_IA32_P5_MC_TYPE:
2333 		data = 0;
2334 		break;
2335 	case MSR_IA32_MCG_CAP:
2336 		data = vcpu->arch.mcg_cap;
2337 		break;
2338 	case MSR_IA32_MCG_CTL:
2339 		if (!(mcg_cap & MCG_CTL_P))
2340 			return 1;
2341 		data = vcpu->arch.mcg_ctl;
2342 		break;
2343 	case MSR_IA32_MCG_STATUS:
2344 		data = vcpu->arch.mcg_status;
2345 		break;
2346 	default:
2347 		if (msr >= MSR_IA32_MC0_CTL &&
2348 		    msr < MSR_IA32_MCx_CTL(bank_num)) {
2349 			u32 offset = msr - MSR_IA32_MC0_CTL;
2350 			data = vcpu->arch.mce_banks[offset];
2351 			break;
2352 		}
2353 		return 1;
2354 	}
2355 	*pdata = data;
2356 	return 0;
2357 }
2358 
2359 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2360 {
2361 	switch (msr_info->index) {
2362 	case MSR_IA32_PLATFORM_ID:
2363 	case MSR_IA32_EBL_CR_POWERON:
2364 	case MSR_IA32_DEBUGCTLMSR:
2365 	case MSR_IA32_LASTBRANCHFROMIP:
2366 	case MSR_IA32_LASTBRANCHTOIP:
2367 	case MSR_IA32_LASTINTFROMIP:
2368 	case MSR_IA32_LASTINTTOIP:
2369 	case MSR_K8_SYSCFG:
2370 	case MSR_K8_TSEG_ADDR:
2371 	case MSR_K8_TSEG_MASK:
2372 	case MSR_K7_HWCR:
2373 	case MSR_VM_HSAVE_PA:
2374 	case MSR_K8_INT_PENDING_MSG:
2375 	case MSR_AMD64_NB_CFG:
2376 	case MSR_FAM10H_MMIO_CONF_BASE:
2377 	case MSR_AMD64_BU_CFG2:
2378 	case MSR_IA32_PERF_CTL:
2379 		msr_info->data = 0;
2380 		break;
2381 	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
2382 	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
2383 	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
2384 	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
2385 		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
2386 			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
2387 		msr_info->data = 0;
2388 		break;
2389 	case MSR_IA32_UCODE_REV:
2390 		msr_info->data = 0x100000000ULL;
2391 		break;
2392 	case MSR_MTRRcap:
2393 	case 0x200 ... 0x2ff:
2394 		return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
2395 	case 0xcd: /* fsb frequency */
2396 		msr_info->data = 3;
2397 		break;
2398 		/*
2399 		 * MSR_EBC_FREQUENCY_ID
2400 		 * Conservative value valid for even the basic CPU models.
2401 		 * Models 0,1: 000 in bits 23:21 indicating a bus speed of
2402 		 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
2403 		 * and 266MHz for model 3, or 4. Set Core Clock
2404 		 * Frequency to System Bus Frequency Ratio to 1 (bits
2405 		 * 31:24) even though these are only valid for CPU
2406 		 * models > 2, however guests may end up dividing or
2407 		 * multiplying by zero otherwise.
2408 		 */
2409 	case MSR_EBC_FREQUENCY_ID:
2410 		msr_info->data = 1 << 24;
2411 		break;
2412 	case MSR_IA32_APICBASE:
2413 		msr_info->data = kvm_get_apic_base(vcpu);
2414 		break;
2415 	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
2416 		return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
2417 		break;
2418 	case MSR_IA32_TSCDEADLINE:
2419 		msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
2420 		break;
2421 	case MSR_IA32_TSC_ADJUST:
2422 		msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
2423 		break;
2424 	case MSR_IA32_MISC_ENABLE:
2425 		msr_info->data = vcpu->arch.ia32_misc_enable_msr;
2426 		break;
2427 	case MSR_IA32_SMBASE:
2428 		if (!msr_info->host_initiated)
2429 			return 1;
2430 		msr_info->data = vcpu->arch.smbase;
2431 		break;
2432 	case MSR_IA32_PERF_STATUS:
2433 		/* TSC increment by tick */
2434 		msr_info->data = 1000ULL;
2435 		/* CPU multiplier */
2436 		msr_info->data |= (((uint64_t)4ULL) << 40);
2437 		break;
2438 	case MSR_EFER:
2439 		msr_info->data = vcpu->arch.efer;
2440 		break;
2441 	case MSR_KVM_WALL_CLOCK:
2442 	case MSR_KVM_WALL_CLOCK_NEW:
2443 		msr_info->data = vcpu->kvm->arch.wall_clock;
2444 		break;
2445 	case MSR_KVM_SYSTEM_TIME:
2446 	case MSR_KVM_SYSTEM_TIME_NEW:
2447 		msr_info->data = vcpu->arch.time;
2448 		break;
2449 	case MSR_KVM_ASYNC_PF_EN:
2450 		msr_info->data = vcpu->arch.apf.msr_val;
2451 		break;
2452 	case MSR_KVM_STEAL_TIME:
2453 		msr_info->data = vcpu->arch.st.msr_val;
2454 		break;
2455 	case MSR_KVM_PV_EOI_EN:
2456 		msr_info->data = vcpu->arch.pv_eoi.msr_val;
2457 		break;
2458 	case MSR_IA32_P5_MC_ADDR:
2459 	case MSR_IA32_P5_MC_TYPE:
2460 	case MSR_IA32_MCG_CAP:
2461 	case MSR_IA32_MCG_CTL:
2462 	case MSR_IA32_MCG_STATUS:
2463 	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
2464 		return get_msr_mce(vcpu, msr_info->index, &msr_info->data);
2465 	case MSR_K7_CLK_CTL:
2466 		/*
2467 		 * Provide expected ramp-up count for K7. All other
2468 		 * are set to zero, indicating minimum divisors for
2469 		 * every field.
2470 		 *
2471 		 * This prevents guest kernels on AMD host with CPU
2472 		 * type 6, model 8 and higher from exploding due to
2473 		 * the rdmsr failing.
2474 		 */
2475 		msr_info->data = 0x20000000;
2476 		break;
2477 	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
2478 	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
2479 	case HV_X64_MSR_CRASH_CTL:
2480 	case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
2481 		return kvm_hv_get_msr_common(vcpu,
2482 					     msr_info->index, &msr_info->data);
2483 		break;
2484 	case MSR_IA32_BBL_CR_CTL3:
2485 		/* This legacy MSR exists but isn't fully documented in current
2486 		 * silicon.  It is however accessed by winxp in very narrow
2487 		 * scenarios where it sets bit #19, itself documented as
2488 		 * a "reserved" bit.  Best effort attempt to source coherent
2489 		 * read data here should the balance of the register be
2490 		 * interpreted by the guest:
2491 		 *
2492 		 * L2 cache control register 3: 64GB range, 256KB size,
2493 		 * enabled, latency 0x1, configured
2494 		 */
2495 		msr_info->data = 0xbe702111;
2496 		break;
2497 	case MSR_AMD64_OSVW_ID_LENGTH:
2498 		if (!guest_cpuid_has_osvw(vcpu))
2499 			return 1;
2500 		msr_info->data = vcpu->arch.osvw.length;
2501 		break;
2502 	case MSR_AMD64_OSVW_STATUS:
2503 		if (!guest_cpuid_has_osvw(vcpu))
2504 			return 1;
2505 		msr_info->data = vcpu->arch.osvw.status;
2506 		break;
2507 	default:
2508 		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
2509 			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
2510 		if (!ignore_msrs) {
2511 			vcpu_debug_ratelimited(vcpu, "unhandled rdmsr: 0x%x\n",
2512 					       msr_info->index);
2513 			return 1;
2514 		} else {
2515 			vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr_info->index);
2516 			msr_info->data = 0;
2517 		}
2518 		break;
2519 	}
2520 	return 0;
2521 }
2522 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
2523 
2524 /*
2525  * Read or write a bunch of msrs. All parameters are kernel addresses.
2526  *
2527  * @return number of msrs set successfully.
2528  */
2529 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
2530 		    struct kvm_msr_entry *entries,
2531 		    int (*do_msr)(struct kvm_vcpu *vcpu,
2532 				  unsigned index, u64 *data))
2533 {
2534 	int i, idx;
2535 
2536 	idx = srcu_read_lock(&vcpu->kvm->srcu);
2537 	for (i = 0; i < msrs->nmsrs; ++i)
2538 		if (do_msr(vcpu, entries[i].index, &entries[i].data))
2539 			break;
2540 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
2541 
2542 	return i;
2543 }
2544 
2545 /*
2546  * Read or write a bunch of msrs. Parameters are user addresses.
2547  *
2548  * @return number of msrs set successfully.
2549  */
2550 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
2551 		  int (*do_msr)(struct kvm_vcpu *vcpu,
2552 				unsigned index, u64 *data),
2553 		  int writeback)
2554 {
2555 	struct kvm_msrs msrs;
2556 	struct kvm_msr_entry *entries;
2557 	int r, n;
2558 	unsigned size;
2559 
2560 	r = -EFAULT;
2561 	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
2562 		goto out;
2563 
2564 	r = -E2BIG;
2565 	if (msrs.nmsrs >= MAX_IO_MSRS)
2566 		goto out;
2567 
2568 	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
2569 	entries = memdup_user(user_msrs->entries, size);
2570 	if (IS_ERR(entries)) {
2571 		r = PTR_ERR(entries);
2572 		goto out;
2573 	}
2574 
2575 	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
2576 	if (r < 0)
2577 		goto out_free;
2578 
2579 	r = -EFAULT;
2580 	if (writeback && copy_to_user(user_msrs->entries, entries, size))
2581 		goto out_free;
2582 
2583 	r = n;
2584 
2585 out_free:
2586 	kfree(entries);
2587 out:
2588 	return r;
2589 }
2590 
2591 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
2592 {
2593 	int r;
2594 
2595 	switch (ext) {
2596 	case KVM_CAP_IRQCHIP:
2597 	case KVM_CAP_HLT:
2598 	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
2599 	case KVM_CAP_SET_TSS_ADDR:
2600 	case KVM_CAP_EXT_CPUID:
2601 	case KVM_CAP_EXT_EMUL_CPUID:
2602 	case KVM_CAP_CLOCKSOURCE:
2603 	case KVM_CAP_PIT:
2604 	case KVM_CAP_NOP_IO_DELAY:
2605 	case KVM_CAP_MP_STATE:
2606 	case KVM_CAP_SYNC_MMU:
2607 	case KVM_CAP_USER_NMI:
2608 	case KVM_CAP_REINJECT_CONTROL:
2609 	case KVM_CAP_IRQ_INJECT_STATUS:
2610 	case KVM_CAP_IOEVENTFD:
2611 	case KVM_CAP_IOEVENTFD_NO_LENGTH:
2612 	case KVM_CAP_PIT2:
2613 	case KVM_CAP_PIT_STATE2:
2614 	case KVM_CAP_SET_IDENTITY_MAP_ADDR:
2615 	case KVM_CAP_XEN_HVM:
2616 	case KVM_CAP_VCPU_EVENTS:
2617 	case KVM_CAP_HYPERV:
2618 	case KVM_CAP_HYPERV_VAPIC:
2619 	case KVM_CAP_HYPERV_SPIN:
2620 	case KVM_CAP_HYPERV_SYNIC:
2621 	case KVM_CAP_PCI_SEGMENT:
2622 	case KVM_CAP_DEBUGREGS:
2623 	case KVM_CAP_X86_ROBUST_SINGLESTEP:
2624 	case KVM_CAP_XSAVE:
2625 	case KVM_CAP_ASYNC_PF:
2626 	case KVM_CAP_GET_TSC_KHZ:
2627 	case KVM_CAP_KVMCLOCK_CTRL:
2628 	case KVM_CAP_READONLY_MEM:
2629 	case KVM_CAP_HYPERV_TIME:
2630 	case KVM_CAP_IOAPIC_POLARITY_IGNORED:
2631 	case KVM_CAP_TSC_DEADLINE_TIMER:
2632 	case KVM_CAP_ENABLE_CAP_VM:
2633 	case KVM_CAP_DISABLE_QUIRKS:
2634 	case KVM_CAP_SET_BOOT_CPU_ID:
2635  	case KVM_CAP_SPLIT_IRQCHIP:
2636 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
2637 	case KVM_CAP_ASSIGN_DEV_IRQ:
2638 	case KVM_CAP_PCI_2_3:
2639 #endif
2640 		r = 1;
2641 		break;
2642 	case KVM_CAP_ADJUST_CLOCK:
2643 		r = KVM_CLOCK_TSC_STABLE;
2644 		break;
2645 	case KVM_CAP_X86_SMM:
2646 		/* SMBASE is usually relocated above 1M on modern chipsets,
2647 		 * and SMM handlers might indeed rely on 4G segment limits,
2648 		 * so do not report SMM to be available if real mode is
2649 		 * emulated via vm86 mode.  Still, do not go to great lengths
2650 		 * to avoid userspace's usage of the feature, because it is a
2651 		 * fringe case that is not enabled except via specific settings
2652 		 * of the module parameters.
2653 		 */
2654 		r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
2655 		break;
2656 	case KVM_CAP_COALESCED_MMIO:
2657 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
2658 		break;
2659 	case KVM_CAP_VAPIC:
2660 		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
2661 		break;
2662 	case KVM_CAP_NR_VCPUS:
2663 		r = KVM_SOFT_MAX_VCPUS;
2664 		break;
2665 	case KVM_CAP_MAX_VCPUS:
2666 		r = KVM_MAX_VCPUS;
2667 		break;
2668 	case KVM_CAP_NR_MEMSLOTS:
2669 		r = KVM_USER_MEM_SLOTS;
2670 		break;
2671 	case KVM_CAP_PV_MMU:	/* obsolete */
2672 		r = 0;
2673 		break;
2674 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
2675 	case KVM_CAP_IOMMU:
2676 		r = iommu_present(&pci_bus_type);
2677 		break;
2678 #endif
2679 	case KVM_CAP_MCE:
2680 		r = KVM_MAX_MCE_BANKS;
2681 		break;
2682 	case KVM_CAP_XCRS:
2683 		r = boot_cpu_has(X86_FEATURE_XSAVE);
2684 		break;
2685 	case KVM_CAP_TSC_CONTROL:
2686 		r = kvm_has_tsc_control;
2687 		break;
2688 	case KVM_CAP_X2APIC_API:
2689 		r = KVM_X2APIC_API_VALID_FLAGS;
2690 		break;
2691 	default:
2692 		r = 0;
2693 		break;
2694 	}
2695 	return r;
2696 
2697 }
2698 
2699 long kvm_arch_dev_ioctl(struct file *filp,
2700 			unsigned int ioctl, unsigned long arg)
2701 {
2702 	void __user *argp = (void __user *)arg;
2703 	long r;
2704 
2705 	switch (ioctl) {
2706 	case KVM_GET_MSR_INDEX_LIST: {
2707 		struct kvm_msr_list __user *user_msr_list = argp;
2708 		struct kvm_msr_list msr_list;
2709 		unsigned n;
2710 
2711 		r = -EFAULT;
2712 		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
2713 			goto out;
2714 		n = msr_list.nmsrs;
2715 		msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs;
2716 		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
2717 			goto out;
2718 		r = -E2BIG;
2719 		if (n < msr_list.nmsrs)
2720 			goto out;
2721 		r = -EFAULT;
2722 		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
2723 				 num_msrs_to_save * sizeof(u32)))
2724 			goto out;
2725 		if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
2726 				 &emulated_msrs,
2727 				 num_emulated_msrs * sizeof(u32)))
2728 			goto out;
2729 		r = 0;
2730 		break;
2731 	}
2732 	case KVM_GET_SUPPORTED_CPUID:
2733 	case KVM_GET_EMULATED_CPUID: {
2734 		struct kvm_cpuid2 __user *cpuid_arg = argp;
2735 		struct kvm_cpuid2 cpuid;
2736 
2737 		r = -EFAULT;
2738 		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2739 			goto out;
2740 
2741 		r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
2742 					    ioctl);
2743 		if (r)
2744 			goto out;
2745 
2746 		r = -EFAULT;
2747 		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2748 			goto out;
2749 		r = 0;
2750 		break;
2751 	}
2752 	case KVM_X86_GET_MCE_CAP_SUPPORTED: {
2753 		r = -EFAULT;
2754 		if (copy_to_user(argp, &kvm_mce_cap_supported,
2755 				 sizeof(kvm_mce_cap_supported)))
2756 			goto out;
2757 		r = 0;
2758 		break;
2759 	}
2760 	default:
2761 		r = -EINVAL;
2762 	}
2763 out:
2764 	return r;
2765 }
2766 
2767 static void wbinvd_ipi(void *garbage)
2768 {
2769 	wbinvd();
2770 }
2771 
2772 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
2773 {
2774 	return kvm_arch_has_noncoherent_dma(vcpu->kvm);
2775 }
2776 
2777 static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
2778 {
2779 	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
2780 }
2781 
2782 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2783 {
2784 	/* Address WBINVD may be executed by guest */
2785 	if (need_emulate_wbinvd(vcpu)) {
2786 		if (kvm_x86_ops->has_wbinvd_exit())
2787 			cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
2788 		else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
2789 			smp_call_function_single(vcpu->cpu,
2790 					wbinvd_ipi, NULL, 1);
2791 	}
2792 
2793 	kvm_x86_ops->vcpu_load(vcpu, cpu);
2794 
2795 	/* Apply any externally detected TSC adjustments (due to suspend) */
2796 	if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
2797 		adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
2798 		vcpu->arch.tsc_offset_adjustment = 0;
2799 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2800 	}
2801 
2802 	if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
2803 		s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
2804 				rdtsc() - vcpu->arch.last_host_tsc;
2805 		if (tsc_delta < 0)
2806 			mark_tsc_unstable("KVM discovered backwards TSC");
2807 
2808 		if (check_tsc_unstable()) {
2809 			u64 offset = kvm_compute_tsc_offset(vcpu,
2810 						vcpu->arch.last_guest_tsc);
2811 			kvm_vcpu_write_tsc_offset(vcpu, offset);
2812 			vcpu->arch.tsc_catchup = 1;
2813 		}
2814 		if (kvm_lapic_hv_timer_in_use(vcpu) &&
2815 				kvm_x86_ops->set_hv_timer(vcpu,
2816 					kvm_get_lapic_target_expiration_tsc(vcpu)))
2817 			kvm_lapic_switch_to_sw_timer(vcpu);
2818 		/*
2819 		 * On a host with synchronized TSC, there is no need to update
2820 		 * kvmclock on vcpu->cpu migration
2821 		 */
2822 		if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
2823 			kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
2824 		if (vcpu->cpu != cpu)
2825 			kvm_migrate_timers(vcpu);
2826 		vcpu->cpu = cpu;
2827 	}
2828 
2829 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
2830 }
2831 
2832 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
2833 {
2834 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
2835 		return;
2836 
2837 	vcpu->arch.st.steal.preempted = 1;
2838 
2839 	kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
2840 			&vcpu->arch.st.steal.preempted,
2841 			offsetof(struct kvm_steal_time, preempted),
2842 			sizeof(vcpu->arch.st.steal.preempted));
2843 }
2844 
2845 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2846 {
2847 	int idx;
2848 	/*
2849 	 * Disable page faults because we're in atomic context here.
2850 	 * kvm_write_guest_offset_cached() would call might_fault()
2851 	 * that relies on pagefault_disable() to tell if there's a
2852 	 * bug. NOTE: the write to guest memory may not go through if
2853 	 * during postcopy live migration or if there's heavy guest
2854 	 * paging.
2855 	 */
2856 	pagefault_disable();
2857 	/*
2858 	 * kvm_memslots() will be called by
2859 	 * kvm_write_guest_offset_cached() so take the srcu lock.
2860 	 */
2861 	idx = srcu_read_lock(&vcpu->kvm->srcu);
2862 	kvm_steal_time_set_preempted(vcpu);
2863 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
2864 	pagefault_enable();
2865 	kvm_x86_ops->vcpu_put(vcpu);
2866 	kvm_put_guest_fpu(vcpu);
2867 	vcpu->arch.last_host_tsc = rdtsc();
2868 }
2869 
2870 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
2871 				    struct kvm_lapic_state *s)
2872 {
2873 	if (vcpu->arch.apicv_active)
2874 		kvm_x86_ops->sync_pir_to_irr(vcpu);
2875 
2876 	return kvm_apic_get_state(vcpu, s);
2877 }
2878 
2879 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
2880 				    struct kvm_lapic_state *s)
2881 {
2882 	int r;
2883 
2884 	r = kvm_apic_set_state(vcpu, s);
2885 	if (r)
2886 		return r;
2887 	update_cr8_intercept(vcpu);
2888 
2889 	return 0;
2890 }
2891 
2892 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
2893 {
2894 	return (!lapic_in_kernel(vcpu) ||
2895 		kvm_apic_accept_pic_intr(vcpu));
2896 }
2897 
2898 /*
2899  * if userspace requested an interrupt window, check that the
2900  * interrupt window is open.
2901  *
2902  * No need to exit to userspace if we already have an interrupt queued.
2903  */
2904 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
2905 {
2906 	return kvm_arch_interrupt_allowed(vcpu) &&
2907 		!kvm_cpu_has_interrupt(vcpu) &&
2908 		!kvm_event_needs_reinjection(vcpu) &&
2909 		kvm_cpu_accept_dm_intr(vcpu);
2910 }
2911 
2912 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2913 				    struct kvm_interrupt *irq)
2914 {
2915 	if (irq->irq >= KVM_NR_INTERRUPTS)
2916 		return -EINVAL;
2917 
2918 	if (!irqchip_in_kernel(vcpu->kvm)) {
2919 		kvm_queue_interrupt(vcpu, irq->irq, false);
2920 		kvm_make_request(KVM_REQ_EVENT, vcpu);
2921 		return 0;
2922 	}
2923 
2924 	/*
2925 	 * With in-kernel LAPIC, we only use this to inject EXTINT, so
2926 	 * fail for in-kernel 8259.
2927 	 */
2928 	if (pic_in_kernel(vcpu->kvm))
2929 		return -ENXIO;
2930 
2931 	if (vcpu->arch.pending_external_vector != -1)
2932 		return -EEXIST;
2933 
2934 	vcpu->arch.pending_external_vector = irq->irq;
2935 	kvm_make_request(KVM_REQ_EVENT, vcpu);
2936 	return 0;
2937 }
2938 
2939 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
2940 {
2941 	kvm_inject_nmi(vcpu);
2942 
2943 	return 0;
2944 }
2945 
2946 static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
2947 {
2948 	kvm_make_request(KVM_REQ_SMI, vcpu);
2949 
2950 	return 0;
2951 }
2952 
2953 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
2954 					   struct kvm_tpr_access_ctl *tac)
2955 {
2956 	if (tac->flags)
2957 		return -EINVAL;
2958 	vcpu->arch.tpr_access_reporting = !!tac->enabled;
2959 	return 0;
2960 }
2961 
2962 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
2963 					u64 mcg_cap)
2964 {
2965 	int r;
2966 	unsigned bank_num = mcg_cap & 0xff, bank;
2967 
2968 	r = -EINVAL;
2969 	if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
2970 		goto out;
2971 	if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
2972 		goto out;
2973 	r = 0;
2974 	vcpu->arch.mcg_cap = mcg_cap;
2975 	/* Init IA32_MCG_CTL to all 1s */
2976 	if (mcg_cap & MCG_CTL_P)
2977 		vcpu->arch.mcg_ctl = ~(u64)0;
2978 	/* Init IA32_MCi_CTL to all 1s */
2979 	for (bank = 0; bank < bank_num; bank++)
2980 		vcpu->arch.mce_banks[bank*4] = ~(u64)0;
2981 
2982 	if (kvm_x86_ops->setup_mce)
2983 		kvm_x86_ops->setup_mce(vcpu);
2984 out:
2985 	return r;
2986 }
2987 
2988 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
2989 				      struct kvm_x86_mce *mce)
2990 {
2991 	u64 mcg_cap = vcpu->arch.mcg_cap;
2992 	unsigned bank_num = mcg_cap & 0xff;
2993 	u64 *banks = vcpu->arch.mce_banks;
2994 
2995 	if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
2996 		return -EINVAL;
2997 	/*
2998 	 * if IA32_MCG_CTL is not all 1s, the uncorrected error
2999 	 * reporting is disabled
3000 	 */
3001 	if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
3002 	    vcpu->arch.mcg_ctl != ~(u64)0)
3003 		return 0;
3004 	banks += 4 * mce->bank;
3005 	/*
3006 	 * if IA32_MCi_CTL is not all 1s, the uncorrected error
3007 	 * reporting is disabled for the bank
3008 	 */
3009 	if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
3010 		return 0;
3011 	if (mce->status & MCI_STATUS_UC) {
3012 		if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
3013 		    !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
3014 			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3015 			return 0;
3016 		}
3017 		if (banks[1] & MCI_STATUS_VAL)
3018 			mce->status |= MCI_STATUS_OVER;
3019 		banks[2] = mce->addr;
3020 		banks[3] = mce->misc;
3021 		vcpu->arch.mcg_status = mce->mcg_status;
3022 		banks[1] = mce->status;
3023 		kvm_queue_exception(vcpu, MC_VECTOR);
3024 	} else if (!(banks[1] & MCI_STATUS_VAL)
3025 		   || !(banks[1] & MCI_STATUS_UC)) {
3026 		if (banks[1] & MCI_STATUS_VAL)
3027 			mce->status |= MCI_STATUS_OVER;
3028 		banks[2] = mce->addr;
3029 		banks[3] = mce->misc;
3030 		banks[1] = mce->status;
3031 	} else
3032 		banks[1] |= MCI_STATUS_OVER;
3033 	return 0;
3034 }
3035 
3036 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
3037 					       struct kvm_vcpu_events *events)
3038 {
3039 	process_nmi(vcpu);
3040 	events->exception.injected =
3041 		vcpu->arch.exception.pending &&
3042 		!kvm_exception_is_soft(vcpu->arch.exception.nr);
3043 	events->exception.nr = vcpu->arch.exception.nr;
3044 	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
3045 	events->exception.pad = 0;
3046 	events->exception.error_code = vcpu->arch.exception.error_code;
3047 
3048 	events->interrupt.injected =
3049 		vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
3050 	events->interrupt.nr = vcpu->arch.interrupt.nr;
3051 	events->interrupt.soft = 0;
3052 	events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
3053 
3054 	events->nmi.injected = vcpu->arch.nmi_injected;
3055 	events->nmi.pending = vcpu->arch.nmi_pending != 0;
3056 	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
3057 	events->nmi.pad = 0;
3058 
3059 	events->sipi_vector = 0; /* never valid when reporting to user space */
3060 
3061 	events->smi.smm = is_smm(vcpu);
3062 	events->smi.pending = vcpu->arch.smi_pending;
3063 	events->smi.smm_inside_nmi =
3064 		!!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
3065 	events->smi.latched_init = kvm_lapic_latched_init(vcpu);
3066 
3067 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
3068 			 | KVM_VCPUEVENT_VALID_SHADOW
3069 			 | KVM_VCPUEVENT_VALID_SMM);
3070 	memset(&events->reserved, 0, sizeof(events->reserved));
3071 }
3072 
3073 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
3074 					      struct kvm_vcpu_events *events)
3075 {
3076 	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
3077 			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR
3078 			      | KVM_VCPUEVENT_VALID_SHADOW
3079 			      | KVM_VCPUEVENT_VALID_SMM))
3080 		return -EINVAL;
3081 
3082 	if (events->exception.injected &&
3083 	    (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
3084 		return -EINVAL;
3085 
3086 	process_nmi(vcpu);
3087 	vcpu->arch.exception.pending = events->exception.injected;
3088 	vcpu->arch.exception.nr = events->exception.nr;
3089 	vcpu->arch.exception.has_error_code = events->exception.has_error_code;
3090 	vcpu->arch.exception.error_code = events->exception.error_code;
3091 
3092 	vcpu->arch.interrupt.pending = events->interrupt.injected;
3093 	vcpu->arch.interrupt.nr = events->interrupt.nr;
3094 	vcpu->arch.interrupt.soft = events->interrupt.soft;
3095 	if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
3096 		kvm_x86_ops->set_interrupt_shadow(vcpu,
3097 						  events->interrupt.shadow);
3098 
3099 	vcpu->arch.nmi_injected = events->nmi.injected;
3100 	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
3101 		vcpu->arch.nmi_pending = events->nmi.pending;
3102 	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
3103 
3104 	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
3105 	    lapic_in_kernel(vcpu))
3106 		vcpu->arch.apic->sipi_vector = events->sipi_vector;
3107 
3108 	if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
3109 		if (events->smi.smm)
3110 			vcpu->arch.hflags |= HF_SMM_MASK;
3111 		else
3112 			vcpu->arch.hflags &= ~HF_SMM_MASK;
3113 		vcpu->arch.smi_pending = events->smi.pending;
3114 		if (events->smi.smm_inside_nmi)
3115 			vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
3116 		else
3117 			vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
3118 		if (lapic_in_kernel(vcpu)) {
3119 			if (events->smi.latched_init)
3120 				set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
3121 			else
3122 				clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
3123 		}
3124 	}
3125 
3126 	kvm_make_request(KVM_REQ_EVENT, vcpu);
3127 
3128 	return 0;
3129 }
3130 
3131 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
3132 					     struct kvm_debugregs *dbgregs)
3133 {
3134 	unsigned long val;
3135 
3136 	memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
3137 	kvm_get_dr(vcpu, 6, &val);
3138 	dbgregs->dr6 = val;
3139 	dbgregs->dr7 = vcpu->arch.dr7;
3140 	dbgregs->flags = 0;
3141 	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
3142 }
3143 
3144 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
3145 					    struct kvm_debugregs *dbgregs)
3146 {
3147 	if (dbgregs->flags)
3148 		return -EINVAL;
3149 
3150 	if (dbgregs->dr6 & ~0xffffffffull)
3151 		return -EINVAL;
3152 	if (dbgregs->dr7 & ~0xffffffffull)
3153 		return -EINVAL;
3154 
3155 	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
3156 	kvm_update_dr0123(vcpu);
3157 	vcpu->arch.dr6 = dbgregs->dr6;
3158 	kvm_update_dr6(vcpu);
3159 	vcpu->arch.dr7 = dbgregs->dr7;
3160 	kvm_update_dr7(vcpu);
3161 
3162 	return 0;
3163 }
3164 
3165 #define XSTATE_COMPACTION_ENABLED (1ULL << 63)
3166 
3167 static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
3168 {
3169 	struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
3170 	u64 xstate_bv = xsave->header.xfeatures;
3171 	u64 valid;
3172 
3173 	/*
3174 	 * Copy legacy XSAVE area, to avoid complications with CPUID
3175 	 * leaves 0 and 1 in the loop below.
3176 	 */
3177 	memcpy(dest, xsave, XSAVE_HDR_OFFSET);
3178 
3179 	/* Set XSTATE_BV */
3180 	*(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
3181 
3182 	/*
3183 	 * Copy each region from the possibly compacted offset to the
3184 	 * non-compacted offset.
3185 	 */
3186 	valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
3187 	while (valid) {
3188 		u64 feature = valid & -valid;
3189 		int index = fls64(feature) - 1;
3190 		void *src = get_xsave_addr(xsave, feature);
3191 
3192 		if (src) {
3193 			u32 size, offset, ecx, edx;
3194 			cpuid_count(XSTATE_CPUID, index,
3195 				    &size, &offset, &ecx, &edx);
3196 			memcpy(dest + offset, src, size);
3197 		}
3198 
3199 		valid -= feature;
3200 	}
3201 }
3202 
3203 static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
3204 {
3205 	struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
3206 	u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
3207 	u64 valid;
3208 
3209 	/*
3210 	 * Copy legacy XSAVE area, to avoid complications with CPUID
3211 	 * leaves 0 and 1 in the loop below.
3212 	 */
3213 	memcpy(xsave, src, XSAVE_HDR_OFFSET);
3214 
3215 	/* Set XSTATE_BV and possibly XCOMP_BV.  */
3216 	xsave->header.xfeatures = xstate_bv;
3217 	if (boot_cpu_has(X86_FEATURE_XSAVES))
3218 		xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
3219 
3220 	/*
3221 	 * Copy each region from the non-compacted offset to the
3222 	 * possibly compacted offset.
3223 	 */
3224 	valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
3225 	while (valid) {
3226 		u64 feature = valid & -valid;
3227 		int index = fls64(feature) - 1;
3228 		void *dest = get_xsave_addr(xsave, feature);
3229 
3230 		if (dest) {
3231 			u32 size, offset, ecx, edx;
3232 			cpuid_count(XSTATE_CPUID, index,
3233 				    &size, &offset, &ecx, &edx);
3234 			memcpy(dest, src + offset, size);
3235 		}
3236 
3237 		valid -= feature;
3238 	}
3239 }
3240 
3241 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
3242 					 struct kvm_xsave *guest_xsave)
3243 {
3244 	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
3245 		memset(guest_xsave, 0, sizeof(struct kvm_xsave));
3246 		fill_xsave((u8 *) guest_xsave->region, vcpu);
3247 	} else {
3248 		memcpy(guest_xsave->region,
3249 			&vcpu->arch.guest_fpu.state.fxsave,
3250 			sizeof(struct fxregs_state));
3251 		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
3252 			XFEATURE_MASK_FPSSE;
3253 	}
3254 }
3255 
3256 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
3257 					struct kvm_xsave *guest_xsave)
3258 {
3259 	u64 xstate_bv =
3260 		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
3261 
3262 	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
3263 		/*
3264 		 * Here we allow setting states that are not present in
3265 		 * CPUID leaf 0xD, index 0, EDX:EAX.  This is for compatibility
3266 		 * with old userspace.
3267 		 */
3268 		if (xstate_bv & ~kvm_supported_xcr0())
3269 			return -EINVAL;
3270 		load_xsave(vcpu, (u8 *)guest_xsave->region);
3271 	} else {
3272 		if (xstate_bv & ~XFEATURE_MASK_FPSSE)
3273 			return -EINVAL;
3274 		memcpy(&vcpu->arch.guest_fpu.state.fxsave,
3275 			guest_xsave->region, sizeof(struct fxregs_state));
3276 	}
3277 	return 0;
3278 }
3279 
3280 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
3281 					struct kvm_xcrs *guest_xcrs)
3282 {
3283 	if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
3284 		guest_xcrs->nr_xcrs = 0;
3285 		return;
3286 	}
3287 
3288 	guest_xcrs->nr_xcrs = 1;
3289 	guest_xcrs->flags = 0;
3290 	guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
3291 	guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
3292 }
3293 
3294 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
3295 				       struct kvm_xcrs *guest_xcrs)
3296 {
3297 	int i, r = 0;
3298 
3299 	if (!boot_cpu_has(X86_FEATURE_XSAVE))
3300 		return -EINVAL;
3301 
3302 	if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
3303 		return -EINVAL;
3304 
3305 	for (i = 0; i < guest_xcrs->nr_xcrs; i++)
3306 		/* Only support XCR0 currently */
3307 		if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
3308 			r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
3309 				guest_xcrs->xcrs[i].value);
3310 			break;
3311 		}
3312 	if (r)
3313 		r = -EINVAL;
3314 	return r;
3315 }
3316 
3317 /*
3318  * kvm_set_guest_paused() indicates to the guest kernel that it has been
3319  * stopped by the hypervisor.  This function will be called from the host only.
3320  * EINVAL is returned when the host attempts to set the flag for a guest that
3321  * does not support pv clocks.
3322  */
3323 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
3324 {
3325 	if (!vcpu->arch.pv_time_enabled)
3326 		return -EINVAL;
3327 	vcpu->arch.pvclock_set_guest_stopped_request = true;
3328 	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
3329 	return 0;
3330 }
3331 
3332 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
3333 				     struct kvm_enable_cap *cap)
3334 {
3335 	if (cap->flags)
3336 		return -EINVAL;
3337 
3338 	switch (cap->cap) {
3339 	case KVM_CAP_HYPERV_SYNIC:
3340 		return kvm_hv_activate_synic(vcpu);
3341 	default:
3342 		return -EINVAL;
3343 	}
3344 }
3345 
3346 long kvm_arch_vcpu_ioctl(struct file *filp,
3347 			 unsigned int ioctl, unsigned long arg)
3348 {
3349 	struct kvm_vcpu *vcpu = filp->private_data;
3350 	void __user *argp = (void __user *)arg;
3351 	int r;
3352 	union {
3353 		struct kvm_lapic_state *lapic;
3354 		struct kvm_xsave *xsave;
3355 		struct kvm_xcrs *xcrs;
3356 		void *buffer;
3357 	} u;
3358 
3359 	u.buffer = NULL;
3360 	switch (ioctl) {
3361 	case KVM_GET_LAPIC: {
3362 		r = -EINVAL;
3363 		if (!lapic_in_kernel(vcpu))
3364 			goto out;
3365 		u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
3366 
3367 		r = -ENOMEM;
3368 		if (!u.lapic)
3369 			goto out;
3370 		r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
3371 		if (r)
3372 			goto out;
3373 		r = -EFAULT;
3374 		if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
3375 			goto out;
3376 		r = 0;
3377 		break;
3378 	}
3379 	case KVM_SET_LAPIC: {
3380 		r = -EINVAL;
3381 		if (!lapic_in_kernel(vcpu))
3382 			goto out;
3383 		u.lapic = memdup_user(argp, sizeof(*u.lapic));
3384 		if (IS_ERR(u.lapic))
3385 			return PTR_ERR(u.lapic);
3386 
3387 		r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
3388 		break;
3389 	}
3390 	case KVM_INTERRUPT: {
3391 		struct kvm_interrupt irq;
3392 
3393 		r = -EFAULT;
3394 		if (copy_from_user(&irq, argp, sizeof irq))
3395 			goto out;
3396 		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
3397 		break;
3398 	}
3399 	case KVM_NMI: {
3400 		r = kvm_vcpu_ioctl_nmi(vcpu);
3401 		break;
3402 	}
3403 	case KVM_SMI: {
3404 		r = kvm_vcpu_ioctl_smi(vcpu);
3405 		break;
3406 	}
3407 	case KVM_SET_CPUID: {
3408 		struct kvm_cpuid __user *cpuid_arg = argp;
3409 		struct kvm_cpuid cpuid;
3410 
3411 		r = -EFAULT;
3412 		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3413 			goto out;
3414 		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
3415 		break;
3416 	}
3417 	case KVM_SET_CPUID2: {
3418 		struct kvm_cpuid2 __user *cpuid_arg = argp;
3419 		struct kvm_cpuid2 cpuid;
3420 
3421 		r = -EFAULT;
3422 		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3423 			goto out;
3424 		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
3425 					      cpuid_arg->entries);
3426 		break;
3427 	}
3428 	case KVM_GET_CPUID2: {
3429 		struct kvm_cpuid2 __user *cpuid_arg = argp;
3430 		struct kvm_cpuid2 cpuid;
3431 
3432 		r = -EFAULT;
3433 		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3434 			goto out;
3435 		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
3436 					      cpuid_arg->entries);
3437 		if (r)
3438 			goto out;
3439 		r = -EFAULT;
3440 		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
3441 			goto out;
3442 		r = 0;
3443 		break;
3444 	}
3445 	case KVM_GET_MSRS:
3446 		r = msr_io(vcpu, argp, do_get_msr, 1);
3447 		break;
3448 	case KVM_SET_MSRS:
3449 		r = msr_io(vcpu, argp, do_set_msr, 0);
3450 		break;
3451 	case KVM_TPR_ACCESS_REPORTING: {
3452 		struct kvm_tpr_access_ctl tac;
3453 
3454 		r = -EFAULT;
3455 		if (copy_from_user(&tac, argp, sizeof tac))
3456 			goto out;
3457 		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
3458 		if (r)
3459 			goto out;
3460 		r = -EFAULT;
3461 		if (copy_to_user(argp, &tac, sizeof tac))
3462 			goto out;
3463 		r = 0;
3464 		break;
3465 	};
3466 	case KVM_SET_VAPIC_ADDR: {
3467 		struct kvm_vapic_addr va;
3468 		int idx;
3469 
3470 		r = -EINVAL;
3471 		if (!lapic_in_kernel(vcpu))
3472 			goto out;
3473 		r = -EFAULT;
3474 		if (copy_from_user(&va, argp, sizeof va))
3475 			goto out;
3476 		idx = srcu_read_lock(&vcpu->kvm->srcu);
3477 		r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
3478 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
3479 		break;
3480 	}
3481 	case KVM_X86_SETUP_MCE: {
3482 		u64 mcg_cap;
3483 
3484 		r = -EFAULT;
3485 		if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
3486 			goto out;
3487 		r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
3488 		break;
3489 	}
3490 	case KVM_X86_SET_MCE: {
3491 		struct kvm_x86_mce mce;
3492 
3493 		r = -EFAULT;
3494 		if (copy_from_user(&mce, argp, sizeof mce))
3495 			goto out;
3496 		r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
3497 		break;
3498 	}
3499 	case KVM_GET_VCPU_EVENTS: {
3500 		struct kvm_vcpu_events events;
3501 
3502 		kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
3503 
3504 		r = -EFAULT;
3505 		if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
3506 			break;
3507 		r = 0;
3508 		break;
3509 	}
3510 	case KVM_SET_VCPU_EVENTS: {
3511 		struct kvm_vcpu_events events;
3512 
3513 		r = -EFAULT;
3514 		if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
3515 			break;
3516 
3517 		r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
3518 		break;
3519 	}
3520 	case KVM_GET_DEBUGREGS: {
3521 		struct kvm_debugregs dbgregs;
3522 
3523 		kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
3524 
3525 		r = -EFAULT;
3526 		if (copy_to_user(argp, &dbgregs,
3527 				 sizeof(struct kvm_debugregs)))
3528 			break;
3529 		r = 0;
3530 		break;
3531 	}
3532 	case KVM_SET_DEBUGREGS: {
3533 		struct kvm_debugregs dbgregs;
3534 
3535 		r = -EFAULT;
3536 		if (copy_from_user(&dbgregs, argp,
3537 				   sizeof(struct kvm_debugregs)))
3538 			break;
3539 
3540 		r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
3541 		break;
3542 	}
3543 	case KVM_GET_XSAVE: {
3544 		u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
3545 		r = -ENOMEM;
3546 		if (!u.xsave)
3547 			break;
3548 
3549 		kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
3550 
3551 		r = -EFAULT;
3552 		if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
3553 			break;
3554 		r = 0;
3555 		break;
3556 	}
3557 	case KVM_SET_XSAVE: {
3558 		u.xsave = memdup_user(argp, sizeof(*u.xsave));
3559 		if (IS_ERR(u.xsave))
3560 			return PTR_ERR(u.xsave);
3561 
3562 		r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
3563 		break;
3564 	}
3565 	case KVM_GET_XCRS: {
3566 		u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
3567 		r = -ENOMEM;
3568 		if (!u.xcrs)
3569 			break;
3570 
3571 		kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
3572 
3573 		r = -EFAULT;
3574 		if (copy_to_user(argp, u.xcrs,
3575 				 sizeof(struct kvm_xcrs)))
3576 			break;
3577 		r = 0;
3578 		break;
3579 	}
3580 	case KVM_SET_XCRS: {
3581 		u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
3582 		if (IS_ERR(u.xcrs))
3583 			return PTR_ERR(u.xcrs);
3584 
3585 		r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
3586 		break;
3587 	}
3588 	case KVM_SET_TSC_KHZ: {
3589 		u32 user_tsc_khz;
3590 
3591 		r = -EINVAL;
3592 		user_tsc_khz = (u32)arg;
3593 
3594 		if (user_tsc_khz >= kvm_max_guest_tsc_khz)
3595 			goto out;
3596 
3597 		if (user_tsc_khz == 0)
3598 			user_tsc_khz = tsc_khz;
3599 
3600 		if (!kvm_set_tsc_khz(vcpu, user_tsc_khz))
3601 			r = 0;
3602 
3603 		goto out;
3604 	}
3605 	case KVM_GET_TSC_KHZ: {
3606 		r = vcpu->arch.virtual_tsc_khz;
3607 		goto out;
3608 	}
3609 	case KVM_KVMCLOCK_CTRL: {
3610 		r = kvm_set_guest_paused(vcpu);
3611 		goto out;
3612 	}
3613 	case KVM_ENABLE_CAP: {
3614 		struct kvm_enable_cap cap;
3615 
3616 		r = -EFAULT;
3617 		if (copy_from_user(&cap, argp, sizeof(cap)))
3618 			goto out;
3619 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3620 		break;
3621 	}
3622 	default:
3623 		r = -EINVAL;
3624 	}
3625 out:
3626 	kfree(u.buffer);
3627 	return r;
3628 }
3629 
3630 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3631 {
3632 	return VM_FAULT_SIGBUS;
3633 }
3634 
3635 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
3636 {
3637 	int ret;
3638 
3639 	if (addr > (unsigned int)(-3 * PAGE_SIZE))
3640 		return -EINVAL;
3641 	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
3642 	return ret;
3643 }
3644 
3645 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
3646 					      u64 ident_addr)
3647 {
3648 	kvm->arch.ept_identity_map_addr = ident_addr;
3649 	return 0;
3650 }
3651 
3652 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
3653 					  u32 kvm_nr_mmu_pages)
3654 {
3655 	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
3656 		return -EINVAL;
3657 
3658 	mutex_lock(&kvm->slots_lock);
3659 
3660 	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
3661 	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
3662 
3663 	mutex_unlock(&kvm->slots_lock);
3664 	return 0;
3665 }
3666 
3667 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
3668 {
3669 	return kvm->arch.n_max_mmu_pages;
3670 }
3671 
3672 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
3673 {
3674 	int r;
3675 
3676 	r = 0;
3677 	switch (chip->chip_id) {
3678 	case KVM_IRQCHIP_PIC_MASTER:
3679 		memcpy(&chip->chip.pic,
3680 			&pic_irqchip(kvm)->pics[0],
3681 			sizeof(struct kvm_pic_state));
3682 		break;
3683 	case KVM_IRQCHIP_PIC_SLAVE:
3684 		memcpy(&chip->chip.pic,
3685 			&pic_irqchip(kvm)->pics[1],
3686 			sizeof(struct kvm_pic_state));
3687 		break;
3688 	case KVM_IRQCHIP_IOAPIC:
3689 		r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
3690 		break;
3691 	default:
3692 		r = -EINVAL;
3693 		break;
3694 	}
3695 	return r;
3696 }
3697 
3698 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
3699 {
3700 	int r;
3701 
3702 	r = 0;
3703 	switch (chip->chip_id) {
3704 	case KVM_IRQCHIP_PIC_MASTER:
3705 		spin_lock(&pic_irqchip(kvm)->lock);
3706 		memcpy(&pic_irqchip(kvm)->pics[0],
3707 			&chip->chip.pic,
3708 			sizeof(struct kvm_pic_state));
3709 		spin_unlock(&pic_irqchip(kvm)->lock);
3710 		break;
3711 	case KVM_IRQCHIP_PIC_SLAVE:
3712 		spin_lock(&pic_irqchip(kvm)->lock);
3713 		memcpy(&pic_irqchip(kvm)->pics[1],
3714 			&chip->chip.pic,
3715 			sizeof(struct kvm_pic_state));
3716 		spin_unlock(&pic_irqchip(kvm)->lock);
3717 		break;
3718 	case KVM_IRQCHIP_IOAPIC:
3719 		r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
3720 		break;
3721 	default:
3722 		r = -EINVAL;
3723 		break;
3724 	}
3725 	kvm_pic_update_irq(pic_irqchip(kvm));
3726 	return r;
3727 }
3728 
3729 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3730 {
3731 	struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state;
3732 
3733 	BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels));
3734 
3735 	mutex_lock(&kps->lock);
3736 	memcpy(ps, &kps->channels, sizeof(*ps));
3737 	mutex_unlock(&kps->lock);
3738 	return 0;
3739 }
3740 
3741 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3742 {
3743 	int i;
3744 	struct kvm_pit *pit = kvm->arch.vpit;
3745 
3746 	mutex_lock(&pit->pit_state.lock);
3747 	memcpy(&pit->pit_state.channels, ps, sizeof(*ps));
3748 	for (i = 0; i < 3; i++)
3749 		kvm_pit_load_count(pit, i, ps->channels[i].count, 0);
3750 	mutex_unlock(&pit->pit_state.lock);
3751 	return 0;
3752 }
3753 
3754 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3755 {
3756 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
3757 	memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
3758 		sizeof(ps->channels));
3759 	ps->flags = kvm->arch.vpit->pit_state.flags;
3760 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3761 	memset(&ps->reserved, 0, sizeof(ps->reserved));
3762 	return 0;
3763 }
3764 
3765 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3766 {
3767 	int start = 0;
3768 	int i;
3769 	u32 prev_legacy, cur_legacy;
3770 	struct kvm_pit *pit = kvm->arch.vpit;
3771 
3772 	mutex_lock(&pit->pit_state.lock);
3773 	prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
3774 	cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
3775 	if (!prev_legacy && cur_legacy)
3776 		start = 1;
3777 	memcpy(&pit->pit_state.channels, &ps->channels,
3778 	       sizeof(pit->pit_state.channels));
3779 	pit->pit_state.flags = ps->flags;
3780 	for (i = 0; i < 3; i++)
3781 		kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count,
3782 				   start && i == 0);
3783 	mutex_unlock(&pit->pit_state.lock);
3784 	return 0;
3785 }
3786 
3787 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
3788 				 struct kvm_reinject_control *control)
3789 {
3790 	struct kvm_pit *pit = kvm->arch.vpit;
3791 
3792 	if (!pit)
3793 		return -ENXIO;
3794 
3795 	/* pit->pit_state.lock was overloaded to prevent userspace from getting
3796 	 * an inconsistent state after running multiple KVM_REINJECT_CONTROL
3797 	 * ioctls in parallel.  Use a separate lock if that ioctl isn't rare.
3798 	 */
3799 	mutex_lock(&pit->pit_state.lock);
3800 	kvm_pit_set_reinject(pit, control->pit_reinject);
3801 	mutex_unlock(&pit->pit_state.lock);
3802 
3803 	return 0;
3804 }
3805 
3806 /**
3807  * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
3808  * @kvm: kvm instance
3809  * @log: slot id and address to which we copy the log
3810  *
3811  * Steps 1-4 below provide general overview of dirty page logging. See
3812  * kvm_get_dirty_log_protect() function description for additional details.
3813  *
3814  * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
3815  * always flush the TLB (step 4) even if previous step failed  and the dirty
3816  * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
3817  * does not preclude user space subsequent dirty log read. Flushing TLB ensures
3818  * writes will be marked dirty for next log read.
3819  *
3820  *   1. Take a snapshot of the bit and clear it if needed.
3821  *   2. Write protect the corresponding page.
3822  *   3. Copy the snapshot to the userspace.
3823  *   4. Flush TLB's if needed.
3824  */
3825 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
3826 {
3827 	bool is_dirty = false;
3828 	int r;
3829 
3830 	mutex_lock(&kvm->slots_lock);
3831 
3832 	/*
3833 	 * Flush potentially hardware-cached dirty pages to dirty_bitmap.
3834 	 */
3835 	if (kvm_x86_ops->flush_log_dirty)
3836 		kvm_x86_ops->flush_log_dirty(kvm);
3837 
3838 	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
3839 
3840 	/*
3841 	 * All the TLBs can be flushed out of mmu lock, see the comments in
3842 	 * kvm_mmu_slot_remove_write_access().
3843 	 */
3844 	lockdep_assert_held(&kvm->slots_lock);
3845 	if (is_dirty)
3846 		kvm_flush_remote_tlbs(kvm);
3847 
3848 	mutex_unlock(&kvm->slots_lock);
3849 	return r;
3850 }
3851 
3852 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
3853 			bool line_status)
3854 {
3855 	if (!irqchip_in_kernel(kvm))
3856 		return -ENXIO;
3857 
3858 	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
3859 					irq_event->irq, irq_event->level,
3860 					line_status);
3861 	return 0;
3862 }
3863 
3864 static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
3865 				   struct kvm_enable_cap *cap)
3866 {
3867 	int r;
3868 
3869 	if (cap->flags)
3870 		return -EINVAL;
3871 
3872 	switch (cap->cap) {
3873 	case KVM_CAP_DISABLE_QUIRKS:
3874 		kvm->arch.disabled_quirks = cap->args[0];
3875 		r = 0;
3876 		break;
3877 	case KVM_CAP_SPLIT_IRQCHIP: {
3878 		mutex_lock(&kvm->lock);
3879 		r = -EINVAL;
3880 		if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS)
3881 			goto split_irqchip_unlock;
3882 		r = -EEXIST;
3883 		if (irqchip_in_kernel(kvm))
3884 			goto split_irqchip_unlock;
3885 		if (kvm->created_vcpus)
3886 			goto split_irqchip_unlock;
3887 		r = kvm_setup_empty_irq_routing(kvm);
3888 		if (r)
3889 			goto split_irqchip_unlock;
3890 		/* Pairs with irqchip_in_kernel. */
3891 		smp_wmb();
3892 		kvm->arch.irqchip_split = true;
3893 		kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
3894 		r = 0;
3895 split_irqchip_unlock:
3896 		mutex_unlock(&kvm->lock);
3897 		break;
3898 	}
3899 	case KVM_CAP_X2APIC_API:
3900 		r = -EINVAL;
3901 		if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS)
3902 			break;
3903 
3904 		if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS)
3905 			kvm->arch.x2apic_format = true;
3906 		if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
3907 			kvm->arch.x2apic_broadcast_quirk_disabled = true;
3908 
3909 		r = 0;
3910 		break;
3911 	default:
3912 		r = -EINVAL;
3913 		break;
3914 	}
3915 	return r;
3916 }
3917 
3918 long kvm_arch_vm_ioctl(struct file *filp,
3919 		       unsigned int ioctl, unsigned long arg)
3920 {
3921 	struct kvm *kvm = filp->private_data;
3922 	void __user *argp = (void __user *)arg;
3923 	int r = -ENOTTY;
3924 	/*
3925 	 * This union makes it completely explicit to gcc-3.x
3926 	 * that these two variables' stack usage should be
3927 	 * combined, not added together.
3928 	 */
3929 	union {
3930 		struct kvm_pit_state ps;
3931 		struct kvm_pit_state2 ps2;
3932 		struct kvm_pit_config pit_config;
3933 	} u;
3934 
3935 	switch (ioctl) {
3936 	case KVM_SET_TSS_ADDR:
3937 		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
3938 		break;
3939 	case KVM_SET_IDENTITY_MAP_ADDR: {
3940 		u64 ident_addr;
3941 
3942 		r = -EFAULT;
3943 		if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
3944 			goto out;
3945 		r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
3946 		break;
3947 	}
3948 	case KVM_SET_NR_MMU_PAGES:
3949 		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
3950 		break;
3951 	case KVM_GET_NR_MMU_PAGES:
3952 		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
3953 		break;
3954 	case KVM_CREATE_IRQCHIP: {
3955 		struct kvm_pic *vpic;
3956 
3957 		mutex_lock(&kvm->lock);
3958 		r = -EEXIST;
3959 		if (kvm->arch.vpic)
3960 			goto create_irqchip_unlock;
3961 		r = -EINVAL;
3962 		if (kvm->created_vcpus)
3963 			goto create_irqchip_unlock;
3964 		r = -ENOMEM;
3965 		vpic = kvm_create_pic(kvm);
3966 		if (vpic) {
3967 			r = kvm_ioapic_init(kvm);
3968 			if (r) {
3969 				mutex_lock(&kvm->slots_lock);
3970 				kvm_destroy_pic(vpic);
3971 				mutex_unlock(&kvm->slots_lock);
3972 				goto create_irqchip_unlock;
3973 			}
3974 		} else
3975 			goto create_irqchip_unlock;
3976 		r = kvm_setup_default_irq_routing(kvm);
3977 		if (r) {
3978 			mutex_lock(&kvm->slots_lock);
3979 			mutex_lock(&kvm->irq_lock);
3980 			kvm_ioapic_destroy(kvm);
3981 			kvm_destroy_pic(vpic);
3982 			mutex_unlock(&kvm->irq_lock);
3983 			mutex_unlock(&kvm->slots_lock);
3984 			goto create_irqchip_unlock;
3985 		}
3986 		/* Write kvm->irq_routing before kvm->arch.vpic.  */
3987 		smp_wmb();
3988 		kvm->arch.vpic = vpic;
3989 	create_irqchip_unlock:
3990 		mutex_unlock(&kvm->lock);
3991 		break;
3992 	}
3993 	case KVM_CREATE_PIT:
3994 		u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
3995 		goto create_pit;
3996 	case KVM_CREATE_PIT2:
3997 		r = -EFAULT;
3998 		if (copy_from_user(&u.pit_config, argp,
3999 				   sizeof(struct kvm_pit_config)))
4000 			goto out;
4001 	create_pit:
4002 		mutex_lock(&kvm->lock);
4003 		r = -EEXIST;
4004 		if (kvm->arch.vpit)
4005 			goto create_pit_unlock;
4006 		r = -ENOMEM;
4007 		kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
4008 		if (kvm->arch.vpit)
4009 			r = 0;
4010 	create_pit_unlock:
4011 		mutex_unlock(&kvm->lock);
4012 		break;
4013 	case KVM_GET_IRQCHIP: {
4014 		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
4015 		struct kvm_irqchip *chip;
4016 
4017 		chip = memdup_user(argp, sizeof(*chip));
4018 		if (IS_ERR(chip)) {
4019 			r = PTR_ERR(chip);
4020 			goto out;
4021 		}
4022 
4023 		r = -ENXIO;
4024 		if (!irqchip_in_kernel(kvm) || irqchip_split(kvm))
4025 			goto get_irqchip_out;
4026 		r = kvm_vm_ioctl_get_irqchip(kvm, chip);
4027 		if (r)
4028 			goto get_irqchip_out;
4029 		r = -EFAULT;
4030 		if (copy_to_user(argp, chip, sizeof *chip))
4031 			goto get_irqchip_out;
4032 		r = 0;
4033 	get_irqchip_out:
4034 		kfree(chip);
4035 		break;
4036 	}
4037 	case KVM_SET_IRQCHIP: {
4038 		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
4039 		struct kvm_irqchip *chip;
4040 
4041 		chip = memdup_user(argp, sizeof(*chip));
4042 		if (IS_ERR(chip)) {
4043 			r = PTR_ERR(chip);
4044 			goto out;
4045 		}
4046 
4047 		r = -ENXIO;
4048 		if (!irqchip_in_kernel(kvm) || irqchip_split(kvm))
4049 			goto set_irqchip_out;
4050 		r = kvm_vm_ioctl_set_irqchip(kvm, chip);
4051 		if (r)
4052 			goto set_irqchip_out;
4053 		r = 0;
4054 	set_irqchip_out:
4055 		kfree(chip);
4056 		break;
4057 	}
4058 	case KVM_GET_PIT: {
4059 		r = -EFAULT;
4060 		if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
4061 			goto out;
4062 		r = -ENXIO;
4063 		if (!kvm->arch.vpit)
4064 			goto out;
4065 		r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
4066 		if (r)
4067 			goto out;
4068 		r = -EFAULT;
4069 		if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
4070 			goto out;
4071 		r = 0;
4072 		break;
4073 	}
4074 	case KVM_SET_PIT: {
4075 		r = -EFAULT;
4076 		if (copy_from_user(&u.ps, argp, sizeof u.ps))
4077 			goto out;
4078 		r = -ENXIO;
4079 		if (!kvm->arch.vpit)
4080 			goto out;
4081 		r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
4082 		break;
4083 	}
4084 	case KVM_GET_PIT2: {
4085 		r = -ENXIO;
4086 		if (!kvm->arch.vpit)
4087 			goto out;
4088 		r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
4089 		if (r)
4090 			goto out;
4091 		r = -EFAULT;
4092 		if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
4093 			goto out;
4094 		r = 0;
4095 		break;
4096 	}
4097 	case KVM_SET_PIT2: {
4098 		r = -EFAULT;
4099 		if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
4100 			goto out;
4101 		r = -ENXIO;
4102 		if (!kvm->arch.vpit)
4103 			goto out;
4104 		r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
4105 		break;
4106 	}
4107 	case KVM_REINJECT_CONTROL: {
4108 		struct kvm_reinject_control control;
4109 		r =  -EFAULT;
4110 		if (copy_from_user(&control, argp, sizeof(control)))
4111 			goto out;
4112 		r = kvm_vm_ioctl_reinject(kvm, &control);
4113 		break;
4114 	}
4115 	case KVM_SET_BOOT_CPU_ID:
4116 		r = 0;
4117 		mutex_lock(&kvm->lock);
4118 		if (kvm->created_vcpus)
4119 			r = -EBUSY;
4120 		else
4121 			kvm->arch.bsp_vcpu_id = arg;
4122 		mutex_unlock(&kvm->lock);
4123 		break;
4124 	case KVM_XEN_HVM_CONFIG: {
4125 		r = -EFAULT;
4126 		if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
4127 				   sizeof(struct kvm_xen_hvm_config)))
4128 			goto out;
4129 		r = -EINVAL;
4130 		if (kvm->arch.xen_hvm_config.flags)
4131 			goto out;
4132 		r = 0;
4133 		break;
4134 	}
4135 	case KVM_SET_CLOCK: {
4136 		struct kvm_clock_data user_ns;
4137 		u64 now_ns;
4138 
4139 		r = -EFAULT;
4140 		if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
4141 			goto out;
4142 
4143 		r = -EINVAL;
4144 		if (user_ns.flags)
4145 			goto out;
4146 
4147 		r = 0;
4148 		local_irq_disable();
4149 		now_ns = __get_kvmclock_ns(kvm);
4150 		kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
4151 		local_irq_enable();
4152 		kvm_gen_update_masterclock(kvm);
4153 		break;
4154 	}
4155 	case KVM_GET_CLOCK: {
4156 		struct kvm_clock_data user_ns;
4157 		u64 now_ns;
4158 
4159 		local_irq_disable();
4160 		now_ns = __get_kvmclock_ns(kvm);
4161 		user_ns.clock = now_ns;
4162 		user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0;
4163 		local_irq_enable();
4164 		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
4165 
4166 		r = -EFAULT;
4167 		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
4168 			goto out;
4169 		r = 0;
4170 		break;
4171 	}
4172 	case KVM_ENABLE_CAP: {
4173 		struct kvm_enable_cap cap;
4174 
4175 		r = -EFAULT;
4176 		if (copy_from_user(&cap, argp, sizeof(cap)))
4177 			goto out;
4178 		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
4179 		break;
4180 	}
4181 	default:
4182 		r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
4183 	}
4184 out:
4185 	return r;
4186 }
4187 
4188 static void kvm_init_msr_list(void)
4189 {
4190 	u32 dummy[2];
4191 	unsigned i, j;
4192 
4193 	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
4194 		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
4195 			continue;
4196 
4197 		/*
4198 		 * Even MSRs that are valid in the host may not be exposed
4199 		 * to the guests in some cases.
4200 		 */
4201 		switch (msrs_to_save[i]) {
4202 		case MSR_IA32_BNDCFGS:
4203 			if (!kvm_x86_ops->mpx_supported())
4204 				continue;
4205 			break;
4206 		case MSR_TSC_AUX:
4207 			if (!kvm_x86_ops->rdtscp_supported())
4208 				continue;
4209 			break;
4210 		default:
4211 			break;
4212 		}
4213 
4214 		if (j < i)
4215 			msrs_to_save[j] = msrs_to_save[i];
4216 		j++;
4217 	}
4218 	num_msrs_to_save = j;
4219 
4220 	for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
4221 		switch (emulated_msrs[i]) {
4222 		case MSR_IA32_SMBASE:
4223 			if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
4224 				continue;
4225 			break;
4226 		default:
4227 			break;
4228 		}
4229 
4230 		if (j < i)
4231 			emulated_msrs[j] = emulated_msrs[i];
4232 		j++;
4233 	}
4234 	num_emulated_msrs = j;
4235 }
4236 
4237 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
4238 			   const void *v)
4239 {
4240 	int handled = 0;
4241 	int n;
4242 
4243 	do {
4244 		n = min(len, 8);
4245 		if (!(lapic_in_kernel(vcpu) &&
4246 		      !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
4247 		    && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
4248 			break;
4249 		handled += n;
4250 		addr += n;
4251 		len -= n;
4252 		v += n;
4253 	} while (len);
4254 
4255 	return handled;
4256 }
4257 
4258 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
4259 {
4260 	int handled = 0;
4261 	int n;
4262 
4263 	do {
4264 		n = min(len, 8);
4265 		if (!(lapic_in_kernel(vcpu) &&
4266 		      !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
4267 					 addr, n, v))
4268 		    && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
4269 			break;
4270 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
4271 		handled += n;
4272 		addr += n;
4273 		len -= n;
4274 		v += n;
4275 	} while (len);
4276 
4277 	return handled;
4278 }
4279 
4280 static void kvm_set_segment(struct kvm_vcpu *vcpu,
4281 			struct kvm_segment *var, int seg)
4282 {
4283 	kvm_x86_ops->set_segment(vcpu, var, seg);
4284 }
4285 
4286 void kvm_get_segment(struct kvm_vcpu *vcpu,
4287 		     struct kvm_segment *var, int seg)
4288 {
4289 	kvm_x86_ops->get_segment(vcpu, var, seg);
4290 }
4291 
4292 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
4293 			   struct x86_exception *exception)
4294 {
4295 	gpa_t t_gpa;
4296 
4297 	BUG_ON(!mmu_is_nested(vcpu));
4298 
4299 	/* NPT walks are always user-walks */
4300 	access |= PFERR_USER_MASK;
4301 	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception);
4302 
4303 	return t_gpa;
4304 }
4305 
4306 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
4307 			      struct x86_exception *exception)
4308 {
4309 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4310 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4311 }
4312 
4313  gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
4314 				struct x86_exception *exception)
4315 {
4316 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4317 	access |= PFERR_FETCH_MASK;
4318 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4319 }
4320 
4321 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
4322 			       struct x86_exception *exception)
4323 {
4324 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4325 	access |= PFERR_WRITE_MASK;
4326 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4327 }
4328 
4329 /* uses this to access any guest's mapped memory without checking CPL */
4330 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
4331 				struct x86_exception *exception)
4332 {
4333 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
4334 }
4335 
4336 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
4337 				      struct kvm_vcpu *vcpu, u32 access,
4338 				      struct x86_exception *exception)
4339 {
4340 	void *data = val;
4341 	int r = X86EMUL_CONTINUE;
4342 
4343 	while (bytes) {
4344 		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
4345 							    exception);
4346 		unsigned offset = addr & (PAGE_SIZE-1);
4347 		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
4348 		int ret;
4349 
4350 		if (gpa == UNMAPPED_GVA)
4351 			return X86EMUL_PROPAGATE_FAULT;
4352 		ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
4353 					       offset, toread);
4354 		if (ret < 0) {
4355 			r = X86EMUL_IO_NEEDED;
4356 			goto out;
4357 		}
4358 
4359 		bytes -= toread;
4360 		data += toread;
4361 		addr += toread;
4362 	}
4363 out:
4364 	return r;
4365 }
4366 
4367 /* used for instruction fetching */
4368 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
4369 				gva_t addr, void *val, unsigned int bytes,
4370 				struct x86_exception *exception)
4371 {
4372 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4373 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4374 	unsigned offset;
4375 	int ret;
4376 
4377 	/* Inline kvm_read_guest_virt_helper for speed.  */
4378 	gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK,
4379 						    exception);
4380 	if (unlikely(gpa == UNMAPPED_GVA))
4381 		return X86EMUL_PROPAGATE_FAULT;
4382 
4383 	offset = addr & (PAGE_SIZE-1);
4384 	if (WARN_ON(offset + bytes > PAGE_SIZE))
4385 		bytes = (unsigned)PAGE_SIZE - offset;
4386 	ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val,
4387 				       offset, bytes);
4388 	if (unlikely(ret < 0))
4389 		return X86EMUL_IO_NEEDED;
4390 
4391 	return X86EMUL_CONTINUE;
4392 }
4393 
4394 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
4395 			       gva_t addr, void *val, unsigned int bytes,
4396 			       struct x86_exception *exception)
4397 {
4398 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4399 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4400 
4401 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
4402 					  exception);
4403 }
4404 EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
4405 
4406 static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
4407 				      gva_t addr, void *val, unsigned int bytes,
4408 				      struct x86_exception *exception)
4409 {
4410 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4411 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
4412 }
4413 
4414 static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
4415 		unsigned long addr, void *val, unsigned int bytes)
4416 {
4417 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4418 	int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes);
4419 
4420 	return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
4421 }
4422 
4423 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
4424 				       gva_t addr, void *val,
4425 				       unsigned int bytes,
4426 				       struct x86_exception *exception)
4427 {
4428 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4429 	void *data = val;
4430 	int r = X86EMUL_CONTINUE;
4431 
4432 	while (bytes) {
4433 		gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
4434 							     PFERR_WRITE_MASK,
4435 							     exception);
4436 		unsigned offset = addr & (PAGE_SIZE-1);
4437 		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
4438 		int ret;
4439 
4440 		if (gpa == UNMAPPED_GVA)
4441 			return X86EMUL_PROPAGATE_FAULT;
4442 		ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
4443 		if (ret < 0) {
4444 			r = X86EMUL_IO_NEEDED;
4445 			goto out;
4446 		}
4447 
4448 		bytes -= towrite;
4449 		data += towrite;
4450 		addr += towrite;
4451 	}
4452 out:
4453 	return r;
4454 }
4455 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
4456 
4457 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
4458 				gpa_t *gpa, struct x86_exception *exception,
4459 				bool write)
4460 {
4461 	u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
4462 		| (write ? PFERR_WRITE_MASK : 0);
4463 
4464 	/*
4465 	 * currently PKRU is only applied to ept enabled guest so
4466 	 * there is no pkey in EPT page table for L1 guest or EPT
4467 	 * shadow page table for L2 guest.
4468 	 */
4469 	if (vcpu_match_mmio_gva(vcpu, gva)
4470 	    && !permission_fault(vcpu, vcpu->arch.walk_mmu,
4471 				 vcpu->arch.access, 0, access)) {
4472 		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
4473 					(gva & (PAGE_SIZE - 1));
4474 		trace_vcpu_match_mmio(gva, *gpa, write, false);
4475 		return 1;
4476 	}
4477 
4478 	*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4479 
4480 	if (*gpa == UNMAPPED_GVA)
4481 		return -1;
4482 
4483 	/* For APIC access vmexit */
4484 	if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
4485 		return 1;
4486 
4487 	if (vcpu_match_mmio_gpa(vcpu, *gpa)) {
4488 		trace_vcpu_match_mmio(gva, *gpa, write, true);
4489 		return 1;
4490 	}
4491 
4492 	return 0;
4493 }
4494 
4495 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
4496 			const void *val, int bytes)
4497 {
4498 	int ret;
4499 
4500 	ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
4501 	if (ret < 0)
4502 		return 0;
4503 	kvm_page_track_write(vcpu, gpa, val, bytes);
4504 	return 1;
4505 }
4506 
4507 struct read_write_emulator_ops {
4508 	int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
4509 				  int bytes);
4510 	int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
4511 				  void *val, int bytes);
4512 	int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
4513 			       int bytes, void *val);
4514 	int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
4515 				    void *val, int bytes);
4516 	bool write;
4517 };
4518 
4519 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
4520 {
4521 	if (vcpu->mmio_read_completed) {
4522 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
4523 			       vcpu->mmio_fragments[0].gpa, *(u64 *)val);
4524 		vcpu->mmio_read_completed = 0;
4525 		return 1;
4526 	}
4527 
4528 	return 0;
4529 }
4530 
4531 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
4532 			void *val, int bytes)
4533 {
4534 	return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes);
4535 }
4536 
4537 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
4538 			 void *val, int bytes)
4539 {
4540 	return emulator_write_phys(vcpu, gpa, val, bytes);
4541 }
4542 
4543 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
4544 {
4545 	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
4546 	return vcpu_mmio_write(vcpu, gpa, bytes, val);
4547 }
4548 
4549 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
4550 			  void *val, int bytes)
4551 {
4552 	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
4553 	return X86EMUL_IO_NEEDED;
4554 }
4555 
4556 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
4557 			   void *val, int bytes)
4558 {
4559 	struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
4560 
4561 	memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
4562 	return X86EMUL_CONTINUE;
4563 }
4564 
4565 static const struct read_write_emulator_ops read_emultor = {
4566 	.read_write_prepare = read_prepare,
4567 	.read_write_emulate = read_emulate,
4568 	.read_write_mmio = vcpu_mmio_read,
4569 	.read_write_exit_mmio = read_exit_mmio,
4570 };
4571 
4572 static const struct read_write_emulator_ops write_emultor = {
4573 	.read_write_emulate = write_emulate,
4574 	.read_write_mmio = write_mmio,
4575 	.read_write_exit_mmio = write_exit_mmio,
4576 	.write = true,
4577 };
4578 
4579 static int emulator_read_write_onepage(unsigned long addr, void *val,
4580 				       unsigned int bytes,
4581 				       struct x86_exception *exception,
4582 				       struct kvm_vcpu *vcpu,
4583 				       const struct read_write_emulator_ops *ops)
4584 {
4585 	gpa_t gpa;
4586 	int handled, ret;
4587 	bool write = ops->write;
4588 	struct kvm_mmio_fragment *frag;
4589 
4590 	ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
4591 
4592 	if (ret < 0)
4593 		return X86EMUL_PROPAGATE_FAULT;
4594 
4595 	/* For APIC access vmexit */
4596 	if (ret)
4597 		goto mmio;
4598 
4599 	if (ops->read_write_emulate(vcpu, gpa, val, bytes))
4600 		return X86EMUL_CONTINUE;
4601 
4602 mmio:
4603 	/*
4604 	 * Is this MMIO handled locally?
4605 	 */
4606 	handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
4607 	if (handled == bytes)
4608 		return X86EMUL_CONTINUE;
4609 
4610 	gpa += handled;
4611 	bytes -= handled;
4612 	val += handled;
4613 
4614 	WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
4615 	frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
4616 	frag->gpa = gpa;
4617 	frag->data = val;
4618 	frag->len = bytes;
4619 	return X86EMUL_CONTINUE;
4620 }
4621 
4622 static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
4623 			unsigned long addr,
4624 			void *val, unsigned int bytes,
4625 			struct x86_exception *exception,
4626 			const struct read_write_emulator_ops *ops)
4627 {
4628 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4629 	gpa_t gpa;
4630 	int rc;
4631 
4632 	if (ops->read_write_prepare &&
4633 		  ops->read_write_prepare(vcpu, val, bytes))
4634 		return X86EMUL_CONTINUE;
4635 
4636 	vcpu->mmio_nr_fragments = 0;
4637 
4638 	/* Crossing a page boundary? */
4639 	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
4640 		int now;
4641 
4642 		now = -addr & ~PAGE_MASK;
4643 		rc = emulator_read_write_onepage(addr, val, now, exception,
4644 						 vcpu, ops);
4645 
4646 		if (rc != X86EMUL_CONTINUE)
4647 			return rc;
4648 		addr += now;
4649 		if (ctxt->mode != X86EMUL_MODE_PROT64)
4650 			addr = (u32)addr;
4651 		val += now;
4652 		bytes -= now;
4653 	}
4654 
4655 	rc = emulator_read_write_onepage(addr, val, bytes, exception,
4656 					 vcpu, ops);
4657 	if (rc != X86EMUL_CONTINUE)
4658 		return rc;
4659 
4660 	if (!vcpu->mmio_nr_fragments)
4661 		return rc;
4662 
4663 	gpa = vcpu->mmio_fragments[0].gpa;
4664 
4665 	vcpu->mmio_needed = 1;
4666 	vcpu->mmio_cur_fragment = 0;
4667 
4668 	vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
4669 	vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
4670 	vcpu->run->exit_reason = KVM_EXIT_MMIO;
4671 	vcpu->run->mmio.phys_addr = gpa;
4672 
4673 	return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
4674 }
4675 
4676 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
4677 				  unsigned long addr,
4678 				  void *val,
4679 				  unsigned int bytes,
4680 				  struct x86_exception *exception)
4681 {
4682 	return emulator_read_write(ctxt, addr, val, bytes,
4683 				   exception, &read_emultor);
4684 }
4685 
4686 static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
4687 			    unsigned long addr,
4688 			    const void *val,
4689 			    unsigned int bytes,
4690 			    struct x86_exception *exception)
4691 {
4692 	return emulator_read_write(ctxt, addr, (void *)val, bytes,
4693 				   exception, &write_emultor);
4694 }
4695 
4696 #define CMPXCHG_TYPE(t, ptr, old, new) \
4697 	(cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
4698 
4699 #ifdef CONFIG_X86_64
4700 #  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
4701 #else
4702 #  define CMPXCHG64(ptr, old, new) \
4703 	(cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
4704 #endif
4705 
4706 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
4707 				     unsigned long addr,
4708 				     const void *old,
4709 				     const void *new,
4710 				     unsigned int bytes,
4711 				     struct x86_exception *exception)
4712 {
4713 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4714 	gpa_t gpa;
4715 	struct page *page;
4716 	char *kaddr;
4717 	bool exchanged;
4718 
4719 	/* guests cmpxchg8b have to be emulated atomically */
4720 	if (bytes > 8 || (bytes & (bytes - 1)))
4721 		goto emul_write;
4722 
4723 	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
4724 
4725 	if (gpa == UNMAPPED_GVA ||
4726 	    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
4727 		goto emul_write;
4728 
4729 	if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
4730 		goto emul_write;
4731 
4732 	page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
4733 	if (is_error_page(page))
4734 		goto emul_write;
4735 
4736 	kaddr = kmap_atomic(page);
4737 	kaddr += offset_in_page(gpa);
4738 	switch (bytes) {
4739 	case 1:
4740 		exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
4741 		break;
4742 	case 2:
4743 		exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
4744 		break;
4745 	case 4:
4746 		exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
4747 		break;
4748 	case 8:
4749 		exchanged = CMPXCHG64(kaddr, old, new);
4750 		break;
4751 	default:
4752 		BUG();
4753 	}
4754 	kunmap_atomic(kaddr);
4755 	kvm_release_page_dirty(page);
4756 
4757 	if (!exchanged)
4758 		return X86EMUL_CMPXCHG_FAILED;
4759 
4760 	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
4761 	kvm_page_track_write(vcpu, gpa, new, bytes);
4762 
4763 	return X86EMUL_CONTINUE;
4764 
4765 emul_write:
4766 	printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
4767 
4768 	return emulator_write_emulated(ctxt, addr, new, bytes, exception);
4769 }
4770 
4771 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
4772 {
4773 	/* TODO: String I/O for in kernel device */
4774 	int r;
4775 
4776 	if (vcpu->arch.pio.in)
4777 		r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
4778 				    vcpu->arch.pio.size, pd);
4779 	else
4780 		r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
4781 				     vcpu->arch.pio.port, vcpu->arch.pio.size,
4782 				     pd);
4783 	return r;
4784 }
4785 
4786 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
4787 			       unsigned short port, void *val,
4788 			       unsigned int count, bool in)
4789 {
4790 	vcpu->arch.pio.port = port;
4791 	vcpu->arch.pio.in = in;
4792 	vcpu->arch.pio.count  = count;
4793 	vcpu->arch.pio.size = size;
4794 
4795 	if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
4796 		vcpu->arch.pio.count = 0;
4797 		return 1;
4798 	}
4799 
4800 	vcpu->run->exit_reason = KVM_EXIT_IO;
4801 	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
4802 	vcpu->run->io.size = size;
4803 	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
4804 	vcpu->run->io.count = count;
4805 	vcpu->run->io.port = port;
4806 
4807 	return 0;
4808 }
4809 
4810 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
4811 				    int size, unsigned short port, void *val,
4812 				    unsigned int count)
4813 {
4814 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4815 	int ret;
4816 
4817 	if (vcpu->arch.pio.count)
4818 		goto data_avail;
4819 
4820 	ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
4821 	if (ret) {
4822 data_avail:
4823 		memcpy(val, vcpu->arch.pio_data, size * count);
4824 		trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
4825 		vcpu->arch.pio.count = 0;
4826 		return 1;
4827 	}
4828 
4829 	return 0;
4830 }
4831 
4832 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
4833 				     int size, unsigned short port,
4834 				     const void *val, unsigned int count)
4835 {
4836 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4837 
4838 	memcpy(vcpu->arch.pio_data, val, size * count);
4839 	trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
4840 	return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
4841 }
4842 
4843 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
4844 {
4845 	return kvm_x86_ops->get_segment_base(vcpu, seg);
4846 }
4847 
4848 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
4849 {
4850 	kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
4851 }
4852 
4853 static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
4854 {
4855 	if (!need_emulate_wbinvd(vcpu))
4856 		return X86EMUL_CONTINUE;
4857 
4858 	if (kvm_x86_ops->has_wbinvd_exit()) {
4859 		int cpu = get_cpu();
4860 
4861 		cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
4862 		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
4863 				wbinvd_ipi, NULL, 1);
4864 		put_cpu();
4865 		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
4866 	} else
4867 		wbinvd();
4868 	return X86EMUL_CONTINUE;
4869 }
4870 
4871 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
4872 {
4873 	kvm_emulate_wbinvd_noskip(vcpu);
4874 	return kvm_skip_emulated_instruction(vcpu);
4875 }
4876 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
4877 
4878 
4879 
4880 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
4881 {
4882 	kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
4883 }
4884 
4885 static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
4886 			   unsigned long *dest)
4887 {
4888 	return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
4889 }
4890 
4891 static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
4892 			   unsigned long value)
4893 {
4894 
4895 	return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
4896 }
4897 
4898 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
4899 {
4900 	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
4901 }
4902 
4903 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
4904 {
4905 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4906 	unsigned long value;
4907 
4908 	switch (cr) {
4909 	case 0:
4910 		value = kvm_read_cr0(vcpu);
4911 		break;
4912 	case 2:
4913 		value = vcpu->arch.cr2;
4914 		break;
4915 	case 3:
4916 		value = kvm_read_cr3(vcpu);
4917 		break;
4918 	case 4:
4919 		value = kvm_read_cr4(vcpu);
4920 		break;
4921 	case 8:
4922 		value = kvm_get_cr8(vcpu);
4923 		break;
4924 	default:
4925 		kvm_err("%s: unexpected cr %u\n", __func__, cr);
4926 		return 0;
4927 	}
4928 
4929 	return value;
4930 }
4931 
4932 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
4933 {
4934 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4935 	int res = 0;
4936 
4937 	switch (cr) {
4938 	case 0:
4939 		res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
4940 		break;
4941 	case 2:
4942 		vcpu->arch.cr2 = val;
4943 		break;
4944 	case 3:
4945 		res = kvm_set_cr3(vcpu, val);
4946 		break;
4947 	case 4:
4948 		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
4949 		break;
4950 	case 8:
4951 		res = kvm_set_cr8(vcpu, val);
4952 		break;
4953 	default:
4954 		kvm_err("%s: unexpected cr %u\n", __func__, cr);
4955 		res = -1;
4956 	}
4957 
4958 	return res;
4959 }
4960 
4961 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
4962 {
4963 	return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
4964 }
4965 
4966 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4967 {
4968 	kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
4969 }
4970 
4971 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4972 {
4973 	kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
4974 }
4975 
4976 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4977 {
4978 	kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
4979 }
4980 
4981 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4982 {
4983 	kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
4984 }
4985 
4986 static unsigned long emulator_get_cached_segment_base(
4987 	struct x86_emulate_ctxt *ctxt, int seg)
4988 {
4989 	return get_segment_base(emul_to_vcpu(ctxt), seg);
4990 }
4991 
4992 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
4993 				 struct desc_struct *desc, u32 *base3,
4994 				 int seg)
4995 {
4996 	struct kvm_segment var;
4997 
4998 	kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
4999 	*selector = var.selector;
5000 
5001 	if (var.unusable) {
5002 		memset(desc, 0, sizeof(*desc));
5003 		return false;
5004 	}
5005 
5006 	if (var.g)
5007 		var.limit >>= 12;
5008 	set_desc_limit(desc, var.limit);
5009 	set_desc_base(desc, (unsigned long)var.base);
5010 #ifdef CONFIG_X86_64
5011 	if (base3)
5012 		*base3 = var.base >> 32;
5013 #endif
5014 	desc->type = var.type;
5015 	desc->s = var.s;
5016 	desc->dpl = var.dpl;
5017 	desc->p = var.present;
5018 	desc->avl = var.avl;
5019 	desc->l = var.l;
5020 	desc->d = var.db;
5021 	desc->g = var.g;
5022 
5023 	return true;
5024 }
5025 
5026 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
5027 				 struct desc_struct *desc, u32 base3,
5028 				 int seg)
5029 {
5030 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5031 	struct kvm_segment var;
5032 
5033 	var.selector = selector;
5034 	var.base = get_desc_base(desc);
5035 #ifdef CONFIG_X86_64
5036 	var.base |= ((u64)base3) << 32;
5037 #endif
5038 	var.limit = get_desc_limit(desc);
5039 	if (desc->g)
5040 		var.limit = (var.limit << 12) | 0xfff;
5041 	var.type = desc->type;
5042 	var.dpl = desc->dpl;
5043 	var.db = desc->d;
5044 	var.s = desc->s;
5045 	var.l = desc->l;
5046 	var.g = desc->g;
5047 	var.avl = desc->avl;
5048 	var.present = desc->p;
5049 	var.unusable = !var.present;
5050 	var.padding = 0;
5051 
5052 	kvm_set_segment(vcpu, &var, seg);
5053 	return;
5054 }
5055 
5056 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
5057 			    u32 msr_index, u64 *pdata)
5058 {
5059 	struct msr_data msr;
5060 	int r;
5061 
5062 	msr.index = msr_index;
5063 	msr.host_initiated = false;
5064 	r = kvm_get_msr(emul_to_vcpu(ctxt), &msr);
5065 	if (r)
5066 		return r;
5067 
5068 	*pdata = msr.data;
5069 	return 0;
5070 }
5071 
5072 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
5073 			    u32 msr_index, u64 data)
5074 {
5075 	struct msr_data msr;
5076 
5077 	msr.data = data;
5078 	msr.index = msr_index;
5079 	msr.host_initiated = false;
5080 	return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
5081 }
5082 
5083 static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)
5084 {
5085 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5086 
5087 	return vcpu->arch.smbase;
5088 }
5089 
5090 static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase)
5091 {
5092 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5093 
5094 	vcpu->arch.smbase = smbase;
5095 }
5096 
5097 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
5098 			      u32 pmc)
5099 {
5100 	return kvm_pmu_is_valid_msr_idx(emul_to_vcpu(ctxt), pmc);
5101 }
5102 
5103 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
5104 			     u32 pmc, u64 *pdata)
5105 {
5106 	return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata);
5107 }
5108 
5109 static void emulator_halt(struct x86_emulate_ctxt *ctxt)
5110 {
5111 	emul_to_vcpu(ctxt)->arch.halt_request = 1;
5112 }
5113 
5114 static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
5115 {
5116 	preempt_disable();
5117 	kvm_load_guest_fpu(emul_to_vcpu(ctxt));
5118 }
5119 
5120 static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
5121 {
5122 	preempt_enable();
5123 }
5124 
5125 static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
5126 			      struct x86_instruction_info *info,
5127 			      enum x86_intercept_stage stage)
5128 {
5129 	return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
5130 }
5131 
5132 static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
5133 			       u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
5134 {
5135 	kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx);
5136 }
5137 
5138 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
5139 {
5140 	return kvm_register_read(emul_to_vcpu(ctxt), reg);
5141 }
5142 
5143 static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
5144 {
5145 	kvm_register_write(emul_to_vcpu(ctxt), reg, val);
5146 }
5147 
5148 static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
5149 {
5150 	kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked);
5151 }
5152 
5153 static const struct x86_emulate_ops emulate_ops = {
5154 	.read_gpr            = emulator_read_gpr,
5155 	.write_gpr           = emulator_write_gpr,
5156 	.read_std            = kvm_read_guest_virt_system,
5157 	.write_std           = kvm_write_guest_virt_system,
5158 	.read_phys           = kvm_read_guest_phys_system,
5159 	.fetch               = kvm_fetch_guest_virt,
5160 	.read_emulated       = emulator_read_emulated,
5161 	.write_emulated      = emulator_write_emulated,
5162 	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
5163 	.invlpg              = emulator_invlpg,
5164 	.pio_in_emulated     = emulator_pio_in_emulated,
5165 	.pio_out_emulated    = emulator_pio_out_emulated,
5166 	.get_segment         = emulator_get_segment,
5167 	.set_segment         = emulator_set_segment,
5168 	.get_cached_segment_base = emulator_get_cached_segment_base,
5169 	.get_gdt             = emulator_get_gdt,
5170 	.get_idt	     = emulator_get_idt,
5171 	.set_gdt             = emulator_set_gdt,
5172 	.set_idt	     = emulator_set_idt,
5173 	.get_cr              = emulator_get_cr,
5174 	.set_cr              = emulator_set_cr,
5175 	.cpl                 = emulator_get_cpl,
5176 	.get_dr              = emulator_get_dr,
5177 	.set_dr              = emulator_set_dr,
5178 	.get_smbase          = emulator_get_smbase,
5179 	.set_smbase          = emulator_set_smbase,
5180 	.set_msr             = emulator_set_msr,
5181 	.get_msr             = emulator_get_msr,
5182 	.check_pmc	     = emulator_check_pmc,
5183 	.read_pmc            = emulator_read_pmc,
5184 	.halt                = emulator_halt,
5185 	.wbinvd              = emulator_wbinvd,
5186 	.fix_hypercall       = emulator_fix_hypercall,
5187 	.get_fpu             = emulator_get_fpu,
5188 	.put_fpu             = emulator_put_fpu,
5189 	.intercept           = emulator_intercept,
5190 	.get_cpuid           = emulator_get_cpuid,
5191 	.set_nmi_mask        = emulator_set_nmi_mask,
5192 };
5193 
5194 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
5195 {
5196 	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
5197 	/*
5198 	 * an sti; sti; sequence only disable interrupts for the first
5199 	 * instruction. So, if the last instruction, be it emulated or
5200 	 * not, left the system with the INT_STI flag enabled, it
5201 	 * means that the last instruction is an sti. We should not
5202 	 * leave the flag on in this case. The same goes for mov ss
5203 	 */
5204 	if (int_shadow & mask)
5205 		mask = 0;
5206 	if (unlikely(int_shadow || mask)) {
5207 		kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
5208 		if (!mask)
5209 			kvm_make_request(KVM_REQ_EVENT, vcpu);
5210 	}
5211 }
5212 
5213 static bool inject_emulated_exception(struct kvm_vcpu *vcpu)
5214 {
5215 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5216 	if (ctxt->exception.vector == PF_VECTOR)
5217 		return kvm_propagate_fault(vcpu, &ctxt->exception);
5218 
5219 	if (ctxt->exception.error_code_valid)
5220 		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
5221 				      ctxt->exception.error_code);
5222 	else
5223 		kvm_queue_exception(vcpu, ctxt->exception.vector);
5224 	return false;
5225 }
5226 
5227 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
5228 {
5229 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5230 	int cs_db, cs_l;
5231 
5232 	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
5233 
5234 	ctxt->eflags = kvm_get_rflags(vcpu);
5235 	ctxt->eip = kvm_rip_read(vcpu);
5236 	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
5237 		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
5238 		     (cs_l && is_long_mode(vcpu))	? X86EMUL_MODE_PROT64 :
5239 		     cs_db				? X86EMUL_MODE_PROT32 :
5240 							  X86EMUL_MODE_PROT16;
5241 	BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
5242 	BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
5243 	BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
5244 	ctxt->emul_flags = vcpu->arch.hflags;
5245 
5246 	init_decode_cache(ctxt);
5247 	vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
5248 }
5249 
5250 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
5251 {
5252 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5253 	int ret;
5254 
5255 	init_emulate_ctxt(vcpu);
5256 
5257 	ctxt->op_bytes = 2;
5258 	ctxt->ad_bytes = 2;
5259 	ctxt->_eip = ctxt->eip + inc_eip;
5260 	ret = emulate_int_real(ctxt, irq);
5261 
5262 	if (ret != X86EMUL_CONTINUE)
5263 		return EMULATE_FAIL;
5264 
5265 	ctxt->eip = ctxt->_eip;
5266 	kvm_rip_write(vcpu, ctxt->eip);
5267 	kvm_set_rflags(vcpu, ctxt->eflags);
5268 
5269 	if (irq == NMI_VECTOR)
5270 		vcpu->arch.nmi_pending = 0;
5271 	else
5272 		vcpu->arch.interrupt.pending = false;
5273 
5274 	return EMULATE_DONE;
5275 }
5276 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
5277 
5278 static int handle_emulation_failure(struct kvm_vcpu *vcpu)
5279 {
5280 	int r = EMULATE_DONE;
5281 
5282 	++vcpu->stat.insn_emulation_fail;
5283 	trace_kvm_emulate_insn_failed(vcpu);
5284 	if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
5285 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
5286 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
5287 		vcpu->run->internal.ndata = 0;
5288 		r = EMULATE_FAIL;
5289 	}
5290 	kvm_queue_exception(vcpu, UD_VECTOR);
5291 
5292 	return r;
5293 }
5294 
5295 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
5296 				  bool write_fault_to_shadow_pgtable,
5297 				  int emulation_type)
5298 {
5299 	gpa_t gpa = cr2;
5300 	kvm_pfn_t pfn;
5301 
5302 	if (emulation_type & EMULTYPE_NO_REEXECUTE)
5303 		return false;
5304 
5305 	if (!vcpu->arch.mmu.direct_map) {
5306 		/*
5307 		 * Write permission should be allowed since only
5308 		 * write access need to be emulated.
5309 		 */
5310 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
5311 
5312 		/*
5313 		 * If the mapping is invalid in guest, let cpu retry
5314 		 * it to generate fault.
5315 		 */
5316 		if (gpa == UNMAPPED_GVA)
5317 			return true;
5318 	}
5319 
5320 	/*
5321 	 * Do not retry the unhandleable instruction if it faults on the
5322 	 * readonly host memory, otherwise it will goto a infinite loop:
5323 	 * retry instruction -> write #PF -> emulation fail -> retry
5324 	 * instruction -> ...
5325 	 */
5326 	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
5327 
5328 	/*
5329 	 * If the instruction failed on the error pfn, it can not be fixed,
5330 	 * report the error to userspace.
5331 	 */
5332 	if (is_error_noslot_pfn(pfn))
5333 		return false;
5334 
5335 	kvm_release_pfn_clean(pfn);
5336 
5337 	/* The instructions are well-emulated on direct mmu. */
5338 	if (vcpu->arch.mmu.direct_map) {
5339 		unsigned int indirect_shadow_pages;
5340 
5341 		spin_lock(&vcpu->kvm->mmu_lock);
5342 		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
5343 		spin_unlock(&vcpu->kvm->mmu_lock);
5344 
5345 		if (indirect_shadow_pages)
5346 			kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
5347 
5348 		return true;
5349 	}
5350 
5351 	/*
5352 	 * if emulation was due to access to shadowed page table
5353 	 * and it failed try to unshadow page and re-enter the
5354 	 * guest to let CPU execute the instruction.
5355 	 */
5356 	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
5357 
5358 	/*
5359 	 * If the access faults on its page table, it can not
5360 	 * be fixed by unprotecting shadow page and it should
5361 	 * be reported to userspace.
5362 	 */
5363 	return !write_fault_to_shadow_pgtable;
5364 }
5365 
5366 static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
5367 			      unsigned long cr2,  int emulation_type)
5368 {
5369 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5370 	unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
5371 
5372 	last_retry_eip = vcpu->arch.last_retry_eip;
5373 	last_retry_addr = vcpu->arch.last_retry_addr;
5374 
5375 	/*
5376 	 * If the emulation is caused by #PF and it is non-page_table
5377 	 * writing instruction, it means the VM-EXIT is caused by shadow
5378 	 * page protected, we can zap the shadow page and retry this
5379 	 * instruction directly.
5380 	 *
5381 	 * Note: if the guest uses a non-page-table modifying instruction
5382 	 * on the PDE that points to the instruction, then we will unmap
5383 	 * the instruction and go to an infinite loop. So, we cache the
5384 	 * last retried eip and the last fault address, if we meet the eip
5385 	 * and the address again, we can break out of the potential infinite
5386 	 * loop.
5387 	 */
5388 	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
5389 
5390 	if (!(emulation_type & EMULTYPE_RETRY))
5391 		return false;
5392 
5393 	if (x86_page_table_writing_insn(ctxt))
5394 		return false;
5395 
5396 	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
5397 		return false;
5398 
5399 	vcpu->arch.last_retry_eip = ctxt->eip;
5400 	vcpu->arch.last_retry_addr = cr2;
5401 
5402 	if (!vcpu->arch.mmu.direct_map)
5403 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
5404 
5405 	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
5406 
5407 	return true;
5408 }
5409 
5410 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
5411 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
5412 
5413 static void kvm_smm_changed(struct kvm_vcpu *vcpu)
5414 {
5415 	if (!(vcpu->arch.hflags & HF_SMM_MASK)) {
5416 		/* This is a good place to trace that we are exiting SMM.  */
5417 		trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);
5418 
5419 		/* Process a latched INIT or SMI, if any.  */
5420 		kvm_make_request(KVM_REQ_EVENT, vcpu);
5421 	}
5422 
5423 	kvm_mmu_reset_context(vcpu);
5424 }
5425 
5426 static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
5427 {
5428 	unsigned changed = vcpu->arch.hflags ^ emul_flags;
5429 
5430 	vcpu->arch.hflags = emul_flags;
5431 
5432 	if (changed & HF_SMM_MASK)
5433 		kvm_smm_changed(vcpu);
5434 }
5435 
5436 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
5437 				unsigned long *db)
5438 {
5439 	u32 dr6 = 0;
5440 	int i;
5441 	u32 enable, rwlen;
5442 
5443 	enable = dr7;
5444 	rwlen = dr7 >> 16;
5445 	for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4)
5446 		if ((enable & 3) && (rwlen & 15) == type && db[i] == addr)
5447 			dr6 |= (1 << i);
5448 	return dr6;
5449 }
5450 
5451 static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
5452 {
5453 	struct kvm_run *kvm_run = vcpu->run;
5454 
5455 	/*
5456 	 * rflags is the old, "raw" value of the flags.  The new value has
5457 	 * not been saved yet.
5458 	 *
5459 	 * This is correct even for TF set by the guest, because "the
5460 	 * processor will not generate this exception after the instruction
5461 	 * that sets the TF flag".
5462 	 */
5463 	if (unlikely(rflags & X86_EFLAGS_TF)) {
5464 		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
5465 			kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
5466 						  DR6_RTM;
5467 			kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
5468 			kvm_run->debug.arch.exception = DB_VECTOR;
5469 			kvm_run->exit_reason = KVM_EXIT_DEBUG;
5470 			*r = EMULATE_USER_EXIT;
5471 		} else {
5472 			/*
5473 			 * "Certain debug exceptions may clear bit 0-3.  The
5474 			 * remaining contents of the DR6 register are never
5475 			 * cleared by the processor".
5476 			 */
5477 			vcpu->arch.dr6 &= ~15;
5478 			vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
5479 			kvm_queue_exception(vcpu, DB_VECTOR);
5480 		}
5481 	}
5482 }
5483 
5484 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
5485 {
5486 	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
5487 	int r = EMULATE_DONE;
5488 
5489 	kvm_x86_ops->skip_emulated_instruction(vcpu);
5490 	kvm_vcpu_check_singlestep(vcpu, rflags, &r);
5491 	return r == EMULATE_DONE;
5492 }
5493 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
5494 
5495 static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
5496 {
5497 	if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
5498 	    (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
5499 		struct kvm_run *kvm_run = vcpu->run;
5500 		unsigned long eip = kvm_get_linear_rip(vcpu);
5501 		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
5502 					   vcpu->arch.guest_debug_dr7,
5503 					   vcpu->arch.eff_db);
5504 
5505 		if (dr6 != 0) {
5506 			kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
5507 			kvm_run->debug.arch.pc = eip;
5508 			kvm_run->debug.arch.exception = DB_VECTOR;
5509 			kvm_run->exit_reason = KVM_EXIT_DEBUG;
5510 			*r = EMULATE_USER_EXIT;
5511 			return true;
5512 		}
5513 	}
5514 
5515 	if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
5516 	    !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) {
5517 		unsigned long eip = kvm_get_linear_rip(vcpu);
5518 		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
5519 					   vcpu->arch.dr7,
5520 					   vcpu->arch.db);
5521 
5522 		if (dr6 != 0) {
5523 			vcpu->arch.dr6 &= ~15;
5524 			vcpu->arch.dr6 |= dr6 | DR6_RTM;
5525 			kvm_queue_exception(vcpu, DB_VECTOR);
5526 			*r = EMULATE_DONE;
5527 			return true;
5528 		}
5529 	}
5530 
5531 	return false;
5532 }
5533 
5534 int x86_emulate_instruction(struct kvm_vcpu *vcpu,
5535 			    unsigned long cr2,
5536 			    int emulation_type,
5537 			    void *insn,
5538 			    int insn_len)
5539 {
5540 	int r;
5541 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5542 	bool writeback = true;
5543 	bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
5544 
5545 	/*
5546 	 * Clear write_fault_to_shadow_pgtable here to ensure it is
5547 	 * never reused.
5548 	 */
5549 	vcpu->arch.write_fault_to_shadow_pgtable = false;
5550 	kvm_clear_exception_queue(vcpu);
5551 
5552 	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
5553 		init_emulate_ctxt(vcpu);
5554 
5555 		/*
5556 		 * We will reenter on the same instruction since
5557 		 * we do not set complete_userspace_io.  This does not
5558 		 * handle watchpoints yet, those would be handled in
5559 		 * the emulate_ops.
5560 		 */
5561 		if (kvm_vcpu_check_breakpoint(vcpu, &r))
5562 			return r;
5563 
5564 		ctxt->interruptibility = 0;
5565 		ctxt->have_exception = false;
5566 		ctxt->exception.vector = -1;
5567 		ctxt->perm_ok = false;
5568 
5569 		ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
5570 
5571 		r = x86_decode_insn(ctxt, insn, insn_len);
5572 
5573 		trace_kvm_emulate_insn_start(vcpu);
5574 		++vcpu->stat.insn_emulation;
5575 		if (r != EMULATION_OK)  {
5576 			if (emulation_type & EMULTYPE_TRAP_UD)
5577 				return EMULATE_FAIL;
5578 			if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
5579 						emulation_type))
5580 				return EMULATE_DONE;
5581 			if (emulation_type & EMULTYPE_SKIP)
5582 				return EMULATE_FAIL;
5583 			return handle_emulation_failure(vcpu);
5584 		}
5585 	}
5586 
5587 	if (emulation_type & EMULTYPE_SKIP) {
5588 		kvm_rip_write(vcpu, ctxt->_eip);
5589 		if (ctxt->eflags & X86_EFLAGS_RF)
5590 			kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
5591 		return EMULATE_DONE;
5592 	}
5593 
5594 	if (retry_instruction(ctxt, cr2, emulation_type))
5595 		return EMULATE_DONE;
5596 
5597 	/* this is needed for vmware backdoor interface to work since it
5598 	   changes registers values  during IO operation */
5599 	if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
5600 		vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
5601 		emulator_invalidate_register_cache(ctxt);
5602 	}
5603 
5604 restart:
5605 	r = x86_emulate_insn(ctxt);
5606 
5607 	if (r == EMULATION_INTERCEPTED)
5608 		return EMULATE_DONE;
5609 
5610 	if (r == EMULATION_FAILED) {
5611 		if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
5612 					emulation_type))
5613 			return EMULATE_DONE;
5614 
5615 		return handle_emulation_failure(vcpu);
5616 	}
5617 
5618 	if (ctxt->have_exception) {
5619 		r = EMULATE_DONE;
5620 		if (inject_emulated_exception(vcpu))
5621 			return r;
5622 	} else if (vcpu->arch.pio.count) {
5623 		if (!vcpu->arch.pio.in) {
5624 			/* FIXME: return into emulator if single-stepping.  */
5625 			vcpu->arch.pio.count = 0;
5626 		} else {
5627 			writeback = false;
5628 			vcpu->arch.complete_userspace_io = complete_emulated_pio;
5629 		}
5630 		r = EMULATE_USER_EXIT;
5631 	} else if (vcpu->mmio_needed) {
5632 		if (!vcpu->mmio_is_write)
5633 			writeback = false;
5634 		r = EMULATE_USER_EXIT;
5635 		vcpu->arch.complete_userspace_io = complete_emulated_mmio;
5636 	} else if (r == EMULATION_RESTART)
5637 		goto restart;
5638 	else
5639 		r = EMULATE_DONE;
5640 
5641 	if (writeback) {
5642 		unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
5643 		toggle_interruptibility(vcpu, ctxt->interruptibility);
5644 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
5645 		if (vcpu->arch.hflags != ctxt->emul_flags)
5646 			kvm_set_hflags(vcpu, ctxt->emul_flags);
5647 		kvm_rip_write(vcpu, ctxt->eip);
5648 		if (r == EMULATE_DONE)
5649 			kvm_vcpu_check_singlestep(vcpu, rflags, &r);
5650 		if (!ctxt->have_exception ||
5651 		    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
5652 			__kvm_set_rflags(vcpu, ctxt->eflags);
5653 
5654 		/*
5655 		 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
5656 		 * do nothing, and it will be requested again as soon as
5657 		 * the shadow expires.  But we still need to check here,
5658 		 * because POPF has no interrupt shadow.
5659 		 */
5660 		if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
5661 			kvm_make_request(KVM_REQ_EVENT, vcpu);
5662 	} else
5663 		vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
5664 
5665 	return r;
5666 }
5667 EXPORT_SYMBOL_GPL(x86_emulate_instruction);
5668 
5669 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
5670 {
5671 	unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
5672 	int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
5673 					    size, port, &val, 1);
5674 	/* do not return to emulator after return from userspace */
5675 	vcpu->arch.pio.count = 0;
5676 	return ret;
5677 }
5678 EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
5679 
5680 static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
5681 {
5682 	unsigned long val;
5683 
5684 	/* We should only ever be called with arch.pio.count equal to 1 */
5685 	BUG_ON(vcpu->arch.pio.count != 1);
5686 
5687 	/* For size less than 4 we merge, else we zero extend */
5688 	val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
5689 					: 0;
5690 
5691 	/*
5692 	 * Since vcpu->arch.pio.count == 1 let emulator_pio_in_emulated perform
5693 	 * the copy and tracing
5694 	 */
5695 	emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, vcpu->arch.pio.size,
5696 				 vcpu->arch.pio.port, &val, 1);
5697 	kvm_register_write(vcpu, VCPU_REGS_RAX, val);
5698 
5699 	return 1;
5700 }
5701 
5702 int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, unsigned short port)
5703 {
5704 	unsigned long val;
5705 	int ret;
5706 
5707 	/* For size less than 4 we merge, else we zero extend */
5708 	val = (size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX) : 0;
5709 
5710 	ret = emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, size, port,
5711 				       &val, 1);
5712 	if (ret) {
5713 		kvm_register_write(vcpu, VCPU_REGS_RAX, val);
5714 		return ret;
5715 	}
5716 
5717 	vcpu->arch.complete_userspace_io = complete_fast_pio_in;
5718 
5719 	return 0;
5720 }
5721 EXPORT_SYMBOL_GPL(kvm_fast_pio_in);
5722 
5723 static int kvmclock_cpu_down_prep(unsigned int cpu)
5724 {
5725 	__this_cpu_write(cpu_tsc_khz, 0);
5726 	return 0;
5727 }
5728 
5729 static void tsc_khz_changed(void *data)
5730 {
5731 	struct cpufreq_freqs *freq = data;
5732 	unsigned long khz = 0;
5733 
5734 	if (data)
5735 		khz = freq->new;
5736 	else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
5737 		khz = cpufreq_quick_get(raw_smp_processor_id());
5738 	if (!khz)
5739 		khz = tsc_khz;
5740 	__this_cpu_write(cpu_tsc_khz, khz);
5741 }
5742 
5743 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
5744 				     void *data)
5745 {
5746 	struct cpufreq_freqs *freq = data;
5747 	struct kvm *kvm;
5748 	struct kvm_vcpu *vcpu;
5749 	int i, send_ipi = 0;
5750 
5751 	/*
5752 	 * We allow guests to temporarily run on slowing clocks,
5753 	 * provided we notify them after, or to run on accelerating
5754 	 * clocks, provided we notify them before.  Thus time never
5755 	 * goes backwards.
5756 	 *
5757 	 * However, we have a problem.  We can't atomically update
5758 	 * the frequency of a given CPU from this function; it is
5759 	 * merely a notifier, which can be called from any CPU.
5760 	 * Changing the TSC frequency at arbitrary points in time
5761 	 * requires a recomputation of local variables related to
5762 	 * the TSC for each VCPU.  We must flag these local variables
5763 	 * to be updated and be sure the update takes place with the
5764 	 * new frequency before any guests proceed.
5765 	 *
5766 	 * Unfortunately, the combination of hotplug CPU and frequency
5767 	 * change creates an intractable locking scenario; the order
5768 	 * of when these callouts happen is undefined with respect to
5769 	 * CPU hotplug, and they can race with each other.  As such,
5770 	 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
5771 	 * undefined; you can actually have a CPU frequency change take
5772 	 * place in between the computation of X and the setting of the
5773 	 * variable.  To protect against this problem, all updates of
5774 	 * the per_cpu tsc_khz variable are done in an interrupt
5775 	 * protected IPI, and all callers wishing to update the value
5776 	 * must wait for a synchronous IPI to complete (which is trivial
5777 	 * if the caller is on the CPU already).  This establishes the
5778 	 * necessary total order on variable updates.
5779 	 *
5780 	 * Note that because a guest time update may take place
5781 	 * anytime after the setting of the VCPU's request bit, the
5782 	 * correct TSC value must be set before the request.  However,
5783 	 * to ensure the update actually makes it to any guest which
5784 	 * starts running in hardware virtualization between the set
5785 	 * and the acquisition of the spinlock, we must also ping the
5786 	 * CPU after setting the request bit.
5787 	 *
5788 	 */
5789 
5790 	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
5791 		return 0;
5792 	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
5793 		return 0;
5794 
5795 	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
5796 
5797 	spin_lock(&kvm_lock);
5798 	list_for_each_entry(kvm, &vm_list, vm_list) {
5799 		kvm_for_each_vcpu(i, vcpu, kvm) {
5800 			if (vcpu->cpu != freq->cpu)
5801 				continue;
5802 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
5803 			if (vcpu->cpu != smp_processor_id())
5804 				send_ipi = 1;
5805 		}
5806 	}
5807 	spin_unlock(&kvm_lock);
5808 
5809 	if (freq->old < freq->new && send_ipi) {
5810 		/*
5811 		 * We upscale the frequency.  Must make the guest
5812 		 * doesn't see old kvmclock values while running with
5813 		 * the new frequency, otherwise we risk the guest sees
5814 		 * time go backwards.
5815 		 *
5816 		 * In case we update the frequency for another cpu
5817 		 * (which might be in guest context) send an interrupt
5818 		 * to kick the cpu out of guest context.  Next time
5819 		 * guest context is entered kvmclock will be updated,
5820 		 * so the guest will not see stale values.
5821 		 */
5822 		smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
5823 	}
5824 	return 0;
5825 }
5826 
5827 static struct notifier_block kvmclock_cpufreq_notifier_block = {
5828 	.notifier_call  = kvmclock_cpufreq_notifier
5829 };
5830 
5831 static int kvmclock_cpu_online(unsigned int cpu)
5832 {
5833 	tsc_khz_changed(NULL);
5834 	return 0;
5835 }
5836 
5837 static void kvm_timer_init(void)
5838 {
5839 	max_tsc_khz = tsc_khz;
5840 
5841 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
5842 #ifdef CONFIG_CPU_FREQ
5843 		struct cpufreq_policy policy;
5844 		int cpu;
5845 
5846 		memset(&policy, 0, sizeof(policy));
5847 		cpu = get_cpu();
5848 		cpufreq_get_policy(&policy, cpu);
5849 		if (policy.cpuinfo.max_freq)
5850 			max_tsc_khz = policy.cpuinfo.max_freq;
5851 		put_cpu();
5852 #endif
5853 		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
5854 					  CPUFREQ_TRANSITION_NOTIFIER);
5855 	}
5856 	pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
5857 
5858 	cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online",
5859 			  kvmclock_cpu_online, kvmclock_cpu_down_prep);
5860 }
5861 
5862 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
5863 
5864 int kvm_is_in_guest(void)
5865 {
5866 	return __this_cpu_read(current_vcpu) != NULL;
5867 }
5868 
5869 static int kvm_is_user_mode(void)
5870 {
5871 	int user_mode = 3;
5872 
5873 	if (__this_cpu_read(current_vcpu))
5874 		user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));
5875 
5876 	return user_mode != 0;
5877 }
5878 
5879 static unsigned long kvm_get_guest_ip(void)
5880 {
5881 	unsigned long ip = 0;
5882 
5883 	if (__this_cpu_read(current_vcpu))
5884 		ip = kvm_rip_read(__this_cpu_read(current_vcpu));
5885 
5886 	return ip;
5887 }
5888 
5889 static struct perf_guest_info_callbacks kvm_guest_cbs = {
5890 	.is_in_guest		= kvm_is_in_guest,
5891 	.is_user_mode		= kvm_is_user_mode,
5892 	.get_guest_ip		= kvm_get_guest_ip,
5893 };
5894 
5895 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
5896 {
5897 	__this_cpu_write(current_vcpu, vcpu);
5898 }
5899 EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
5900 
5901 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
5902 {
5903 	__this_cpu_write(current_vcpu, NULL);
5904 }
5905 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
5906 
5907 static void kvm_set_mmio_spte_mask(void)
5908 {
5909 	u64 mask;
5910 	int maxphyaddr = boot_cpu_data.x86_phys_bits;
5911 
5912 	/*
5913 	 * Set the reserved bits and the present bit of an paging-structure
5914 	 * entry to generate page fault with PFER.RSV = 1.
5915 	 */
5916 	 /* Mask the reserved physical address bits. */
5917 	mask = rsvd_bits(maxphyaddr, 51);
5918 
5919 	/* Bit 62 is always reserved for 32bit host. */
5920 	mask |= 0x3ull << 62;
5921 
5922 	/* Set the present bit. */
5923 	mask |= 1ull;
5924 
5925 #ifdef CONFIG_X86_64
5926 	/*
5927 	 * If reserved bit is not supported, clear the present bit to disable
5928 	 * mmio page fault.
5929 	 */
5930 	if (maxphyaddr == 52)
5931 		mask &= ~1ull;
5932 #endif
5933 
5934 	kvm_mmu_set_mmio_spte_mask(mask);
5935 }
5936 
5937 #ifdef CONFIG_X86_64
5938 static void pvclock_gtod_update_fn(struct work_struct *work)
5939 {
5940 	struct kvm *kvm;
5941 
5942 	struct kvm_vcpu *vcpu;
5943 	int i;
5944 
5945 	spin_lock(&kvm_lock);
5946 	list_for_each_entry(kvm, &vm_list, vm_list)
5947 		kvm_for_each_vcpu(i, vcpu, kvm)
5948 			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
5949 	atomic_set(&kvm_guest_has_master_clock, 0);
5950 	spin_unlock(&kvm_lock);
5951 }
5952 
5953 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
5954 
5955 /*
5956  * Notification about pvclock gtod data update.
5957  */
5958 static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
5959 			       void *priv)
5960 {
5961 	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
5962 	struct timekeeper *tk = priv;
5963 
5964 	update_pvclock_gtod(tk);
5965 
5966 	/* disable master clock if host does not trust, or does not
5967 	 * use, TSC clocksource
5968 	 */
5969 	if (gtod->clock.vclock_mode != VCLOCK_TSC &&
5970 	    atomic_read(&kvm_guest_has_master_clock) != 0)
5971 		queue_work(system_long_wq, &pvclock_gtod_work);
5972 
5973 	return 0;
5974 }
5975 
5976 static struct notifier_block pvclock_gtod_notifier = {
5977 	.notifier_call = pvclock_gtod_notify,
5978 };
5979 #endif
5980 
5981 int kvm_arch_init(void *opaque)
5982 {
5983 	int r;
5984 	struct kvm_x86_ops *ops = opaque;
5985 
5986 	if (kvm_x86_ops) {
5987 		printk(KERN_ERR "kvm: already loaded the other module\n");
5988 		r = -EEXIST;
5989 		goto out;
5990 	}
5991 
5992 	if (!ops->cpu_has_kvm_support()) {
5993 		printk(KERN_ERR "kvm: no hardware support\n");
5994 		r = -EOPNOTSUPP;
5995 		goto out;
5996 	}
5997 	if (ops->disabled_by_bios()) {
5998 		printk(KERN_ERR "kvm: disabled by bios\n");
5999 		r = -EOPNOTSUPP;
6000 		goto out;
6001 	}
6002 
6003 	r = -ENOMEM;
6004 	shared_msrs = alloc_percpu(struct kvm_shared_msrs);
6005 	if (!shared_msrs) {
6006 		printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
6007 		goto out;
6008 	}
6009 
6010 	r = kvm_mmu_module_init();
6011 	if (r)
6012 		goto out_free_percpu;
6013 
6014 	kvm_set_mmio_spte_mask();
6015 
6016 	kvm_x86_ops = ops;
6017 
6018 	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
6019 			PT_DIRTY_MASK, PT64_NX_MASK, 0,
6020 			PT_PRESENT_MASK);
6021 	kvm_timer_init();
6022 
6023 	perf_register_guest_info_callbacks(&kvm_guest_cbs);
6024 
6025 	if (boot_cpu_has(X86_FEATURE_XSAVE))
6026 		host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
6027 
6028 	kvm_lapic_init();
6029 #ifdef CONFIG_X86_64
6030 	pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
6031 #endif
6032 
6033 	return 0;
6034 
6035 out_free_percpu:
6036 	free_percpu(shared_msrs);
6037 out:
6038 	return r;
6039 }
6040 
6041 void kvm_arch_exit(void)
6042 {
6043 	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
6044 
6045 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
6046 		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
6047 					    CPUFREQ_TRANSITION_NOTIFIER);
6048 	cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
6049 #ifdef CONFIG_X86_64
6050 	pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
6051 #endif
6052 	kvm_x86_ops = NULL;
6053 	kvm_mmu_module_exit();
6054 	free_percpu(shared_msrs);
6055 }
6056 
6057 int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
6058 {
6059 	++vcpu->stat.halt_exits;
6060 	if (lapic_in_kernel(vcpu)) {
6061 		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
6062 		return 1;
6063 	} else {
6064 		vcpu->run->exit_reason = KVM_EXIT_HLT;
6065 		return 0;
6066 	}
6067 }
6068 EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
6069 
6070 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
6071 {
6072 	int ret = kvm_skip_emulated_instruction(vcpu);
6073 	/*
6074 	 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
6075 	 * KVM_EXIT_DEBUG here.
6076 	 */
6077 	return kvm_vcpu_halt(vcpu) && ret;
6078 }
6079 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
6080 
6081 /*
6082  * kvm_pv_kick_cpu_op:  Kick a vcpu.
6083  *
6084  * @apicid - apicid of vcpu to be kicked.
6085  */
6086 static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
6087 {
6088 	struct kvm_lapic_irq lapic_irq;
6089 
6090 	lapic_irq.shorthand = 0;
6091 	lapic_irq.dest_mode = 0;
6092 	lapic_irq.dest_id = apicid;
6093 	lapic_irq.msi_redir_hint = false;
6094 
6095 	lapic_irq.delivery_mode = APIC_DM_REMRD;
6096 	kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
6097 }
6098 
6099 void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
6100 {
6101 	vcpu->arch.apicv_active = false;
6102 	kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
6103 }
6104 
6105 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
6106 {
6107 	unsigned long nr, a0, a1, a2, a3, ret;
6108 	int op_64_bit, r;
6109 
6110 	r = kvm_skip_emulated_instruction(vcpu);
6111 
6112 	if (kvm_hv_hypercall_enabled(vcpu->kvm))
6113 		return kvm_hv_hypercall(vcpu);
6114 
6115 	nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
6116 	a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
6117 	a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
6118 	a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
6119 	a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
6120 
6121 	trace_kvm_hypercall(nr, a0, a1, a2, a3);
6122 
6123 	op_64_bit = is_64_bit_mode(vcpu);
6124 	if (!op_64_bit) {
6125 		nr &= 0xFFFFFFFF;
6126 		a0 &= 0xFFFFFFFF;
6127 		a1 &= 0xFFFFFFFF;
6128 		a2 &= 0xFFFFFFFF;
6129 		a3 &= 0xFFFFFFFF;
6130 	}
6131 
6132 	if (kvm_x86_ops->get_cpl(vcpu) != 0) {
6133 		ret = -KVM_EPERM;
6134 		goto out;
6135 	}
6136 
6137 	switch (nr) {
6138 	case KVM_HC_VAPIC_POLL_IRQ:
6139 		ret = 0;
6140 		break;
6141 	case KVM_HC_KICK_CPU:
6142 		kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
6143 		ret = 0;
6144 		break;
6145 	default:
6146 		ret = -KVM_ENOSYS;
6147 		break;
6148 	}
6149 out:
6150 	if (!op_64_bit)
6151 		ret = (u32)ret;
6152 	kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
6153 	++vcpu->stat.hypercalls;
6154 	return r;
6155 }
6156 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
6157 
6158 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
6159 {
6160 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
6161 	char instruction[3];
6162 	unsigned long rip = kvm_rip_read(vcpu);
6163 
6164 	kvm_x86_ops->patch_hypercall(vcpu, instruction);
6165 
6166 	return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
6167 }
6168 
6169 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
6170 {
6171 	return vcpu->run->request_interrupt_window &&
6172 		likely(!pic_in_kernel(vcpu->kvm));
6173 }
6174 
6175 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
6176 {
6177 	struct kvm_run *kvm_run = vcpu->run;
6178 
6179 	kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
6180 	kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
6181 	kvm_run->cr8 = kvm_get_cr8(vcpu);
6182 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
6183 	kvm_run->ready_for_interrupt_injection =
6184 		pic_in_kernel(vcpu->kvm) ||
6185 		kvm_vcpu_ready_for_interrupt_injection(vcpu);
6186 }
6187 
6188 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
6189 {
6190 	int max_irr, tpr;
6191 
6192 	if (!kvm_x86_ops->update_cr8_intercept)
6193 		return;
6194 
6195 	if (!lapic_in_kernel(vcpu))
6196 		return;
6197 
6198 	if (vcpu->arch.apicv_active)
6199 		return;
6200 
6201 	if (!vcpu->arch.apic->vapic_addr)
6202 		max_irr = kvm_lapic_find_highest_irr(vcpu);
6203 	else
6204 		max_irr = -1;
6205 
6206 	if (max_irr != -1)
6207 		max_irr >>= 4;
6208 
6209 	tpr = kvm_lapic_get_cr8(vcpu);
6210 
6211 	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
6212 }
6213 
6214 static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
6215 {
6216 	int r;
6217 
6218 	/* try to reinject previous events if any */
6219 	if (vcpu->arch.exception.pending) {
6220 		trace_kvm_inj_exception(vcpu->arch.exception.nr,
6221 					vcpu->arch.exception.has_error_code,
6222 					vcpu->arch.exception.error_code);
6223 
6224 		if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT)
6225 			__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
6226 					     X86_EFLAGS_RF);
6227 
6228 		if (vcpu->arch.exception.nr == DB_VECTOR &&
6229 		    (vcpu->arch.dr7 & DR7_GD)) {
6230 			vcpu->arch.dr7 &= ~DR7_GD;
6231 			kvm_update_dr7(vcpu);
6232 		}
6233 
6234 		kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
6235 					  vcpu->arch.exception.has_error_code,
6236 					  vcpu->arch.exception.error_code,
6237 					  vcpu->arch.exception.reinject);
6238 		return 0;
6239 	}
6240 
6241 	if (vcpu->arch.nmi_injected) {
6242 		kvm_x86_ops->set_nmi(vcpu);
6243 		return 0;
6244 	}
6245 
6246 	if (vcpu->arch.interrupt.pending) {
6247 		kvm_x86_ops->set_irq(vcpu);
6248 		return 0;
6249 	}
6250 
6251 	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
6252 		r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
6253 		if (r != 0)
6254 			return r;
6255 	}
6256 
6257 	/* try to inject new event if pending */
6258 	if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
6259 		vcpu->arch.smi_pending = false;
6260 		enter_smm(vcpu);
6261 	} else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
6262 		--vcpu->arch.nmi_pending;
6263 		vcpu->arch.nmi_injected = true;
6264 		kvm_x86_ops->set_nmi(vcpu);
6265 	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
6266 		/*
6267 		 * Because interrupts can be injected asynchronously, we are
6268 		 * calling check_nested_events again here to avoid a race condition.
6269 		 * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
6270 		 * proposal and current concerns.  Perhaps we should be setting
6271 		 * KVM_REQ_EVENT only on certain events and not unconditionally?
6272 		 */
6273 		if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
6274 			r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
6275 			if (r != 0)
6276 				return r;
6277 		}
6278 		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
6279 			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
6280 					    false);
6281 			kvm_x86_ops->set_irq(vcpu);
6282 		}
6283 	}
6284 
6285 	return 0;
6286 }
6287 
6288 static void process_nmi(struct kvm_vcpu *vcpu)
6289 {
6290 	unsigned limit = 2;
6291 
6292 	/*
6293 	 * x86 is limited to one NMI running, and one NMI pending after it.
6294 	 * If an NMI is already in progress, limit further NMIs to just one.
6295 	 * Otherwise, allow two (and we'll inject the first one immediately).
6296 	 */
6297 	if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
6298 		limit = 1;
6299 
6300 	vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
6301 	vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
6302 	kvm_make_request(KVM_REQ_EVENT, vcpu);
6303 }
6304 
6305 #define put_smstate(type, buf, offset, val)			  \
6306 	*(type *)((buf) + (offset) - 0x7e00) = val
6307 
6308 static u32 enter_smm_get_segment_flags(struct kvm_segment *seg)
6309 {
6310 	u32 flags = 0;
6311 	flags |= seg->g       << 23;
6312 	flags |= seg->db      << 22;
6313 	flags |= seg->l       << 21;
6314 	flags |= seg->avl     << 20;
6315 	flags |= seg->present << 15;
6316 	flags |= seg->dpl     << 13;
6317 	flags |= seg->s       << 12;
6318 	flags |= seg->type    << 8;
6319 	return flags;
6320 }
6321 
6322 static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
6323 {
6324 	struct kvm_segment seg;
6325 	int offset;
6326 
6327 	kvm_get_segment(vcpu, &seg, n);
6328 	put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector);
6329 
6330 	if (n < 3)
6331 		offset = 0x7f84 + n * 12;
6332 	else
6333 		offset = 0x7f2c + (n - 3) * 12;
6334 
6335 	put_smstate(u32, buf, offset + 8, seg.base);
6336 	put_smstate(u32, buf, offset + 4, seg.limit);
6337 	put_smstate(u32, buf, offset, enter_smm_get_segment_flags(&seg));
6338 }
6339 
6340 #ifdef CONFIG_X86_64
6341 static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
6342 {
6343 	struct kvm_segment seg;
6344 	int offset;
6345 	u16 flags;
6346 
6347 	kvm_get_segment(vcpu, &seg, n);
6348 	offset = 0x7e00 + n * 16;
6349 
6350 	flags = enter_smm_get_segment_flags(&seg) >> 8;
6351 	put_smstate(u16, buf, offset, seg.selector);
6352 	put_smstate(u16, buf, offset + 2, flags);
6353 	put_smstate(u32, buf, offset + 4, seg.limit);
6354 	put_smstate(u64, buf, offset + 8, seg.base);
6355 }
6356 #endif
6357 
6358 static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
6359 {
6360 	struct desc_ptr dt;
6361 	struct kvm_segment seg;
6362 	unsigned long val;
6363 	int i;
6364 
6365 	put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu));
6366 	put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu));
6367 	put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu));
6368 	put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu));
6369 
6370 	for (i = 0; i < 8; i++)
6371 		put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read(vcpu, i));
6372 
6373 	kvm_get_dr(vcpu, 6, &val);
6374 	put_smstate(u32, buf, 0x7fcc, (u32)val);
6375 	kvm_get_dr(vcpu, 7, &val);
6376 	put_smstate(u32, buf, 0x7fc8, (u32)val);
6377 
6378 	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
6379 	put_smstate(u32, buf, 0x7fc4, seg.selector);
6380 	put_smstate(u32, buf, 0x7f64, seg.base);
6381 	put_smstate(u32, buf, 0x7f60, seg.limit);
6382 	put_smstate(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg));
6383 
6384 	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
6385 	put_smstate(u32, buf, 0x7fc0, seg.selector);
6386 	put_smstate(u32, buf, 0x7f80, seg.base);
6387 	put_smstate(u32, buf, 0x7f7c, seg.limit);
6388 	put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg));
6389 
6390 	kvm_x86_ops->get_gdt(vcpu, &dt);
6391 	put_smstate(u32, buf, 0x7f74, dt.address);
6392 	put_smstate(u32, buf, 0x7f70, dt.size);
6393 
6394 	kvm_x86_ops->get_idt(vcpu, &dt);
6395 	put_smstate(u32, buf, 0x7f58, dt.address);
6396 	put_smstate(u32, buf, 0x7f54, dt.size);
6397 
6398 	for (i = 0; i < 6; i++)
6399 		enter_smm_save_seg_32(vcpu, buf, i);
6400 
6401 	put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu));
6402 
6403 	/* revision id */
6404 	put_smstate(u32, buf, 0x7efc, 0x00020000);
6405 	put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
6406 }
6407 
6408 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
6409 {
6410 #ifdef CONFIG_X86_64
6411 	struct desc_ptr dt;
6412 	struct kvm_segment seg;
6413 	unsigned long val;
6414 	int i;
6415 
6416 	for (i = 0; i < 16; i++)
6417 		put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i));
6418 
6419 	put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu));
6420 	put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu));
6421 
6422 	kvm_get_dr(vcpu, 6, &val);
6423 	put_smstate(u64, buf, 0x7f68, val);
6424 	kvm_get_dr(vcpu, 7, &val);
6425 	put_smstate(u64, buf, 0x7f60, val);
6426 
6427 	put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu));
6428 	put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu));
6429 	put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu));
6430 
6431 	put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase);
6432 
6433 	/* revision id */
6434 	put_smstate(u32, buf, 0x7efc, 0x00020064);
6435 
6436 	put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer);
6437 
6438 	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
6439 	put_smstate(u16, buf, 0x7e90, seg.selector);
6440 	put_smstate(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8);
6441 	put_smstate(u32, buf, 0x7e94, seg.limit);
6442 	put_smstate(u64, buf, 0x7e98, seg.base);
6443 
6444 	kvm_x86_ops->get_idt(vcpu, &dt);
6445 	put_smstate(u32, buf, 0x7e84, dt.size);
6446 	put_smstate(u64, buf, 0x7e88, dt.address);
6447 
6448 	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
6449 	put_smstate(u16, buf, 0x7e70, seg.selector);
6450 	put_smstate(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8);
6451 	put_smstate(u32, buf, 0x7e74, seg.limit);
6452 	put_smstate(u64, buf, 0x7e78, seg.base);
6453 
6454 	kvm_x86_ops->get_gdt(vcpu, &dt);
6455 	put_smstate(u32, buf, 0x7e64, dt.size);
6456 	put_smstate(u64, buf, 0x7e68, dt.address);
6457 
6458 	for (i = 0; i < 6; i++)
6459 		enter_smm_save_seg_64(vcpu, buf, i);
6460 #else
6461 	WARN_ON_ONCE(1);
6462 #endif
6463 }
6464 
6465 static void enter_smm(struct kvm_vcpu *vcpu)
6466 {
6467 	struct kvm_segment cs, ds;
6468 	struct desc_ptr dt;
6469 	char buf[512];
6470 	u32 cr0;
6471 
6472 	trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
6473 	vcpu->arch.hflags |= HF_SMM_MASK;
6474 	memset(buf, 0, 512);
6475 	if (guest_cpuid_has_longmode(vcpu))
6476 		enter_smm_save_state_64(vcpu, buf);
6477 	else
6478 		enter_smm_save_state_32(vcpu, buf);
6479 
6480 	kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
6481 
6482 	if (kvm_x86_ops->get_nmi_mask(vcpu))
6483 		vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
6484 	else
6485 		kvm_x86_ops->set_nmi_mask(vcpu, true);
6486 
6487 	kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
6488 	kvm_rip_write(vcpu, 0x8000);
6489 
6490 	cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
6491 	kvm_x86_ops->set_cr0(vcpu, cr0);
6492 	vcpu->arch.cr0 = cr0;
6493 
6494 	kvm_x86_ops->set_cr4(vcpu, 0);
6495 
6496 	/* Undocumented: IDT limit is set to zero on entry to SMM.  */
6497 	dt.address = dt.size = 0;
6498 	kvm_x86_ops->set_idt(vcpu, &dt);
6499 
6500 	__kvm_set_dr(vcpu, 7, DR7_FIXED_1);
6501 
6502 	cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
6503 	cs.base = vcpu->arch.smbase;
6504 
6505 	ds.selector = 0;
6506 	ds.base = 0;
6507 
6508 	cs.limit    = ds.limit = 0xffffffff;
6509 	cs.type     = ds.type = 0x3;
6510 	cs.dpl      = ds.dpl = 0;
6511 	cs.db       = ds.db = 0;
6512 	cs.s        = ds.s = 1;
6513 	cs.l        = ds.l = 0;
6514 	cs.g        = ds.g = 1;
6515 	cs.avl      = ds.avl = 0;
6516 	cs.present  = ds.present = 1;
6517 	cs.unusable = ds.unusable = 0;
6518 	cs.padding  = ds.padding = 0;
6519 
6520 	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
6521 	kvm_set_segment(vcpu, &ds, VCPU_SREG_DS);
6522 	kvm_set_segment(vcpu, &ds, VCPU_SREG_ES);
6523 	kvm_set_segment(vcpu, &ds, VCPU_SREG_FS);
6524 	kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
6525 	kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
6526 
6527 	if (guest_cpuid_has_longmode(vcpu))
6528 		kvm_x86_ops->set_efer(vcpu, 0);
6529 
6530 	kvm_update_cpuid(vcpu);
6531 	kvm_mmu_reset_context(vcpu);
6532 }
6533 
6534 static void process_smi(struct kvm_vcpu *vcpu)
6535 {
6536 	vcpu->arch.smi_pending = true;
6537 	kvm_make_request(KVM_REQ_EVENT, vcpu);
6538 }
6539 
6540 void kvm_make_scan_ioapic_request(struct kvm *kvm)
6541 {
6542 	kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
6543 }
6544 
6545 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
6546 {
6547 	u64 eoi_exit_bitmap[4];
6548 
6549 	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
6550 		return;
6551 
6552 	bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
6553 
6554 	if (irqchip_split(vcpu->kvm))
6555 		kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
6556 	else {
6557 		if (vcpu->arch.apicv_active)
6558 			kvm_x86_ops->sync_pir_to_irr(vcpu);
6559 		kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
6560 	}
6561 	bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors,
6562 		  vcpu_to_synic(vcpu)->vec_bitmap, 256);
6563 	kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
6564 }
6565 
6566 static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
6567 {
6568 	++vcpu->stat.tlb_flush;
6569 	kvm_x86_ops->tlb_flush(vcpu);
6570 }
6571 
6572 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
6573 {
6574 	struct page *page = NULL;
6575 
6576 	if (!lapic_in_kernel(vcpu))
6577 		return;
6578 
6579 	if (!kvm_x86_ops->set_apic_access_page_addr)
6580 		return;
6581 
6582 	page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
6583 	if (is_error_page(page))
6584 		return;
6585 	kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
6586 
6587 	/*
6588 	 * Do not pin apic access page in memory, the MMU notifier
6589 	 * will call us again if it is migrated or swapped out.
6590 	 */
6591 	put_page(page);
6592 }
6593 EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);
6594 
6595 void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
6596 					   unsigned long address)
6597 {
6598 	/*
6599 	 * The physical address of apic access page is stored in the VMCS.
6600 	 * Update it when it becomes invalid.
6601 	 */
6602 	if (address == gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT))
6603 		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
6604 }
6605 
6606 /*
6607  * Returns 1 to let vcpu_run() continue the guest execution loop without
6608  * exiting to the userspace.  Otherwise, the value will be returned to the
6609  * userspace.
6610  */
6611 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
6612 {
6613 	int r;
6614 	bool req_int_win =
6615 		dm_request_for_irq_injection(vcpu) &&
6616 		kvm_cpu_accept_dm_intr(vcpu);
6617 
6618 	bool req_immediate_exit = false;
6619 
6620 	if (vcpu->requests) {
6621 		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
6622 			kvm_mmu_unload(vcpu);
6623 		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
6624 			__kvm_migrate_timers(vcpu);
6625 		if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
6626 			kvm_gen_update_masterclock(vcpu->kvm);
6627 		if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
6628 			kvm_gen_kvmclock_update(vcpu);
6629 		if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
6630 			r = kvm_guest_time_update(vcpu);
6631 			if (unlikely(r))
6632 				goto out;
6633 		}
6634 		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
6635 			kvm_mmu_sync_roots(vcpu);
6636 		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
6637 			kvm_vcpu_flush_tlb(vcpu);
6638 		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
6639 			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
6640 			r = 0;
6641 			goto out;
6642 		}
6643 		if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
6644 			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
6645 			r = 0;
6646 			goto out;
6647 		}
6648 		if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
6649 			vcpu->fpu_active = 0;
6650 			kvm_x86_ops->fpu_deactivate(vcpu);
6651 		}
6652 		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
6653 			/* Page is swapped out. Do synthetic halt */
6654 			vcpu->arch.apf.halted = true;
6655 			r = 1;
6656 			goto out;
6657 		}
6658 		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
6659 			record_steal_time(vcpu);
6660 		if (kvm_check_request(KVM_REQ_SMI, vcpu))
6661 			process_smi(vcpu);
6662 		if (kvm_check_request(KVM_REQ_NMI, vcpu))
6663 			process_nmi(vcpu);
6664 		if (kvm_check_request(KVM_REQ_PMU, vcpu))
6665 			kvm_pmu_handle_event(vcpu);
6666 		if (kvm_check_request(KVM_REQ_PMI, vcpu))
6667 			kvm_pmu_deliver_pmi(vcpu);
6668 		if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) {
6669 			BUG_ON(vcpu->arch.pending_ioapic_eoi > 255);
6670 			if (test_bit(vcpu->arch.pending_ioapic_eoi,
6671 				     vcpu->arch.ioapic_handled_vectors)) {
6672 				vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI;
6673 				vcpu->run->eoi.vector =
6674 						vcpu->arch.pending_ioapic_eoi;
6675 				r = 0;
6676 				goto out;
6677 			}
6678 		}
6679 		if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
6680 			vcpu_scan_ioapic(vcpu);
6681 		if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
6682 			kvm_vcpu_reload_apic_access_page(vcpu);
6683 		if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
6684 			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
6685 			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
6686 			r = 0;
6687 			goto out;
6688 		}
6689 		if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
6690 			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
6691 			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
6692 			r = 0;
6693 			goto out;
6694 		}
6695 		if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) {
6696 			vcpu->run->exit_reason = KVM_EXIT_HYPERV;
6697 			vcpu->run->hyperv = vcpu->arch.hyperv.exit;
6698 			r = 0;
6699 			goto out;
6700 		}
6701 
6702 		/*
6703 		 * KVM_REQ_HV_STIMER has to be processed after
6704 		 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers
6705 		 * depend on the guest clock being up-to-date
6706 		 */
6707 		if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
6708 			kvm_hv_process_stimers(vcpu);
6709 	}
6710 
6711 	/*
6712 	 * KVM_REQ_EVENT is not set when posted interrupts are set by
6713 	 * VT-d hardware, so we have to update RVI unconditionally.
6714 	 */
6715 	if (kvm_lapic_enabled(vcpu)) {
6716 		/*
6717 		 * Update architecture specific hints for APIC
6718 		 * virtual interrupt delivery.
6719 		 */
6720 		if (vcpu->arch.apicv_active)
6721 			kvm_x86_ops->hwapic_irr_update(vcpu,
6722 				kvm_lapic_find_highest_irr(vcpu));
6723 	}
6724 
6725 	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
6726 		kvm_apic_accept_events(vcpu);
6727 		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
6728 			r = 1;
6729 			goto out;
6730 		}
6731 
6732 		if (inject_pending_event(vcpu, req_int_win) != 0)
6733 			req_immediate_exit = true;
6734 		else {
6735 			/* Enable NMI/IRQ window open exits if needed.
6736 			 *
6737 			 * SMIs have two cases: 1) they can be nested, and
6738 			 * then there is nothing to do here because RSM will
6739 			 * cause a vmexit anyway; 2) or the SMI can be pending
6740 			 * because inject_pending_event has completed the
6741 			 * injection of an IRQ or NMI from the previous vmexit,
6742 			 * and then we request an immediate exit to inject the SMI.
6743 			 */
6744 			if (vcpu->arch.smi_pending && !is_smm(vcpu))
6745 				req_immediate_exit = true;
6746 			if (vcpu->arch.nmi_pending)
6747 				kvm_x86_ops->enable_nmi_window(vcpu);
6748 			if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
6749 				kvm_x86_ops->enable_irq_window(vcpu);
6750 		}
6751 
6752 		if (kvm_lapic_enabled(vcpu)) {
6753 			update_cr8_intercept(vcpu);
6754 			kvm_lapic_sync_to_vapic(vcpu);
6755 		}
6756 	}
6757 
6758 	r = kvm_mmu_reload(vcpu);
6759 	if (unlikely(r)) {
6760 		goto cancel_injection;
6761 	}
6762 
6763 	preempt_disable();
6764 
6765 	kvm_x86_ops->prepare_guest_switch(vcpu);
6766 	if (vcpu->fpu_active)
6767 		kvm_load_guest_fpu(vcpu);
6768 	vcpu->mode = IN_GUEST_MODE;
6769 
6770 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
6771 
6772 	/*
6773 	 * We should set ->mode before check ->requests,
6774 	 * Please see the comment in kvm_make_all_cpus_request.
6775 	 * This also orders the write to mode from any reads
6776 	 * to the page tables done while the VCPU is running.
6777 	 * Please see the comment in kvm_flush_remote_tlbs.
6778 	 */
6779 	smp_mb__after_srcu_read_unlock();
6780 
6781 	local_irq_disable();
6782 
6783 	if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
6784 	    || need_resched() || signal_pending(current)) {
6785 		vcpu->mode = OUTSIDE_GUEST_MODE;
6786 		smp_wmb();
6787 		local_irq_enable();
6788 		preempt_enable();
6789 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
6790 		r = 1;
6791 		goto cancel_injection;
6792 	}
6793 
6794 	kvm_load_guest_xcr0(vcpu);
6795 
6796 	if (req_immediate_exit) {
6797 		kvm_make_request(KVM_REQ_EVENT, vcpu);
6798 		smp_send_reschedule(vcpu->cpu);
6799 	}
6800 
6801 	trace_kvm_entry(vcpu->vcpu_id);
6802 	wait_lapic_expire(vcpu);
6803 	guest_enter_irqoff();
6804 
6805 	if (unlikely(vcpu->arch.switch_db_regs)) {
6806 		set_debugreg(0, 7);
6807 		set_debugreg(vcpu->arch.eff_db[0], 0);
6808 		set_debugreg(vcpu->arch.eff_db[1], 1);
6809 		set_debugreg(vcpu->arch.eff_db[2], 2);
6810 		set_debugreg(vcpu->arch.eff_db[3], 3);
6811 		set_debugreg(vcpu->arch.dr6, 6);
6812 		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
6813 	}
6814 
6815 	kvm_x86_ops->run(vcpu);
6816 
6817 	/*
6818 	 * Do this here before restoring debug registers on the host.  And
6819 	 * since we do this before handling the vmexit, a DR access vmexit
6820 	 * can (a) read the correct value of the debug registers, (b) set
6821 	 * KVM_DEBUGREG_WONT_EXIT again.
6822 	 */
6823 	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
6824 		WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
6825 		kvm_x86_ops->sync_dirty_debug_regs(vcpu);
6826 		kvm_update_dr0123(vcpu);
6827 		kvm_update_dr6(vcpu);
6828 		kvm_update_dr7(vcpu);
6829 		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
6830 	}
6831 
6832 	/*
6833 	 * If the guest has used debug registers, at least dr7
6834 	 * will be disabled while returning to the host.
6835 	 * If we don't have active breakpoints in the host, we don't
6836 	 * care about the messed up debug address registers. But if
6837 	 * we have some of them active, restore the old state.
6838 	 */
6839 	if (hw_breakpoint_active())
6840 		hw_breakpoint_restore();
6841 
6842 	vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
6843 
6844 	vcpu->mode = OUTSIDE_GUEST_MODE;
6845 	smp_wmb();
6846 
6847 	kvm_put_guest_xcr0(vcpu);
6848 
6849 	kvm_x86_ops->handle_external_intr(vcpu);
6850 
6851 	++vcpu->stat.exits;
6852 
6853 	guest_exit_irqoff();
6854 
6855 	local_irq_enable();
6856 	preempt_enable();
6857 
6858 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
6859 
6860 	/*
6861 	 * Profile KVM exit RIPs:
6862 	 */
6863 	if (unlikely(prof_on == KVM_PROFILING)) {
6864 		unsigned long rip = kvm_rip_read(vcpu);
6865 		profile_hit(KVM_PROFILING, (void *)rip);
6866 	}
6867 
6868 	if (unlikely(vcpu->arch.tsc_always_catchup))
6869 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
6870 
6871 	if (vcpu->arch.apic_attention)
6872 		kvm_lapic_sync_from_vapic(vcpu);
6873 
6874 	r = kvm_x86_ops->handle_exit(vcpu);
6875 	return r;
6876 
6877 cancel_injection:
6878 	kvm_x86_ops->cancel_injection(vcpu);
6879 	if (unlikely(vcpu->arch.apic_attention))
6880 		kvm_lapic_sync_from_vapic(vcpu);
6881 out:
6882 	return r;
6883 }
6884 
6885 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
6886 {
6887 	if (!kvm_arch_vcpu_runnable(vcpu) &&
6888 	    (!kvm_x86_ops->pre_block || kvm_x86_ops->pre_block(vcpu) == 0)) {
6889 		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
6890 		kvm_vcpu_block(vcpu);
6891 		vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
6892 
6893 		if (kvm_x86_ops->post_block)
6894 			kvm_x86_ops->post_block(vcpu);
6895 
6896 		if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
6897 			return 1;
6898 	}
6899 
6900 	kvm_apic_accept_events(vcpu);
6901 	switch(vcpu->arch.mp_state) {
6902 	case KVM_MP_STATE_HALTED:
6903 		vcpu->arch.pv.pv_unhalted = false;
6904 		vcpu->arch.mp_state =
6905 			KVM_MP_STATE_RUNNABLE;
6906 	case KVM_MP_STATE_RUNNABLE:
6907 		vcpu->arch.apf.halted = false;
6908 		break;
6909 	case KVM_MP_STATE_INIT_RECEIVED:
6910 		break;
6911 	default:
6912 		return -EINTR;
6913 		break;
6914 	}
6915 	return 1;
6916 }
6917 
6918 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
6919 {
6920 	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
6921 		!vcpu->arch.apf.halted);
6922 }
6923 
6924 static int vcpu_run(struct kvm_vcpu *vcpu)
6925 {
6926 	int r;
6927 	struct kvm *kvm = vcpu->kvm;
6928 
6929 	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
6930 
6931 	for (;;) {
6932 		if (kvm_vcpu_running(vcpu)) {
6933 			r = vcpu_enter_guest(vcpu);
6934 		} else {
6935 			r = vcpu_block(kvm, vcpu);
6936 		}
6937 
6938 		if (r <= 0)
6939 			break;
6940 
6941 		clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
6942 		if (kvm_cpu_has_pending_timer(vcpu))
6943 			kvm_inject_pending_timer_irqs(vcpu);
6944 
6945 		if (dm_request_for_irq_injection(vcpu) &&
6946 			kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
6947 			r = 0;
6948 			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
6949 			++vcpu->stat.request_irq_exits;
6950 			break;
6951 		}
6952 
6953 		kvm_check_async_pf_completion(vcpu);
6954 
6955 		if (signal_pending(current)) {
6956 			r = -EINTR;
6957 			vcpu->run->exit_reason = KVM_EXIT_INTR;
6958 			++vcpu->stat.signal_exits;
6959 			break;
6960 		}
6961 		if (need_resched()) {
6962 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
6963 			cond_resched();
6964 			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
6965 		}
6966 	}
6967 
6968 	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
6969 
6970 	return r;
6971 }
6972 
6973 static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
6974 {
6975 	int r;
6976 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
6977 	r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
6978 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
6979 	if (r != EMULATE_DONE)
6980 		return 0;
6981 	return 1;
6982 }
6983 
6984 static int complete_emulated_pio(struct kvm_vcpu *vcpu)
6985 {
6986 	BUG_ON(!vcpu->arch.pio.count);
6987 
6988 	return complete_emulated_io(vcpu);
6989 }
6990 
6991 /*
6992  * Implements the following, as a state machine:
6993  *
6994  * read:
6995  *   for each fragment
6996  *     for each mmio piece in the fragment
6997  *       write gpa, len
6998  *       exit
6999  *       copy data
7000  *   execute insn
7001  *
7002  * write:
7003  *   for each fragment
7004  *     for each mmio piece in the fragment
7005  *       write gpa, len
7006  *       copy data
7007  *       exit
7008  */
7009 static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
7010 {
7011 	struct kvm_run *run = vcpu->run;
7012 	struct kvm_mmio_fragment *frag;
7013 	unsigned len;
7014 
7015 	BUG_ON(!vcpu->mmio_needed);
7016 
7017 	/* Complete previous fragment */
7018 	frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
7019 	len = min(8u, frag->len);
7020 	if (!vcpu->mmio_is_write)
7021 		memcpy(frag->data, run->mmio.data, len);
7022 
7023 	if (frag->len <= 8) {
7024 		/* Switch to the next fragment. */
7025 		frag++;
7026 		vcpu->mmio_cur_fragment++;
7027 	} else {
7028 		/* Go forward to the next mmio piece. */
7029 		frag->data += len;
7030 		frag->gpa += len;
7031 		frag->len -= len;
7032 	}
7033 
7034 	if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
7035 		vcpu->mmio_needed = 0;
7036 
7037 		/* FIXME: return into emulator if single-stepping.  */
7038 		if (vcpu->mmio_is_write)
7039 			return 1;
7040 		vcpu->mmio_read_completed = 1;
7041 		return complete_emulated_io(vcpu);
7042 	}
7043 
7044 	run->exit_reason = KVM_EXIT_MMIO;
7045 	run->mmio.phys_addr = frag->gpa;
7046 	if (vcpu->mmio_is_write)
7047 		memcpy(run->mmio.data, frag->data, min(8u, frag->len));
7048 	run->mmio.len = min(8u, frag->len);
7049 	run->mmio.is_write = vcpu->mmio_is_write;
7050 	vcpu->arch.complete_userspace_io = complete_emulated_mmio;
7051 	return 0;
7052 }
7053 
7054 
7055 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
7056 {
7057 	struct fpu *fpu = &current->thread.fpu;
7058 	int r;
7059 	sigset_t sigsaved;
7060 
7061 	fpu__activate_curr(fpu);
7062 
7063 	if (vcpu->sigset_active)
7064 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
7065 
7066 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
7067 		kvm_vcpu_block(vcpu);
7068 		kvm_apic_accept_events(vcpu);
7069 		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
7070 		r = -EAGAIN;
7071 		goto out;
7072 	}
7073 
7074 	/* re-sync apic's tpr */
7075 	if (!lapic_in_kernel(vcpu)) {
7076 		if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
7077 			r = -EINVAL;
7078 			goto out;
7079 		}
7080 	}
7081 
7082 	if (unlikely(vcpu->arch.complete_userspace_io)) {
7083 		int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
7084 		vcpu->arch.complete_userspace_io = NULL;
7085 		r = cui(vcpu);
7086 		if (r <= 0)
7087 			goto out;
7088 	} else
7089 		WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
7090 
7091 	r = vcpu_run(vcpu);
7092 
7093 out:
7094 	post_kvm_run_save(vcpu);
7095 	if (vcpu->sigset_active)
7096 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
7097 
7098 	return r;
7099 }
7100 
7101 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
7102 {
7103 	if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
7104 		/*
7105 		 * We are here if userspace calls get_regs() in the middle of
7106 		 * instruction emulation. Registers state needs to be copied
7107 		 * back from emulation context to vcpu. Userspace shouldn't do
7108 		 * that usually, but some bad designed PV devices (vmware
7109 		 * backdoor interface) need this to work
7110 		 */
7111 		emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
7112 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
7113 	}
7114 	regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
7115 	regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
7116 	regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
7117 	regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
7118 	regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
7119 	regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
7120 	regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
7121 	regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
7122 #ifdef CONFIG_X86_64
7123 	regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
7124 	regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
7125 	regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
7126 	regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
7127 	regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
7128 	regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
7129 	regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
7130 	regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
7131 #endif
7132 
7133 	regs->rip = kvm_rip_read(vcpu);
7134 	regs->rflags = kvm_get_rflags(vcpu);
7135 
7136 	return 0;
7137 }
7138 
7139 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
7140 {
7141 	vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
7142 	vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
7143 
7144 	kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
7145 	kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
7146 	kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
7147 	kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
7148 	kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
7149 	kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
7150 	kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
7151 	kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
7152 #ifdef CONFIG_X86_64
7153 	kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
7154 	kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
7155 	kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
7156 	kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
7157 	kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
7158 	kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
7159 	kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
7160 	kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
7161 #endif
7162 
7163 	kvm_rip_write(vcpu, regs->rip);
7164 	kvm_set_rflags(vcpu, regs->rflags);
7165 
7166 	vcpu->arch.exception.pending = false;
7167 
7168 	kvm_make_request(KVM_REQ_EVENT, vcpu);
7169 
7170 	return 0;
7171 }
7172 
7173 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
7174 {
7175 	struct kvm_segment cs;
7176 
7177 	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
7178 	*db = cs.db;
7179 	*l = cs.l;
7180 }
7181 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
7182 
7183 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
7184 				  struct kvm_sregs *sregs)
7185 {
7186 	struct desc_ptr dt;
7187 
7188 	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
7189 	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
7190 	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
7191 	kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
7192 	kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
7193 	kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
7194 
7195 	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
7196 	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
7197 
7198 	kvm_x86_ops->get_idt(vcpu, &dt);
7199 	sregs->idt.limit = dt.size;
7200 	sregs->idt.base = dt.address;
7201 	kvm_x86_ops->get_gdt(vcpu, &dt);
7202 	sregs->gdt.limit = dt.size;
7203 	sregs->gdt.base = dt.address;
7204 
7205 	sregs->cr0 = kvm_read_cr0(vcpu);
7206 	sregs->cr2 = vcpu->arch.cr2;
7207 	sregs->cr3 = kvm_read_cr3(vcpu);
7208 	sregs->cr4 = kvm_read_cr4(vcpu);
7209 	sregs->cr8 = kvm_get_cr8(vcpu);
7210 	sregs->efer = vcpu->arch.efer;
7211 	sregs->apic_base = kvm_get_apic_base(vcpu);
7212 
7213 	memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
7214 
7215 	if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
7216 		set_bit(vcpu->arch.interrupt.nr,
7217 			(unsigned long *)sregs->interrupt_bitmap);
7218 
7219 	return 0;
7220 }
7221 
7222 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
7223 				    struct kvm_mp_state *mp_state)
7224 {
7225 	kvm_apic_accept_events(vcpu);
7226 	if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
7227 					vcpu->arch.pv.pv_unhalted)
7228 		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
7229 	else
7230 		mp_state->mp_state = vcpu->arch.mp_state;
7231 
7232 	return 0;
7233 }
7234 
7235 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
7236 				    struct kvm_mp_state *mp_state)
7237 {
7238 	if (!lapic_in_kernel(vcpu) &&
7239 	    mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
7240 		return -EINVAL;
7241 
7242 	if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
7243 		vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
7244 		set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
7245 	} else
7246 		vcpu->arch.mp_state = mp_state->mp_state;
7247 	kvm_make_request(KVM_REQ_EVENT, vcpu);
7248 	return 0;
7249 }
7250 
7251 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
7252 		    int reason, bool has_error_code, u32 error_code)
7253 {
7254 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
7255 	int ret;
7256 
7257 	init_emulate_ctxt(vcpu);
7258 
7259 	ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
7260 				   has_error_code, error_code);
7261 
7262 	if (ret)
7263 		return EMULATE_FAIL;
7264 
7265 	kvm_rip_write(vcpu, ctxt->eip);
7266 	kvm_set_rflags(vcpu, ctxt->eflags);
7267 	kvm_make_request(KVM_REQ_EVENT, vcpu);
7268 	return EMULATE_DONE;
7269 }
7270 EXPORT_SYMBOL_GPL(kvm_task_switch);
7271 
7272 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
7273 				  struct kvm_sregs *sregs)
7274 {
7275 	struct msr_data apic_base_msr;
7276 	int mmu_reset_needed = 0;
7277 	int pending_vec, max_bits, idx;
7278 	struct desc_ptr dt;
7279 
7280 	if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
7281 		return -EINVAL;
7282 
7283 	dt.size = sregs->idt.limit;
7284 	dt.address = sregs->idt.base;
7285 	kvm_x86_ops->set_idt(vcpu, &dt);
7286 	dt.size = sregs->gdt.limit;
7287 	dt.address = sregs->gdt.base;
7288 	kvm_x86_ops->set_gdt(vcpu, &dt);
7289 
7290 	vcpu->arch.cr2 = sregs->cr2;
7291 	mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
7292 	vcpu->arch.cr3 = sregs->cr3;
7293 	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
7294 
7295 	kvm_set_cr8(vcpu, sregs->cr8);
7296 
7297 	mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
7298 	kvm_x86_ops->set_efer(vcpu, sregs->efer);
7299 	apic_base_msr.data = sregs->apic_base;
7300 	apic_base_msr.host_initiated = true;
7301 	kvm_set_apic_base(vcpu, &apic_base_msr);
7302 
7303 	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
7304 	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
7305 	vcpu->arch.cr0 = sregs->cr0;
7306 
7307 	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
7308 	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
7309 	if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE))
7310 		kvm_update_cpuid(vcpu);
7311 
7312 	idx = srcu_read_lock(&vcpu->kvm->srcu);
7313 	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
7314 		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
7315 		mmu_reset_needed = 1;
7316 	}
7317 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
7318 
7319 	if (mmu_reset_needed)
7320 		kvm_mmu_reset_context(vcpu);
7321 
7322 	max_bits = KVM_NR_INTERRUPTS;
7323 	pending_vec = find_first_bit(
7324 		(const unsigned long *)sregs->interrupt_bitmap, max_bits);
7325 	if (pending_vec < max_bits) {
7326 		kvm_queue_interrupt(vcpu, pending_vec, false);
7327 		pr_debug("Set back pending irq %d\n", pending_vec);
7328 	}
7329 
7330 	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
7331 	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
7332 	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
7333 	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
7334 	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
7335 	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
7336 
7337 	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
7338 	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
7339 
7340 	update_cr8_intercept(vcpu);
7341 
7342 	/* Older userspace won't unhalt the vcpu on reset. */
7343 	if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
7344 	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
7345 	    !is_protmode(vcpu))
7346 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
7347 
7348 	kvm_make_request(KVM_REQ_EVENT, vcpu);
7349 
7350 	return 0;
7351 }
7352 
7353 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
7354 					struct kvm_guest_debug *dbg)
7355 {
7356 	unsigned long rflags;
7357 	int i, r;
7358 
7359 	if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
7360 		r = -EBUSY;
7361 		if (vcpu->arch.exception.pending)
7362 			goto out;
7363 		if (dbg->control & KVM_GUESTDBG_INJECT_DB)
7364 			kvm_queue_exception(vcpu, DB_VECTOR);
7365 		else
7366 			kvm_queue_exception(vcpu, BP_VECTOR);
7367 	}
7368 
7369 	/*
7370 	 * Read rflags as long as potentially injected trace flags are still
7371 	 * filtered out.
7372 	 */
7373 	rflags = kvm_get_rflags(vcpu);
7374 
7375 	vcpu->guest_debug = dbg->control;
7376 	if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
7377 		vcpu->guest_debug = 0;
7378 
7379 	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
7380 		for (i = 0; i < KVM_NR_DB_REGS; ++i)
7381 			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
7382 		vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
7383 	} else {
7384 		for (i = 0; i < KVM_NR_DB_REGS; i++)
7385 			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
7386 	}
7387 	kvm_update_dr7(vcpu);
7388 
7389 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
7390 		vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
7391 			get_segment_base(vcpu, VCPU_SREG_CS);
7392 
7393 	/*
7394 	 * Trigger an rflags update that will inject or remove the trace
7395 	 * flags.
7396 	 */
7397 	kvm_set_rflags(vcpu, rflags);
7398 
7399 	kvm_x86_ops->update_bp_intercept(vcpu);
7400 
7401 	r = 0;
7402 
7403 out:
7404 
7405 	return r;
7406 }
7407 
7408 /*
7409  * Translate a guest virtual address to a guest physical address.
7410  */
7411 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
7412 				    struct kvm_translation *tr)
7413 {
7414 	unsigned long vaddr = tr->linear_address;
7415 	gpa_t gpa;
7416 	int idx;
7417 
7418 	idx = srcu_read_lock(&vcpu->kvm->srcu);
7419 	gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
7420 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
7421 	tr->physical_address = gpa;
7422 	tr->valid = gpa != UNMAPPED_GVA;
7423 	tr->writeable = 1;
7424 	tr->usermode = 0;
7425 
7426 	return 0;
7427 }
7428 
7429 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
7430 {
7431 	struct fxregs_state *fxsave =
7432 			&vcpu->arch.guest_fpu.state.fxsave;
7433 
7434 	memcpy(fpu->fpr, fxsave->st_space, 128);
7435 	fpu->fcw = fxsave->cwd;
7436 	fpu->fsw = fxsave->swd;
7437 	fpu->ftwx = fxsave->twd;
7438 	fpu->last_opcode = fxsave->fop;
7439 	fpu->last_ip = fxsave->rip;
7440 	fpu->last_dp = fxsave->rdp;
7441 	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
7442 
7443 	return 0;
7444 }
7445 
7446 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
7447 {
7448 	struct fxregs_state *fxsave =
7449 			&vcpu->arch.guest_fpu.state.fxsave;
7450 
7451 	memcpy(fxsave->st_space, fpu->fpr, 128);
7452 	fxsave->cwd = fpu->fcw;
7453 	fxsave->swd = fpu->fsw;
7454 	fxsave->twd = fpu->ftwx;
7455 	fxsave->fop = fpu->last_opcode;
7456 	fxsave->rip = fpu->last_ip;
7457 	fxsave->rdp = fpu->last_dp;
7458 	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
7459 
7460 	return 0;
7461 }
7462 
7463 static void fx_init(struct kvm_vcpu *vcpu)
7464 {
7465 	fpstate_init(&vcpu->arch.guest_fpu.state);
7466 	if (boot_cpu_has(X86_FEATURE_XSAVES))
7467 		vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv =
7468 			host_xcr0 | XSTATE_COMPACTION_ENABLED;
7469 
7470 	/*
7471 	 * Ensure guest xcr0 is valid for loading
7472 	 */
7473 	vcpu->arch.xcr0 = XFEATURE_MASK_FP;
7474 
7475 	vcpu->arch.cr0 |= X86_CR0_ET;
7476 }
7477 
7478 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
7479 {
7480 	if (vcpu->guest_fpu_loaded)
7481 		return;
7482 
7483 	/*
7484 	 * Restore all possible states in the guest,
7485 	 * and assume host would use all available bits.
7486 	 * Guest xcr0 would be loaded later.
7487 	 */
7488 	vcpu->guest_fpu_loaded = 1;
7489 	__kernel_fpu_begin();
7490 	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
7491 	trace_kvm_fpu(1);
7492 }
7493 
7494 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
7495 {
7496 	if (!vcpu->guest_fpu_loaded)
7497 		return;
7498 
7499 	vcpu->guest_fpu_loaded = 0;
7500 	copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
7501 	__kernel_fpu_end();
7502 	++vcpu->stat.fpu_reload;
7503 	trace_kvm_fpu(0);
7504 }
7505 
7506 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
7507 {
7508 	void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
7509 
7510 	kvmclock_reset(vcpu);
7511 
7512 	kvm_x86_ops->vcpu_free(vcpu);
7513 	free_cpumask_var(wbinvd_dirty_mask);
7514 }
7515 
7516 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
7517 						unsigned int id)
7518 {
7519 	struct kvm_vcpu *vcpu;
7520 
7521 	if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
7522 		printk_once(KERN_WARNING
7523 		"kvm: SMP vm created on host with unstable TSC; "
7524 		"guest TSC will not be reliable\n");
7525 
7526 	vcpu = kvm_x86_ops->vcpu_create(kvm, id);
7527 
7528 	return vcpu;
7529 }
7530 
7531 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
7532 {
7533 	int r;
7534 
7535 	kvm_vcpu_mtrr_init(vcpu);
7536 	r = vcpu_load(vcpu);
7537 	if (r)
7538 		return r;
7539 	kvm_vcpu_reset(vcpu, false);
7540 	kvm_mmu_setup(vcpu);
7541 	vcpu_put(vcpu);
7542 	return r;
7543 }
7544 
7545 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
7546 {
7547 	struct msr_data msr;
7548 	struct kvm *kvm = vcpu->kvm;
7549 
7550 	if (vcpu_load(vcpu))
7551 		return;
7552 	msr.data = 0x0;
7553 	msr.index = MSR_IA32_TSC;
7554 	msr.host_initiated = true;
7555 	kvm_write_tsc(vcpu, &msr);
7556 	vcpu_put(vcpu);
7557 
7558 	if (!kvmclock_periodic_sync)
7559 		return;
7560 
7561 	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
7562 					KVMCLOCK_SYNC_PERIOD);
7563 }
7564 
7565 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
7566 {
7567 	int r;
7568 	vcpu->arch.apf.msr_val = 0;
7569 
7570 	r = vcpu_load(vcpu);
7571 	BUG_ON(r);
7572 	kvm_mmu_unload(vcpu);
7573 	vcpu_put(vcpu);
7574 
7575 	kvm_x86_ops->vcpu_free(vcpu);
7576 }
7577 
7578 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
7579 {
7580 	vcpu->arch.hflags = 0;
7581 
7582 	vcpu->arch.smi_pending = 0;
7583 	atomic_set(&vcpu->arch.nmi_queued, 0);
7584 	vcpu->arch.nmi_pending = 0;
7585 	vcpu->arch.nmi_injected = false;
7586 	kvm_clear_interrupt_queue(vcpu);
7587 	kvm_clear_exception_queue(vcpu);
7588 
7589 	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
7590 	kvm_update_dr0123(vcpu);
7591 	vcpu->arch.dr6 = DR6_INIT;
7592 	kvm_update_dr6(vcpu);
7593 	vcpu->arch.dr7 = DR7_FIXED_1;
7594 	kvm_update_dr7(vcpu);
7595 
7596 	vcpu->arch.cr2 = 0;
7597 
7598 	kvm_make_request(KVM_REQ_EVENT, vcpu);
7599 	vcpu->arch.apf.msr_val = 0;
7600 	vcpu->arch.st.msr_val = 0;
7601 
7602 	kvmclock_reset(vcpu);
7603 
7604 	kvm_clear_async_pf_completion_queue(vcpu);
7605 	kvm_async_pf_hash_reset(vcpu);
7606 	vcpu->arch.apf.halted = false;
7607 
7608 	if (!init_event) {
7609 		kvm_pmu_reset(vcpu);
7610 		vcpu->arch.smbase = 0x30000;
7611 	}
7612 
7613 	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
7614 	vcpu->arch.regs_avail = ~0;
7615 	vcpu->arch.regs_dirty = ~0;
7616 
7617 	kvm_x86_ops->vcpu_reset(vcpu, init_event);
7618 }
7619 
7620 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
7621 {
7622 	struct kvm_segment cs;
7623 
7624 	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
7625 	cs.selector = vector << 8;
7626 	cs.base = vector << 12;
7627 	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
7628 	kvm_rip_write(vcpu, 0);
7629 }
7630 
7631 int kvm_arch_hardware_enable(void)
7632 {
7633 	struct kvm *kvm;
7634 	struct kvm_vcpu *vcpu;
7635 	int i;
7636 	int ret;
7637 	u64 local_tsc;
7638 	u64 max_tsc = 0;
7639 	bool stable, backwards_tsc = false;
7640 
7641 	kvm_shared_msr_cpu_online();
7642 	ret = kvm_x86_ops->hardware_enable();
7643 	if (ret != 0)
7644 		return ret;
7645 
7646 	local_tsc = rdtsc();
7647 	stable = !check_tsc_unstable();
7648 	list_for_each_entry(kvm, &vm_list, vm_list) {
7649 		kvm_for_each_vcpu(i, vcpu, kvm) {
7650 			if (!stable && vcpu->cpu == smp_processor_id())
7651 				kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
7652 			if (stable && vcpu->arch.last_host_tsc > local_tsc) {
7653 				backwards_tsc = true;
7654 				if (vcpu->arch.last_host_tsc > max_tsc)
7655 					max_tsc = vcpu->arch.last_host_tsc;
7656 			}
7657 		}
7658 	}
7659 
7660 	/*
7661 	 * Sometimes, even reliable TSCs go backwards.  This happens on
7662 	 * platforms that reset TSC during suspend or hibernate actions, but
7663 	 * maintain synchronization.  We must compensate.  Fortunately, we can
7664 	 * detect that condition here, which happens early in CPU bringup,
7665 	 * before any KVM threads can be running.  Unfortunately, we can't
7666 	 * bring the TSCs fully up to date with real time, as we aren't yet far
7667 	 * enough into CPU bringup that we know how much real time has actually
7668 	 * elapsed; our helper function, ktime_get_boot_ns() will be using boot
7669 	 * variables that haven't been updated yet.
7670 	 *
7671 	 * So we simply find the maximum observed TSC above, then record the
7672 	 * adjustment to TSC in each VCPU.  When the VCPU later gets loaded,
7673 	 * the adjustment will be applied.  Note that we accumulate
7674 	 * adjustments, in case multiple suspend cycles happen before some VCPU
7675 	 * gets a chance to run again.  In the event that no KVM threads get a
7676 	 * chance to run, we will miss the entire elapsed period, as we'll have
7677 	 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
7678 	 * loose cycle time.  This isn't too big a deal, since the loss will be
7679 	 * uniform across all VCPUs (not to mention the scenario is extremely
7680 	 * unlikely). It is possible that a second hibernate recovery happens
7681 	 * much faster than a first, causing the observed TSC here to be
7682 	 * smaller; this would require additional padding adjustment, which is
7683 	 * why we set last_host_tsc to the local tsc observed here.
7684 	 *
7685 	 * N.B. - this code below runs only on platforms with reliable TSC,
7686 	 * as that is the only way backwards_tsc is set above.  Also note
7687 	 * that this runs for ALL vcpus, which is not a bug; all VCPUs should
7688 	 * have the same delta_cyc adjustment applied if backwards_tsc
7689 	 * is detected.  Note further, this adjustment is only done once,
7690 	 * as we reset last_host_tsc on all VCPUs to stop this from being
7691 	 * called multiple times (one for each physical CPU bringup).
7692 	 *
7693 	 * Platforms with unreliable TSCs don't have to deal with this, they
7694 	 * will be compensated by the logic in vcpu_load, which sets the TSC to
7695 	 * catchup mode.  This will catchup all VCPUs to real time, but cannot
7696 	 * guarantee that they stay in perfect synchronization.
7697 	 */
7698 	if (backwards_tsc) {
7699 		u64 delta_cyc = max_tsc - local_tsc;
7700 		backwards_tsc_observed = true;
7701 		list_for_each_entry(kvm, &vm_list, vm_list) {
7702 			kvm_for_each_vcpu(i, vcpu, kvm) {
7703 				vcpu->arch.tsc_offset_adjustment += delta_cyc;
7704 				vcpu->arch.last_host_tsc = local_tsc;
7705 				kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
7706 			}
7707 
7708 			/*
7709 			 * We have to disable TSC offset matching.. if you were
7710 			 * booting a VM while issuing an S4 host suspend....
7711 			 * you may have some problem.  Solving this issue is
7712 			 * left as an exercise to the reader.
7713 			 */
7714 			kvm->arch.last_tsc_nsec = 0;
7715 			kvm->arch.last_tsc_write = 0;
7716 		}
7717 
7718 	}
7719 	return 0;
7720 }
7721 
7722 void kvm_arch_hardware_disable(void)
7723 {
7724 	kvm_x86_ops->hardware_disable();
7725 	drop_user_return_notifiers();
7726 }
7727 
7728 int kvm_arch_hardware_setup(void)
7729 {
7730 	int r;
7731 
7732 	r = kvm_x86_ops->hardware_setup();
7733 	if (r != 0)
7734 		return r;
7735 
7736 	if (kvm_has_tsc_control) {
7737 		/*
7738 		 * Make sure the user can only configure tsc_khz values that
7739 		 * fit into a signed integer.
7740 		 * A min value is not calculated needed because it will always
7741 		 * be 1 on all machines.
7742 		 */
7743 		u64 max = min(0x7fffffffULL,
7744 			      __scale_tsc(kvm_max_tsc_scaling_ratio, tsc_khz));
7745 		kvm_max_guest_tsc_khz = max;
7746 
7747 		kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
7748 	}
7749 
7750 	kvm_init_msr_list();
7751 	return 0;
7752 }
7753 
7754 void kvm_arch_hardware_unsetup(void)
7755 {
7756 	kvm_x86_ops->hardware_unsetup();
7757 }
7758 
7759 void kvm_arch_check_processor_compat(void *rtn)
7760 {
7761 	kvm_x86_ops->check_processor_compatibility(rtn);
7762 }
7763 
7764 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
7765 {
7766 	return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
7767 }
7768 EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp);
7769 
7770 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
7771 {
7772 	return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
7773 }
7774 
7775 struct static_key kvm_no_apic_vcpu __read_mostly;
7776 EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu);
7777 
7778 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
7779 {
7780 	struct page *page;
7781 	struct kvm *kvm;
7782 	int r;
7783 
7784 	BUG_ON(vcpu->kvm == NULL);
7785 	kvm = vcpu->kvm;
7786 
7787 	vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv();
7788 	vcpu->arch.pv.pv_unhalted = false;
7789 	vcpu->arch.emulate_ctxt.ops = &emulate_ops;
7790 	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_reset_bsp(vcpu))
7791 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
7792 	else
7793 		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
7794 
7795 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
7796 	if (!page) {
7797 		r = -ENOMEM;
7798 		goto fail;
7799 	}
7800 	vcpu->arch.pio_data = page_address(page);
7801 
7802 	kvm_set_tsc_khz(vcpu, max_tsc_khz);
7803 
7804 	r = kvm_mmu_create(vcpu);
7805 	if (r < 0)
7806 		goto fail_free_pio_data;
7807 
7808 	if (irqchip_in_kernel(kvm)) {
7809 		r = kvm_create_lapic(vcpu);
7810 		if (r < 0)
7811 			goto fail_mmu_destroy;
7812 	} else
7813 		static_key_slow_inc(&kvm_no_apic_vcpu);
7814 
7815 	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
7816 				       GFP_KERNEL);
7817 	if (!vcpu->arch.mce_banks) {
7818 		r = -ENOMEM;
7819 		goto fail_free_lapic;
7820 	}
7821 	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
7822 
7823 	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) {
7824 		r = -ENOMEM;
7825 		goto fail_free_mce_banks;
7826 	}
7827 
7828 	fx_init(vcpu);
7829 
7830 	vcpu->arch.ia32_tsc_adjust_msr = 0x0;
7831 	vcpu->arch.pv_time_enabled = false;
7832 
7833 	vcpu->arch.guest_supported_xcr0 = 0;
7834 	vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
7835 
7836 	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
7837 
7838 	vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
7839 
7840 	kvm_async_pf_hash_reset(vcpu);
7841 	kvm_pmu_init(vcpu);
7842 
7843 	vcpu->arch.pending_external_vector = -1;
7844 
7845 	kvm_hv_vcpu_init(vcpu);
7846 
7847 	return 0;
7848 
7849 fail_free_mce_banks:
7850 	kfree(vcpu->arch.mce_banks);
7851 fail_free_lapic:
7852 	kvm_free_lapic(vcpu);
7853 fail_mmu_destroy:
7854 	kvm_mmu_destroy(vcpu);
7855 fail_free_pio_data:
7856 	free_page((unsigned long)vcpu->arch.pio_data);
7857 fail:
7858 	return r;
7859 }
7860 
7861 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
7862 {
7863 	int idx;
7864 
7865 	kvm_hv_vcpu_uninit(vcpu);
7866 	kvm_pmu_destroy(vcpu);
7867 	kfree(vcpu->arch.mce_banks);
7868 	kvm_free_lapic(vcpu);
7869 	idx = srcu_read_lock(&vcpu->kvm->srcu);
7870 	kvm_mmu_destroy(vcpu);
7871 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
7872 	free_page((unsigned long)vcpu->arch.pio_data);
7873 	if (!lapic_in_kernel(vcpu))
7874 		static_key_slow_dec(&kvm_no_apic_vcpu);
7875 }
7876 
7877 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
7878 {
7879 	kvm_x86_ops->sched_in(vcpu, cpu);
7880 }
7881 
7882 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
7883 {
7884 	if (type)
7885 		return -EINVAL;
7886 
7887 	INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
7888 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
7889 	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
7890 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
7891 	atomic_set(&kvm->arch.noncoherent_dma_count, 0);
7892 
7893 	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
7894 	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
7895 	/* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */
7896 	set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
7897 		&kvm->arch.irq_sources_bitmap);
7898 
7899 	raw_spin_lock_init(&kvm->arch.tsc_write_lock);
7900 	mutex_init(&kvm->arch.apic_map_lock);
7901 	mutex_init(&kvm->arch.hyperv.hv_lock);
7902 	spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
7903 
7904 	kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
7905 	pvclock_update_vm_gtod_copy(kvm);
7906 
7907 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
7908 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
7909 
7910 	kvm_page_track_init(kvm);
7911 	kvm_mmu_init_vm(kvm);
7912 
7913 	if (kvm_x86_ops->vm_init)
7914 		return kvm_x86_ops->vm_init(kvm);
7915 
7916 	return 0;
7917 }
7918 
7919 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
7920 {
7921 	int r;
7922 	r = vcpu_load(vcpu);
7923 	BUG_ON(r);
7924 	kvm_mmu_unload(vcpu);
7925 	vcpu_put(vcpu);
7926 }
7927 
7928 static void kvm_free_vcpus(struct kvm *kvm)
7929 {
7930 	unsigned int i;
7931 	struct kvm_vcpu *vcpu;
7932 
7933 	/*
7934 	 * Unpin any mmu pages first.
7935 	 */
7936 	kvm_for_each_vcpu(i, vcpu, kvm) {
7937 		kvm_clear_async_pf_completion_queue(vcpu);
7938 		kvm_unload_vcpu_mmu(vcpu);
7939 	}
7940 	kvm_for_each_vcpu(i, vcpu, kvm)
7941 		kvm_arch_vcpu_free(vcpu);
7942 
7943 	mutex_lock(&kvm->lock);
7944 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
7945 		kvm->vcpus[i] = NULL;
7946 
7947 	atomic_set(&kvm->online_vcpus, 0);
7948 	mutex_unlock(&kvm->lock);
7949 }
7950 
7951 void kvm_arch_sync_events(struct kvm *kvm)
7952 {
7953 	cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
7954 	cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
7955 	kvm_free_all_assigned_devices(kvm);
7956 	kvm_free_pit(kvm);
7957 }
7958 
7959 int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
7960 {
7961 	int i, r;
7962 	unsigned long hva;
7963 	struct kvm_memslots *slots = kvm_memslots(kvm);
7964 	struct kvm_memory_slot *slot, old;
7965 
7966 	/* Called with kvm->slots_lock held.  */
7967 	if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
7968 		return -EINVAL;
7969 
7970 	slot = id_to_memslot(slots, id);
7971 	if (size) {
7972 		if (slot->npages)
7973 			return -EEXIST;
7974 
7975 		/*
7976 		 * MAP_SHARED to prevent internal slot pages from being moved
7977 		 * by fork()/COW.
7978 		 */
7979 		hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
7980 			      MAP_SHARED | MAP_ANONYMOUS, 0);
7981 		if (IS_ERR((void *)hva))
7982 			return PTR_ERR((void *)hva);
7983 	} else {
7984 		if (!slot->npages)
7985 			return 0;
7986 
7987 		hva = 0;
7988 	}
7989 
7990 	old = *slot;
7991 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
7992 		struct kvm_userspace_memory_region m;
7993 
7994 		m.slot = id | (i << 16);
7995 		m.flags = 0;
7996 		m.guest_phys_addr = gpa;
7997 		m.userspace_addr = hva;
7998 		m.memory_size = size;
7999 		r = __kvm_set_memory_region(kvm, &m);
8000 		if (r < 0)
8001 			return r;
8002 	}
8003 
8004 	if (!size) {
8005 		r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
8006 		WARN_ON(r < 0);
8007 	}
8008 
8009 	return 0;
8010 }
8011 EXPORT_SYMBOL_GPL(__x86_set_memory_region);
8012 
8013 int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
8014 {
8015 	int r;
8016 
8017 	mutex_lock(&kvm->slots_lock);
8018 	r = __x86_set_memory_region(kvm, id, gpa, size);
8019 	mutex_unlock(&kvm->slots_lock);
8020 
8021 	return r;
8022 }
8023 EXPORT_SYMBOL_GPL(x86_set_memory_region);
8024 
8025 void kvm_arch_destroy_vm(struct kvm *kvm)
8026 {
8027 	if (current->mm == kvm->mm) {
8028 		/*
8029 		 * Free memory regions allocated on behalf of userspace,
8030 		 * unless the the memory map has changed due to process exit
8031 		 * or fd copying.
8032 		 */
8033 		x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
8034 		x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
8035 		x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
8036 	}
8037 	if (kvm_x86_ops->vm_destroy)
8038 		kvm_x86_ops->vm_destroy(kvm);
8039 	kvm_iommu_unmap_guest(kvm);
8040 	kfree(kvm->arch.vpic);
8041 	kfree(kvm->arch.vioapic);
8042 	kvm_free_vcpus(kvm);
8043 	kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
8044 	kvm_mmu_uninit_vm(kvm);
8045 }
8046 
8047 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
8048 			   struct kvm_memory_slot *dont)
8049 {
8050 	int i;
8051 
8052 	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
8053 		if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
8054 			kvfree(free->arch.rmap[i]);
8055 			free->arch.rmap[i] = NULL;
8056 		}
8057 		if (i == 0)
8058 			continue;
8059 
8060 		if (!dont || free->arch.lpage_info[i - 1] !=
8061 			     dont->arch.lpage_info[i - 1]) {
8062 			kvfree(free->arch.lpage_info[i - 1]);
8063 			free->arch.lpage_info[i - 1] = NULL;
8064 		}
8065 	}
8066 
8067 	kvm_page_track_free_memslot(free, dont);
8068 }
8069 
8070 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
8071 			    unsigned long npages)
8072 {
8073 	int i;
8074 
8075 	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
8076 		struct kvm_lpage_info *linfo;
8077 		unsigned long ugfn;
8078 		int lpages;
8079 		int level = i + 1;
8080 
8081 		lpages = gfn_to_index(slot->base_gfn + npages - 1,
8082 				      slot->base_gfn, level) + 1;
8083 
8084 		slot->arch.rmap[i] =
8085 			kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i]));
8086 		if (!slot->arch.rmap[i])
8087 			goto out_free;
8088 		if (i == 0)
8089 			continue;
8090 
8091 		linfo = kvm_kvzalloc(lpages * sizeof(*linfo));
8092 		if (!linfo)
8093 			goto out_free;
8094 
8095 		slot->arch.lpage_info[i - 1] = linfo;
8096 
8097 		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
8098 			linfo[0].disallow_lpage = 1;
8099 		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
8100 			linfo[lpages - 1].disallow_lpage = 1;
8101 		ugfn = slot->userspace_addr >> PAGE_SHIFT;
8102 		/*
8103 		 * If the gfn and userspace address are not aligned wrt each
8104 		 * other, or if explicitly asked to, disable large page
8105 		 * support for this slot
8106 		 */
8107 		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
8108 		    !kvm_largepages_enabled()) {
8109 			unsigned long j;
8110 
8111 			for (j = 0; j < lpages; ++j)
8112 				linfo[j].disallow_lpage = 1;
8113 		}
8114 	}
8115 
8116 	if (kvm_page_track_create_memslot(slot, npages))
8117 		goto out_free;
8118 
8119 	return 0;
8120 
8121 out_free:
8122 	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
8123 		kvfree(slot->arch.rmap[i]);
8124 		slot->arch.rmap[i] = NULL;
8125 		if (i == 0)
8126 			continue;
8127 
8128 		kvfree(slot->arch.lpage_info[i - 1]);
8129 		slot->arch.lpage_info[i - 1] = NULL;
8130 	}
8131 	return -ENOMEM;
8132 }
8133 
8134 void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
8135 {
8136 	/*
8137 	 * memslots->generation has been incremented.
8138 	 * mmio generation may have reached its maximum value.
8139 	 */
8140 	kvm_mmu_invalidate_mmio_sptes(kvm, slots);
8141 }
8142 
8143 int kvm_arch_prepare_memory_region(struct kvm *kvm,
8144 				struct kvm_memory_slot *memslot,
8145 				const struct kvm_userspace_memory_region *mem,
8146 				enum kvm_mr_change change)
8147 {
8148 	return 0;
8149 }
8150 
8151 static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
8152 				     struct kvm_memory_slot *new)
8153 {
8154 	/* Still write protect RO slot */
8155 	if (new->flags & KVM_MEM_READONLY) {
8156 		kvm_mmu_slot_remove_write_access(kvm, new);
8157 		return;
8158 	}
8159 
8160 	/*
8161 	 * Call kvm_x86_ops dirty logging hooks when they are valid.
8162 	 *
8163 	 * kvm_x86_ops->slot_disable_log_dirty is called when:
8164 	 *
8165 	 *  - KVM_MR_CREATE with dirty logging is disabled
8166 	 *  - KVM_MR_FLAGS_ONLY with dirty logging is disabled in new flag
8167 	 *
8168 	 * The reason is, in case of PML, we need to set D-bit for any slots
8169 	 * with dirty logging disabled in order to eliminate unnecessary GPA
8170 	 * logging in PML buffer (and potential PML buffer full VMEXT). This
8171 	 * guarantees leaving PML enabled during guest's lifetime won't have
8172 	 * any additonal overhead from PML when guest is running with dirty
8173 	 * logging disabled for memory slots.
8174 	 *
8175 	 * kvm_x86_ops->slot_enable_log_dirty is called when switching new slot
8176 	 * to dirty logging mode.
8177 	 *
8178 	 * If kvm_x86_ops dirty logging hooks are invalid, use write protect.
8179 	 *
8180 	 * In case of write protect:
8181 	 *
8182 	 * Write protect all pages for dirty logging.
8183 	 *
8184 	 * All the sptes including the large sptes which point to this
8185 	 * slot are set to readonly. We can not create any new large
8186 	 * spte on this slot until the end of the logging.
8187 	 *
8188 	 * See the comments in fast_page_fault().
8189 	 */
8190 	if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
8191 		if (kvm_x86_ops->slot_enable_log_dirty)
8192 			kvm_x86_ops->slot_enable_log_dirty(kvm, new);
8193 		else
8194 			kvm_mmu_slot_remove_write_access(kvm, new);
8195 	} else {
8196 		if (kvm_x86_ops->slot_disable_log_dirty)
8197 			kvm_x86_ops->slot_disable_log_dirty(kvm, new);
8198 	}
8199 }
8200 
8201 void kvm_arch_commit_memory_region(struct kvm *kvm,
8202 				const struct kvm_userspace_memory_region *mem,
8203 				const struct kvm_memory_slot *old,
8204 				const struct kvm_memory_slot *new,
8205 				enum kvm_mr_change change)
8206 {
8207 	int nr_mmu_pages = 0;
8208 
8209 	if (!kvm->arch.n_requested_mmu_pages)
8210 		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
8211 
8212 	if (nr_mmu_pages)
8213 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
8214 
8215 	/*
8216 	 * Dirty logging tracks sptes in 4k granularity, meaning that large
8217 	 * sptes have to be split.  If live migration is successful, the guest
8218 	 * in the source machine will be destroyed and large sptes will be
8219 	 * created in the destination. However, if the guest continues to run
8220 	 * in the source machine (for example if live migration fails), small
8221 	 * sptes will remain around and cause bad performance.
8222 	 *
8223 	 * Scan sptes if dirty logging has been stopped, dropping those
8224 	 * which can be collapsed into a single large-page spte.  Later
8225 	 * page faults will create the large-page sptes.
8226 	 */
8227 	if ((change != KVM_MR_DELETE) &&
8228 		(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
8229 		!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
8230 		kvm_mmu_zap_collapsible_sptes(kvm, new);
8231 
8232 	/*
8233 	 * Set up write protection and/or dirty logging for the new slot.
8234 	 *
8235 	 * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of old slot have
8236 	 * been zapped so no dirty logging staff is needed for old slot. For
8237 	 * KVM_MR_FLAGS_ONLY, the old slot is essentially the same one as the
8238 	 * new and it's also covered when dealing with the new slot.
8239 	 *
8240 	 * FIXME: const-ify all uses of struct kvm_memory_slot.
8241 	 */
8242 	if (change != KVM_MR_DELETE)
8243 		kvm_mmu_slot_apply_flags(kvm, (struct kvm_memory_slot *) new);
8244 }
8245 
8246 void kvm_arch_flush_shadow_all(struct kvm *kvm)
8247 {
8248 	kvm_mmu_invalidate_zap_all_pages(kvm);
8249 }
8250 
8251 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
8252 				   struct kvm_memory_slot *slot)
8253 {
8254 	kvm_page_track_flush_slot(kvm, slot);
8255 }
8256 
8257 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
8258 {
8259 	if (!list_empty_careful(&vcpu->async_pf.done))
8260 		return true;
8261 
8262 	if (kvm_apic_has_events(vcpu))
8263 		return true;
8264 
8265 	if (vcpu->arch.pv.pv_unhalted)
8266 		return true;
8267 
8268 	if (atomic_read(&vcpu->arch.nmi_queued))
8269 		return true;
8270 
8271 	if (test_bit(KVM_REQ_SMI, &vcpu->requests))
8272 		return true;
8273 
8274 	if (kvm_arch_interrupt_allowed(vcpu) &&
8275 	    kvm_cpu_has_interrupt(vcpu))
8276 		return true;
8277 
8278 	if (kvm_hv_has_stimer_pending(vcpu))
8279 		return true;
8280 
8281 	return false;
8282 }
8283 
8284 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
8285 {
8286 	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
8287 		kvm_x86_ops->check_nested_events(vcpu, false);
8288 
8289 	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
8290 }
8291 
8292 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
8293 {
8294 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
8295 }
8296 
8297 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
8298 {
8299 	return kvm_x86_ops->interrupt_allowed(vcpu);
8300 }
8301 
8302 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
8303 {
8304 	if (is_64_bit_mode(vcpu))
8305 		return kvm_rip_read(vcpu);
8306 	return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
8307 		     kvm_rip_read(vcpu));
8308 }
8309 EXPORT_SYMBOL_GPL(kvm_get_linear_rip);
8310 
8311 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
8312 {
8313 	return kvm_get_linear_rip(vcpu) == linear_rip;
8314 }
8315 EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
8316 
8317 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
8318 {
8319 	unsigned long rflags;
8320 
8321 	rflags = kvm_x86_ops->get_rflags(vcpu);
8322 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
8323 		rflags &= ~X86_EFLAGS_TF;
8324 	return rflags;
8325 }
8326 EXPORT_SYMBOL_GPL(kvm_get_rflags);
8327 
8328 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
8329 {
8330 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
8331 	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
8332 		rflags |= X86_EFLAGS_TF;
8333 	kvm_x86_ops->set_rflags(vcpu, rflags);
8334 }
8335 
8336 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
8337 {
8338 	__kvm_set_rflags(vcpu, rflags);
8339 	kvm_make_request(KVM_REQ_EVENT, vcpu);
8340 }
8341 EXPORT_SYMBOL_GPL(kvm_set_rflags);
8342 
8343 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
8344 {
8345 	int r;
8346 
8347 	if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
8348 	      work->wakeup_all)
8349 		return;
8350 
8351 	r = kvm_mmu_reload(vcpu);
8352 	if (unlikely(r))
8353 		return;
8354 
8355 	if (!vcpu->arch.mmu.direct_map &&
8356 	      work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
8357 		return;
8358 
8359 	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
8360 }
8361 
8362 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
8363 {
8364 	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
8365 }
8366 
8367 static inline u32 kvm_async_pf_next_probe(u32 key)
8368 {
8369 	return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
8370 }
8371 
8372 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
8373 {
8374 	u32 key = kvm_async_pf_hash_fn(gfn);
8375 
8376 	while (vcpu->arch.apf.gfns[key] != ~0)
8377 		key = kvm_async_pf_next_probe(key);
8378 
8379 	vcpu->arch.apf.gfns[key] = gfn;
8380 }
8381 
8382 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
8383 {
8384 	int i;
8385 	u32 key = kvm_async_pf_hash_fn(gfn);
8386 
8387 	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
8388 		     (vcpu->arch.apf.gfns[key] != gfn &&
8389 		      vcpu->arch.apf.gfns[key] != ~0); i++)
8390 		key = kvm_async_pf_next_probe(key);
8391 
8392 	return key;
8393 }
8394 
8395 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
8396 {
8397 	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
8398 }
8399 
8400 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
8401 {
8402 	u32 i, j, k;
8403 
8404 	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
8405 	while (true) {
8406 		vcpu->arch.apf.gfns[i] = ~0;
8407 		do {
8408 			j = kvm_async_pf_next_probe(j);
8409 			if (vcpu->arch.apf.gfns[j] == ~0)
8410 				return;
8411 			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
8412 			/*
8413 			 * k lies cyclically in ]i,j]
8414 			 * |    i.k.j |
8415 			 * |....j i.k.| or  |.k..j i...|
8416 			 */
8417 		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
8418 		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
8419 		i = j;
8420 	}
8421 }
8422 
8423 static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
8424 {
8425 
8426 	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
8427 				      sizeof(val));
8428 }
8429 
8430 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
8431 				     struct kvm_async_pf *work)
8432 {
8433 	struct x86_exception fault;
8434 
8435 	trace_kvm_async_pf_not_present(work->arch.token, work->gva);
8436 	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
8437 
8438 	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
8439 	    (vcpu->arch.apf.send_user_only &&
8440 	     kvm_x86_ops->get_cpl(vcpu) == 0))
8441 		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
8442 	else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
8443 		fault.vector = PF_VECTOR;
8444 		fault.error_code_valid = true;
8445 		fault.error_code = 0;
8446 		fault.nested_page_fault = false;
8447 		fault.address = work->arch.token;
8448 		kvm_inject_page_fault(vcpu, &fault);
8449 	}
8450 }
8451 
8452 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
8453 				 struct kvm_async_pf *work)
8454 {
8455 	struct x86_exception fault;
8456 
8457 	trace_kvm_async_pf_ready(work->arch.token, work->gva);
8458 	if (work->wakeup_all)
8459 		work->arch.token = ~0; /* broadcast wakeup */
8460 	else
8461 		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
8462 
8463 	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
8464 	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
8465 		fault.vector = PF_VECTOR;
8466 		fault.error_code_valid = true;
8467 		fault.error_code = 0;
8468 		fault.nested_page_fault = false;
8469 		fault.address = work->arch.token;
8470 		kvm_inject_page_fault(vcpu, &fault);
8471 	}
8472 	vcpu->arch.apf.halted = false;
8473 	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
8474 }
8475 
8476 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
8477 {
8478 	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
8479 		return true;
8480 	else
8481 		return !kvm_event_needs_reinjection(vcpu) &&
8482 			kvm_x86_ops->interrupt_allowed(vcpu);
8483 }
8484 
8485 void kvm_arch_start_assignment(struct kvm *kvm)
8486 {
8487 	atomic_inc(&kvm->arch.assigned_device_count);
8488 }
8489 EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
8490 
8491 void kvm_arch_end_assignment(struct kvm *kvm)
8492 {
8493 	atomic_dec(&kvm->arch.assigned_device_count);
8494 }
8495 EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
8496 
8497 bool kvm_arch_has_assigned_device(struct kvm *kvm)
8498 {
8499 	return atomic_read(&kvm->arch.assigned_device_count);
8500 }
8501 EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
8502 
8503 void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
8504 {
8505 	atomic_inc(&kvm->arch.noncoherent_dma_count);
8506 }
8507 EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);
8508 
8509 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
8510 {
8511 	atomic_dec(&kvm->arch.noncoherent_dma_count);
8512 }
8513 EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);
8514 
8515 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
8516 {
8517 	return atomic_read(&kvm->arch.noncoherent_dma_count);
8518 }
8519 EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
8520 
8521 bool kvm_arch_has_irq_bypass(void)
8522 {
8523 	return kvm_x86_ops->update_pi_irte != NULL;
8524 }
8525 
8526 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
8527 				      struct irq_bypass_producer *prod)
8528 {
8529 	struct kvm_kernel_irqfd *irqfd =
8530 		container_of(cons, struct kvm_kernel_irqfd, consumer);
8531 
8532 	irqfd->producer = prod;
8533 
8534 	return kvm_x86_ops->update_pi_irte(irqfd->kvm,
8535 					   prod->irq, irqfd->gsi, 1);
8536 }
8537 
8538 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
8539 				      struct irq_bypass_producer *prod)
8540 {
8541 	int ret;
8542 	struct kvm_kernel_irqfd *irqfd =
8543 		container_of(cons, struct kvm_kernel_irqfd, consumer);
8544 
8545 	WARN_ON(irqfd->producer != prod);
8546 	irqfd->producer = NULL;
8547 
8548 	/*
8549 	 * When producer of consumer is unregistered, we change back to
8550 	 * remapped mode, so we can re-use the current implementation
8551 	 * when the irq is masked/disabled or the consumer side (KVM
8552 	 * int this case doesn't want to receive the interrupts.
8553 	*/
8554 	ret = kvm_x86_ops->update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0);
8555 	if (ret)
8556 		printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
8557 		       " fails: %d\n", irqfd->consumer.token, ret);
8558 }
8559 
8560 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
8561 				   uint32_t guest_irq, bool set)
8562 {
8563 	if (!kvm_x86_ops->update_pi_irte)
8564 		return -EINVAL;
8565 
8566 	return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set);
8567 }
8568 
8569 bool kvm_vector_hashing_enabled(void)
8570 {
8571 	return vector_hashing;
8572 }
8573 EXPORT_SYMBOL_GPL(kvm_vector_hashing_enabled);
8574 
8575 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
8576 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
8577 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
8578 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
8579 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
8580 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
8581 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
8582 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
8583 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
8584 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
8585 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
8586 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
8587 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
8588 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
8589 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window);
8590 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
8591 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
8592 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
8593 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
8594