xref: /openbmc/linux/virt/kvm/kvm_main.c (revision 34fa67e7)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * This module enables machines with Intel VT-x extensions to run virtual
6  * machines without emulation or binary translation.
7  *
8  * Copyright (C) 2006 Qumranet, Inc.
9  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10  *
11  * Authors:
12  *   Avi Kivity   <avi@qumranet.com>
13  *   Yaniv Kamay  <yaniv@qumranet.com>
14  */
15 
16 #include <kvm/iodev.h>
17 
18 #include <linux/kvm_host.h>
19 #include <linux/kvm.h>
20 #include <linux/module.h>
21 #include <linux/errno.h>
22 #include <linux/percpu.h>
23 #include <linux/mm.h>
24 #include <linux/miscdevice.h>
25 #include <linux/vmalloc.h>
26 #include <linux/reboot.h>
27 #include <linux/debugfs.h>
28 #include <linux/highmem.h>
29 #include <linux/file.h>
30 #include <linux/syscore_ops.h>
31 #include <linux/cpu.h>
32 #include <linux/sched/signal.h>
33 #include <linux/sched/mm.h>
34 #include <linux/sched/stat.h>
35 #include <linux/cpumask.h>
36 #include <linux/smp.h>
37 #include <linux/anon_inodes.h>
38 #include <linux/profile.h>
39 #include <linux/kvm_para.h>
40 #include <linux/pagemap.h>
41 #include <linux/mman.h>
42 #include <linux/swap.h>
43 #include <linux/bitops.h>
44 #include <linux/spinlock.h>
45 #include <linux/compat.h>
46 #include <linux/srcu.h>
47 #include <linux/hugetlb.h>
48 #include <linux/slab.h>
49 #include <linux/sort.h>
50 #include <linux/bsearch.h>
51 #include <linux/io.h>
52 #include <linux/lockdep.h>
53 #include <linux/kthread.h>
54 #include <linux/suspend.h>
55 
56 #include <asm/processor.h>
57 #include <asm/ioctl.h>
58 #include <linux/uaccess.h>
59 
60 #include "coalesced_mmio.h"
61 #include "async_pf.h"
62 #include "kvm_mm.h"
63 #include "vfio.h"
64 
65 #define CREATE_TRACE_POINTS
66 #include <trace/events/kvm.h>
67 
68 #include <linux/kvm_dirty_ring.h>
69 
70 /* Worst case buffer size needed for holding an integer. */
71 #define ITOA_MAX_LEN 12
72 
73 MODULE_AUTHOR("Qumranet");
74 MODULE_LICENSE("GPL");
75 
76 /* Architectures should define their poll value according to the halt latency */
77 unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
78 module_param(halt_poll_ns, uint, 0644);
79 EXPORT_SYMBOL_GPL(halt_poll_ns);
80 
81 /* Default doubles per-vcpu halt_poll_ns. */
82 unsigned int halt_poll_ns_grow = 2;
83 module_param(halt_poll_ns_grow, uint, 0644);
84 EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
85 
86 /* The start value to grow halt_poll_ns from */
87 unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
88 module_param(halt_poll_ns_grow_start, uint, 0644);
89 EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
90 
91 /* Default resets per-vcpu halt_poll_ns . */
92 unsigned int halt_poll_ns_shrink;
93 module_param(halt_poll_ns_shrink, uint, 0644);
94 EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
95 
96 /*
97  * Ordering of locks:
98  *
99  *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
100  */
101 
102 DEFINE_MUTEX(kvm_lock);
103 static DEFINE_RAW_SPINLOCK(kvm_count_lock);
104 LIST_HEAD(vm_list);
105 
106 static cpumask_var_t cpus_hardware_enabled;
107 static int kvm_usage_count;
108 static atomic_t hardware_enable_failed;
109 
110 static struct kmem_cache *kvm_vcpu_cache;
111 
112 static __read_mostly struct preempt_ops kvm_preempt_ops;
113 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
114 
115 struct dentry *kvm_debugfs_dir;
116 EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
117 
118 static const struct file_operations stat_fops_per_vm;
119 
120 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
121 			   unsigned long arg);
122 #ifdef CONFIG_KVM_COMPAT
123 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
124 				  unsigned long arg);
125 #define KVM_COMPAT(c)	.compat_ioctl	= (c)
126 #else
127 /*
128  * For architectures that don't implement a compat infrastructure,
129  * adopt a double line of defense:
130  * - Prevent a compat task from opening /dev/kvm
131  * - If the open has been done by a 64bit task, and the KVM fd
132  *   passed to a compat task, let the ioctls fail.
133  */
134 static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
135 				unsigned long arg) { return -EINVAL; }
136 
137 static int kvm_no_compat_open(struct inode *inode, struct file *file)
138 {
139 	return is_compat_task() ? -ENODEV : 0;
140 }
141 #define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl,	\
142 			.open		= kvm_no_compat_open
143 #endif
144 static int hardware_enable_all(void);
145 static void hardware_disable_all(void);
146 
147 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
148 
149 __visible bool kvm_rebooting;
150 EXPORT_SYMBOL_GPL(kvm_rebooting);
151 
152 #define KVM_EVENT_CREATE_VM 0
153 #define KVM_EVENT_DESTROY_VM 1
154 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
155 static unsigned long long kvm_createvm_count;
156 static unsigned long long kvm_active_vms;
157 
158 static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
159 
160 __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
161 						   unsigned long start, unsigned long end)
162 {
163 }
164 
165 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
166 {
167 	/*
168 	 * The metadata used by is_zone_device_page() to determine whether or
169 	 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
170 	 * the device has been pinned, e.g. by get_user_pages().  WARN if the
171 	 * page_count() is zero to help detect bad usage of this helper.
172 	 */
173 	if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
174 		return false;
175 
176 	return is_zone_device_page(pfn_to_page(pfn));
177 }
178 
179 bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
180 {
181 	/*
182 	 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
183 	 * perspective they are "normal" pages, albeit with slightly different
184 	 * usage rules.
185 	 */
186 	if (pfn_valid(pfn))
187 		return PageReserved(pfn_to_page(pfn)) &&
188 		       !is_zero_pfn(pfn) &&
189 		       !kvm_is_zone_device_pfn(pfn);
190 
191 	return true;
192 }
193 
194 /*
195  * Switches to specified vcpu, until a matching vcpu_put()
196  */
197 void vcpu_load(struct kvm_vcpu *vcpu)
198 {
199 	int cpu = get_cpu();
200 
201 	__this_cpu_write(kvm_running_vcpu, vcpu);
202 	preempt_notifier_register(&vcpu->preempt_notifier);
203 	kvm_arch_vcpu_load(vcpu, cpu);
204 	put_cpu();
205 }
206 EXPORT_SYMBOL_GPL(vcpu_load);
207 
208 void vcpu_put(struct kvm_vcpu *vcpu)
209 {
210 	preempt_disable();
211 	kvm_arch_vcpu_put(vcpu);
212 	preempt_notifier_unregister(&vcpu->preempt_notifier);
213 	__this_cpu_write(kvm_running_vcpu, NULL);
214 	preempt_enable();
215 }
216 EXPORT_SYMBOL_GPL(vcpu_put);
217 
218 /* TODO: merge with kvm_arch_vcpu_should_kick */
219 static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
220 {
221 	int mode = kvm_vcpu_exiting_guest_mode(vcpu);
222 
223 	/*
224 	 * We need to wait for the VCPU to reenable interrupts and get out of
225 	 * READING_SHADOW_PAGE_TABLES mode.
226 	 */
227 	if (req & KVM_REQUEST_WAIT)
228 		return mode != OUTSIDE_GUEST_MODE;
229 
230 	/*
231 	 * Need to kick a running VCPU, but otherwise there is nothing to do.
232 	 */
233 	return mode == IN_GUEST_MODE;
234 }
235 
236 static void ack_flush(void *_completed)
237 {
238 }
239 
240 static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
241 {
242 	if (cpumask_empty(cpus))
243 		return false;
244 
245 	smp_call_function_many(cpus, ack_flush, NULL, wait);
246 	return true;
247 }
248 
249 static void kvm_make_vcpu_request(struct kvm *kvm, struct kvm_vcpu *vcpu,
250 				  unsigned int req, struct cpumask *tmp,
251 				  int current_cpu)
252 {
253 	int cpu;
254 
255 	kvm_make_request(req, vcpu);
256 
257 	if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
258 		return;
259 
260 	/*
261 	 * Note, the vCPU could get migrated to a different pCPU at any point
262 	 * after kvm_request_needs_ipi(), which could result in sending an IPI
263 	 * to the previous pCPU.  But, that's OK because the purpose of the IPI
264 	 * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
265 	 * satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES
266 	 * after this point is also OK, as the requirement is only that KVM wait
267 	 * for vCPUs that were reading SPTEs _before_ any changes were
268 	 * finalized. See kvm_vcpu_kick() for more details on handling requests.
269 	 */
270 	if (kvm_request_needs_ipi(vcpu, req)) {
271 		cpu = READ_ONCE(vcpu->cpu);
272 		if (cpu != -1 && cpu != current_cpu)
273 			__cpumask_set_cpu(cpu, tmp);
274 	}
275 }
276 
277 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
278 				 unsigned long *vcpu_bitmap)
279 {
280 	struct kvm_vcpu *vcpu;
281 	struct cpumask *cpus;
282 	int i, me;
283 	bool called;
284 
285 	me = get_cpu();
286 
287 	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
288 	cpumask_clear(cpus);
289 
290 	for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
291 		vcpu = kvm_get_vcpu(kvm, i);
292 		if (!vcpu)
293 			continue;
294 		kvm_make_vcpu_request(kvm, vcpu, req, cpus, me);
295 	}
296 
297 	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
298 	put_cpu();
299 
300 	return called;
301 }
302 
303 bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
304 				      struct kvm_vcpu *except)
305 {
306 	struct kvm_vcpu *vcpu;
307 	struct cpumask *cpus;
308 	unsigned long i;
309 	bool called;
310 	int me;
311 
312 	me = get_cpu();
313 
314 	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
315 	cpumask_clear(cpus);
316 
317 	kvm_for_each_vcpu(i, vcpu, kvm) {
318 		if (vcpu == except)
319 			continue;
320 		kvm_make_vcpu_request(kvm, vcpu, req, cpus, me);
321 	}
322 
323 	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
324 	put_cpu();
325 
326 	return called;
327 }
328 
329 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
330 {
331 	return kvm_make_all_cpus_request_except(kvm, req, NULL);
332 }
333 EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
334 
335 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
336 void kvm_flush_remote_tlbs(struct kvm *kvm)
337 {
338 	++kvm->stat.generic.remote_tlb_flush_requests;
339 
340 	/*
341 	 * We want to publish modifications to the page tables before reading
342 	 * mode. Pairs with a memory barrier in arch-specific code.
343 	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
344 	 * and smp_mb in walk_shadow_page_lockless_begin/end.
345 	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
346 	 *
347 	 * There is already an smp_mb__after_atomic() before
348 	 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
349 	 * barrier here.
350 	 */
351 	if (!kvm_arch_flush_remote_tlb(kvm)
352 	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
353 		++kvm->stat.generic.remote_tlb_flush;
354 }
355 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
356 #endif
357 
358 void kvm_reload_remote_mmus(struct kvm *kvm)
359 {
360 	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
361 }
362 
363 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
364 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
365 					       gfp_t gfp_flags)
366 {
367 	gfp_flags |= mc->gfp_zero;
368 
369 	if (mc->kmem_cache)
370 		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
371 	else
372 		return (void *)__get_free_page(gfp_flags);
373 }
374 
375 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
376 {
377 	void *obj;
378 
379 	if (mc->nobjs >= min)
380 		return 0;
381 	while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
382 		obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
383 		if (!obj)
384 			return mc->nobjs >= min ? 0 : -ENOMEM;
385 		mc->objects[mc->nobjs++] = obj;
386 	}
387 	return 0;
388 }
389 
390 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
391 {
392 	return mc->nobjs;
393 }
394 
395 void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
396 {
397 	while (mc->nobjs) {
398 		if (mc->kmem_cache)
399 			kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
400 		else
401 			free_page((unsigned long)mc->objects[--mc->nobjs]);
402 	}
403 }
404 
405 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
406 {
407 	void *p;
408 
409 	if (WARN_ON(!mc->nobjs))
410 		p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
411 	else
412 		p = mc->objects[--mc->nobjs];
413 	BUG_ON(!p);
414 	return p;
415 }
416 #endif
417 
418 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
419 {
420 	mutex_init(&vcpu->mutex);
421 	vcpu->cpu = -1;
422 	vcpu->kvm = kvm;
423 	vcpu->vcpu_id = id;
424 	vcpu->pid = NULL;
425 #ifndef __KVM_HAVE_ARCH_WQP
426 	rcuwait_init(&vcpu->wait);
427 #endif
428 	kvm_async_pf_vcpu_init(vcpu);
429 
430 	vcpu->pre_pcpu = -1;
431 	INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);
432 
433 	kvm_vcpu_set_in_spin_loop(vcpu, false);
434 	kvm_vcpu_set_dy_eligible(vcpu, false);
435 	vcpu->preempted = false;
436 	vcpu->ready = false;
437 	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
438 	vcpu->last_used_slot = NULL;
439 }
440 
441 static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
442 {
443 	kvm_dirty_ring_free(&vcpu->dirty_ring);
444 	kvm_arch_vcpu_destroy(vcpu);
445 
446 	/*
447 	 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
448 	 * the vcpu->pid pointer, and at destruction time all file descriptors
449 	 * are already gone.
450 	 */
451 	put_pid(rcu_dereference_protected(vcpu->pid, 1));
452 
453 	free_page((unsigned long)vcpu->run);
454 	kmem_cache_free(kvm_vcpu_cache, vcpu);
455 }
456 
457 void kvm_destroy_vcpus(struct kvm *kvm)
458 {
459 	unsigned long i;
460 	struct kvm_vcpu *vcpu;
461 
462 	kvm_for_each_vcpu(i, vcpu, kvm) {
463 		kvm_vcpu_destroy(vcpu);
464 		xa_erase(&kvm->vcpu_array, i);
465 	}
466 
467 	atomic_set(&kvm->online_vcpus, 0);
468 }
469 EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);
470 
471 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
472 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
473 {
474 	return container_of(mn, struct kvm, mmu_notifier);
475 }
476 
477 static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
478 					      struct mm_struct *mm,
479 					      unsigned long start, unsigned long end)
480 {
481 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
482 	int idx;
483 
484 	idx = srcu_read_lock(&kvm->srcu);
485 	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
486 	srcu_read_unlock(&kvm->srcu, idx);
487 }
488 
489 typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
490 
491 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
492 			     unsigned long end);
493 
494 struct kvm_hva_range {
495 	unsigned long start;
496 	unsigned long end;
497 	pte_t pte;
498 	hva_handler_t handler;
499 	on_lock_fn_t on_lock;
500 	bool flush_on_ret;
501 	bool may_block;
502 };
503 
504 /*
505  * Use a dedicated stub instead of NULL to indicate that there is no callback
506  * function/handler.  The compiler technically can't guarantee that a real
507  * function will have a non-zero address, and so it will generate code to
508  * check for !NULL, whereas comparing against a stub will be elided at compile
509  * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
510  */
511 static void kvm_null_fn(void)
512 {
513 
514 }
515 #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
516 
517 /* Iterate over each memslot intersecting [start, last] (inclusive) range */
518 #define kvm_for_each_memslot_in_hva_range(node, slots, start, last)	     \
519 	for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
520 	     node;							     \
521 	     node = interval_tree_iter_next(node, start, last))	     \
522 
523 static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
524 						  const struct kvm_hva_range *range)
525 {
526 	bool ret = false, locked = false;
527 	struct kvm_gfn_range gfn_range;
528 	struct kvm_memory_slot *slot;
529 	struct kvm_memslots *slots;
530 	int i, idx;
531 
532 	if (WARN_ON_ONCE(range->end <= range->start))
533 		return 0;
534 
535 	/* A null handler is allowed if and only if on_lock() is provided. */
536 	if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
537 			 IS_KVM_NULL_FN(range->handler)))
538 		return 0;
539 
540 	idx = srcu_read_lock(&kvm->srcu);
541 
542 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
543 		struct interval_tree_node *node;
544 
545 		slots = __kvm_memslots(kvm, i);
546 		kvm_for_each_memslot_in_hva_range(node, slots,
547 						  range->start, range->end - 1) {
548 			unsigned long hva_start, hva_end;
549 
550 			slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
551 			hva_start = max(range->start, slot->userspace_addr);
552 			hva_end = min(range->end, slot->userspace_addr +
553 						  (slot->npages << PAGE_SHIFT));
554 
555 			/*
556 			 * To optimize for the likely case where the address
557 			 * range is covered by zero or one memslots, don't
558 			 * bother making these conditional (to avoid writes on
559 			 * the second or later invocation of the handler).
560 			 */
561 			gfn_range.pte = range->pte;
562 			gfn_range.may_block = range->may_block;
563 
564 			/*
565 			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
566 			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
567 			 */
568 			gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
569 			gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
570 			gfn_range.slot = slot;
571 
572 			if (!locked) {
573 				locked = true;
574 				KVM_MMU_LOCK(kvm);
575 				if (!IS_KVM_NULL_FN(range->on_lock))
576 					range->on_lock(kvm, range->start, range->end);
577 				if (IS_KVM_NULL_FN(range->handler))
578 					break;
579 			}
580 			ret |= range->handler(kvm, &gfn_range);
581 		}
582 	}
583 
584 	if (range->flush_on_ret && ret)
585 		kvm_flush_remote_tlbs(kvm);
586 
587 	if (locked)
588 		KVM_MMU_UNLOCK(kvm);
589 
590 	srcu_read_unlock(&kvm->srcu, idx);
591 
592 	/* The notifiers are averse to booleans. :-( */
593 	return (int)ret;
594 }
595 
596 static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
597 						unsigned long start,
598 						unsigned long end,
599 						pte_t pte,
600 						hva_handler_t handler)
601 {
602 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
603 	const struct kvm_hva_range range = {
604 		.start		= start,
605 		.end		= end,
606 		.pte		= pte,
607 		.handler	= handler,
608 		.on_lock	= (void *)kvm_null_fn,
609 		.flush_on_ret	= true,
610 		.may_block	= false,
611 	};
612 
613 	return __kvm_handle_hva_range(kvm, &range);
614 }
615 
616 static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
617 							 unsigned long start,
618 							 unsigned long end,
619 							 hva_handler_t handler)
620 {
621 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
622 	const struct kvm_hva_range range = {
623 		.start		= start,
624 		.end		= end,
625 		.pte		= __pte(0),
626 		.handler	= handler,
627 		.on_lock	= (void *)kvm_null_fn,
628 		.flush_on_ret	= false,
629 		.may_block	= false,
630 	};
631 
632 	return __kvm_handle_hva_range(kvm, &range);
633 }
634 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
635 					struct mm_struct *mm,
636 					unsigned long address,
637 					pte_t pte)
638 {
639 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
640 
641 	trace_kvm_set_spte_hva(address);
642 
643 	/*
644 	 * .change_pte() must be surrounded by .invalidate_range_{start,end}().
645 	 * If mmu_notifier_count is zero, then no in-progress invalidations,
646 	 * including this one, found a relevant memslot at start(); rechecking
647 	 * memslots here is unnecessary.  Note, a false positive (count elevated
648 	 * by a different invalidation) is sub-optimal but functionally ok.
649 	 */
650 	WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
651 	if (!READ_ONCE(kvm->mmu_notifier_count))
652 		return;
653 
654 	kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
655 }
656 
657 void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
658 				   unsigned long end)
659 {
660 	/*
661 	 * The count increase must become visible at unlock time as no
662 	 * spte can be established without taking the mmu_lock and
663 	 * count is also read inside the mmu_lock critical section.
664 	 */
665 	kvm->mmu_notifier_count++;
666 	if (likely(kvm->mmu_notifier_count == 1)) {
667 		kvm->mmu_notifier_range_start = start;
668 		kvm->mmu_notifier_range_end = end;
669 	} else {
670 		/*
671 		 * Fully tracking multiple concurrent ranges has dimishing
672 		 * returns. Keep things simple and just find the minimal range
673 		 * which includes the current and new ranges. As there won't be
674 		 * enough information to subtract a range after its invalidate
675 		 * completes, any ranges invalidated concurrently will
676 		 * accumulate and persist until all outstanding invalidates
677 		 * complete.
678 		 */
679 		kvm->mmu_notifier_range_start =
680 			min(kvm->mmu_notifier_range_start, start);
681 		kvm->mmu_notifier_range_end =
682 			max(kvm->mmu_notifier_range_end, end);
683 	}
684 }
685 
686 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
687 					const struct mmu_notifier_range *range)
688 {
689 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
690 	const struct kvm_hva_range hva_range = {
691 		.start		= range->start,
692 		.end		= range->end,
693 		.pte		= __pte(0),
694 		.handler	= kvm_unmap_gfn_range,
695 		.on_lock	= kvm_inc_notifier_count,
696 		.flush_on_ret	= true,
697 		.may_block	= mmu_notifier_range_blockable(range),
698 	};
699 
700 	trace_kvm_unmap_hva_range(range->start, range->end);
701 
702 	/*
703 	 * Prevent memslot modification between range_start() and range_end()
704 	 * so that conditionally locking provides the same result in both
705 	 * functions.  Without that guarantee, the mmu_notifier_count
706 	 * adjustments will be imbalanced.
707 	 *
708 	 * Pairs with the decrement in range_end().
709 	 */
710 	spin_lock(&kvm->mn_invalidate_lock);
711 	kvm->mn_active_invalidate_count++;
712 	spin_unlock(&kvm->mn_invalidate_lock);
713 
714 	gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
715 					  hva_range.may_block);
716 
717 	__kvm_handle_hva_range(kvm, &hva_range);
718 
719 	return 0;
720 }
721 
722 void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
723 				   unsigned long end)
724 {
725 	/*
726 	 * This sequence increase will notify the kvm page fault that
727 	 * the page that is going to be mapped in the spte could have
728 	 * been freed.
729 	 */
730 	kvm->mmu_notifier_seq++;
731 	smp_wmb();
732 	/*
733 	 * The above sequence increase must be visible before the
734 	 * below count decrease, which is ensured by the smp_wmb above
735 	 * in conjunction with the smp_rmb in mmu_notifier_retry().
736 	 */
737 	kvm->mmu_notifier_count--;
738 }
739 
740 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
741 					const struct mmu_notifier_range *range)
742 {
743 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
744 	const struct kvm_hva_range hva_range = {
745 		.start		= range->start,
746 		.end		= range->end,
747 		.pte		= __pte(0),
748 		.handler	= (void *)kvm_null_fn,
749 		.on_lock	= kvm_dec_notifier_count,
750 		.flush_on_ret	= false,
751 		.may_block	= mmu_notifier_range_blockable(range),
752 	};
753 	bool wake;
754 
755 	__kvm_handle_hva_range(kvm, &hva_range);
756 
757 	/* Pairs with the increment in range_start(). */
758 	spin_lock(&kvm->mn_invalidate_lock);
759 	wake = (--kvm->mn_active_invalidate_count == 0);
760 	spin_unlock(&kvm->mn_invalidate_lock);
761 
762 	/*
763 	 * There can only be one waiter, since the wait happens under
764 	 * slots_lock.
765 	 */
766 	if (wake)
767 		rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
768 
769 	BUG_ON(kvm->mmu_notifier_count < 0);
770 }
771 
772 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
773 					      struct mm_struct *mm,
774 					      unsigned long start,
775 					      unsigned long end)
776 {
777 	trace_kvm_age_hva(start, end);
778 
779 	return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn);
780 }
781 
782 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
783 					struct mm_struct *mm,
784 					unsigned long start,
785 					unsigned long end)
786 {
787 	trace_kvm_age_hva(start, end);
788 
789 	/*
790 	 * Even though we do not flush TLB, this will still adversely
791 	 * affect performance on pre-Haswell Intel EPT, where there is
792 	 * no EPT Access Bit to clear so that we have to tear down EPT
793 	 * tables instead. If we find this unacceptable, we can always
794 	 * add a parameter to kvm_age_hva so that it effectively doesn't
795 	 * do anything on clear_young.
796 	 *
797 	 * Also note that currently we never issue secondary TLB flushes
798 	 * from clear_young, leaving this job up to the regular system
799 	 * cadence. If we find this inaccurate, we might come up with a
800 	 * more sophisticated heuristic later.
801 	 */
802 	return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
803 }
804 
805 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
806 				       struct mm_struct *mm,
807 				       unsigned long address)
808 {
809 	trace_kvm_test_age_hva(address);
810 
811 	return kvm_handle_hva_range_no_flush(mn, address, address + 1,
812 					     kvm_test_age_gfn);
813 }
814 
815 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
816 				     struct mm_struct *mm)
817 {
818 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
819 	int idx;
820 
821 	idx = srcu_read_lock(&kvm->srcu);
822 	kvm_arch_flush_shadow_all(kvm);
823 	srcu_read_unlock(&kvm->srcu, idx);
824 }
825 
826 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
827 	.invalidate_range	= kvm_mmu_notifier_invalidate_range,
828 	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
829 	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
830 	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
831 	.clear_young		= kvm_mmu_notifier_clear_young,
832 	.test_young		= kvm_mmu_notifier_test_young,
833 	.change_pte		= kvm_mmu_notifier_change_pte,
834 	.release		= kvm_mmu_notifier_release,
835 };
836 
837 static int kvm_init_mmu_notifier(struct kvm *kvm)
838 {
839 	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
840 	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
841 }
842 
843 #else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
844 
845 static int kvm_init_mmu_notifier(struct kvm *kvm)
846 {
847 	return 0;
848 }
849 
850 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
851 
852 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
853 static int kvm_pm_notifier_call(struct notifier_block *bl,
854 				unsigned long state,
855 				void *unused)
856 {
857 	struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);
858 
859 	return kvm_arch_pm_notifier(kvm, state);
860 }
861 
862 static void kvm_init_pm_notifier(struct kvm *kvm)
863 {
864 	kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
865 	/* Suspend KVM before we suspend ftrace, RCU, etc. */
866 	kvm->pm_notifier.priority = INT_MAX;
867 	register_pm_notifier(&kvm->pm_notifier);
868 }
869 
870 static void kvm_destroy_pm_notifier(struct kvm *kvm)
871 {
872 	unregister_pm_notifier(&kvm->pm_notifier);
873 }
874 #else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
875 static void kvm_init_pm_notifier(struct kvm *kvm)
876 {
877 }
878 
879 static void kvm_destroy_pm_notifier(struct kvm *kvm)
880 {
881 }
882 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
883 
884 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
885 {
886 	if (!memslot->dirty_bitmap)
887 		return;
888 
889 	kvfree(memslot->dirty_bitmap);
890 	memslot->dirty_bitmap = NULL;
891 }
892 
893 /* This does not remove the slot from struct kvm_memslots data structures */
894 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
895 {
896 	kvm_destroy_dirty_bitmap(slot);
897 
898 	kvm_arch_free_memslot(kvm, slot);
899 
900 	kfree(slot);
901 }
902 
903 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
904 {
905 	struct hlist_node *idnode;
906 	struct kvm_memory_slot *memslot;
907 	int bkt;
908 
909 	/*
910 	 * The same memslot objects live in both active and inactive sets,
911 	 * arbitrarily free using index '1' so the second invocation of this
912 	 * function isn't operating over a structure with dangling pointers
913 	 * (even though this function isn't actually touching them).
914 	 */
915 	if (!slots->node_idx)
916 		return;
917 
918 	hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1])
919 		kvm_free_memslot(kvm, memslot);
920 }
921 
922 static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
923 {
924 	switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
925 	case KVM_STATS_TYPE_INSTANT:
926 		return 0444;
927 	case KVM_STATS_TYPE_CUMULATIVE:
928 	case KVM_STATS_TYPE_PEAK:
929 	default:
930 		return 0644;
931 	}
932 }
933 
934 
935 static void kvm_destroy_vm_debugfs(struct kvm *kvm)
936 {
937 	int i;
938 	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
939 				      kvm_vcpu_stats_header.num_desc;
940 
941 	if (!kvm->debugfs_dentry)
942 		return;
943 
944 	debugfs_remove_recursive(kvm->debugfs_dentry);
945 
946 	if (kvm->debugfs_stat_data) {
947 		for (i = 0; i < kvm_debugfs_num_entries; i++)
948 			kfree(kvm->debugfs_stat_data[i]);
949 		kfree(kvm->debugfs_stat_data);
950 	}
951 }
952 
953 static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
954 {
955 	static DEFINE_MUTEX(kvm_debugfs_lock);
956 	struct dentry *dent;
957 	char dir_name[ITOA_MAX_LEN * 2];
958 	struct kvm_stat_data *stat_data;
959 	const struct _kvm_stats_desc *pdesc;
960 	int i, ret;
961 	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
962 				      kvm_vcpu_stats_header.num_desc;
963 
964 	if (!debugfs_initialized())
965 		return 0;
966 
967 	snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
968 	mutex_lock(&kvm_debugfs_lock);
969 	dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
970 	if (dent) {
971 		pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
972 		dput(dent);
973 		mutex_unlock(&kvm_debugfs_lock);
974 		return 0;
975 	}
976 	dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
977 	mutex_unlock(&kvm_debugfs_lock);
978 	if (IS_ERR(dent))
979 		return 0;
980 
981 	kvm->debugfs_dentry = dent;
982 	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
983 					 sizeof(*kvm->debugfs_stat_data),
984 					 GFP_KERNEL_ACCOUNT);
985 	if (!kvm->debugfs_stat_data)
986 		return -ENOMEM;
987 
988 	for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
989 		pdesc = &kvm_vm_stats_desc[i];
990 		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
991 		if (!stat_data)
992 			return -ENOMEM;
993 
994 		stat_data->kvm = kvm;
995 		stat_data->desc = pdesc;
996 		stat_data->kind = KVM_STAT_VM;
997 		kvm->debugfs_stat_data[i] = stat_data;
998 		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
999 				    kvm->debugfs_dentry, stat_data,
1000 				    &stat_fops_per_vm);
1001 	}
1002 
1003 	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
1004 		pdesc = &kvm_vcpu_stats_desc[i];
1005 		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
1006 		if (!stat_data)
1007 			return -ENOMEM;
1008 
1009 		stat_data->kvm = kvm;
1010 		stat_data->desc = pdesc;
1011 		stat_data->kind = KVM_STAT_VCPU;
1012 		kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
1013 		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
1014 				    kvm->debugfs_dentry, stat_data,
1015 				    &stat_fops_per_vm);
1016 	}
1017 
1018 	ret = kvm_arch_create_vm_debugfs(kvm);
1019 	if (ret) {
1020 		kvm_destroy_vm_debugfs(kvm);
1021 		return i;
1022 	}
1023 
1024 	return 0;
1025 }
1026 
1027 /*
1028  * Called after the VM is otherwise initialized, but just before adding it to
1029  * the vm_list.
1030  */
1031 int __weak kvm_arch_post_init_vm(struct kvm *kvm)
1032 {
1033 	return 0;
1034 }
1035 
1036 /*
1037  * Called just after removing the VM from the vm_list, but before doing any
1038  * other destruction.
1039  */
1040 void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
1041 {
1042 }
1043 
1044 /*
1045  * Called after per-vm debugfs created.  When called kvm->debugfs_dentry should
1046  * be setup already, so we can create arch-specific debugfs entries under it.
1047  * Cleanup should be automatic done in kvm_destroy_vm_debugfs() recursively, so
1048  * a per-arch destroy interface is not needed.
1049  */
1050 int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
1051 {
1052 	return 0;
1053 }
1054 
1055 static struct kvm *kvm_create_vm(unsigned long type)
1056 {
1057 	struct kvm *kvm = kvm_arch_alloc_vm();
1058 	struct kvm_memslots *slots;
1059 	int r = -ENOMEM;
1060 	int i, j;
1061 
1062 	if (!kvm)
1063 		return ERR_PTR(-ENOMEM);
1064 
1065 	KVM_MMU_LOCK_INIT(kvm);
1066 	mmgrab(current->mm);
1067 	kvm->mm = current->mm;
1068 	kvm_eventfd_init(kvm);
1069 	mutex_init(&kvm->lock);
1070 	mutex_init(&kvm->irq_lock);
1071 	mutex_init(&kvm->slots_lock);
1072 	mutex_init(&kvm->slots_arch_lock);
1073 	spin_lock_init(&kvm->mn_invalidate_lock);
1074 	rcuwait_init(&kvm->mn_memslots_update_rcuwait);
1075 	xa_init(&kvm->vcpu_array);
1076 
1077 	INIT_LIST_HEAD(&kvm->gpc_list);
1078 	spin_lock_init(&kvm->gpc_lock);
1079 
1080 	INIT_LIST_HEAD(&kvm->devices);
1081 
1082 	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
1083 
1084 	if (init_srcu_struct(&kvm->srcu))
1085 		goto out_err_no_srcu;
1086 	if (init_srcu_struct(&kvm->irq_srcu))
1087 		goto out_err_no_irq_srcu;
1088 
1089 	refcount_set(&kvm->users_count, 1);
1090 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
1091 		for (j = 0; j < 2; j++) {
1092 			slots = &kvm->__memslots[i][j];
1093 
1094 			atomic_long_set(&slots->last_used_slot, (unsigned long)NULL);
1095 			slots->hva_tree = RB_ROOT_CACHED;
1096 			slots->gfn_tree = RB_ROOT;
1097 			hash_init(slots->id_hash);
1098 			slots->node_idx = j;
1099 
1100 			/* Generations must be different for each address space. */
1101 			slots->generation = i;
1102 		}
1103 
1104 		rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
1105 	}
1106 
1107 	for (i = 0; i < KVM_NR_BUSES; i++) {
1108 		rcu_assign_pointer(kvm->buses[i],
1109 			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
1110 		if (!kvm->buses[i])
1111 			goto out_err_no_arch_destroy_vm;
1112 	}
1113 
1114 	kvm->max_halt_poll_ns = halt_poll_ns;
1115 
1116 	r = kvm_arch_init_vm(kvm, type);
1117 	if (r)
1118 		goto out_err_no_arch_destroy_vm;
1119 
1120 	r = hardware_enable_all();
1121 	if (r)
1122 		goto out_err_no_disable;
1123 
1124 #ifdef CONFIG_HAVE_KVM_IRQFD
1125 	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
1126 #endif
1127 
1128 	r = kvm_init_mmu_notifier(kvm);
1129 	if (r)
1130 		goto out_err_no_mmu_notifier;
1131 
1132 	r = kvm_arch_post_init_vm(kvm);
1133 	if (r)
1134 		goto out_err;
1135 
1136 	mutex_lock(&kvm_lock);
1137 	list_add(&kvm->vm_list, &vm_list);
1138 	mutex_unlock(&kvm_lock);
1139 
1140 	preempt_notifier_inc();
1141 	kvm_init_pm_notifier(kvm);
1142 
1143 	return kvm;
1144 
1145 out_err:
1146 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
1147 	if (kvm->mmu_notifier.ops)
1148 		mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
1149 #endif
1150 out_err_no_mmu_notifier:
1151 	hardware_disable_all();
1152 out_err_no_disable:
1153 	kvm_arch_destroy_vm(kvm);
1154 out_err_no_arch_destroy_vm:
1155 	WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
1156 	for (i = 0; i < KVM_NR_BUSES; i++)
1157 		kfree(kvm_get_bus(kvm, i));
1158 	cleanup_srcu_struct(&kvm->irq_srcu);
1159 out_err_no_irq_srcu:
1160 	cleanup_srcu_struct(&kvm->srcu);
1161 out_err_no_srcu:
1162 	kvm_arch_free_vm(kvm);
1163 	mmdrop(current->mm);
1164 	return ERR_PTR(r);
1165 }
1166 
1167 static void kvm_destroy_devices(struct kvm *kvm)
1168 {
1169 	struct kvm_device *dev, *tmp;
1170 
1171 	/*
1172 	 * We do not need to take the kvm->lock here, because nobody else
1173 	 * has a reference to the struct kvm at this point and therefore
1174 	 * cannot access the devices list anyhow.
1175 	 */
1176 	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
1177 		list_del(&dev->vm_node);
1178 		dev->ops->destroy(dev);
1179 	}
1180 }
1181 
1182 static void kvm_destroy_vm(struct kvm *kvm)
1183 {
1184 	int i;
1185 	struct mm_struct *mm = kvm->mm;
1186 
1187 	kvm_destroy_pm_notifier(kvm);
1188 	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
1189 	kvm_destroy_vm_debugfs(kvm);
1190 	kvm_arch_sync_events(kvm);
1191 	mutex_lock(&kvm_lock);
1192 	list_del(&kvm->vm_list);
1193 	mutex_unlock(&kvm_lock);
1194 	kvm_arch_pre_destroy_vm(kvm);
1195 
1196 	kvm_free_irq_routing(kvm);
1197 	for (i = 0; i < KVM_NR_BUSES; i++) {
1198 		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
1199 
1200 		if (bus)
1201 			kvm_io_bus_destroy(bus);
1202 		kvm->buses[i] = NULL;
1203 	}
1204 	kvm_coalesced_mmio_free(kvm);
1205 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
1206 	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
1207 	/*
1208 	 * At this point, pending calls to invalidate_range_start()
1209 	 * have completed but no more MMU notifiers will run, so
1210 	 * mn_active_invalidate_count may remain unbalanced.
1211 	 * No threads can be waiting in install_new_memslots as the
1212 	 * last reference on KVM has been dropped, but freeing
1213 	 * memslots would deadlock without this manual intervention.
1214 	 */
1215 	WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
1216 	kvm->mn_active_invalidate_count = 0;
1217 #else
1218 	kvm_arch_flush_shadow_all(kvm);
1219 #endif
1220 	kvm_arch_destroy_vm(kvm);
1221 	kvm_destroy_devices(kvm);
1222 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
1223 		kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
1224 		kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
1225 	}
1226 	cleanup_srcu_struct(&kvm->irq_srcu);
1227 	cleanup_srcu_struct(&kvm->srcu);
1228 	kvm_arch_free_vm(kvm);
1229 	preempt_notifier_dec();
1230 	hardware_disable_all();
1231 	mmdrop(mm);
1232 }
1233 
1234 void kvm_get_kvm(struct kvm *kvm)
1235 {
1236 	refcount_inc(&kvm->users_count);
1237 }
1238 EXPORT_SYMBOL_GPL(kvm_get_kvm);
1239 
1240 /*
1241  * Make sure the vm is not during destruction, which is a safe version of
1242  * kvm_get_kvm().  Return true if kvm referenced successfully, false otherwise.
1243  */
1244 bool kvm_get_kvm_safe(struct kvm *kvm)
1245 {
1246 	return refcount_inc_not_zero(&kvm->users_count);
1247 }
1248 EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);
1249 
1250 void kvm_put_kvm(struct kvm *kvm)
1251 {
1252 	if (refcount_dec_and_test(&kvm->users_count))
1253 		kvm_destroy_vm(kvm);
1254 }
1255 EXPORT_SYMBOL_GPL(kvm_put_kvm);
1256 
1257 /*
1258  * Used to put a reference that was taken on behalf of an object associated
1259  * with a user-visible file descriptor, e.g. a vcpu or device, if installation
1260  * of the new file descriptor fails and the reference cannot be transferred to
1261  * its final owner.  In such cases, the caller is still actively using @kvm and
1262  * will fail miserably if the refcount unexpectedly hits zero.
1263  */
1264 void kvm_put_kvm_no_destroy(struct kvm *kvm)
1265 {
1266 	WARN_ON(refcount_dec_and_test(&kvm->users_count));
1267 }
1268 EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);
1269 
1270 static int kvm_vm_release(struct inode *inode, struct file *filp)
1271 {
1272 	struct kvm *kvm = filp->private_data;
1273 
1274 	kvm_irqfd_release(kvm);
1275 
1276 	kvm_put_kvm(kvm);
1277 	return 0;
1278 }
1279 
1280 /*
1281  * Allocation size is twice as large as the actual dirty bitmap size.
1282  * See kvm_vm_ioctl_get_dirty_log() why this is needed.
1283  */
1284 static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
1285 {
1286 	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
1287 
1288 	memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT);
1289 	if (!memslot->dirty_bitmap)
1290 		return -ENOMEM;
1291 
1292 	return 0;
1293 }
1294 
1295 static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
1296 {
1297 	struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
1298 	int node_idx_inactive = active->node_idx ^ 1;
1299 
1300 	return &kvm->__memslots[as_id][node_idx_inactive];
1301 }
1302 
1303 /*
1304  * Helper to get the address space ID when one of memslot pointers may be NULL.
1305  * This also serves as a sanity that at least one of the pointers is non-NULL,
1306  * and that their address space IDs don't diverge.
1307  */
1308 static int kvm_memslots_get_as_id(struct kvm_memory_slot *a,
1309 				  struct kvm_memory_slot *b)
1310 {
1311 	if (WARN_ON_ONCE(!a && !b))
1312 		return 0;
1313 
1314 	if (!a)
1315 		return b->as_id;
1316 	if (!b)
1317 		return a->as_id;
1318 
1319 	WARN_ON_ONCE(a->as_id != b->as_id);
1320 	return a->as_id;
1321 }
1322 
1323 static void kvm_insert_gfn_node(struct kvm_memslots *slots,
1324 				struct kvm_memory_slot *slot)
1325 {
1326 	struct rb_root *gfn_tree = &slots->gfn_tree;
1327 	struct rb_node **node, *parent;
1328 	int idx = slots->node_idx;
1329 
1330 	parent = NULL;
1331 	for (node = &gfn_tree->rb_node; *node; ) {
1332 		struct kvm_memory_slot *tmp;
1333 
1334 		tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
1335 		parent = *node;
1336 		if (slot->base_gfn < tmp->base_gfn)
1337 			node = &(*node)->rb_left;
1338 		else if (slot->base_gfn > tmp->base_gfn)
1339 			node = &(*node)->rb_right;
1340 		else
1341 			BUG();
1342 	}
1343 
1344 	rb_link_node(&slot->gfn_node[idx], parent, node);
1345 	rb_insert_color(&slot->gfn_node[idx], gfn_tree);
1346 }
1347 
1348 static void kvm_erase_gfn_node(struct kvm_memslots *slots,
1349 			       struct kvm_memory_slot *slot)
1350 {
1351 	rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree);
1352 }
1353 
1354 static void kvm_replace_gfn_node(struct kvm_memslots *slots,
1355 				 struct kvm_memory_slot *old,
1356 				 struct kvm_memory_slot *new)
1357 {
1358 	int idx = slots->node_idx;
1359 
1360 	WARN_ON_ONCE(old->base_gfn != new->base_gfn);
1361 
1362 	rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx],
1363 			&slots->gfn_tree);
1364 }
1365 
1366 /*
1367  * Replace @old with @new in the inactive memslots.
1368  *
1369  * With NULL @old this simply adds @new.
1370  * With NULL @new this simply removes @old.
1371  *
1372  * If @new is non-NULL its hva_node[slots_idx] range has to be set
1373  * appropriately.
1374  */
1375 static void kvm_replace_memslot(struct kvm *kvm,
1376 				struct kvm_memory_slot *old,
1377 				struct kvm_memory_slot *new)
1378 {
1379 	int as_id = kvm_memslots_get_as_id(old, new);
1380 	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1381 	int idx = slots->node_idx;
1382 
1383 	if (old) {
1384 		hash_del(&old->id_node[idx]);
1385 		interval_tree_remove(&old->hva_node[idx], &slots->hva_tree);
1386 
1387 		if ((long)old == atomic_long_read(&slots->last_used_slot))
1388 			atomic_long_set(&slots->last_used_slot, (long)new);
1389 
1390 		if (!new) {
1391 			kvm_erase_gfn_node(slots, old);
1392 			return;
1393 		}
1394 	}
1395 
1396 	/*
1397 	 * Initialize @new's hva range.  Do this even when replacing an @old
1398 	 * slot, kvm_copy_memslot() deliberately does not touch node data.
1399 	 */
1400 	new->hva_node[idx].start = new->userspace_addr;
1401 	new->hva_node[idx].last = new->userspace_addr +
1402 				  (new->npages << PAGE_SHIFT) - 1;
1403 
1404 	/*
1405 	 * (Re)Add the new memslot.  There is no O(1) interval_tree_replace(),
1406 	 * hva_node needs to be swapped with remove+insert even though hva can't
1407 	 * change when replacing an existing slot.
1408 	 */
1409 	hash_add(slots->id_hash, &new->id_node[idx], new->id);
1410 	interval_tree_insert(&new->hva_node[idx], &slots->hva_tree);
1411 
1412 	/*
1413 	 * If the memslot gfn is unchanged, rb_replace_node() can be used to
1414 	 * switch the node in the gfn tree instead of removing the old and
1415 	 * inserting the new as two separate operations. Replacement is a
1416 	 * single O(1) operation versus two O(log(n)) operations for
1417 	 * remove+insert.
1418 	 */
1419 	if (old && old->base_gfn == new->base_gfn) {
1420 		kvm_replace_gfn_node(slots, old, new);
1421 	} else {
1422 		if (old)
1423 			kvm_erase_gfn_node(slots, old);
1424 		kvm_insert_gfn_node(slots, new);
1425 	}
1426 }
1427 
1428 static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
1429 {
1430 	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
1431 
1432 #ifdef __KVM_HAVE_READONLY_MEM
1433 	valid_flags |= KVM_MEM_READONLY;
1434 #endif
1435 
1436 	if (mem->flags & ~valid_flags)
1437 		return -EINVAL;
1438 
1439 	return 0;
1440 }
1441 
1442 static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
1443 {
1444 	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1445 
1446 	/* Grab the generation from the activate memslots. */
1447 	u64 gen = __kvm_memslots(kvm, as_id)->generation;
1448 
1449 	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
1450 	slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1451 
1452 	/*
1453 	 * Do not store the new memslots while there are invalidations in
1454 	 * progress, otherwise the locking in invalidate_range_start and
1455 	 * invalidate_range_end will be unbalanced.
1456 	 */
1457 	spin_lock(&kvm->mn_invalidate_lock);
1458 	prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
1459 	while (kvm->mn_active_invalidate_count) {
1460 		set_current_state(TASK_UNINTERRUPTIBLE);
1461 		spin_unlock(&kvm->mn_invalidate_lock);
1462 		schedule();
1463 		spin_lock(&kvm->mn_invalidate_lock);
1464 	}
1465 	finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
1466 	rcu_assign_pointer(kvm->memslots[as_id], slots);
1467 	spin_unlock(&kvm->mn_invalidate_lock);
1468 
1469 	/*
1470 	 * Acquired in kvm_set_memslot. Must be released before synchronize
1471 	 * SRCU below in order to avoid deadlock with another thread
1472 	 * acquiring the slots_arch_lock in an srcu critical section.
1473 	 */
1474 	mutex_unlock(&kvm->slots_arch_lock);
1475 
1476 	synchronize_srcu_expedited(&kvm->srcu);
1477 
1478 	/*
1479 	 * Increment the new memslot generation a second time, dropping the
1480 	 * update in-progress flag and incrementing the generation based on
1481 	 * the number of address spaces.  This provides a unique and easily
1482 	 * identifiable generation number while the memslots are in flux.
1483 	 */
1484 	gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1485 
1486 	/*
1487 	 * Generations must be unique even across address spaces.  We do not need
1488 	 * a global counter for that, instead the generation space is evenly split
1489 	 * across address spaces.  For example, with two address spaces, address
1490 	 * space 0 will use generations 0, 2, 4, ... while address space 1 will
1491 	 * use generations 1, 3, 5, ...
1492 	 */
1493 	gen += KVM_ADDRESS_SPACE_NUM;
1494 
1495 	kvm_arch_memslots_updated(kvm, gen);
1496 
1497 	slots->generation = gen;
1498 }
1499 
1500 static int kvm_prepare_memory_region(struct kvm *kvm,
1501 				     const struct kvm_memory_slot *old,
1502 				     struct kvm_memory_slot *new,
1503 				     enum kvm_mr_change change)
1504 {
1505 	int r;
1506 
1507 	/*
1508 	 * If dirty logging is disabled, nullify the bitmap; the old bitmap
1509 	 * will be freed on "commit".  If logging is enabled in both old and
1510 	 * new, reuse the existing bitmap.  If logging is enabled only in the
1511 	 * new and KVM isn't using a ring buffer, allocate and initialize a
1512 	 * new bitmap.
1513 	 */
1514 	if (change != KVM_MR_DELETE) {
1515 		if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
1516 			new->dirty_bitmap = NULL;
1517 		else if (old && old->dirty_bitmap)
1518 			new->dirty_bitmap = old->dirty_bitmap;
1519 		else if (!kvm->dirty_ring_size) {
1520 			r = kvm_alloc_dirty_bitmap(new);
1521 			if (r)
1522 				return r;
1523 
1524 			if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1525 				bitmap_set(new->dirty_bitmap, 0, new->npages);
1526 		}
1527 	}
1528 
1529 	r = kvm_arch_prepare_memory_region(kvm, old, new, change);
1530 
1531 	/* Free the bitmap on failure if it was allocated above. */
1532 	if (r && new && new->dirty_bitmap && old && !old->dirty_bitmap)
1533 		kvm_destroy_dirty_bitmap(new);
1534 
1535 	return r;
1536 }
1537 
1538 static void kvm_commit_memory_region(struct kvm *kvm,
1539 				     struct kvm_memory_slot *old,
1540 				     const struct kvm_memory_slot *new,
1541 				     enum kvm_mr_change change)
1542 {
1543 	/*
1544 	 * Update the total number of memslot pages before calling the arch
1545 	 * hook so that architectures can consume the result directly.
1546 	 */
1547 	if (change == KVM_MR_DELETE)
1548 		kvm->nr_memslot_pages -= old->npages;
1549 	else if (change == KVM_MR_CREATE)
1550 		kvm->nr_memslot_pages += new->npages;
1551 
1552 	kvm_arch_commit_memory_region(kvm, old, new, change);
1553 
1554 	switch (change) {
1555 	case KVM_MR_CREATE:
1556 		/* Nothing more to do. */
1557 		break;
1558 	case KVM_MR_DELETE:
1559 		/* Free the old memslot and all its metadata. */
1560 		kvm_free_memslot(kvm, old);
1561 		break;
1562 	case KVM_MR_MOVE:
1563 	case KVM_MR_FLAGS_ONLY:
1564 		/*
1565 		 * Free the dirty bitmap as needed; the below check encompasses
1566 		 * both the flags and whether a ring buffer is being used)
1567 		 */
1568 		if (old->dirty_bitmap && !new->dirty_bitmap)
1569 			kvm_destroy_dirty_bitmap(old);
1570 
1571 		/*
1572 		 * The final quirk.  Free the detached, old slot, but only its
1573 		 * memory, not any metadata.  Metadata, including arch specific
1574 		 * data, may be reused by @new.
1575 		 */
1576 		kfree(old);
1577 		break;
1578 	default:
1579 		BUG();
1580 	}
1581 }
1582 
1583 /*
1584  * Activate @new, which must be installed in the inactive slots by the caller,
1585  * by swapping the active slots and then propagating @new to @old once @old is
1586  * unreachable and can be safely modified.
1587  *
1588  * With NULL @old this simply adds @new to @active (while swapping the sets).
1589  * With NULL @new this simply removes @old from @active and frees it
1590  * (while also swapping the sets).
1591  */
1592 static void kvm_activate_memslot(struct kvm *kvm,
1593 				 struct kvm_memory_slot *old,
1594 				 struct kvm_memory_slot *new)
1595 {
1596 	int as_id = kvm_memslots_get_as_id(old, new);
1597 
1598 	kvm_swap_active_memslots(kvm, as_id);
1599 
1600 	/* Propagate the new memslot to the now inactive memslots. */
1601 	kvm_replace_memslot(kvm, old, new);
1602 }
1603 
1604 static void kvm_copy_memslot(struct kvm_memory_slot *dest,
1605 			     const struct kvm_memory_slot *src)
1606 {
1607 	dest->base_gfn = src->base_gfn;
1608 	dest->npages = src->npages;
1609 	dest->dirty_bitmap = src->dirty_bitmap;
1610 	dest->arch = src->arch;
1611 	dest->userspace_addr = src->userspace_addr;
1612 	dest->flags = src->flags;
1613 	dest->id = src->id;
1614 	dest->as_id = src->as_id;
1615 }
1616 
1617 static void kvm_invalidate_memslot(struct kvm *kvm,
1618 				   struct kvm_memory_slot *old,
1619 				   struct kvm_memory_slot *invalid_slot)
1620 {
1621 	/*
1622 	 * Mark the current slot INVALID.  As with all memslot modifications,
1623 	 * this must be done on an unreachable slot to avoid modifying the
1624 	 * current slot in the active tree.
1625 	 */
1626 	kvm_copy_memslot(invalid_slot, old);
1627 	invalid_slot->flags |= KVM_MEMSLOT_INVALID;
1628 	kvm_replace_memslot(kvm, old, invalid_slot);
1629 
1630 	/*
1631 	 * Activate the slot that is now marked INVALID, but don't propagate
1632 	 * the slot to the now inactive slots. The slot is either going to be
1633 	 * deleted or recreated as a new slot.
1634 	 */
1635 	kvm_swap_active_memslots(kvm, old->as_id);
1636 
1637 	/*
1638 	 * From this point no new shadow pages pointing to a deleted, or moved,
1639 	 * memslot will be created.  Validation of sp->gfn happens in:
1640 	 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
1641 	 *	- kvm_is_visible_gfn (mmu_check_root)
1642 	 */
1643 	kvm_arch_flush_shadow_memslot(kvm, old);
1644 
1645 	/* Was released by kvm_swap_active_memslots, reacquire. */
1646 	mutex_lock(&kvm->slots_arch_lock);
1647 
1648 	/*
1649 	 * Copy the arch-specific field of the newly-installed slot back to the
1650 	 * old slot as the arch data could have changed between releasing
1651 	 * slots_arch_lock in install_new_memslots() and re-acquiring the lock
1652 	 * above.  Writers are required to retrieve memslots *after* acquiring
1653 	 * slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
1654 	 */
1655 	old->arch = invalid_slot->arch;
1656 }
1657 
1658 static void kvm_create_memslot(struct kvm *kvm,
1659 			       struct kvm_memory_slot *new)
1660 {
1661 	/* Add the new memslot to the inactive set and activate. */
1662 	kvm_replace_memslot(kvm, NULL, new);
1663 	kvm_activate_memslot(kvm, NULL, new);
1664 }
1665 
1666 static void kvm_delete_memslot(struct kvm *kvm,
1667 			       struct kvm_memory_slot *old,
1668 			       struct kvm_memory_slot *invalid_slot)
1669 {
1670 	/*
1671 	 * Remove the old memslot (in the inactive memslots) by passing NULL as
1672 	 * the "new" slot, and for the invalid version in the active slots.
1673 	 */
1674 	kvm_replace_memslot(kvm, old, NULL);
1675 	kvm_activate_memslot(kvm, invalid_slot, NULL);
1676 }
1677 
1678 static void kvm_move_memslot(struct kvm *kvm,
1679 			     struct kvm_memory_slot *old,
1680 			     struct kvm_memory_slot *new,
1681 			     struct kvm_memory_slot *invalid_slot)
1682 {
1683 	/*
1684 	 * Replace the old memslot in the inactive slots, and then swap slots
1685 	 * and replace the current INVALID with the new as well.
1686 	 */
1687 	kvm_replace_memslot(kvm, old, new);
1688 	kvm_activate_memslot(kvm, invalid_slot, new);
1689 }
1690 
1691 static void kvm_update_flags_memslot(struct kvm *kvm,
1692 				     struct kvm_memory_slot *old,
1693 				     struct kvm_memory_slot *new)
1694 {
1695 	/*
1696 	 * Similar to the MOVE case, but the slot doesn't need to be zapped as
1697 	 * an intermediate step. Instead, the old memslot is simply replaced
1698 	 * with a new, updated copy in both memslot sets.
1699 	 */
1700 	kvm_replace_memslot(kvm, old, new);
1701 	kvm_activate_memslot(kvm, old, new);
1702 }
1703 
1704 static int kvm_set_memslot(struct kvm *kvm,
1705 			   struct kvm_memory_slot *old,
1706 			   struct kvm_memory_slot *new,
1707 			   enum kvm_mr_change change)
1708 {
1709 	struct kvm_memory_slot *invalid_slot;
1710 	int r;
1711 
1712 	/*
1713 	 * Released in kvm_swap_active_memslots.
1714 	 *
1715 	 * Must be held from before the current memslots are copied until
1716 	 * after the new memslots are installed with rcu_assign_pointer,
1717 	 * then released before the synchronize srcu in kvm_swap_active_memslots.
1718 	 *
1719 	 * When modifying memslots outside of the slots_lock, must be held
1720 	 * before reading the pointer to the current memslots until after all
1721 	 * changes to those memslots are complete.
1722 	 *
1723 	 * These rules ensure that installing new memslots does not lose
1724 	 * changes made to the previous memslots.
1725 	 */
1726 	mutex_lock(&kvm->slots_arch_lock);
1727 
1728 	/*
1729 	 * Invalidate the old slot if it's being deleted or moved.  This is
1730 	 * done prior to actually deleting/moving the memslot to allow vCPUs to
1731 	 * continue running by ensuring there are no mappings or shadow pages
1732 	 * for the memslot when it is deleted/moved.  Without pre-invalidation
1733 	 * (and without a lock), a window would exist between effecting the
1734 	 * delete/move and committing the changes in arch code where KVM or a
1735 	 * guest could access a non-existent memslot.
1736 	 *
1737 	 * Modifications are done on a temporary, unreachable slot.  The old
1738 	 * slot needs to be preserved in case a later step fails and the
1739 	 * invalidation needs to be reverted.
1740 	 */
1741 	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1742 		invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT);
1743 		if (!invalid_slot) {
1744 			mutex_unlock(&kvm->slots_arch_lock);
1745 			return -ENOMEM;
1746 		}
1747 		kvm_invalidate_memslot(kvm, old, invalid_slot);
1748 	}
1749 
1750 	r = kvm_prepare_memory_region(kvm, old, new, change);
1751 	if (r) {
1752 		/*
1753 		 * For DELETE/MOVE, revert the above INVALID change.  No
1754 		 * modifications required since the original slot was preserved
1755 		 * in the inactive slots.  Changing the active memslots also
1756 		 * release slots_arch_lock.
1757 		 */
1758 		if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1759 			kvm_activate_memslot(kvm, invalid_slot, old);
1760 			kfree(invalid_slot);
1761 		} else {
1762 			mutex_unlock(&kvm->slots_arch_lock);
1763 		}
1764 		return r;
1765 	}
1766 
1767 	/*
1768 	 * For DELETE and MOVE, the working slot is now active as the INVALID
1769 	 * version of the old slot.  MOVE is particularly special as it reuses
1770 	 * the old slot and returns a copy of the old slot (in working_slot).
1771 	 * For CREATE, there is no old slot.  For DELETE and FLAGS_ONLY, the
1772 	 * old slot is detached but otherwise preserved.
1773 	 */
1774 	if (change == KVM_MR_CREATE)
1775 		kvm_create_memslot(kvm, new);
1776 	else if (change == KVM_MR_DELETE)
1777 		kvm_delete_memslot(kvm, old, invalid_slot);
1778 	else if (change == KVM_MR_MOVE)
1779 		kvm_move_memslot(kvm, old, new, invalid_slot);
1780 	else if (change == KVM_MR_FLAGS_ONLY)
1781 		kvm_update_flags_memslot(kvm, old, new);
1782 	else
1783 		BUG();
1784 
1785 	/* Free the temporary INVALID slot used for DELETE and MOVE. */
1786 	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
1787 		kfree(invalid_slot);
1788 
1789 	/*
1790 	 * No need to refresh new->arch, changes after dropping slots_arch_lock
1791 	 * will directly hit the final, active memsot.  Architectures are
1792 	 * responsible for knowing that new->arch may be stale.
1793 	 */
1794 	kvm_commit_memory_region(kvm, old, new, change);
1795 
1796 	return 0;
1797 }
1798 
1799 static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
1800 				      gfn_t start, gfn_t end)
1801 {
1802 	struct kvm_memslot_iter iter;
1803 
1804 	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
1805 		if (iter.slot->id != id)
1806 			return true;
1807 	}
1808 
1809 	return false;
1810 }
1811 
1812 /*
1813  * Allocate some memory and give it an address in the guest physical address
1814  * space.
1815  *
1816  * Discontiguous memory is allowed, mostly for framebuffers.
1817  *
1818  * Must be called holding kvm->slots_lock for write.
1819  */
1820 int __kvm_set_memory_region(struct kvm *kvm,
1821 			    const struct kvm_userspace_memory_region *mem)
1822 {
1823 	struct kvm_memory_slot *old, *new;
1824 	struct kvm_memslots *slots;
1825 	enum kvm_mr_change change;
1826 	unsigned long npages;
1827 	gfn_t base_gfn;
1828 	int as_id, id;
1829 	int r;
1830 
1831 	r = check_memory_region_flags(mem);
1832 	if (r)
1833 		return r;
1834 
1835 	as_id = mem->slot >> 16;
1836 	id = (u16)mem->slot;
1837 
1838 	/* General sanity checks */
1839 	if ((mem->memory_size & (PAGE_SIZE - 1)) ||
1840 	    (mem->memory_size != (unsigned long)mem->memory_size))
1841 		return -EINVAL;
1842 	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
1843 		return -EINVAL;
1844 	/* We can read the guest memory with __xxx_user() later on. */
1845 	if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
1846 	    (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
1847 	     !access_ok((void __user *)(unsigned long)mem->userspace_addr,
1848 			mem->memory_size))
1849 		return -EINVAL;
1850 	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
1851 		return -EINVAL;
1852 	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
1853 		return -EINVAL;
1854 	if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
1855 		return -EINVAL;
1856 
1857 	slots = __kvm_memslots(kvm, as_id);
1858 
1859 	/*
1860 	 * Note, the old memslot (and the pointer itself!) may be invalidated
1861 	 * and/or destroyed by kvm_set_memslot().
1862 	 */
1863 	old = id_to_memslot(slots, id);
1864 
1865 	if (!mem->memory_size) {
1866 		if (!old || !old->npages)
1867 			return -EINVAL;
1868 
1869 		if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages))
1870 			return -EIO;
1871 
1872 		return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE);
1873 	}
1874 
1875 	base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
1876 	npages = (mem->memory_size >> PAGE_SHIFT);
1877 
1878 	if (!old || !old->npages) {
1879 		change = KVM_MR_CREATE;
1880 
1881 		/*
1882 		 * To simplify KVM internals, the total number of pages across
1883 		 * all memslots must fit in an unsigned long.
1884 		 */
1885 		if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
1886 			return -EINVAL;
1887 	} else { /* Modify an existing slot. */
1888 		if ((mem->userspace_addr != old->userspace_addr) ||
1889 		    (npages != old->npages) ||
1890 		    ((mem->flags ^ old->flags) & KVM_MEM_READONLY))
1891 			return -EINVAL;
1892 
1893 		if (base_gfn != old->base_gfn)
1894 			change = KVM_MR_MOVE;
1895 		else if (mem->flags != old->flags)
1896 			change = KVM_MR_FLAGS_ONLY;
1897 		else /* Nothing to change. */
1898 			return 0;
1899 	}
1900 
1901 	if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
1902 	    kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages))
1903 		return -EEXIST;
1904 
1905 	/* Allocate a slot that will persist in the memslot. */
1906 	new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
1907 	if (!new)
1908 		return -ENOMEM;
1909 
1910 	new->as_id = as_id;
1911 	new->id = id;
1912 	new->base_gfn = base_gfn;
1913 	new->npages = npages;
1914 	new->flags = mem->flags;
1915 	new->userspace_addr = mem->userspace_addr;
1916 
1917 	r = kvm_set_memslot(kvm, old, new, change);
1918 	if (r)
1919 		kfree(new);
1920 	return r;
1921 }
1922 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
1923 
1924 int kvm_set_memory_region(struct kvm *kvm,
1925 			  const struct kvm_userspace_memory_region *mem)
1926 {
1927 	int r;
1928 
1929 	mutex_lock(&kvm->slots_lock);
1930 	r = __kvm_set_memory_region(kvm, mem);
1931 	mutex_unlock(&kvm->slots_lock);
1932 	return r;
1933 }
1934 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
1935 
1936 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
1937 					  struct kvm_userspace_memory_region *mem)
1938 {
1939 	if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
1940 		return -EINVAL;
1941 
1942 	return kvm_set_memory_region(kvm, mem);
1943 }
1944 
1945 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
1946 /**
1947  * kvm_get_dirty_log - get a snapshot of dirty pages
1948  * @kvm:	pointer to kvm instance
1949  * @log:	slot id and address to which we copy the log
1950  * @is_dirty:	set to '1' if any dirty pages were found
1951  * @memslot:	set to the associated memslot, always valid on success
1952  */
1953 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
1954 		      int *is_dirty, struct kvm_memory_slot **memslot)
1955 {
1956 	struct kvm_memslots *slots;
1957 	int i, as_id, id;
1958 	unsigned long n;
1959 	unsigned long any = 0;
1960 
1961 	/* Dirty ring tracking is exclusive to dirty log tracking */
1962 	if (kvm->dirty_ring_size)
1963 		return -ENXIO;
1964 
1965 	*memslot = NULL;
1966 	*is_dirty = 0;
1967 
1968 	as_id = log->slot >> 16;
1969 	id = (u16)log->slot;
1970 	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
1971 		return -EINVAL;
1972 
1973 	slots = __kvm_memslots(kvm, as_id);
1974 	*memslot = id_to_memslot(slots, id);
1975 	if (!(*memslot) || !(*memslot)->dirty_bitmap)
1976 		return -ENOENT;
1977 
1978 	kvm_arch_sync_dirty_log(kvm, *memslot);
1979 
1980 	n = kvm_dirty_bitmap_bytes(*memslot);
1981 
1982 	for (i = 0; !any && i < n/sizeof(long); ++i)
1983 		any = (*memslot)->dirty_bitmap[i];
1984 
1985 	if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
1986 		return -EFAULT;
1987 
1988 	if (any)
1989 		*is_dirty = 1;
1990 	return 0;
1991 }
1992 EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
1993 
1994 #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
1995 /**
1996  * kvm_get_dirty_log_protect - get a snapshot of dirty pages
1997  *	and reenable dirty page tracking for the corresponding pages.
1998  * @kvm:	pointer to kvm instance
1999  * @log:	slot id and address to which we copy the log
2000  *
2001  * We need to keep it in mind that VCPU threads can write to the bitmap
2002  * concurrently. So, to avoid losing track of dirty pages we keep the
2003  * following order:
2004  *
2005  *    1. Take a snapshot of the bit and clear it if needed.
2006  *    2. Write protect the corresponding page.
2007  *    3. Copy the snapshot to the userspace.
2008  *    4. Upon return caller flushes TLB's if needed.
2009  *
2010  * Between 2 and 4, the guest may write to the page using the remaining TLB
2011  * entry.  This is not a problem because the page is reported dirty using
2012  * the snapshot taken before and step 4 ensures that writes done after
2013  * exiting to userspace will be logged for the next call.
2014  *
2015  */
2016 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
2017 {
2018 	struct kvm_memslots *slots;
2019 	struct kvm_memory_slot *memslot;
2020 	int i, as_id, id;
2021 	unsigned long n;
2022 	unsigned long *dirty_bitmap;
2023 	unsigned long *dirty_bitmap_buffer;
2024 	bool flush;
2025 
2026 	/* Dirty ring tracking is exclusive to dirty log tracking */
2027 	if (kvm->dirty_ring_size)
2028 		return -ENXIO;
2029 
2030 	as_id = log->slot >> 16;
2031 	id = (u16)log->slot;
2032 	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
2033 		return -EINVAL;
2034 
2035 	slots = __kvm_memslots(kvm, as_id);
2036 	memslot = id_to_memslot(slots, id);
2037 	if (!memslot || !memslot->dirty_bitmap)
2038 		return -ENOENT;
2039 
2040 	dirty_bitmap = memslot->dirty_bitmap;
2041 
2042 	kvm_arch_sync_dirty_log(kvm, memslot);
2043 
2044 	n = kvm_dirty_bitmap_bytes(memslot);
2045 	flush = false;
2046 	if (kvm->manual_dirty_log_protect) {
2047 		/*
2048 		 * Unlike kvm_get_dirty_log, we always return false in *flush,
2049 		 * because no flush is needed until KVM_CLEAR_DIRTY_LOG.  There
2050 		 * is some code duplication between this function and
2051 		 * kvm_get_dirty_log, but hopefully all architecture
2052 		 * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log
2053 		 * can be eliminated.
2054 		 */
2055 		dirty_bitmap_buffer = dirty_bitmap;
2056 	} else {
2057 		dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2058 		memset(dirty_bitmap_buffer, 0, n);
2059 
2060 		KVM_MMU_LOCK(kvm);
2061 		for (i = 0; i < n / sizeof(long); i++) {
2062 			unsigned long mask;
2063 			gfn_t offset;
2064 
2065 			if (!dirty_bitmap[i])
2066 				continue;
2067 
2068 			flush = true;
2069 			mask = xchg(&dirty_bitmap[i], 0);
2070 			dirty_bitmap_buffer[i] = mask;
2071 
2072 			offset = i * BITS_PER_LONG;
2073 			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2074 								offset, mask);
2075 		}
2076 		KVM_MMU_UNLOCK(kvm);
2077 	}
2078 
2079 	if (flush)
2080 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
2081 
2082 	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
2083 		return -EFAULT;
2084 	return 0;
2085 }
2086 
2087 
2088 /**
2089  * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
2090  * @kvm: kvm instance
2091  * @log: slot id and address to which we copy the log
2092  *
2093  * Steps 1-4 below provide general overview of dirty page logging. See
2094  * kvm_get_dirty_log_protect() function description for additional details.
2095  *
2096  * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
2097  * always flush the TLB (step 4) even if previous step failed  and the dirty
2098  * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
2099  * does not preclude user space subsequent dirty log read. Flushing TLB ensures
2100  * writes will be marked dirty for next log read.
2101  *
2102  *   1. Take a snapshot of the bit and clear it if needed.
2103  *   2. Write protect the corresponding page.
2104  *   3. Copy the snapshot to the userspace.
2105  *   4. Flush TLB's if needed.
2106  */
2107 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2108 				      struct kvm_dirty_log *log)
2109 {
2110 	int r;
2111 
2112 	mutex_lock(&kvm->slots_lock);
2113 
2114 	r = kvm_get_dirty_log_protect(kvm, log);
2115 
2116 	mutex_unlock(&kvm->slots_lock);
2117 	return r;
2118 }
2119 
2120 /**
2121  * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
2122  *	and reenable dirty page tracking for the corresponding pages.
2123  * @kvm:	pointer to kvm instance
2124  * @log:	slot id and address from which to fetch the bitmap of dirty pages
2125  */
2126 static int kvm_clear_dirty_log_protect(struct kvm *kvm,
2127 				       struct kvm_clear_dirty_log *log)
2128 {
2129 	struct kvm_memslots *slots;
2130 	struct kvm_memory_slot *memslot;
2131 	int as_id, id;
2132 	gfn_t offset;
2133 	unsigned long i, n;
2134 	unsigned long *dirty_bitmap;
2135 	unsigned long *dirty_bitmap_buffer;
2136 	bool flush;
2137 
2138 	/* Dirty ring tracking is exclusive to dirty log tracking */
2139 	if (kvm->dirty_ring_size)
2140 		return -ENXIO;
2141 
2142 	as_id = log->slot >> 16;
2143 	id = (u16)log->slot;
2144 	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
2145 		return -EINVAL;
2146 
2147 	if (log->first_page & 63)
2148 		return -EINVAL;
2149 
2150 	slots = __kvm_memslots(kvm, as_id);
2151 	memslot = id_to_memslot(slots, id);
2152 	if (!memslot || !memslot->dirty_bitmap)
2153 		return -ENOENT;
2154 
2155 	dirty_bitmap = memslot->dirty_bitmap;
2156 
2157 	n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
2158 
2159 	if (log->first_page > memslot->npages ||
2160 	    log->num_pages > memslot->npages - log->first_page ||
2161 	    (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
2162 	    return -EINVAL;
2163 
2164 	kvm_arch_sync_dirty_log(kvm, memslot);
2165 
2166 	flush = false;
2167 	dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2168 	if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
2169 		return -EFAULT;
2170 
2171 	KVM_MMU_LOCK(kvm);
2172 	for (offset = log->first_page, i = offset / BITS_PER_LONG,
2173 		 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
2174 	     i++, offset += BITS_PER_LONG) {
2175 		unsigned long mask = *dirty_bitmap_buffer++;
2176 		atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
2177 		if (!mask)
2178 			continue;
2179 
2180 		mask &= atomic_long_fetch_andnot(mask, p);
2181 
2182 		/*
2183 		 * mask contains the bits that really have been cleared.  This
2184 		 * never includes any bits beyond the length of the memslot (if
2185 		 * the length is not aligned to 64 pages), therefore it is not
2186 		 * a problem if userspace sets them in log->dirty_bitmap.
2187 		*/
2188 		if (mask) {
2189 			flush = true;
2190 			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2191 								offset, mask);
2192 		}
2193 	}
2194 	KVM_MMU_UNLOCK(kvm);
2195 
2196 	if (flush)
2197 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
2198 
2199 	return 0;
2200 }
2201 
2202 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
2203 					struct kvm_clear_dirty_log *log)
2204 {
2205 	int r;
2206 
2207 	mutex_lock(&kvm->slots_lock);
2208 
2209 	r = kvm_clear_dirty_log_protect(kvm, log);
2210 
2211 	mutex_unlock(&kvm->slots_lock);
2212 	return r;
2213 }
2214 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2215 
2216 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
2217 {
2218 	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
2219 }
2220 EXPORT_SYMBOL_GPL(gfn_to_memslot);
2221 
2222 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
2223 {
2224 	struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
2225 	u64 gen = slots->generation;
2226 	struct kvm_memory_slot *slot;
2227 
2228 	/*
2229 	 * This also protects against using a memslot from a different address space,
2230 	 * since different address spaces have different generation numbers.
2231 	 */
2232 	if (unlikely(gen != vcpu->last_used_slot_gen)) {
2233 		vcpu->last_used_slot = NULL;
2234 		vcpu->last_used_slot_gen = gen;
2235 	}
2236 
2237 	slot = try_get_memslot(vcpu->last_used_slot, gfn);
2238 	if (slot)
2239 		return slot;
2240 
2241 	/*
2242 	 * Fall back to searching all memslots. We purposely use
2243 	 * search_memslots() instead of __gfn_to_memslot() to avoid
2244 	 * thrashing the VM-wide last_used_slot in kvm_memslots.
2245 	 */
2246 	slot = search_memslots(slots, gfn, false);
2247 	if (slot) {
2248 		vcpu->last_used_slot = slot;
2249 		return slot;
2250 	}
2251 
2252 	return NULL;
2253 }
2254 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_memslot);
2255 
2256 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
2257 {
2258 	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
2259 
2260 	return kvm_is_visible_memslot(memslot);
2261 }
2262 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
2263 
2264 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2265 {
2266 	struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2267 
2268 	return kvm_is_visible_memslot(memslot);
2269 }
2270 EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn);
2271 
2272 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
2273 {
2274 	struct vm_area_struct *vma;
2275 	unsigned long addr, size;
2276 
2277 	size = PAGE_SIZE;
2278 
2279 	addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
2280 	if (kvm_is_error_hva(addr))
2281 		return PAGE_SIZE;
2282 
2283 	mmap_read_lock(current->mm);
2284 	vma = find_vma(current->mm, addr);
2285 	if (!vma)
2286 		goto out;
2287 
2288 	size = vma_kernel_pagesize(vma);
2289 
2290 out:
2291 	mmap_read_unlock(current->mm);
2292 
2293 	return size;
2294 }
2295 
2296 static bool memslot_is_readonly(const struct kvm_memory_slot *slot)
2297 {
2298 	return slot->flags & KVM_MEM_READONLY;
2299 }
2300 
2301 static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn,
2302 				       gfn_t *nr_pages, bool write)
2303 {
2304 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
2305 		return KVM_HVA_ERR_BAD;
2306 
2307 	if (memslot_is_readonly(slot) && write)
2308 		return KVM_HVA_ERR_RO_BAD;
2309 
2310 	if (nr_pages)
2311 		*nr_pages = slot->npages - (gfn - slot->base_gfn);
2312 
2313 	return __gfn_to_hva_memslot(slot, gfn);
2314 }
2315 
2316 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
2317 				     gfn_t *nr_pages)
2318 {
2319 	return __gfn_to_hva_many(slot, gfn, nr_pages, true);
2320 }
2321 
2322 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
2323 					gfn_t gfn)
2324 {
2325 	return gfn_to_hva_many(slot, gfn, NULL);
2326 }
2327 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
2328 
2329 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
2330 {
2331 	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
2332 }
2333 EXPORT_SYMBOL_GPL(gfn_to_hva);
2334 
2335 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
2336 {
2337 	return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
2338 }
2339 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
2340 
2341 /*
2342  * Return the hva of a @gfn and the R/W attribute if possible.
2343  *
2344  * @slot: the kvm_memory_slot which contains @gfn
2345  * @gfn: the gfn to be translated
2346  * @writable: used to return the read/write attribute of the @slot if the hva
2347  * is valid and @writable is not NULL
2348  */
2349 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
2350 				      gfn_t gfn, bool *writable)
2351 {
2352 	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
2353 
2354 	if (!kvm_is_error_hva(hva) && writable)
2355 		*writable = !memslot_is_readonly(slot);
2356 
2357 	return hva;
2358 }
2359 
2360 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
2361 {
2362 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2363 
2364 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
2365 }
2366 
2367 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
2368 {
2369 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2370 
2371 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
2372 }
2373 
2374 static inline int check_user_page_hwpoison(unsigned long addr)
2375 {
2376 	int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
2377 
2378 	rc = get_user_pages(addr, 1, flags, NULL, NULL);
2379 	return rc == -EHWPOISON;
2380 }
2381 
2382 /*
2383  * The fast path to get the writable pfn which will be stored in @pfn,
2384  * true indicates success, otherwise false is returned.  It's also the
2385  * only part that runs if we can in atomic context.
2386  */
2387 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
2388 			    bool *writable, kvm_pfn_t *pfn)
2389 {
2390 	struct page *page[1];
2391 
2392 	/*
2393 	 * Fast pin a writable pfn only if it is a write fault request
2394 	 * or the caller allows to map a writable pfn for a read fault
2395 	 * request.
2396 	 */
2397 	if (!(write_fault || writable))
2398 		return false;
2399 
2400 	if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
2401 		*pfn = page_to_pfn(page[0]);
2402 
2403 		if (writable)
2404 			*writable = true;
2405 		return true;
2406 	}
2407 
2408 	return false;
2409 }
2410 
2411 /*
2412  * The slow path to get the pfn of the specified host virtual address,
2413  * 1 indicates success, -errno is returned if error is detected.
2414  */
2415 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
2416 			   bool *writable, kvm_pfn_t *pfn)
2417 {
2418 	unsigned int flags = FOLL_HWPOISON;
2419 	struct page *page;
2420 	int npages = 0;
2421 
2422 	might_sleep();
2423 
2424 	if (writable)
2425 		*writable = write_fault;
2426 
2427 	if (write_fault)
2428 		flags |= FOLL_WRITE;
2429 	if (async)
2430 		flags |= FOLL_NOWAIT;
2431 
2432 	npages = get_user_pages_unlocked(addr, 1, &page, flags);
2433 	if (npages != 1)
2434 		return npages;
2435 
2436 	/* map read fault as writable if possible */
2437 	if (unlikely(!write_fault) && writable) {
2438 		struct page *wpage;
2439 
2440 		if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
2441 			*writable = true;
2442 			put_page(page);
2443 			page = wpage;
2444 		}
2445 	}
2446 	*pfn = page_to_pfn(page);
2447 	return npages;
2448 }
2449 
2450 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
2451 {
2452 	if (unlikely(!(vma->vm_flags & VM_READ)))
2453 		return false;
2454 
2455 	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
2456 		return false;
2457 
2458 	return true;
2459 }
2460 
2461 static int kvm_try_get_pfn(kvm_pfn_t pfn)
2462 {
2463 	if (kvm_is_reserved_pfn(pfn))
2464 		return 1;
2465 	return get_page_unless_zero(pfn_to_page(pfn));
2466 }
2467 
2468 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
2469 			       unsigned long addr, bool *async,
2470 			       bool write_fault, bool *writable,
2471 			       kvm_pfn_t *p_pfn)
2472 {
2473 	kvm_pfn_t pfn;
2474 	pte_t *ptep;
2475 	spinlock_t *ptl;
2476 	int r;
2477 
2478 	r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
2479 	if (r) {
2480 		/*
2481 		 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
2482 		 * not call the fault handler, so do it here.
2483 		 */
2484 		bool unlocked = false;
2485 		r = fixup_user_fault(current->mm, addr,
2486 				     (write_fault ? FAULT_FLAG_WRITE : 0),
2487 				     &unlocked);
2488 		if (unlocked)
2489 			return -EAGAIN;
2490 		if (r)
2491 			return r;
2492 
2493 		r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
2494 		if (r)
2495 			return r;
2496 	}
2497 
2498 	if (write_fault && !pte_write(*ptep)) {
2499 		pfn = KVM_PFN_ERR_RO_FAULT;
2500 		goto out;
2501 	}
2502 
2503 	if (writable)
2504 		*writable = pte_write(*ptep);
2505 	pfn = pte_pfn(*ptep);
2506 
2507 	/*
2508 	 * Get a reference here because callers of *hva_to_pfn* and
2509 	 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
2510 	 * returned pfn.  This is only needed if the VMA has VM_MIXEDMAP
2511 	 * set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will
2512 	 * simply do nothing for reserved pfns.
2513 	 *
2514 	 * Whoever called remap_pfn_range is also going to call e.g.
2515 	 * unmap_mapping_range before the underlying pages are freed,
2516 	 * causing a call to our MMU notifier.
2517 	 *
2518 	 * Certain IO or PFNMAP mappings can be backed with valid
2519 	 * struct pages, but be allocated without refcounting e.g.,
2520 	 * tail pages of non-compound higher order allocations, which
2521 	 * would then underflow the refcount when the caller does the
2522 	 * required put_page. Don't allow those pages here.
2523 	 */
2524 	if (!kvm_try_get_pfn(pfn))
2525 		r = -EFAULT;
2526 
2527 out:
2528 	pte_unmap_unlock(ptep, ptl);
2529 	*p_pfn = pfn;
2530 
2531 	return r;
2532 }
2533 
2534 /*
2535  * Pin guest page in memory and return its pfn.
2536  * @addr: host virtual address which maps memory to the guest
2537  * @atomic: whether this function can sleep
2538  * @async: whether this function need to wait IO complete if the
2539  *         host page is not in the memory
2540  * @write_fault: whether we should get a writable host page
2541  * @writable: whether it allows to map a writable host page for !@write_fault
2542  *
2543  * The function will map a writable host page for these two cases:
2544  * 1): @write_fault = true
2545  * 2): @write_fault = false && @writable, @writable will tell the caller
2546  *     whether the mapping is writable.
2547  */
2548 kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
2549 		     bool write_fault, bool *writable)
2550 {
2551 	struct vm_area_struct *vma;
2552 	kvm_pfn_t pfn = 0;
2553 	int npages, r;
2554 
2555 	/* we can do it either atomically or asynchronously, not both */
2556 	BUG_ON(atomic && async);
2557 
2558 	if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
2559 		return pfn;
2560 
2561 	if (atomic)
2562 		return KVM_PFN_ERR_FAULT;
2563 
2564 	npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
2565 	if (npages == 1)
2566 		return pfn;
2567 
2568 	mmap_read_lock(current->mm);
2569 	if (npages == -EHWPOISON ||
2570 	      (!async && check_user_page_hwpoison(addr))) {
2571 		pfn = KVM_PFN_ERR_HWPOISON;
2572 		goto exit;
2573 	}
2574 
2575 retry:
2576 	vma = vma_lookup(current->mm, addr);
2577 
2578 	if (vma == NULL)
2579 		pfn = KVM_PFN_ERR_FAULT;
2580 	else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
2581 		r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn);
2582 		if (r == -EAGAIN)
2583 			goto retry;
2584 		if (r < 0)
2585 			pfn = KVM_PFN_ERR_FAULT;
2586 	} else {
2587 		if (async && vma_is_valid(vma, write_fault))
2588 			*async = true;
2589 		pfn = KVM_PFN_ERR_FAULT;
2590 	}
2591 exit:
2592 	mmap_read_unlock(current->mm);
2593 	return pfn;
2594 }
2595 
2596 kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
2597 			       bool atomic, bool *async, bool write_fault,
2598 			       bool *writable, hva_t *hva)
2599 {
2600 	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
2601 
2602 	if (hva)
2603 		*hva = addr;
2604 
2605 	if (addr == KVM_HVA_ERR_RO_BAD) {
2606 		if (writable)
2607 			*writable = false;
2608 		return KVM_PFN_ERR_RO_FAULT;
2609 	}
2610 
2611 	if (kvm_is_error_hva(addr)) {
2612 		if (writable)
2613 			*writable = false;
2614 		return KVM_PFN_NOSLOT;
2615 	}
2616 
2617 	/* Do not map writable pfn in the readonly memslot. */
2618 	if (writable && memslot_is_readonly(slot)) {
2619 		*writable = false;
2620 		writable = NULL;
2621 	}
2622 
2623 	return hva_to_pfn(addr, atomic, async, write_fault,
2624 			  writable);
2625 }
2626 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
2627 
2628 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
2629 		      bool *writable)
2630 {
2631 	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
2632 				    write_fault, writable, NULL);
2633 }
2634 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
2635 
2636 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
2637 {
2638 	return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL);
2639 }
2640 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
2641 
2642 kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)
2643 {
2644 	return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL);
2645 }
2646 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
2647 
2648 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
2649 {
2650 	return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
2651 }
2652 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
2653 
2654 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
2655 {
2656 	return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
2657 }
2658 EXPORT_SYMBOL_GPL(gfn_to_pfn);
2659 
2660 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2661 {
2662 	return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
2663 }
2664 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
2665 
2666 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
2667 			    struct page **pages, int nr_pages)
2668 {
2669 	unsigned long addr;
2670 	gfn_t entry = 0;
2671 
2672 	addr = gfn_to_hva_many(slot, gfn, &entry);
2673 	if (kvm_is_error_hva(addr))
2674 		return -1;
2675 
2676 	if (entry < nr_pages)
2677 		return 0;
2678 
2679 	return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
2680 }
2681 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
2682 
2683 static struct page *kvm_pfn_to_page(kvm_pfn_t pfn)
2684 {
2685 	if (is_error_noslot_pfn(pfn))
2686 		return KVM_ERR_PTR_BAD_PAGE;
2687 
2688 	if (kvm_is_reserved_pfn(pfn)) {
2689 		WARN_ON(1);
2690 		return KVM_ERR_PTR_BAD_PAGE;
2691 	}
2692 
2693 	return pfn_to_page(pfn);
2694 }
2695 
2696 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
2697 {
2698 	kvm_pfn_t pfn;
2699 
2700 	pfn = gfn_to_pfn(kvm, gfn);
2701 
2702 	return kvm_pfn_to_page(pfn);
2703 }
2704 EXPORT_SYMBOL_GPL(gfn_to_page);
2705 
2706 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
2707 {
2708 	if (pfn == 0)
2709 		return;
2710 
2711 	if (dirty)
2712 		kvm_release_pfn_dirty(pfn);
2713 	else
2714 		kvm_release_pfn_clean(pfn);
2715 }
2716 
2717 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
2718 {
2719 	kvm_pfn_t pfn;
2720 	void *hva = NULL;
2721 	struct page *page = KVM_UNMAPPED_PAGE;
2722 
2723 	if (!map)
2724 		return -EINVAL;
2725 
2726 	pfn = gfn_to_pfn(vcpu->kvm, gfn);
2727 	if (is_error_noslot_pfn(pfn))
2728 		return -EINVAL;
2729 
2730 	if (pfn_valid(pfn)) {
2731 		page = pfn_to_page(pfn);
2732 		hva = kmap(page);
2733 #ifdef CONFIG_HAS_IOMEM
2734 	} else {
2735 		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
2736 #endif
2737 	}
2738 
2739 	if (!hva)
2740 		return -EFAULT;
2741 
2742 	map->page = page;
2743 	map->hva = hva;
2744 	map->pfn = pfn;
2745 	map->gfn = gfn;
2746 
2747 	return 0;
2748 }
2749 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
2750 
2751 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
2752 {
2753 	if (!map)
2754 		return;
2755 
2756 	if (!map->hva)
2757 		return;
2758 
2759 	if (map->page != KVM_UNMAPPED_PAGE)
2760 		kunmap(map->page);
2761 #ifdef CONFIG_HAS_IOMEM
2762 	else
2763 		memunmap(map->hva);
2764 #endif
2765 
2766 	if (dirty)
2767 		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
2768 
2769 	kvm_release_pfn(map->pfn, dirty);
2770 
2771 	map->hva = NULL;
2772 	map->page = NULL;
2773 }
2774 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
2775 
2776 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
2777 {
2778 	kvm_pfn_t pfn;
2779 
2780 	pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
2781 
2782 	return kvm_pfn_to_page(pfn);
2783 }
2784 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page);
2785 
2786 void kvm_release_page_clean(struct page *page)
2787 {
2788 	WARN_ON(is_error_page(page));
2789 
2790 	kvm_release_pfn_clean(page_to_pfn(page));
2791 }
2792 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
2793 
2794 void kvm_release_pfn_clean(kvm_pfn_t pfn)
2795 {
2796 	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
2797 		put_page(pfn_to_page(pfn));
2798 }
2799 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
2800 
2801 void kvm_release_page_dirty(struct page *page)
2802 {
2803 	WARN_ON(is_error_page(page));
2804 
2805 	kvm_release_pfn_dirty(page_to_pfn(page));
2806 }
2807 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
2808 
2809 void kvm_release_pfn_dirty(kvm_pfn_t pfn)
2810 {
2811 	kvm_set_pfn_dirty(pfn);
2812 	kvm_release_pfn_clean(pfn);
2813 }
2814 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
2815 
2816 void kvm_set_pfn_dirty(kvm_pfn_t pfn)
2817 {
2818 	if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
2819 		SetPageDirty(pfn_to_page(pfn));
2820 }
2821 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
2822 
2823 void kvm_set_pfn_accessed(kvm_pfn_t pfn)
2824 {
2825 	if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
2826 		mark_page_accessed(pfn_to_page(pfn));
2827 }
2828 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
2829 
2830 static int next_segment(unsigned long len, int offset)
2831 {
2832 	if (len > PAGE_SIZE - offset)
2833 		return PAGE_SIZE - offset;
2834 	else
2835 		return len;
2836 }
2837 
2838 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
2839 				 void *data, int offset, int len)
2840 {
2841 	int r;
2842 	unsigned long addr;
2843 
2844 	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
2845 	if (kvm_is_error_hva(addr))
2846 		return -EFAULT;
2847 	r = __copy_from_user(data, (void __user *)addr + offset, len);
2848 	if (r)
2849 		return -EFAULT;
2850 	return 0;
2851 }
2852 
2853 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
2854 			int len)
2855 {
2856 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2857 
2858 	return __kvm_read_guest_page(slot, gfn, data, offset, len);
2859 }
2860 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
2861 
2862 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
2863 			     int offset, int len)
2864 {
2865 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2866 
2867 	return __kvm_read_guest_page(slot, gfn, data, offset, len);
2868 }
2869 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
2870 
2871 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
2872 {
2873 	gfn_t gfn = gpa >> PAGE_SHIFT;
2874 	int seg;
2875 	int offset = offset_in_page(gpa);
2876 	int ret;
2877 
2878 	while ((seg = next_segment(len, offset)) != 0) {
2879 		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
2880 		if (ret < 0)
2881 			return ret;
2882 		offset = 0;
2883 		len -= seg;
2884 		data += seg;
2885 		++gfn;
2886 	}
2887 	return 0;
2888 }
2889 EXPORT_SYMBOL_GPL(kvm_read_guest);
2890 
2891 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
2892 {
2893 	gfn_t gfn = gpa >> PAGE_SHIFT;
2894 	int seg;
2895 	int offset = offset_in_page(gpa);
2896 	int ret;
2897 
2898 	while ((seg = next_segment(len, offset)) != 0) {
2899 		ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
2900 		if (ret < 0)
2901 			return ret;
2902 		offset = 0;
2903 		len -= seg;
2904 		data += seg;
2905 		++gfn;
2906 	}
2907 	return 0;
2908 }
2909 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
2910 
2911 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
2912 			           void *data, int offset, unsigned long len)
2913 {
2914 	int r;
2915 	unsigned long addr;
2916 
2917 	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
2918 	if (kvm_is_error_hva(addr))
2919 		return -EFAULT;
2920 	pagefault_disable();
2921 	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
2922 	pagefault_enable();
2923 	if (r)
2924 		return -EFAULT;
2925 	return 0;
2926 }
2927 
2928 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
2929 			       void *data, unsigned long len)
2930 {
2931 	gfn_t gfn = gpa >> PAGE_SHIFT;
2932 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2933 	int offset = offset_in_page(gpa);
2934 
2935 	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
2936 }
2937 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
2938 
2939 static int __kvm_write_guest_page(struct kvm *kvm,
2940 				  struct kvm_memory_slot *memslot, gfn_t gfn,
2941 			          const void *data, int offset, int len)
2942 {
2943 	int r;
2944 	unsigned long addr;
2945 
2946 	addr = gfn_to_hva_memslot(memslot, gfn);
2947 	if (kvm_is_error_hva(addr))
2948 		return -EFAULT;
2949 	r = __copy_to_user((void __user *)addr + offset, data, len);
2950 	if (r)
2951 		return -EFAULT;
2952 	mark_page_dirty_in_slot(kvm, memslot, gfn);
2953 	return 0;
2954 }
2955 
2956 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
2957 			 const void *data, int offset, int len)
2958 {
2959 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2960 
2961 	return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
2962 }
2963 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
2964 
2965 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
2966 			      const void *data, int offset, int len)
2967 {
2968 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2969 
2970 	return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
2971 }
2972 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
2973 
2974 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
2975 		    unsigned long len)
2976 {
2977 	gfn_t gfn = gpa >> PAGE_SHIFT;
2978 	int seg;
2979 	int offset = offset_in_page(gpa);
2980 	int ret;
2981 
2982 	while ((seg = next_segment(len, offset)) != 0) {
2983 		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
2984 		if (ret < 0)
2985 			return ret;
2986 		offset = 0;
2987 		len -= seg;
2988 		data += seg;
2989 		++gfn;
2990 	}
2991 	return 0;
2992 }
2993 EXPORT_SYMBOL_GPL(kvm_write_guest);
2994 
2995 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
2996 		         unsigned long len)
2997 {
2998 	gfn_t gfn = gpa >> PAGE_SHIFT;
2999 	int seg;
3000 	int offset = offset_in_page(gpa);
3001 	int ret;
3002 
3003 	while ((seg = next_segment(len, offset)) != 0) {
3004 		ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
3005 		if (ret < 0)
3006 			return ret;
3007 		offset = 0;
3008 		len -= seg;
3009 		data += seg;
3010 		++gfn;
3011 	}
3012 	return 0;
3013 }
3014 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
3015 
3016 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
3017 				       struct gfn_to_hva_cache *ghc,
3018 				       gpa_t gpa, unsigned long len)
3019 {
3020 	int offset = offset_in_page(gpa);
3021 	gfn_t start_gfn = gpa >> PAGE_SHIFT;
3022 	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
3023 	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
3024 	gfn_t nr_pages_avail;
3025 
3026 	/* Update ghc->generation before performing any error checks. */
3027 	ghc->generation = slots->generation;
3028 
3029 	if (start_gfn > end_gfn) {
3030 		ghc->hva = KVM_HVA_ERR_BAD;
3031 		return -EINVAL;
3032 	}
3033 
3034 	/*
3035 	 * If the requested region crosses two memslots, we still
3036 	 * verify that the entire region is valid here.
3037 	 */
3038 	for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
3039 		ghc->memslot = __gfn_to_memslot(slots, start_gfn);
3040 		ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
3041 					   &nr_pages_avail);
3042 		if (kvm_is_error_hva(ghc->hva))
3043 			return -EFAULT;
3044 	}
3045 
3046 	/* Use the slow path for cross page reads and writes. */
3047 	if (nr_pages_needed == 1)
3048 		ghc->hva += offset;
3049 	else
3050 		ghc->memslot = NULL;
3051 
3052 	ghc->gpa = gpa;
3053 	ghc->len = len;
3054 	return 0;
3055 }
3056 
3057 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3058 			      gpa_t gpa, unsigned long len)
3059 {
3060 	struct kvm_memslots *slots = kvm_memslots(kvm);
3061 	return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
3062 }
3063 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
3064 
3065 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3066 				  void *data, unsigned int offset,
3067 				  unsigned long len)
3068 {
3069 	struct kvm_memslots *slots = kvm_memslots(kvm);
3070 	int r;
3071 	gpa_t gpa = ghc->gpa + offset;
3072 
3073 	if (WARN_ON_ONCE(len + offset > ghc->len))
3074 		return -EINVAL;
3075 
3076 	if (slots->generation != ghc->generation) {
3077 		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3078 			return -EFAULT;
3079 	}
3080 
3081 	if (kvm_is_error_hva(ghc->hva))
3082 		return -EFAULT;
3083 
3084 	if (unlikely(!ghc->memslot))
3085 		return kvm_write_guest(kvm, gpa, data, len);
3086 
3087 	r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
3088 	if (r)
3089 		return -EFAULT;
3090 	mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
3091 
3092 	return 0;
3093 }
3094 EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
3095 
3096 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3097 			   void *data, unsigned long len)
3098 {
3099 	return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
3100 }
3101 EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
3102 
3103 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3104 				 void *data, unsigned int offset,
3105 				 unsigned long len)
3106 {
3107 	struct kvm_memslots *slots = kvm_memslots(kvm);
3108 	int r;
3109 	gpa_t gpa = ghc->gpa + offset;
3110 
3111 	if (WARN_ON_ONCE(len + offset > ghc->len))
3112 		return -EINVAL;
3113 
3114 	if (slots->generation != ghc->generation) {
3115 		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3116 			return -EFAULT;
3117 	}
3118 
3119 	if (kvm_is_error_hva(ghc->hva))
3120 		return -EFAULT;
3121 
3122 	if (unlikely(!ghc->memslot))
3123 		return kvm_read_guest(kvm, gpa, data, len);
3124 
3125 	r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
3126 	if (r)
3127 		return -EFAULT;
3128 
3129 	return 0;
3130 }
3131 EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);
3132 
3133 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3134 			  void *data, unsigned long len)
3135 {
3136 	return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
3137 }
3138 EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
3139 
3140 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
3141 {
3142 	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
3143 	gfn_t gfn = gpa >> PAGE_SHIFT;
3144 	int seg;
3145 	int offset = offset_in_page(gpa);
3146 	int ret;
3147 
3148 	while ((seg = next_segment(len, offset)) != 0) {
3149 		ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
3150 		if (ret < 0)
3151 			return ret;
3152 		offset = 0;
3153 		len -= seg;
3154 		++gfn;
3155 	}
3156 	return 0;
3157 }
3158 EXPORT_SYMBOL_GPL(kvm_clear_guest);
3159 
3160 void mark_page_dirty_in_slot(struct kvm *kvm,
3161 			     const struct kvm_memory_slot *memslot,
3162 		 	     gfn_t gfn)
3163 {
3164 	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
3165 
3166 	if (WARN_ON_ONCE(!vcpu) || WARN_ON_ONCE(vcpu->kvm != kvm))
3167 		return;
3168 
3169 	if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
3170 		unsigned long rel_gfn = gfn - memslot->base_gfn;
3171 		u32 slot = (memslot->as_id << 16) | memslot->id;
3172 
3173 		if (kvm->dirty_ring_size)
3174 			kvm_dirty_ring_push(&vcpu->dirty_ring,
3175 					    slot, rel_gfn);
3176 		else
3177 			set_bit_le(rel_gfn, memslot->dirty_bitmap);
3178 	}
3179 }
3180 EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
3181 
3182 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
3183 {
3184 	struct kvm_memory_slot *memslot;
3185 
3186 	memslot = gfn_to_memslot(kvm, gfn);
3187 	mark_page_dirty_in_slot(kvm, memslot, gfn);
3188 }
3189 EXPORT_SYMBOL_GPL(mark_page_dirty);
3190 
3191 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
3192 {
3193 	struct kvm_memory_slot *memslot;
3194 
3195 	memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3196 	mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
3197 }
3198 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
3199 
3200 void kvm_sigset_activate(struct kvm_vcpu *vcpu)
3201 {
3202 	if (!vcpu->sigset_active)
3203 		return;
3204 
3205 	/*
3206 	 * This does a lockless modification of ->real_blocked, which is fine
3207 	 * because, only current can change ->real_blocked and all readers of
3208 	 * ->real_blocked don't care as long ->real_blocked is always a subset
3209 	 * of ->blocked.
3210 	 */
3211 	sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
3212 }
3213 
3214 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
3215 {
3216 	if (!vcpu->sigset_active)
3217 		return;
3218 
3219 	sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
3220 	sigemptyset(&current->real_blocked);
3221 }
3222 
3223 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
3224 {
3225 	unsigned int old, val, grow, grow_start;
3226 
3227 	old = val = vcpu->halt_poll_ns;
3228 	grow_start = READ_ONCE(halt_poll_ns_grow_start);
3229 	grow = READ_ONCE(halt_poll_ns_grow);
3230 	if (!grow)
3231 		goto out;
3232 
3233 	val *= grow;
3234 	if (val < grow_start)
3235 		val = grow_start;
3236 
3237 	if (val > vcpu->kvm->max_halt_poll_ns)
3238 		val = vcpu->kvm->max_halt_poll_ns;
3239 
3240 	vcpu->halt_poll_ns = val;
3241 out:
3242 	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
3243 }
3244 
3245 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
3246 {
3247 	unsigned int old, val, shrink, grow_start;
3248 
3249 	old = val = vcpu->halt_poll_ns;
3250 	shrink = READ_ONCE(halt_poll_ns_shrink);
3251 	grow_start = READ_ONCE(halt_poll_ns_grow_start);
3252 	if (shrink == 0)
3253 		val = 0;
3254 	else
3255 		val /= shrink;
3256 
3257 	if (val < grow_start)
3258 		val = 0;
3259 
3260 	vcpu->halt_poll_ns = val;
3261 	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
3262 }
3263 
3264 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
3265 {
3266 	int ret = -EINTR;
3267 	int idx = srcu_read_lock(&vcpu->kvm->srcu);
3268 
3269 	if (kvm_arch_vcpu_runnable(vcpu)) {
3270 		kvm_make_request(KVM_REQ_UNHALT, vcpu);
3271 		goto out;
3272 	}
3273 	if (kvm_cpu_has_pending_timer(vcpu))
3274 		goto out;
3275 	if (signal_pending(current))
3276 		goto out;
3277 	if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
3278 		goto out;
3279 
3280 	ret = 0;
3281 out:
3282 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
3283 	return ret;
3284 }
3285 
3286 /*
3287  * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is
3288  * pending.  This is mostly used when halting a vCPU, but may also be used
3289  * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
3290  */
3291 bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
3292 {
3293 	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
3294 	bool waited = false;
3295 
3296 	vcpu->stat.generic.blocking = 1;
3297 
3298 	kvm_arch_vcpu_blocking(vcpu);
3299 
3300 	prepare_to_rcuwait(wait);
3301 	for (;;) {
3302 		set_current_state(TASK_INTERRUPTIBLE);
3303 
3304 		if (kvm_vcpu_check_block(vcpu) < 0)
3305 			break;
3306 
3307 		waited = true;
3308 		schedule();
3309 	}
3310 	finish_rcuwait(wait);
3311 
3312 	kvm_arch_vcpu_unblocking(vcpu);
3313 
3314 	vcpu->stat.generic.blocking = 0;
3315 
3316 	return waited;
3317 }
3318 
3319 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
3320 					  ktime_t end, bool success)
3321 {
3322 	struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic;
3323 	u64 poll_ns = ktime_to_ns(ktime_sub(end, start));
3324 
3325 	++vcpu->stat.generic.halt_attempted_poll;
3326 
3327 	if (success) {
3328 		++vcpu->stat.generic.halt_successful_poll;
3329 
3330 		if (!vcpu_valid_wakeup(vcpu))
3331 			++vcpu->stat.generic.halt_poll_invalid;
3332 
3333 		stats->halt_poll_success_ns += poll_ns;
3334 		KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns);
3335 	} else {
3336 		stats->halt_poll_fail_ns += poll_ns;
3337 		KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns);
3338 	}
3339 }
3340 
3341 /*
3342  * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc...  If halt
3343  * polling is enabled, busy wait for a short time before blocking to avoid the
3344  * expensive block+unblock sequence if a wake event arrives soon after the vCPU
3345  * is halted.
3346  */
3347 void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
3348 {
3349 	bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
3350 	bool do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
3351 	ktime_t start, cur, poll_end;
3352 	bool waited = false;
3353 	u64 halt_ns;
3354 
3355 	start = cur = poll_end = ktime_get();
3356 	if (do_halt_poll) {
3357 		ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns);
3358 
3359 		do {
3360 			/*
3361 			 * This sets KVM_REQ_UNHALT if an interrupt
3362 			 * arrives.
3363 			 */
3364 			if (kvm_vcpu_check_block(vcpu) < 0)
3365 				goto out;
3366 			cpu_relax();
3367 			poll_end = cur = ktime_get();
3368 		} while (kvm_vcpu_can_poll(cur, stop));
3369 	}
3370 
3371 	waited = kvm_vcpu_block(vcpu);
3372 
3373 	cur = ktime_get();
3374 	if (waited) {
3375 		vcpu->stat.generic.halt_wait_ns +=
3376 			ktime_to_ns(cur) - ktime_to_ns(poll_end);
3377 		KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist,
3378 				ktime_to_ns(cur) - ktime_to_ns(poll_end));
3379 	}
3380 out:
3381 	/* The total time the vCPU was "halted", including polling time. */
3382 	halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
3383 
3384 	/*
3385 	 * Note, halt-polling is considered successful so long as the vCPU was
3386 	 * never actually scheduled out, i.e. even if the wake event arrived
3387 	 * after of the halt-polling loop itself, but before the full wait.
3388 	 */
3389 	if (do_halt_poll)
3390 		update_halt_poll_stats(vcpu, start, poll_end, !waited);
3391 
3392 	if (halt_poll_allowed) {
3393 		if (!vcpu_valid_wakeup(vcpu)) {
3394 			shrink_halt_poll_ns(vcpu);
3395 		} else if (vcpu->kvm->max_halt_poll_ns) {
3396 			if (halt_ns <= vcpu->halt_poll_ns)
3397 				;
3398 			/* we had a long block, shrink polling */
3399 			else if (vcpu->halt_poll_ns &&
3400 				 halt_ns > vcpu->kvm->max_halt_poll_ns)
3401 				shrink_halt_poll_ns(vcpu);
3402 			/* we had a short halt and our poll time is too small */
3403 			else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns &&
3404 				 halt_ns < vcpu->kvm->max_halt_poll_ns)
3405 				grow_halt_poll_ns(vcpu);
3406 		} else {
3407 			vcpu->halt_poll_ns = 0;
3408 		}
3409 	}
3410 
3411 	trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
3412 }
3413 EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
3414 
3415 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
3416 {
3417 	if (__kvm_vcpu_wake_up(vcpu)) {
3418 		WRITE_ONCE(vcpu->ready, true);
3419 		++vcpu->stat.generic.halt_wakeup;
3420 		return true;
3421 	}
3422 
3423 	return false;
3424 }
3425 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
3426 
3427 #ifndef CONFIG_S390
3428 /*
3429  * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
3430  */
3431 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
3432 {
3433 	int me, cpu;
3434 
3435 	if (kvm_vcpu_wake_up(vcpu))
3436 		return;
3437 
3438 	me = get_cpu();
3439 	/*
3440 	 * The only state change done outside the vcpu mutex is IN_GUEST_MODE
3441 	 * to EXITING_GUEST_MODE.  Therefore the moderately expensive "should
3442 	 * kick" check does not need atomic operations if kvm_vcpu_kick is used
3443 	 * within the vCPU thread itself.
3444 	 */
3445 	if (vcpu == __this_cpu_read(kvm_running_vcpu)) {
3446 		if (vcpu->mode == IN_GUEST_MODE)
3447 			WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE);
3448 		goto out;
3449 	}
3450 
3451 	/*
3452 	 * Note, the vCPU could get migrated to a different pCPU at any point
3453 	 * after kvm_arch_vcpu_should_kick(), which could result in sending an
3454 	 * IPI to the previous pCPU.  But, that's ok because the purpose of the
3455 	 * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
3456 	 * vCPU also requires it to leave IN_GUEST_MODE.
3457 	 */
3458 	if (kvm_arch_vcpu_should_kick(vcpu)) {
3459 		cpu = READ_ONCE(vcpu->cpu);
3460 		if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
3461 			smp_send_reschedule(cpu);
3462 	}
3463 out:
3464 	put_cpu();
3465 }
3466 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
3467 #endif /* !CONFIG_S390 */
3468 
3469 int kvm_vcpu_yield_to(struct kvm_vcpu *target)
3470 {
3471 	struct pid *pid;
3472 	struct task_struct *task = NULL;
3473 	int ret = 0;
3474 
3475 	rcu_read_lock();
3476 	pid = rcu_dereference(target->pid);
3477 	if (pid)
3478 		task = get_pid_task(pid, PIDTYPE_PID);
3479 	rcu_read_unlock();
3480 	if (!task)
3481 		return ret;
3482 	ret = yield_to(task, 1);
3483 	put_task_struct(task);
3484 
3485 	return ret;
3486 }
3487 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
3488 
3489 /*
3490  * Helper that checks whether a VCPU is eligible for directed yield.
3491  * Most eligible candidate to yield is decided by following heuristics:
3492  *
3493  *  (a) VCPU which has not done pl-exit or cpu relax intercepted recently
3494  *  (preempted lock holder), indicated by @in_spin_loop.
3495  *  Set at the beginning and cleared at the end of interception/PLE handler.
3496  *
3497  *  (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get
3498  *  chance last time (mostly it has become eligible now since we have probably
3499  *  yielded to lockholder in last iteration. This is done by toggling
3500  *  @dy_eligible each time a VCPU checked for eligibility.)
3501  *
3502  *  Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
3503  *  to preempted lock-holder could result in wrong VCPU selection and CPU
3504  *  burning. Giving priority for a potential lock-holder increases lock
3505  *  progress.
3506  *
3507  *  Since algorithm is based on heuristics, accessing another VCPU data without
3508  *  locking does not harm. It may result in trying to yield to  same VCPU, fail
3509  *  and continue with next VCPU and so on.
3510  */
3511 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
3512 {
3513 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
3514 	bool eligible;
3515 
3516 	eligible = !vcpu->spin_loop.in_spin_loop ||
3517 		    vcpu->spin_loop.dy_eligible;
3518 
3519 	if (vcpu->spin_loop.in_spin_loop)
3520 		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
3521 
3522 	return eligible;
3523 #else
3524 	return true;
3525 #endif
3526 }
3527 
3528 /*
3529  * Unlike kvm_arch_vcpu_runnable, this function is called outside
3530  * a vcpu_load/vcpu_put pair.  However, for most architectures
3531  * kvm_arch_vcpu_runnable does not require vcpu_load.
3532  */
3533 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
3534 {
3535 	return kvm_arch_vcpu_runnable(vcpu);
3536 }
3537 
3538 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
3539 {
3540 	if (kvm_arch_dy_runnable(vcpu))
3541 		return true;
3542 
3543 #ifdef CONFIG_KVM_ASYNC_PF
3544 	if (!list_empty_careful(&vcpu->async_pf.done))
3545 		return true;
3546 #endif
3547 
3548 	return false;
3549 }
3550 
3551 bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
3552 {
3553 	return false;
3554 }
3555 
3556 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
3557 {
3558 	struct kvm *kvm = me->kvm;
3559 	struct kvm_vcpu *vcpu;
3560 	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
3561 	unsigned long i;
3562 	int yielded = 0;
3563 	int try = 3;
3564 	int pass;
3565 
3566 	kvm_vcpu_set_in_spin_loop(me, true);
3567 	/*
3568 	 * We boost the priority of a VCPU that is runnable but not
3569 	 * currently running, because it got preempted by something
3570 	 * else and called schedule in __vcpu_run.  Hopefully that
3571 	 * VCPU is holding the lock that we need and will release it.
3572 	 * We approximate round-robin by starting at the last boosted VCPU.
3573 	 */
3574 	for (pass = 0; pass < 2 && !yielded && try; pass++) {
3575 		kvm_for_each_vcpu(i, vcpu, kvm) {
3576 			if (!pass && i <= last_boosted_vcpu) {
3577 				i = last_boosted_vcpu;
3578 				continue;
3579 			} else if (pass && i > last_boosted_vcpu)
3580 				break;
3581 			if (!READ_ONCE(vcpu->ready))
3582 				continue;
3583 			if (vcpu == me)
3584 				continue;
3585 			if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu))
3586 				continue;
3587 			if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
3588 			    !kvm_arch_dy_has_pending_interrupt(vcpu) &&
3589 			    !kvm_arch_vcpu_in_kernel(vcpu))
3590 				continue;
3591 			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
3592 				continue;
3593 
3594 			yielded = kvm_vcpu_yield_to(vcpu);
3595 			if (yielded > 0) {
3596 				kvm->last_boosted_vcpu = i;
3597 				break;
3598 			} else if (yielded < 0) {
3599 				try--;
3600 				if (!try)
3601 					break;
3602 			}
3603 		}
3604 	}
3605 	kvm_vcpu_set_in_spin_loop(me, false);
3606 
3607 	/* Ensure vcpu is not eligible during next spinloop */
3608 	kvm_vcpu_set_dy_eligible(me, false);
3609 }
3610 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
3611 
3612 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
3613 {
3614 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
3615 	return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
3616 	    (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
3617 	     kvm->dirty_ring_size / PAGE_SIZE);
3618 #else
3619 	return false;
3620 #endif
3621 }
3622 
3623 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
3624 {
3625 	struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
3626 	struct page *page;
3627 
3628 	if (vmf->pgoff == 0)
3629 		page = virt_to_page(vcpu->run);
3630 #ifdef CONFIG_X86
3631 	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
3632 		page = virt_to_page(vcpu->arch.pio_data);
3633 #endif
3634 #ifdef CONFIG_KVM_MMIO
3635 	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
3636 		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
3637 #endif
3638 	else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
3639 		page = kvm_dirty_ring_get_page(
3640 		    &vcpu->dirty_ring,
3641 		    vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
3642 	else
3643 		return kvm_arch_vcpu_fault(vcpu, vmf);
3644 	get_page(page);
3645 	vmf->page = page;
3646 	return 0;
3647 }
3648 
3649 static const struct vm_operations_struct kvm_vcpu_vm_ops = {
3650 	.fault = kvm_vcpu_fault,
3651 };
3652 
3653 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
3654 {
3655 	struct kvm_vcpu *vcpu = file->private_data;
3656 	unsigned long pages = vma_pages(vma);
3657 
3658 	if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) ||
3659 	     kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) &&
3660 	    ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED)))
3661 		return -EINVAL;
3662 
3663 	vma->vm_ops = &kvm_vcpu_vm_ops;
3664 	return 0;
3665 }
3666 
3667 static int kvm_vcpu_release(struct inode *inode, struct file *filp)
3668 {
3669 	struct kvm_vcpu *vcpu = filp->private_data;
3670 
3671 	kvm_put_kvm(vcpu->kvm);
3672 	return 0;
3673 }
3674 
3675 static struct file_operations kvm_vcpu_fops = {
3676 	.release        = kvm_vcpu_release,
3677 	.unlocked_ioctl = kvm_vcpu_ioctl,
3678 	.mmap           = kvm_vcpu_mmap,
3679 	.llseek		= noop_llseek,
3680 	KVM_COMPAT(kvm_vcpu_compat_ioctl),
3681 };
3682 
3683 /*
3684  * Allocates an inode for the vcpu.
3685  */
3686 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
3687 {
3688 	char name[8 + 1 + ITOA_MAX_LEN + 1];
3689 
3690 	snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
3691 	return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
3692 }
3693 
3694 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
3695 {
3696 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
3697 	struct dentry *debugfs_dentry;
3698 	char dir_name[ITOA_MAX_LEN * 2];
3699 
3700 	if (!debugfs_initialized())
3701 		return;
3702 
3703 	snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
3704 	debugfs_dentry = debugfs_create_dir(dir_name,
3705 					    vcpu->kvm->debugfs_dentry);
3706 
3707 	kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
3708 #endif
3709 }
3710 
3711 /*
3712  * Creates some virtual cpus.  Good luck creating more than one.
3713  */
3714 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
3715 {
3716 	int r;
3717 	struct kvm_vcpu *vcpu;
3718 	struct page *page;
3719 
3720 	if (id >= KVM_MAX_VCPU_IDS)
3721 		return -EINVAL;
3722 
3723 	mutex_lock(&kvm->lock);
3724 	if (kvm->created_vcpus == KVM_MAX_VCPUS) {
3725 		mutex_unlock(&kvm->lock);
3726 		return -EINVAL;
3727 	}
3728 
3729 	kvm->created_vcpus++;
3730 	mutex_unlock(&kvm->lock);
3731 
3732 	r = kvm_arch_vcpu_precreate(kvm, id);
3733 	if (r)
3734 		goto vcpu_decrement;
3735 
3736 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
3737 	if (!vcpu) {
3738 		r = -ENOMEM;
3739 		goto vcpu_decrement;
3740 	}
3741 
3742 	BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
3743 	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
3744 	if (!page) {
3745 		r = -ENOMEM;
3746 		goto vcpu_free;
3747 	}
3748 	vcpu->run = page_address(page);
3749 
3750 	kvm_vcpu_init(vcpu, kvm, id);
3751 
3752 	r = kvm_arch_vcpu_create(vcpu);
3753 	if (r)
3754 		goto vcpu_free_run_page;
3755 
3756 	if (kvm->dirty_ring_size) {
3757 		r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
3758 					 id, kvm->dirty_ring_size);
3759 		if (r)
3760 			goto arch_vcpu_destroy;
3761 	}
3762 
3763 	mutex_lock(&kvm->lock);
3764 	if (kvm_get_vcpu_by_id(kvm, id)) {
3765 		r = -EEXIST;
3766 		goto unlock_vcpu_destroy;
3767 	}
3768 
3769 	vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
3770 	r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT);
3771 	BUG_ON(r == -EBUSY);
3772 	if (r)
3773 		goto unlock_vcpu_destroy;
3774 
3775 	/* Fill the stats id string for the vcpu */
3776 	snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
3777 		 task_pid_nr(current), id);
3778 
3779 	/* Now it's all set up, let userspace reach it */
3780 	kvm_get_kvm(kvm);
3781 	r = create_vcpu_fd(vcpu);
3782 	if (r < 0) {
3783 		xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx);
3784 		kvm_put_kvm_no_destroy(kvm);
3785 		goto unlock_vcpu_destroy;
3786 	}
3787 
3788 	/*
3789 	 * Pairs with smp_rmb() in kvm_get_vcpu.  Store the vcpu
3790 	 * pointer before kvm->online_vcpu's incremented value.
3791 	 */
3792 	smp_wmb();
3793 	atomic_inc(&kvm->online_vcpus);
3794 
3795 	mutex_unlock(&kvm->lock);
3796 	kvm_arch_vcpu_postcreate(vcpu);
3797 	kvm_create_vcpu_debugfs(vcpu);
3798 	return r;
3799 
3800 unlock_vcpu_destroy:
3801 	mutex_unlock(&kvm->lock);
3802 	kvm_dirty_ring_free(&vcpu->dirty_ring);
3803 arch_vcpu_destroy:
3804 	kvm_arch_vcpu_destroy(vcpu);
3805 vcpu_free_run_page:
3806 	free_page((unsigned long)vcpu->run);
3807 vcpu_free:
3808 	kmem_cache_free(kvm_vcpu_cache, vcpu);
3809 vcpu_decrement:
3810 	mutex_lock(&kvm->lock);
3811 	kvm->created_vcpus--;
3812 	mutex_unlock(&kvm->lock);
3813 	return r;
3814 }
3815 
3816 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
3817 {
3818 	if (sigset) {
3819 		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
3820 		vcpu->sigset_active = 1;
3821 		vcpu->sigset = *sigset;
3822 	} else
3823 		vcpu->sigset_active = 0;
3824 	return 0;
3825 }
3826 
3827 static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
3828 			      size_t size, loff_t *offset)
3829 {
3830 	struct kvm_vcpu *vcpu = file->private_data;
3831 
3832 	return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header,
3833 			&kvm_vcpu_stats_desc[0], &vcpu->stat,
3834 			sizeof(vcpu->stat), user_buffer, size, offset);
3835 }
3836 
3837 static const struct file_operations kvm_vcpu_stats_fops = {
3838 	.read = kvm_vcpu_stats_read,
3839 	.llseek = noop_llseek,
3840 };
3841 
3842 static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
3843 {
3844 	int fd;
3845 	struct file *file;
3846 	char name[15 + ITOA_MAX_LEN + 1];
3847 
3848 	snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id);
3849 
3850 	fd = get_unused_fd_flags(O_CLOEXEC);
3851 	if (fd < 0)
3852 		return fd;
3853 
3854 	file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY);
3855 	if (IS_ERR(file)) {
3856 		put_unused_fd(fd);
3857 		return PTR_ERR(file);
3858 	}
3859 	file->f_mode |= FMODE_PREAD;
3860 	fd_install(fd, file);
3861 
3862 	return fd;
3863 }
3864 
3865 static long kvm_vcpu_ioctl(struct file *filp,
3866 			   unsigned int ioctl, unsigned long arg)
3867 {
3868 	struct kvm_vcpu *vcpu = filp->private_data;
3869 	void __user *argp = (void __user *)arg;
3870 	int r;
3871 	struct kvm_fpu *fpu = NULL;
3872 	struct kvm_sregs *kvm_sregs = NULL;
3873 
3874 	if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
3875 		return -EIO;
3876 
3877 	if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
3878 		return -EINVAL;
3879 
3880 	/*
3881 	 * Some architectures have vcpu ioctls that are asynchronous to vcpu
3882 	 * execution; mutex_lock() would break them.
3883 	 */
3884 	r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
3885 	if (r != -ENOIOCTLCMD)
3886 		return r;
3887 
3888 	if (mutex_lock_killable(&vcpu->mutex))
3889 		return -EINTR;
3890 	switch (ioctl) {
3891 	case KVM_RUN: {
3892 		struct pid *oldpid;
3893 		r = -EINVAL;
3894 		if (arg)
3895 			goto out;
3896 		oldpid = rcu_access_pointer(vcpu->pid);
3897 		if (unlikely(oldpid != task_pid(current))) {
3898 			/* The thread running this VCPU changed. */
3899 			struct pid *newpid;
3900 
3901 			r = kvm_arch_vcpu_run_pid_change(vcpu);
3902 			if (r)
3903 				break;
3904 
3905 			newpid = get_task_pid(current, PIDTYPE_PID);
3906 			rcu_assign_pointer(vcpu->pid, newpid);
3907 			if (oldpid)
3908 				synchronize_rcu();
3909 			put_pid(oldpid);
3910 		}
3911 		r = kvm_arch_vcpu_ioctl_run(vcpu);
3912 		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
3913 		break;
3914 	}
3915 	case KVM_GET_REGS: {
3916 		struct kvm_regs *kvm_regs;
3917 
3918 		r = -ENOMEM;
3919 		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
3920 		if (!kvm_regs)
3921 			goto out;
3922 		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
3923 		if (r)
3924 			goto out_free1;
3925 		r = -EFAULT;
3926 		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
3927 			goto out_free1;
3928 		r = 0;
3929 out_free1:
3930 		kfree(kvm_regs);
3931 		break;
3932 	}
3933 	case KVM_SET_REGS: {
3934 		struct kvm_regs *kvm_regs;
3935 
3936 		kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
3937 		if (IS_ERR(kvm_regs)) {
3938 			r = PTR_ERR(kvm_regs);
3939 			goto out;
3940 		}
3941 		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
3942 		kfree(kvm_regs);
3943 		break;
3944 	}
3945 	case KVM_GET_SREGS: {
3946 		kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
3947 				    GFP_KERNEL_ACCOUNT);
3948 		r = -ENOMEM;
3949 		if (!kvm_sregs)
3950 			goto out;
3951 		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
3952 		if (r)
3953 			goto out;
3954 		r = -EFAULT;
3955 		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
3956 			goto out;
3957 		r = 0;
3958 		break;
3959 	}
3960 	case KVM_SET_SREGS: {
3961 		kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
3962 		if (IS_ERR(kvm_sregs)) {
3963 			r = PTR_ERR(kvm_sregs);
3964 			kvm_sregs = NULL;
3965 			goto out;
3966 		}
3967 		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
3968 		break;
3969 	}
3970 	case KVM_GET_MP_STATE: {
3971 		struct kvm_mp_state mp_state;
3972 
3973 		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
3974 		if (r)
3975 			goto out;
3976 		r = -EFAULT;
3977 		if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
3978 			goto out;
3979 		r = 0;
3980 		break;
3981 	}
3982 	case KVM_SET_MP_STATE: {
3983 		struct kvm_mp_state mp_state;
3984 
3985 		r = -EFAULT;
3986 		if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
3987 			goto out;
3988 		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
3989 		break;
3990 	}
3991 	case KVM_TRANSLATE: {
3992 		struct kvm_translation tr;
3993 
3994 		r = -EFAULT;
3995 		if (copy_from_user(&tr, argp, sizeof(tr)))
3996 			goto out;
3997 		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
3998 		if (r)
3999 			goto out;
4000 		r = -EFAULT;
4001 		if (copy_to_user(argp, &tr, sizeof(tr)))
4002 			goto out;
4003 		r = 0;
4004 		break;
4005 	}
4006 	case KVM_SET_GUEST_DEBUG: {
4007 		struct kvm_guest_debug dbg;
4008 
4009 		r = -EFAULT;
4010 		if (copy_from_user(&dbg, argp, sizeof(dbg)))
4011 			goto out;
4012 		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
4013 		break;
4014 	}
4015 	case KVM_SET_SIGNAL_MASK: {
4016 		struct kvm_signal_mask __user *sigmask_arg = argp;
4017 		struct kvm_signal_mask kvm_sigmask;
4018 		sigset_t sigset, *p;
4019 
4020 		p = NULL;
4021 		if (argp) {
4022 			r = -EFAULT;
4023 			if (copy_from_user(&kvm_sigmask, argp,
4024 					   sizeof(kvm_sigmask)))
4025 				goto out;
4026 			r = -EINVAL;
4027 			if (kvm_sigmask.len != sizeof(sigset))
4028 				goto out;
4029 			r = -EFAULT;
4030 			if (copy_from_user(&sigset, sigmask_arg->sigset,
4031 					   sizeof(sigset)))
4032 				goto out;
4033 			p = &sigset;
4034 		}
4035 		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
4036 		break;
4037 	}
4038 	case KVM_GET_FPU: {
4039 		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
4040 		r = -ENOMEM;
4041 		if (!fpu)
4042 			goto out;
4043 		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
4044 		if (r)
4045 			goto out;
4046 		r = -EFAULT;
4047 		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
4048 			goto out;
4049 		r = 0;
4050 		break;
4051 	}
4052 	case KVM_SET_FPU: {
4053 		fpu = memdup_user(argp, sizeof(*fpu));
4054 		if (IS_ERR(fpu)) {
4055 			r = PTR_ERR(fpu);
4056 			fpu = NULL;
4057 			goto out;
4058 		}
4059 		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
4060 		break;
4061 	}
4062 	case KVM_GET_STATS_FD: {
4063 		r = kvm_vcpu_ioctl_get_stats_fd(vcpu);
4064 		break;
4065 	}
4066 	default:
4067 		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
4068 	}
4069 out:
4070 	mutex_unlock(&vcpu->mutex);
4071 	kfree(fpu);
4072 	kfree(kvm_sregs);
4073 	return r;
4074 }
4075 
4076 #ifdef CONFIG_KVM_COMPAT
4077 static long kvm_vcpu_compat_ioctl(struct file *filp,
4078 				  unsigned int ioctl, unsigned long arg)
4079 {
4080 	struct kvm_vcpu *vcpu = filp->private_data;
4081 	void __user *argp = compat_ptr(arg);
4082 	int r;
4083 
4084 	if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4085 		return -EIO;
4086 
4087 	switch (ioctl) {
4088 	case KVM_SET_SIGNAL_MASK: {
4089 		struct kvm_signal_mask __user *sigmask_arg = argp;
4090 		struct kvm_signal_mask kvm_sigmask;
4091 		sigset_t sigset;
4092 
4093 		if (argp) {
4094 			r = -EFAULT;
4095 			if (copy_from_user(&kvm_sigmask, argp,
4096 					   sizeof(kvm_sigmask)))
4097 				goto out;
4098 			r = -EINVAL;
4099 			if (kvm_sigmask.len != sizeof(compat_sigset_t))
4100 				goto out;
4101 			r = -EFAULT;
4102 			if (get_compat_sigset(&sigset,
4103 					      (compat_sigset_t __user *)sigmask_arg->sigset))
4104 				goto out;
4105 			r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
4106 		} else
4107 			r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
4108 		break;
4109 	}
4110 	default:
4111 		r = kvm_vcpu_ioctl(filp, ioctl, arg);
4112 	}
4113 
4114 out:
4115 	return r;
4116 }
4117 #endif
4118 
4119 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
4120 {
4121 	struct kvm_device *dev = filp->private_data;
4122 
4123 	if (dev->ops->mmap)
4124 		return dev->ops->mmap(dev, vma);
4125 
4126 	return -ENODEV;
4127 }
4128 
4129 static int kvm_device_ioctl_attr(struct kvm_device *dev,
4130 				 int (*accessor)(struct kvm_device *dev,
4131 						 struct kvm_device_attr *attr),
4132 				 unsigned long arg)
4133 {
4134 	struct kvm_device_attr attr;
4135 
4136 	if (!accessor)
4137 		return -EPERM;
4138 
4139 	if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
4140 		return -EFAULT;
4141 
4142 	return accessor(dev, &attr);
4143 }
4144 
4145 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
4146 			     unsigned long arg)
4147 {
4148 	struct kvm_device *dev = filp->private_data;
4149 
4150 	if (dev->kvm->mm != current->mm || dev->kvm->vm_dead)
4151 		return -EIO;
4152 
4153 	switch (ioctl) {
4154 	case KVM_SET_DEVICE_ATTR:
4155 		return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
4156 	case KVM_GET_DEVICE_ATTR:
4157 		return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
4158 	case KVM_HAS_DEVICE_ATTR:
4159 		return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
4160 	default:
4161 		if (dev->ops->ioctl)
4162 			return dev->ops->ioctl(dev, ioctl, arg);
4163 
4164 		return -ENOTTY;
4165 	}
4166 }
4167 
4168 static int kvm_device_release(struct inode *inode, struct file *filp)
4169 {
4170 	struct kvm_device *dev = filp->private_data;
4171 	struct kvm *kvm = dev->kvm;
4172 
4173 	if (dev->ops->release) {
4174 		mutex_lock(&kvm->lock);
4175 		list_del(&dev->vm_node);
4176 		dev->ops->release(dev);
4177 		mutex_unlock(&kvm->lock);
4178 	}
4179 
4180 	kvm_put_kvm(kvm);
4181 	return 0;
4182 }
4183 
4184 static const struct file_operations kvm_device_fops = {
4185 	.unlocked_ioctl = kvm_device_ioctl,
4186 	.release = kvm_device_release,
4187 	KVM_COMPAT(kvm_device_ioctl),
4188 	.mmap = kvm_device_mmap,
4189 };
4190 
4191 struct kvm_device *kvm_device_from_filp(struct file *filp)
4192 {
4193 	if (filp->f_op != &kvm_device_fops)
4194 		return NULL;
4195 
4196 	return filp->private_data;
4197 }
4198 
4199 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
4200 #ifdef CONFIG_KVM_MPIC
4201 	[KVM_DEV_TYPE_FSL_MPIC_20]	= &kvm_mpic_ops,
4202 	[KVM_DEV_TYPE_FSL_MPIC_42]	= &kvm_mpic_ops,
4203 #endif
4204 };
4205 
4206 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
4207 {
4208 	if (type >= ARRAY_SIZE(kvm_device_ops_table))
4209 		return -ENOSPC;
4210 
4211 	if (kvm_device_ops_table[type] != NULL)
4212 		return -EEXIST;
4213 
4214 	kvm_device_ops_table[type] = ops;
4215 	return 0;
4216 }
4217 
4218 void kvm_unregister_device_ops(u32 type)
4219 {
4220 	if (kvm_device_ops_table[type] != NULL)
4221 		kvm_device_ops_table[type] = NULL;
4222 }
4223 
4224 static int kvm_ioctl_create_device(struct kvm *kvm,
4225 				   struct kvm_create_device *cd)
4226 {
4227 	const struct kvm_device_ops *ops = NULL;
4228 	struct kvm_device *dev;
4229 	bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
4230 	int type;
4231 	int ret;
4232 
4233 	if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
4234 		return -ENODEV;
4235 
4236 	type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
4237 	ops = kvm_device_ops_table[type];
4238 	if (ops == NULL)
4239 		return -ENODEV;
4240 
4241 	if (test)
4242 		return 0;
4243 
4244 	dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
4245 	if (!dev)
4246 		return -ENOMEM;
4247 
4248 	dev->ops = ops;
4249 	dev->kvm = kvm;
4250 
4251 	mutex_lock(&kvm->lock);
4252 	ret = ops->create(dev, type);
4253 	if (ret < 0) {
4254 		mutex_unlock(&kvm->lock);
4255 		kfree(dev);
4256 		return ret;
4257 	}
4258 	list_add(&dev->vm_node, &kvm->devices);
4259 	mutex_unlock(&kvm->lock);
4260 
4261 	if (ops->init)
4262 		ops->init(dev);
4263 
4264 	kvm_get_kvm(kvm);
4265 	ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
4266 	if (ret < 0) {
4267 		kvm_put_kvm_no_destroy(kvm);
4268 		mutex_lock(&kvm->lock);
4269 		list_del(&dev->vm_node);
4270 		mutex_unlock(&kvm->lock);
4271 		ops->destroy(dev);
4272 		return ret;
4273 	}
4274 
4275 	cd->fd = ret;
4276 	return 0;
4277 }
4278 
4279 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
4280 {
4281 	switch (arg) {
4282 	case KVM_CAP_USER_MEMORY:
4283 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
4284 	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
4285 	case KVM_CAP_INTERNAL_ERROR_DATA:
4286 #ifdef CONFIG_HAVE_KVM_MSI
4287 	case KVM_CAP_SIGNAL_MSI:
4288 #endif
4289 #ifdef CONFIG_HAVE_KVM_IRQFD
4290 	case KVM_CAP_IRQFD:
4291 	case KVM_CAP_IRQFD_RESAMPLE:
4292 #endif
4293 	case KVM_CAP_IOEVENTFD_ANY_LENGTH:
4294 	case KVM_CAP_CHECK_EXTENSION_VM:
4295 	case KVM_CAP_ENABLE_CAP_VM:
4296 	case KVM_CAP_HALT_POLL:
4297 		return 1;
4298 #ifdef CONFIG_KVM_MMIO
4299 	case KVM_CAP_COALESCED_MMIO:
4300 		return KVM_COALESCED_MMIO_PAGE_OFFSET;
4301 	case KVM_CAP_COALESCED_PIO:
4302 		return 1;
4303 #endif
4304 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4305 	case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
4306 		return KVM_DIRTY_LOG_MANUAL_CAPS;
4307 #endif
4308 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4309 	case KVM_CAP_IRQ_ROUTING:
4310 		return KVM_MAX_IRQ_ROUTES;
4311 #endif
4312 #if KVM_ADDRESS_SPACE_NUM > 1
4313 	case KVM_CAP_MULTI_ADDRESS_SPACE:
4314 		return KVM_ADDRESS_SPACE_NUM;
4315 #endif
4316 	case KVM_CAP_NR_MEMSLOTS:
4317 		return KVM_USER_MEM_SLOTS;
4318 	case KVM_CAP_DIRTY_LOG_RING:
4319 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
4320 		return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4321 #else
4322 		return 0;
4323 #endif
4324 	case KVM_CAP_BINARY_STATS_FD:
4325 		return 1;
4326 	default:
4327 		break;
4328 	}
4329 	return kvm_vm_ioctl_check_extension(kvm, arg);
4330 }
4331 
4332 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
4333 {
4334 	int r;
4335 
4336 	if (!KVM_DIRTY_LOG_PAGE_OFFSET)
4337 		return -EINVAL;
4338 
4339 	/* the size should be power of 2 */
4340 	if (!size || (size & (size - 1)))
4341 		return -EINVAL;
4342 
4343 	/* Should be bigger to keep the reserved entries, or a page */
4344 	if (size < kvm_dirty_ring_get_rsvd_entries() *
4345 	    sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
4346 		return -EINVAL;
4347 
4348 	if (size > KVM_DIRTY_RING_MAX_ENTRIES *
4349 	    sizeof(struct kvm_dirty_gfn))
4350 		return -E2BIG;
4351 
4352 	/* We only allow it to set once */
4353 	if (kvm->dirty_ring_size)
4354 		return -EINVAL;
4355 
4356 	mutex_lock(&kvm->lock);
4357 
4358 	if (kvm->created_vcpus) {
4359 		/* We don't allow to change this value after vcpu created */
4360 		r = -EINVAL;
4361 	} else {
4362 		kvm->dirty_ring_size = size;
4363 		r = 0;
4364 	}
4365 
4366 	mutex_unlock(&kvm->lock);
4367 	return r;
4368 }
4369 
4370 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
4371 {
4372 	unsigned long i;
4373 	struct kvm_vcpu *vcpu;
4374 	int cleared = 0;
4375 
4376 	if (!kvm->dirty_ring_size)
4377 		return -EINVAL;
4378 
4379 	mutex_lock(&kvm->slots_lock);
4380 
4381 	kvm_for_each_vcpu(i, vcpu, kvm)
4382 		cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
4383 
4384 	mutex_unlock(&kvm->slots_lock);
4385 
4386 	if (cleared)
4387 		kvm_flush_remote_tlbs(kvm);
4388 
4389 	return cleared;
4390 }
4391 
4392 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
4393 						  struct kvm_enable_cap *cap)
4394 {
4395 	return -EINVAL;
4396 }
4397 
4398 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
4399 					   struct kvm_enable_cap *cap)
4400 {
4401 	switch (cap->cap) {
4402 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4403 	case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
4404 		u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
4405 
4406 		if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
4407 			allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
4408 
4409 		if (cap->flags || (cap->args[0] & ~allowed_options))
4410 			return -EINVAL;
4411 		kvm->manual_dirty_log_protect = cap->args[0];
4412 		return 0;
4413 	}
4414 #endif
4415 	case KVM_CAP_HALT_POLL: {
4416 		if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
4417 			return -EINVAL;
4418 
4419 		kvm->max_halt_poll_ns = cap->args[0];
4420 		return 0;
4421 	}
4422 	case KVM_CAP_DIRTY_LOG_RING:
4423 		return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
4424 	default:
4425 		return kvm_vm_ioctl_enable_cap(kvm, cap);
4426 	}
4427 }
4428 
4429 static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
4430 			      size_t size, loff_t *offset)
4431 {
4432 	struct kvm *kvm = file->private_data;
4433 
4434 	return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header,
4435 				&kvm_vm_stats_desc[0], &kvm->stat,
4436 				sizeof(kvm->stat), user_buffer, size, offset);
4437 }
4438 
4439 static const struct file_operations kvm_vm_stats_fops = {
4440 	.read = kvm_vm_stats_read,
4441 	.llseek = noop_llseek,
4442 };
4443 
4444 static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
4445 {
4446 	int fd;
4447 	struct file *file;
4448 
4449 	fd = get_unused_fd_flags(O_CLOEXEC);
4450 	if (fd < 0)
4451 		return fd;
4452 
4453 	file = anon_inode_getfile("kvm-vm-stats",
4454 			&kvm_vm_stats_fops, kvm, O_RDONLY);
4455 	if (IS_ERR(file)) {
4456 		put_unused_fd(fd);
4457 		return PTR_ERR(file);
4458 	}
4459 	file->f_mode |= FMODE_PREAD;
4460 	fd_install(fd, file);
4461 
4462 	return fd;
4463 }
4464 
4465 static long kvm_vm_ioctl(struct file *filp,
4466 			   unsigned int ioctl, unsigned long arg)
4467 {
4468 	struct kvm *kvm = filp->private_data;
4469 	void __user *argp = (void __user *)arg;
4470 	int r;
4471 
4472 	if (kvm->mm != current->mm || kvm->vm_dead)
4473 		return -EIO;
4474 	switch (ioctl) {
4475 	case KVM_CREATE_VCPU:
4476 		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
4477 		break;
4478 	case KVM_ENABLE_CAP: {
4479 		struct kvm_enable_cap cap;
4480 
4481 		r = -EFAULT;
4482 		if (copy_from_user(&cap, argp, sizeof(cap)))
4483 			goto out;
4484 		r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
4485 		break;
4486 	}
4487 	case KVM_SET_USER_MEMORY_REGION: {
4488 		struct kvm_userspace_memory_region kvm_userspace_mem;
4489 
4490 		r = -EFAULT;
4491 		if (copy_from_user(&kvm_userspace_mem, argp,
4492 						sizeof(kvm_userspace_mem)))
4493 			goto out;
4494 
4495 		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
4496 		break;
4497 	}
4498 	case KVM_GET_DIRTY_LOG: {
4499 		struct kvm_dirty_log log;
4500 
4501 		r = -EFAULT;
4502 		if (copy_from_user(&log, argp, sizeof(log)))
4503 			goto out;
4504 		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
4505 		break;
4506 	}
4507 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4508 	case KVM_CLEAR_DIRTY_LOG: {
4509 		struct kvm_clear_dirty_log log;
4510 
4511 		r = -EFAULT;
4512 		if (copy_from_user(&log, argp, sizeof(log)))
4513 			goto out;
4514 		r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
4515 		break;
4516 	}
4517 #endif
4518 #ifdef CONFIG_KVM_MMIO
4519 	case KVM_REGISTER_COALESCED_MMIO: {
4520 		struct kvm_coalesced_mmio_zone zone;
4521 
4522 		r = -EFAULT;
4523 		if (copy_from_user(&zone, argp, sizeof(zone)))
4524 			goto out;
4525 		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
4526 		break;
4527 	}
4528 	case KVM_UNREGISTER_COALESCED_MMIO: {
4529 		struct kvm_coalesced_mmio_zone zone;
4530 
4531 		r = -EFAULT;
4532 		if (copy_from_user(&zone, argp, sizeof(zone)))
4533 			goto out;
4534 		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
4535 		break;
4536 	}
4537 #endif
4538 	case KVM_IRQFD: {
4539 		struct kvm_irqfd data;
4540 
4541 		r = -EFAULT;
4542 		if (copy_from_user(&data, argp, sizeof(data)))
4543 			goto out;
4544 		r = kvm_irqfd(kvm, &data);
4545 		break;
4546 	}
4547 	case KVM_IOEVENTFD: {
4548 		struct kvm_ioeventfd data;
4549 
4550 		r = -EFAULT;
4551 		if (copy_from_user(&data, argp, sizeof(data)))
4552 			goto out;
4553 		r = kvm_ioeventfd(kvm, &data);
4554 		break;
4555 	}
4556 #ifdef CONFIG_HAVE_KVM_MSI
4557 	case KVM_SIGNAL_MSI: {
4558 		struct kvm_msi msi;
4559 
4560 		r = -EFAULT;
4561 		if (copy_from_user(&msi, argp, sizeof(msi)))
4562 			goto out;
4563 		r = kvm_send_userspace_msi(kvm, &msi);
4564 		break;
4565 	}
4566 #endif
4567 #ifdef __KVM_HAVE_IRQ_LINE
4568 	case KVM_IRQ_LINE_STATUS:
4569 	case KVM_IRQ_LINE: {
4570 		struct kvm_irq_level irq_event;
4571 
4572 		r = -EFAULT;
4573 		if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
4574 			goto out;
4575 
4576 		r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
4577 					ioctl == KVM_IRQ_LINE_STATUS);
4578 		if (r)
4579 			goto out;
4580 
4581 		r = -EFAULT;
4582 		if (ioctl == KVM_IRQ_LINE_STATUS) {
4583 			if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
4584 				goto out;
4585 		}
4586 
4587 		r = 0;
4588 		break;
4589 	}
4590 #endif
4591 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4592 	case KVM_SET_GSI_ROUTING: {
4593 		struct kvm_irq_routing routing;
4594 		struct kvm_irq_routing __user *urouting;
4595 		struct kvm_irq_routing_entry *entries = NULL;
4596 
4597 		r = -EFAULT;
4598 		if (copy_from_user(&routing, argp, sizeof(routing)))
4599 			goto out;
4600 		r = -EINVAL;
4601 		if (!kvm_arch_can_set_irq_routing(kvm))
4602 			goto out;
4603 		if (routing.nr > KVM_MAX_IRQ_ROUTES)
4604 			goto out;
4605 		if (routing.flags)
4606 			goto out;
4607 		if (routing.nr) {
4608 			urouting = argp;
4609 			entries = vmemdup_user(urouting->entries,
4610 					       array_size(sizeof(*entries),
4611 							  routing.nr));
4612 			if (IS_ERR(entries)) {
4613 				r = PTR_ERR(entries);
4614 				goto out;
4615 			}
4616 		}
4617 		r = kvm_set_irq_routing(kvm, entries, routing.nr,
4618 					routing.flags);
4619 		kvfree(entries);
4620 		break;
4621 	}
4622 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
4623 	case KVM_CREATE_DEVICE: {
4624 		struct kvm_create_device cd;
4625 
4626 		r = -EFAULT;
4627 		if (copy_from_user(&cd, argp, sizeof(cd)))
4628 			goto out;
4629 
4630 		r = kvm_ioctl_create_device(kvm, &cd);
4631 		if (r)
4632 			goto out;
4633 
4634 		r = -EFAULT;
4635 		if (copy_to_user(argp, &cd, sizeof(cd)))
4636 			goto out;
4637 
4638 		r = 0;
4639 		break;
4640 	}
4641 	case KVM_CHECK_EXTENSION:
4642 		r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
4643 		break;
4644 	case KVM_RESET_DIRTY_RINGS:
4645 		r = kvm_vm_ioctl_reset_dirty_pages(kvm);
4646 		break;
4647 	case KVM_GET_STATS_FD:
4648 		r = kvm_vm_ioctl_get_stats_fd(kvm);
4649 		break;
4650 	default:
4651 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
4652 	}
4653 out:
4654 	return r;
4655 }
4656 
4657 #ifdef CONFIG_KVM_COMPAT
4658 struct compat_kvm_dirty_log {
4659 	__u32 slot;
4660 	__u32 padding1;
4661 	union {
4662 		compat_uptr_t dirty_bitmap; /* one bit per page */
4663 		__u64 padding2;
4664 	};
4665 };
4666 
4667 struct compat_kvm_clear_dirty_log {
4668 	__u32 slot;
4669 	__u32 num_pages;
4670 	__u64 first_page;
4671 	union {
4672 		compat_uptr_t dirty_bitmap; /* one bit per page */
4673 		__u64 padding2;
4674 	};
4675 };
4676 
4677 static long kvm_vm_compat_ioctl(struct file *filp,
4678 			   unsigned int ioctl, unsigned long arg)
4679 {
4680 	struct kvm *kvm = filp->private_data;
4681 	int r;
4682 
4683 	if (kvm->mm != current->mm || kvm->vm_dead)
4684 		return -EIO;
4685 	switch (ioctl) {
4686 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4687 	case KVM_CLEAR_DIRTY_LOG: {
4688 		struct compat_kvm_clear_dirty_log compat_log;
4689 		struct kvm_clear_dirty_log log;
4690 
4691 		if (copy_from_user(&compat_log, (void __user *)arg,
4692 				   sizeof(compat_log)))
4693 			return -EFAULT;
4694 		log.slot	 = compat_log.slot;
4695 		log.num_pages	 = compat_log.num_pages;
4696 		log.first_page	 = compat_log.first_page;
4697 		log.padding2	 = compat_log.padding2;
4698 		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
4699 
4700 		r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
4701 		break;
4702 	}
4703 #endif
4704 	case KVM_GET_DIRTY_LOG: {
4705 		struct compat_kvm_dirty_log compat_log;
4706 		struct kvm_dirty_log log;
4707 
4708 		if (copy_from_user(&compat_log, (void __user *)arg,
4709 				   sizeof(compat_log)))
4710 			return -EFAULT;
4711 		log.slot	 = compat_log.slot;
4712 		log.padding1	 = compat_log.padding1;
4713 		log.padding2	 = compat_log.padding2;
4714 		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
4715 
4716 		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
4717 		break;
4718 	}
4719 	default:
4720 		r = kvm_vm_ioctl(filp, ioctl, arg);
4721 	}
4722 	return r;
4723 }
4724 #endif
4725 
4726 static struct file_operations kvm_vm_fops = {
4727 	.release        = kvm_vm_release,
4728 	.unlocked_ioctl = kvm_vm_ioctl,
4729 	.llseek		= noop_llseek,
4730 	KVM_COMPAT(kvm_vm_compat_ioctl),
4731 };
4732 
4733 bool file_is_kvm(struct file *file)
4734 {
4735 	return file && file->f_op == &kvm_vm_fops;
4736 }
4737 EXPORT_SYMBOL_GPL(file_is_kvm);
4738 
4739 static int kvm_dev_ioctl_create_vm(unsigned long type)
4740 {
4741 	int r;
4742 	struct kvm *kvm;
4743 	struct file *file;
4744 
4745 	kvm = kvm_create_vm(type);
4746 	if (IS_ERR(kvm))
4747 		return PTR_ERR(kvm);
4748 #ifdef CONFIG_KVM_MMIO
4749 	r = kvm_coalesced_mmio_init(kvm);
4750 	if (r < 0)
4751 		goto put_kvm;
4752 #endif
4753 	r = get_unused_fd_flags(O_CLOEXEC);
4754 	if (r < 0)
4755 		goto put_kvm;
4756 
4757 	snprintf(kvm->stats_id, sizeof(kvm->stats_id),
4758 			"kvm-%d", task_pid_nr(current));
4759 
4760 	file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
4761 	if (IS_ERR(file)) {
4762 		put_unused_fd(r);
4763 		r = PTR_ERR(file);
4764 		goto put_kvm;
4765 	}
4766 
4767 	/*
4768 	 * Don't call kvm_put_kvm anymore at this point; file->f_op is
4769 	 * already set, with ->release() being kvm_vm_release().  In error
4770 	 * cases it will be called by the final fput(file) and will take
4771 	 * care of doing kvm_put_kvm(kvm).
4772 	 */
4773 	if (kvm_create_vm_debugfs(kvm, r) < 0) {
4774 		put_unused_fd(r);
4775 		fput(file);
4776 		return -ENOMEM;
4777 	}
4778 	kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
4779 
4780 	fd_install(r, file);
4781 	return r;
4782 
4783 put_kvm:
4784 	kvm_put_kvm(kvm);
4785 	return r;
4786 }
4787 
4788 static long kvm_dev_ioctl(struct file *filp,
4789 			  unsigned int ioctl, unsigned long arg)
4790 {
4791 	long r = -EINVAL;
4792 
4793 	switch (ioctl) {
4794 	case KVM_GET_API_VERSION:
4795 		if (arg)
4796 			goto out;
4797 		r = KVM_API_VERSION;
4798 		break;
4799 	case KVM_CREATE_VM:
4800 		r = kvm_dev_ioctl_create_vm(arg);
4801 		break;
4802 	case KVM_CHECK_EXTENSION:
4803 		r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
4804 		break;
4805 	case KVM_GET_VCPU_MMAP_SIZE:
4806 		if (arg)
4807 			goto out;
4808 		r = PAGE_SIZE;     /* struct kvm_run */
4809 #ifdef CONFIG_X86
4810 		r += PAGE_SIZE;    /* pio data page */
4811 #endif
4812 #ifdef CONFIG_KVM_MMIO
4813 		r += PAGE_SIZE;    /* coalesced mmio ring page */
4814 #endif
4815 		break;
4816 	case KVM_TRACE_ENABLE:
4817 	case KVM_TRACE_PAUSE:
4818 	case KVM_TRACE_DISABLE:
4819 		r = -EOPNOTSUPP;
4820 		break;
4821 	default:
4822 		return kvm_arch_dev_ioctl(filp, ioctl, arg);
4823 	}
4824 out:
4825 	return r;
4826 }
4827 
4828 static struct file_operations kvm_chardev_ops = {
4829 	.unlocked_ioctl = kvm_dev_ioctl,
4830 	.llseek		= noop_llseek,
4831 	KVM_COMPAT(kvm_dev_ioctl),
4832 };
4833 
4834 static struct miscdevice kvm_dev = {
4835 	KVM_MINOR,
4836 	"kvm",
4837 	&kvm_chardev_ops,
4838 };
4839 
4840 static void hardware_enable_nolock(void *junk)
4841 {
4842 	int cpu = raw_smp_processor_id();
4843 	int r;
4844 
4845 	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
4846 		return;
4847 
4848 	cpumask_set_cpu(cpu, cpus_hardware_enabled);
4849 
4850 	r = kvm_arch_hardware_enable();
4851 
4852 	if (r) {
4853 		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
4854 		atomic_inc(&hardware_enable_failed);
4855 		pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
4856 	}
4857 }
4858 
4859 static int kvm_starting_cpu(unsigned int cpu)
4860 {
4861 	raw_spin_lock(&kvm_count_lock);
4862 	if (kvm_usage_count)
4863 		hardware_enable_nolock(NULL);
4864 	raw_spin_unlock(&kvm_count_lock);
4865 	return 0;
4866 }
4867 
4868 static void hardware_disable_nolock(void *junk)
4869 {
4870 	int cpu = raw_smp_processor_id();
4871 
4872 	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
4873 		return;
4874 	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
4875 	kvm_arch_hardware_disable();
4876 }
4877 
4878 static int kvm_dying_cpu(unsigned int cpu)
4879 {
4880 	raw_spin_lock(&kvm_count_lock);
4881 	if (kvm_usage_count)
4882 		hardware_disable_nolock(NULL);
4883 	raw_spin_unlock(&kvm_count_lock);
4884 	return 0;
4885 }
4886 
4887 static void hardware_disable_all_nolock(void)
4888 {
4889 	BUG_ON(!kvm_usage_count);
4890 
4891 	kvm_usage_count--;
4892 	if (!kvm_usage_count)
4893 		on_each_cpu(hardware_disable_nolock, NULL, 1);
4894 }
4895 
4896 static void hardware_disable_all(void)
4897 {
4898 	raw_spin_lock(&kvm_count_lock);
4899 	hardware_disable_all_nolock();
4900 	raw_spin_unlock(&kvm_count_lock);
4901 }
4902 
4903 static int hardware_enable_all(void)
4904 {
4905 	int r = 0;
4906 
4907 	raw_spin_lock(&kvm_count_lock);
4908 
4909 	kvm_usage_count++;
4910 	if (kvm_usage_count == 1) {
4911 		atomic_set(&hardware_enable_failed, 0);
4912 		on_each_cpu(hardware_enable_nolock, NULL, 1);
4913 
4914 		if (atomic_read(&hardware_enable_failed)) {
4915 			hardware_disable_all_nolock();
4916 			r = -EBUSY;
4917 		}
4918 	}
4919 
4920 	raw_spin_unlock(&kvm_count_lock);
4921 
4922 	return r;
4923 }
4924 
4925 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
4926 		      void *v)
4927 {
4928 	/*
4929 	 * Some (well, at least mine) BIOSes hang on reboot if
4930 	 * in vmx root mode.
4931 	 *
4932 	 * And Intel TXT required VMX off for all cpu when system shutdown.
4933 	 */
4934 	pr_info("kvm: exiting hardware virtualization\n");
4935 	kvm_rebooting = true;
4936 	on_each_cpu(hardware_disable_nolock, NULL, 1);
4937 	return NOTIFY_OK;
4938 }
4939 
4940 static struct notifier_block kvm_reboot_notifier = {
4941 	.notifier_call = kvm_reboot,
4942 	.priority = 0,
4943 };
4944 
4945 static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
4946 {
4947 	int i;
4948 
4949 	for (i = 0; i < bus->dev_count; i++) {
4950 		struct kvm_io_device *pos = bus->range[i].dev;
4951 
4952 		kvm_iodevice_destructor(pos);
4953 	}
4954 	kfree(bus);
4955 }
4956 
4957 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
4958 				 const struct kvm_io_range *r2)
4959 {
4960 	gpa_t addr1 = r1->addr;
4961 	gpa_t addr2 = r2->addr;
4962 
4963 	if (addr1 < addr2)
4964 		return -1;
4965 
4966 	/* If r2->len == 0, match the exact address.  If r2->len != 0,
4967 	 * accept any overlapping write.  Any order is acceptable for
4968 	 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
4969 	 * we process all of them.
4970 	 */
4971 	if (r2->len) {
4972 		addr1 += r1->len;
4973 		addr2 += r2->len;
4974 	}
4975 
4976 	if (addr1 > addr2)
4977 		return 1;
4978 
4979 	return 0;
4980 }
4981 
4982 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
4983 {
4984 	return kvm_io_bus_cmp(p1, p2);
4985 }
4986 
4987 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
4988 			     gpa_t addr, int len)
4989 {
4990 	struct kvm_io_range *range, key;
4991 	int off;
4992 
4993 	key = (struct kvm_io_range) {
4994 		.addr = addr,
4995 		.len = len,
4996 	};
4997 
4998 	range = bsearch(&key, bus->range, bus->dev_count,
4999 			sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
5000 	if (range == NULL)
5001 		return -ENOENT;
5002 
5003 	off = range - bus->range;
5004 
5005 	while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
5006 		off--;
5007 
5008 	return off;
5009 }
5010 
5011 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5012 			      struct kvm_io_range *range, const void *val)
5013 {
5014 	int idx;
5015 
5016 	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5017 	if (idx < 0)
5018 		return -EOPNOTSUPP;
5019 
5020 	while (idx < bus->dev_count &&
5021 		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5022 		if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
5023 					range->len, val))
5024 			return idx;
5025 		idx++;
5026 	}
5027 
5028 	return -EOPNOTSUPP;
5029 }
5030 
5031 /* kvm_io_bus_write - called under kvm->slots_lock */
5032 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5033 		     int len, const void *val)
5034 {
5035 	struct kvm_io_bus *bus;
5036 	struct kvm_io_range range;
5037 	int r;
5038 
5039 	range = (struct kvm_io_range) {
5040 		.addr = addr,
5041 		.len = len,
5042 	};
5043 
5044 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5045 	if (!bus)
5046 		return -ENOMEM;
5047 	r = __kvm_io_bus_write(vcpu, bus, &range, val);
5048 	return r < 0 ? r : 0;
5049 }
5050 EXPORT_SYMBOL_GPL(kvm_io_bus_write);
5051 
5052 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */
5053 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
5054 			    gpa_t addr, int len, const void *val, long cookie)
5055 {
5056 	struct kvm_io_bus *bus;
5057 	struct kvm_io_range range;
5058 
5059 	range = (struct kvm_io_range) {
5060 		.addr = addr,
5061 		.len = len,
5062 	};
5063 
5064 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5065 	if (!bus)
5066 		return -ENOMEM;
5067 
5068 	/* First try the device referenced by cookie. */
5069 	if ((cookie >= 0) && (cookie < bus->dev_count) &&
5070 	    (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
5071 		if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
5072 					val))
5073 			return cookie;
5074 
5075 	/*
5076 	 * cookie contained garbage; fall back to search and return the
5077 	 * correct cookie value.
5078 	 */
5079 	return __kvm_io_bus_write(vcpu, bus, &range, val);
5080 }
5081 
5082 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5083 			     struct kvm_io_range *range, void *val)
5084 {
5085 	int idx;
5086 
5087 	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5088 	if (idx < 0)
5089 		return -EOPNOTSUPP;
5090 
5091 	while (idx < bus->dev_count &&
5092 		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5093 		if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
5094 				       range->len, val))
5095 			return idx;
5096 		idx++;
5097 	}
5098 
5099 	return -EOPNOTSUPP;
5100 }
5101 
5102 /* kvm_io_bus_read - called under kvm->slots_lock */
5103 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5104 		    int len, void *val)
5105 {
5106 	struct kvm_io_bus *bus;
5107 	struct kvm_io_range range;
5108 	int r;
5109 
5110 	range = (struct kvm_io_range) {
5111 		.addr = addr,
5112 		.len = len,
5113 	};
5114 
5115 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5116 	if (!bus)
5117 		return -ENOMEM;
5118 	r = __kvm_io_bus_read(vcpu, bus, &range, val);
5119 	return r < 0 ? r : 0;
5120 }
5121 
5122 /* Caller must hold slots_lock. */
5123 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
5124 			    int len, struct kvm_io_device *dev)
5125 {
5126 	int i;
5127 	struct kvm_io_bus *new_bus, *bus;
5128 	struct kvm_io_range range;
5129 
5130 	bus = kvm_get_bus(kvm, bus_idx);
5131 	if (!bus)
5132 		return -ENOMEM;
5133 
5134 	/* exclude ioeventfd which is limited by maximum fd */
5135 	if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
5136 		return -ENOSPC;
5137 
5138 	new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
5139 			  GFP_KERNEL_ACCOUNT);
5140 	if (!new_bus)
5141 		return -ENOMEM;
5142 
5143 	range = (struct kvm_io_range) {
5144 		.addr = addr,
5145 		.len = len,
5146 		.dev = dev,
5147 	};
5148 
5149 	for (i = 0; i < bus->dev_count; i++)
5150 		if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
5151 			break;
5152 
5153 	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
5154 	new_bus->dev_count++;
5155 	new_bus->range[i] = range;
5156 	memcpy(new_bus->range + i + 1, bus->range + i,
5157 		(bus->dev_count - i) * sizeof(struct kvm_io_range));
5158 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
5159 	synchronize_srcu_expedited(&kvm->srcu);
5160 	kfree(bus);
5161 
5162 	return 0;
5163 }
5164 
5165 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
5166 			      struct kvm_io_device *dev)
5167 {
5168 	int i, j;
5169 	struct kvm_io_bus *new_bus, *bus;
5170 
5171 	lockdep_assert_held(&kvm->slots_lock);
5172 
5173 	bus = kvm_get_bus(kvm, bus_idx);
5174 	if (!bus)
5175 		return 0;
5176 
5177 	for (i = 0; i < bus->dev_count; i++) {
5178 		if (bus->range[i].dev == dev) {
5179 			break;
5180 		}
5181 	}
5182 
5183 	if (i == bus->dev_count)
5184 		return 0;
5185 
5186 	new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
5187 			  GFP_KERNEL_ACCOUNT);
5188 	if (new_bus) {
5189 		memcpy(new_bus, bus, struct_size(bus, range, i));
5190 		new_bus->dev_count--;
5191 		memcpy(new_bus->range + i, bus->range + i + 1,
5192 				flex_array_size(new_bus, range, new_bus->dev_count - i));
5193 	}
5194 
5195 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
5196 	synchronize_srcu_expedited(&kvm->srcu);
5197 
5198 	/* Destroy the old bus _after_ installing the (null) bus. */
5199 	if (!new_bus) {
5200 		pr_err("kvm: failed to shrink bus, removing it completely\n");
5201 		for (j = 0; j < bus->dev_count; j++) {
5202 			if (j == i)
5203 				continue;
5204 			kvm_iodevice_destructor(bus->range[j].dev);
5205 		}
5206 	}
5207 
5208 	kfree(bus);
5209 	return new_bus ? 0 : -ENOMEM;
5210 }
5211 
5212 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
5213 					 gpa_t addr)
5214 {
5215 	struct kvm_io_bus *bus;
5216 	int dev_idx, srcu_idx;
5217 	struct kvm_io_device *iodev = NULL;
5218 
5219 	srcu_idx = srcu_read_lock(&kvm->srcu);
5220 
5221 	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
5222 	if (!bus)
5223 		goto out_unlock;
5224 
5225 	dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
5226 	if (dev_idx < 0)
5227 		goto out_unlock;
5228 
5229 	iodev = bus->range[dev_idx].dev;
5230 
5231 out_unlock:
5232 	srcu_read_unlock(&kvm->srcu, srcu_idx);
5233 
5234 	return iodev;
5235 }
5236 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
5237 
5238 static int kvm_debugfs_open(struct inode *inode, struct file *file,
5239 			   int (*get)(void *, u64 *), int (*set)(void *, u64),
5240 			   const char *fmt)
5241 {
5242 	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
5243 					  inode->i_private;
5244 
5245 	/*
5246 	 * The debugfs files are a reference to the kvm struct which
5247         * is still valid when kvm_destroy_vm is called.  kvm_get_kvm_safe
5248         * avoids the race between open and the removal of the debugfs directory.
5249 	 */
5250 	if (!kvm_get_kvm_safe(stat_data->kvm))
5251 		return -ENOENT;
5252 
5253 	if (simple_attr_open(inode, file, get,
5254 		    kvm_stats_debugfs_mode(stat_data->desc) & 0222
5255 		    ? set : NULL,
5256 		    fmt)) {
5257 		kvm_put_kvm(stat_data->kvm);
5258 		return -ENOMEM;
5259 	}
5260 
5261 	return 0;
5262 }
5263 
5264 static int kvm_debugfs_release(struct inode *inode, struct file *file)
5265 {
5266 	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
5267 					  inode->i_private;
5268 
5269 	simple_attr_release(inode, file);
5270 	kvm_put_kvm(stat_data->kvm);
5271 
5272 	return 0;
5273 }
5274 
5275 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
5276 {
5277 	*val = *(u64 *)((void *)(&kvm->stat) + offset);
5278 
5279 	return 0;
5280 }
5281 
5282 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
5283 {
5284 	*(u64 *)((void *)(&kvm->stat) + offset) = 0;
5285 
5286 	return 0;
5287 }
5288 
5289 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
5290 {
5291 	unsigned long i;
5292 	struct kvm_vcpu *vcpu;
5293 
5294 	*val = 0;
5295 
5296 	kvm_for_each_vcpu(i, vcpu, kvm)
5297 		*val += *(u64 *)((void *)(&vcpu->stat) + offset);
5298 
5299 	return 0;
5300 }
5301 
5302 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
5303 {
5304 	unsigned long i;
5305 	struct kvm_vcpu *vcpu;
5306 
5307 	kvm_for_each_vcpu(i, vcpu, kvm)
5308 		*(u64 *)((void *)(&vcpu->stat) + offset) = 0;
5309 
5310 	return 0;
5311 }
5312 
5313 static int kvm_stat_data_get(void *data, u64 *val)
5314 {
5315 	int r = -EFAULT;
5316 	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
5317 
5318 	switch (stat_data->kind) {
5319 	case KVM_STAT_VM:
5320 		r = kvm_get_stat_per_vm(stat_data->kvm,
5321 					stat_data->desc->desc.offset, val);
5322 		break;
5323 	case KVM_STAT_VCPU:
5324 		r = kvm_get_stat_per_vcpu(stat_data->kvm,
5325 					  stat_data->desc->desc.offset, val);
5326 		break;
5327 	}
5328 
5329 	return r;
5330 }
5331 
5332 static int kvm_stat_data_clear(void *data, u64 val)
5333 {
5334 	int r = -EFAULT;
5335 	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
5336 
5337 	if (val)
5338 		return -EINVAL;
5339 
5340 	switch (stat_data->kind) {
5341 	case KVM_STAT_VM:
5342 		r = kvm_clear_stat_per_vm(stat_data->kvm,
5343 					  stat_data->desc->desc.offset);
5344 		break;
5345 	case KVM_STAT_VCPU:
5346 		r = kvm_clear_stat_per_vcpu(stat_data->kvm,
5347 					    stat_data->desc->desc.offset);
5348 		break;
5349 	}
5350 
5351 	return r;
5352 }
5353 
5354 static int kvm_stat_data_open(struct inode *inode, struct file *file)
5355 {
5356 	__simple_attr_check_format("%llu\n", 0ull);
5357 	return kvm_debugfs_open(inode, file, kvm_stat_data_get,
5358 				kvm_stat_data_clear, "%llu\n");
5359 }
5360 
5361 static const struct file_operations stat_fops_per_vm = {
5362 	.owner = THIS_MODULE,
5363 	.open = kvm_stat_data_open,
5364 	.release = kvm_debugfs_release,
5365 	.read = simple_attr_read,
5366 	.write = simple_attr_write,
5367 	.llseek = no_llseek,
5368 };
5369 
5370 static int vm_stat_get(void *_offset, u64 *val)
5371 {
5372 	unsigned offset = (long)_offset;
5373 	struct kvm *kvm;
5374 	u64 tmp_val;
5375 
5376 	*val = 0;
5377 	mutex_lock(&kvm_lock);
5378 	list_for_each_entry(kvm, &vm_list, vm_list) {
5379 		kvm_get_stat_per_vm(kvm, offset, &tmp_val);
5380 		*val += tmp_val;
5381 	}
5382 	mutex_unlock(&kvm_lock);
5383 	return 0;
5384 }
5385 
5386 static int vm_stat_clear(void *_offset, u64 val)
5387 {
5388 	unsigned offset = (long)_offset;
5389 	struct kvm *kvm;
5390 
5391 	if (val)
5392 		return -EINVAL;
5393 
5394 	mutex_lock(&kvm_lock);
5395 	list_for_each_entry(kvm, &vm_list, vm_list) {
5396 		kvm_clear_stat_per_vm(kvm, offset);
5397 	}
5398 	mutex_unlock(&kvm_lock);
5399 
5400 	return 0;
5401 }
5402 
5403 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
5404 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n");
5405 
5406 static int vcpu_stat_get(void *_offset, u64 *val)
5407 {
5408 	unsigned offset = (long)_offset;
5409 	struct kvm *kvm;
5410 	u64 tmp_val;
5411 
5412 	*val = 0;
5413 	mutex_lock(&kvm_lock);
5414 	list_for_each_entry(kvm, &vm_list, vm_list) {
5415 		kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
5416 		*val += tmp_val;
5417 	}
5418 	mutex_unlock(&kvm_lock);
5419 	return 0;
5420 }
5421 
5422 static int vcpu_stat_clear(void *_offset, u64 val)
5423 {
5424 	unsigned offset = (long)_offset;
5425 	struct kvm *kvm;
5426 
5427 	if (val)
5428 		return -EINVAL;
5429 
5430 	mutex_lock(&kvm_lock);
5431 	list_for_each_entry(kvm, &vm_list, vm_list) {
5432 		kvm_clear_stat_per_vcpu(kvm, offset);
5433 	}
5434 	mutex_unlock(&kvm_lock);
5435 
5436 	return 0;
5437 }
5438 
5439 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
5440 			"%llu\n");
5441 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n");
5442 
5443 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
5444 {
5445 	struct kobj_uevent_env *env;
5446 	unsigned long long created, active;
5447 
5448 	if (!kvm_dev.this_device || !kvm)
5449 		return;
5450 
5451 	mutex_lock(&kvm_lock);
5452 	if (type == KVM_EVENT_CREATE_VM) {
5453 		kvm_createvm_count++;
5454 		kvm_active_vms++;
5455 	} else if (type == KVM_EVENT_DESTROY_VM) {
5456 		kvm_active_vms--;
5457 	}
5458 	created = kvm_createvm_count;
5459 	active = kvm_active_vms;
5460 	mutex_unlock(&kvm_lock);
5461 
5462 	env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
5463 	if (!env)
5464 		return;
5465 
5466 	add_uevent_var(env, "CREATED=%llu", created);
5467 	add_uevent_var(env, "COUNT=%llu", active);
5468 
5469 	if (type == KVM_EVENT_CREATE_VM) {
5470 		add_uevent_var(env, "EVENT=create");
5471 		kvm->userspace_pid = task_pid_nr(current);
5472 	} else if (type == KVM_EVENT_DESTROY_VM) {
5473 		add_uevent_var(env, "EVENT=destroy");
5474 	}
5475 	add_uevent_var(env, "PID=%d", kvm->userspace_pid);
5476 
5477 	if (kvm->debugfs_dentry) {
5478 		char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
5479 
5480 		if (p) {
5481 			tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
5482 			if (!IS_ERR(tmp))
5483 				add_uevent_var(env, "STATS_PATH=%s", tmp);
5484 			kfree(p);
5485 		}
5486 	}
5487 	/* no need for checks, since we are adding at most only 5 keys */
5488 	env->envp[env->envp_idx++] = NULL;
5489 	kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
5490 	kfree(env);
5491 }
5492 
5493 static void kvm_init_debug(void)
5494 {
5495 	const struct file_operations *fops;
5496 	const struct _kvm_stats_desc *pdesc;
5497 	int i;
5498 
5499 	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
5500 
5501 	for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
5502 		pdesc = &kvm_vm_stats_desc[i];
5503 		if (kvm_stats_debugfs_mode(pdesc) & 0222)
5504 			fops = &vm_stat_fops;
5505 		else
5506 			fops = &vm_stat_readonly_fops;
5507 		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
5508 				kvm_debugfs_dir,
5509 				(void *)(long)pdesc->desc.offset, fops);
5510 	}
5511 
5512 	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
5513 		pdesc = &kvm_vcpu_stats_desc[i];
5514 		if (kvm_stats_debugfs_mode(pdesc) & 0222)
5515 			fops = &vcpu_stat_fops;
5516 		else
5517 			fops = &vcpu_stat_readonly_fops;
5518 		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
5519 				kvm_debugfs_dir,
5520 				(void *)(long)pdesc->desc.offset, fops);
5521 	}
5522 }
5523 
5524 static int kvm_suspend(void)
5525 {
5526 	if (kvm_usage_count)
5527 		hardware_disable_nolock(NULL);
5528 	return 0;
5529 }
5530 
5531 static void kvm_resume(void)
5532 {
5533 	if (kvm_usage_count) {
5534 #ifdef CONFIG_LOCKDEP
5535 		WARN_ON(lockdep_is_held(&kvm_count_lock));
5536 #endif
5537 		hardware_enable_nolock(NULL);
5538 	}
5539 }
5540 
5541 static struct syscore_ops kvm_syscore_ops = {
5542 	.suspend = kvm_suspend,
5543 	.resume = kvm_resume,
5544 };
5545 
5546 static inline
5547 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
5548 {
5549 	return container_of(pn, struct kvm_vcpu, preempt_notifier);
5550 }
5551 
5552 static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
5553 {
5554 	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
5555 
5556 	WRITE_ONCE(vcpu->preempted, false);
5557 	WRITE_ONCE(vcpu->ready, false);
5558 
5559 	__this_cpu_write(kvm_running_vcpu, vcpu);
5560 	kvm_arch_sched_in(vcpu, cpu);
5561 	kvm_arch_vcpu_load(vcpu, cpu);
5562 }
5563 
5564 static void kvm_sched_out(struct preempt_notifier *pn,
5565 			  struct task_struct *next)
5566 {
5567 	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
5568 
5569 	if (current->on_rq) {
5570 		WRITE_ONCE(vcpu->preempted, true);
5571 		WRITE_ONCE(vcpu->ready, true);
5572 	}
5573 	kvm_arch_vcpu_put(vcpu);
5574 	__this_cpu_write(kvm_running_vcpu, NULL);
5575 }
5576 
5577 /**
5578  * kvm_get_running_vcpu - get the vcpu running on the current CPU.
5579  *
5580  * We can disable preemption locally around accessing the per-CPU variable,
5581  * and use the resolved vcpu pointer after enabling preemption again,
5582  * because even if the current thread is migrated to another CPU, reading
5583  * the per-CPU value later will give us the same value as we update the
5584  * per-CPU variable in the preempt notifier handlers.
5585  */
5586 struct kvm_vcpu *kvm_get_running_vcpu(void)
5587 {
5588 	struct kvm_vcpu *vcpu;
5589 
5590 	preempt_disable();
5591 	vcpu = __this_cpu_read(kvm_running_vcpu);
5592 	preempt_enable();
5593 
5594 	return vcpu;
5595 }
5596 EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);
5597 
5598 /**
5599  * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
5600  */
5601 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
5602 {
5603         return &kvm_running_vcpu;
5604 }
5605 
5606 #ifdef CONFIG_GUEST_PERF_EVENTS
5607 static unsigned int kvm_guest_state(void)
5608 {
5609 	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
5610 	unsigned int state;
5611 
5612 	if (!kvm_arch_pmi_in_guest(vcpu))
5613 		return 0;
5614 
5615 	state = PERF_GUEST_ACTIVE;
5616 	if (!kvm_arch_vcpu_in_kernel(vcpu))
5617 		state |= PERF_GUEST_USER;
5618 
5619 	return state;
5620 }
5621 
5622 static unsigned long kvm_guest_get_ip(void)
5623 {
5624 	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
5625 
5626 	/* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
5627 	if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
5628 		return 0;
5629 
5630 	return kvm_arch_vcpu_get_ip(vcpu);
5631 }
5632 
5633 static struct perf_guest_info_callbacks kvm_guest_cbs = {
5634 	.state			= kvm_guest_state,
5635 	.get_ip			= kvm_guest_get_ip,
5636 	.handle_intel_pt_intr	= NULL,
5637 };
5638 
5639 void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
5640 {
5641 	kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
5642 	perf_register_guest_info_callbacks(&kvm_guest_cbs);
5643 }
5644 void kvm_unregister_perf_callbacks(void)
5645 {
5646 	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
5647 }
5648 #endif
5649 
5650 struct kvm_cpu_compat_check {
5651 	void *opaque;
5652 	int *ret;
5653 };
5654 
5655 static void check_processor_compat(void *data)
5656 {
5657 	struct kvm_cpu_compat_check *c = data;
5658 
5659 	*c->ret = kvm_arch_check_processor_compat(c->opaque);
5660 }
5661 
5662 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
5663 		  struct module *module)
5664 {
5665 	struct kvm_cpu_compat_check c;
5666 	int r;
5667 	int cpu;
5668 
5669 	r = kvm_arch_init(opaque);
5670 	if (r)
5671 		goto out_fail;
5672 
5673 	/*
5674 	 * kvm_arch_init makes sure there's at most one caller
5675 	 * for architectures that support multiple implementations,
5676 	 * like intel and amd on x86.
5677 	 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
5678 	 * conflicts in case kvm is already setup for another implementation.
5679 	 */
5680 	r = kvm_irqfd_init();
5681 	if (r)
5682 		goto out_irqfd;
5683 
5684 	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
5685 		r = -ENOMEM;
5686 		goto out_free_0;
5687 	}
5688 
5689 	r = kvm_arch_hardware_setup(opaque);
5690 	if (r < 0)
5691 		goto out_free_1;
5692 
5693 	c.ret = &r;
5694 	c.opaque = opaque;
5695 	for_each_online_cpu(cpu) {
5696 		smp_call_function_single(cpu, check_processor_compat, &c, 1);
5697 		if (r < 0)
5698 			goto out_free_2;
5699 	}
5700 
5701 	r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting",
5702 				      kvm_starting_cpu, kvm_dying_cpu);
5703 	if (r)
5704 		goto out_free_2;
5705 	register_reboot_notifier(&kvm_reboot_notifier);
5706 
5707 	/* A kmem cache lets us meet the alignment requirements of fx_save. */
5708 	if (!vcpu_align)
5709 		vcpu_align = __alignof__(struct kvm_vcpu);
5710 	kvm_vcpu_cache =
5711 		kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
5712 					   SLAB_ACCOUNT,
5713 					   offsetof(struct kvm_vcpu, arch),
5714 					   offsetofend(struct kvm_vcpu, stats_id)
5715 					   - offsetof(struct kvm_vcpu, arch),
5716 					   NULL);
5717 	if (!kvm_vcpu_cache) {
5718 		r = -ENOMEM;
5719 		goto out_free_3;
5720 	}
5721 
5722 	for_each_possible_cpu(cpu) {
5723 		if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
5724 					    GFP_KERNEL, cpu_to_node(cpu))) {
5725 			r = -ENOMEM;
5726 			goto out_free_4;
5727 		}
5728 	}
5729 
5730 	r = kvm_async_pf_init();
5731 	if (r)
5732 		goto out_free_5;
5733 
5734 	kvm_chardev_ops.owner = module;
5735 	kvm_vm_fops.owner = module;
5736 	kvm_vcpu_fops.owner = module;
5737 
5738 	r = misc_register(&kvm_dev);
5739 	if (r) {
5740 		pr_err("kvm: misc device register failed\n");
5741 		goto out_unreg;
5742 	}
5743 
5744 	register_syscore_ops(&kvm_syscore_ops);
5745 
5746 	kvm_preempt_ops.sched_in = kvm_sched_in;
5747 	kvm_preempt_ops.sched_out = kvm_sched_out;
5748 
5749 	kvm_init_debug();
5750 
5751 	r = kvm_vfio_ops_init();
5752 	WARN_ON(r);
5753 
5754 	return 0;
5755 
5756 out_unreg:
5757 	kvm_async_pf_deinit();
5758 out_free_5:
5759 	for_each_possible_cpu(cpu)
5760 		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
5761 out_free_4:
5762 	kmem_cache_destroy(kvm_vcpu_cache);
5763 out_free_3:
5764 	unregister_reboot_notifier(&kvm_reboot_notifier);
5765 	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
5766 out_free_2:
5767 	kvm_arch_hardware_unsetup();
5768 out_free_1:
5769 	free_cpumask_var(cpus_hardware_enabled);
5770 out_free_0:
5771 	kvm_irqfd_exit();
5772 out_irqfd:
5773 	kvm_arch_exit();
5774 out_fail:
5775 	return r;
5776 }
5777 EXPORT_SYMBOL_GPL(kvm_init);
5778 
5779 void kvm_exit(void)
5780 {
5781 	int cpu;
5782 
5783 	debugfs_remove_recursive(kvm_debugfs_dir);
5784 	misc_deregister(&kvm_dev);
5785 	for_each_possible_cpu(cpu)
5786 		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
5787 	kmem_cache_destroy(kvm_vcpu_cache);
5788 	kvm_async_pf_deinit();
5789 	unregister_syscore_ops(&kvm_syscore_ops);
5790 	unregister_reboot_notifier(&kvm_reboot_notifier);
5791 	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
5792 	on_each_cpu(hardware_disable_nolock, NULL, 1);
5793 	kvm_arch_hardware_unsetup();
5794 	kvm_arch_exit();
5795 	kvm_irqfd_exit();
5796 	free_cpumask_var(cpus_hardware_enabled);
5797 	kvm_vfio_ops_exit();
5798 }
5799 EXPORT_SYMBOL_GPL(kvm_exit);
5800 
5801 struct kvm_vm_worker_thread_context {
5802 	struct kvm *kvm;
5803 	struct task_struct *parent;
5804 	struct completion init_done;
5805 	kvm_vm_thread_fn_t thread_fn;
5806 	uintptr_t data;
5807 	int err;
5808 };
5809 
5810 static int kvm_vm_worker_thread(void *context)
5811 {
5812 	/*
5813 	 * The init_context is allocated on the stack of the parent thread, so
5814 	 * we have to locally copy anything that is needed beyond initialization
5815 	 */
5816 	struct kvm_vm_worker_thread_context *init_context = context;
5817 	struct kvm *kvm = init_context->kvm;
5818 	kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
5819 	uintptr_t data = init_context->data;
5820 	int err;
5821 
5822 	err = kthread_park(current);
5823 	/* kthread_park(current) is never supposed to return an error */
5824 	WARN_ON(err != 0);
5825 	if (err)
5826 		goto init_complete;
5827 
5828 	err = cgroup_attach_task_all(init_context->parent, current);
5829 	if (err) {
5830 		kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
5831 			__func__, err);
5832 		goto init_complete;
5833 	}
5834 
5835 	set_user_nice(current, task_nice(init_context->parent));
5836 
5837 init_complete:
5838 	init_context->err = err;
5839 	complete(&init_context->init_done);
5840 	init_context = NULL;
5841 
5842 	if (err)
5843 		return err;
5844 
5845 	/* Wait to be woken up by the spawner before proceeding. */
5846 	kthread_parkme();
5847 
5848 	if (!kthread_should_stop())
5849 		err = thread_fn(kvm, data);
5850 
5851 	return err;
5852 }
5853 
5854 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
5855 				uintptr_t data, const char *name,
5856 				struct task_struct **thread_ptr)
5857 {
5858 	struct kvm_vm_worker_thread_context init_context = {};
5859 	struct task_struct *thread;
5860 
5861 	*thread_ptr = NULL;
5862 	init_context.kvm = kvm;
5863 	init_context.parent = current;
5864 	init_context.thread_fn = thread_fn;
5865 	init_context.data = data;
5866 	init_completion(&init_context.init_done);
5867 
5868 	thread = kthread_run(kvm_vm_worker_thread, &init_context,
5869 			     "%s-%d", name, task_pid_nr(current));
5870 	if (IS_ERR(thread))
5871 		return PTR_ERR(thread);
5872 
5873 	/* kthread_run is never supposed to return NULL */
5874 	WARN_ON(thread == NULL);
5875 
5876 	wait_for_completion(&init_context.init_done);
5877 
5878 	if (!init_context.err)
5879 		*thread_ptr = thread;
5880 
5881 	return init_context.err;
5882 }
5883