xref: /openbmc/linux/virt/kvm/kvm_main.c (revision 738f6ba1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * This module enables machines with Intel VT-x extensions to run virtual
6  * machines without emulation or binary translation.
7  *
8  * Copyright (C) 2006 Qumranet, Inc.
9  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10  *
11  * Authors:
12  *   Avi Kivity   <avi@qumranet.com>
13  *   Yaniv Kamay  <yaniv@qumranet.com>
14  */
15 
16 #include <kvm/iodev.h>
17 
18 #include <linux/kvm_host.h>
19 #include <linux/kvm.h>
20 #include <linux/module.h>
21 #include <linux/errno.h>
22 #include <linux/percpu.h>
23 #include <linux/mm.h>
24 #include <linux/miscdevice.h>
25 #include <linux/vmalloc.h>
26 #include <linux/reboot.h>
27 #include <linux/debugfs.h>
28 #include <linux/highmem.h>
29 #include <linux/file.h>
30 #include <linux/syscore_ops.h>
31 #include <linux/cpu.h>
32 #include <linux/sched/signal.h>
33 #include <linux/sched/mm.h>
34 #include <linux/sched/stat.h>
35 #include <linux/cpumask.h>
36 #include <linux/smp.h>
37 #include <linux/anon_inodes.h>
38 #include <linux/profile.h>
39 #include <linux/kvm_para.h>
40 #include <linux/pagemap.h>
41 #include <linux/mman.h>
42 #include <linux/swap.h>
43 #include <linux/bitops.h>
44 #include <linux/spinlock.h>
45 #include <linux/compat.h>
46 #include <linux/srcu.h>
47 #include <linux/hugetlb.h>
48 #include <linux/slab.h>
49 #include <linux/sort.h>
50 #include <linux/bsearch.h>
51 #include <linux/io.h>
52 #include <linux/lockdep.h>
53 #include <linux/kthread.h>
54 
55 #include <asm/processor.h>
56 #include <asm/ioctl.h>
57 #include <linux/uaccess.h>
58 
59 #include "coalesced_mmio.h"
60 #include "async_pf.h"
61 #include "mmu_lock.h"
62 #include "vfio.h"
63 
64 #define CREATE_TRACE_POINTS
65 #include <trace/events/kvm.h>
66 
67 #include <linux/kvm_dirty_ring.h>
68 
69 /* Worst case buffer size needed for holding an integer. */
70 #define ITOA_MAX_LEN 12
71 
72 MODULE_AUTHOR("Qumranet");
73 MODULE_LICENSE("GPL");
74 
75 /* Architectures should define their poll value according to the halt latency */
76 unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
77 module_param(halt_poll_ns, uint, 0644);
78 EXPORT_SYMBOL_GPL(halt_poll_ns);
79 
80 /* Default doubles per-vcpu halt_poll_ns. */
81 unsigned int halt_poll_ns_grow = 2;
82 module_param(halt_poll_ns_grow, uint, 0644);
83 EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
84 
85 /* The start value to grow halt_poll_ns from */
86 unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
87 module_param(halt_poll_ns_grow_start, uint, 0644);
88 EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
89 
90 /* Default resets per-vcpu halt_poll_ns . */
91 unsigned int halt_poll_ns_shrink;
92 module_param(halt_poll_ns_shrink, uint, 0644);
93 EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
94 
95 /*
96  * Ordering of locks:
97  *
98  *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
99  */
100 
101 DEFINE_MUTEX(kvm_lock);
102 static DEFINE_RAW_SPINLOCK(kvm_count_lock);
103 LIST_HEAD(vm_list);
104 
105 static cpumask_var_t cpus_hardware_enabled;
106 static int kvm_usage_count;
107 static atomic_t hardware_enable_failed;
108 
109 static struct kmem_cache *kvm_vcpu_cache;
110 
111 static __read_mostly struct preempt_ops kvm_preempt_ops;
112 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
113 
114 struct dentry *kvm_debugfs_dir;
115 EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
116 
117 static int kvm_debugfs_num_entries;
118 static const struct file_operations stat_fops_per_vm;
119 
120 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
121 			   unsigned long arg);
122 #ifdef CONFIG_KVM_COMPAT
123 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
124 				  unsigned long arg);
125 #define KVM_COMPAT(c)	.compat_ioctl	= (c)
126 #else
127 /*
128  * For architectures that don't implement a compat infrastructure,
129  * adopt a double line of defense:
130  * - Prevent a compat task from opening /dev/kvm
131  * - If the open has been done by a 64bit task, and the KVM fd
132  *   passed to a compat task, let the ioctls fail.
133  */
134 static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
135 				unsigned long arg) { return -EINVAL; }
136 
137 static int kvm_no_compat_open(struct inode *inode, struct file *file)
138 {
139 	return is_compat_task() ? -ENODEV : 0;
140 }
141 #define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl,	\
142 			.open		= kvm_no_compat_open
143 #endif
144 static int hardware_enable_all(void);
145 static void hardware_disable_all(void);
146 
147 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
148 
149 __visible bool kvm_rebooting;
150 EXPORT_SYMBOL_GPL(kvm_rebooting);
151 
152 #define KVM_EVENT_CREATE_VM 0
153 #define KVM_EVENT_DESTROY_VM 1
154 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
155 static unsigned long long kvm_createvm_count;
156 static unsigned long long kvm_active_vms;
157 
158 __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
159 						   unsigned long start, unsigned long end)
160 {
161 }
162 
163 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
164 {
165 	/*
166 	 * The metadata used by is_zone_device_page() to determine whether or
167 	 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
168 	 * the device has been pinned, e.g. by get_user_pages().  WARN if the
169 	 * page_count() is zero to help detect bad usage of this helper.
170 	 */
171 	if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
172 		return false;
173 
174 	return is_zone_device_page(pfn_to_page(pfn));
175 }
176 
177 bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
178 {
179 	/*
180 	 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
181 	 * perspective they are "normal" pages, albeit with slightly different
182 	 * usage rules.
183 	 */
184 	if (pfn_valid(pfn))
185 		return PageReserved(pfn_to_page(pfn)) &&
186 		       !is_zero_pfn(pfn) &&
187 		       !kvm_is_zone_device_pfn(pfn);
188 
189 	return true;
190 }
191 
192 bool kvm_is_transparent_hugepage(kvm_pfn_t pfn)
193 {
194 	struct page *page = pfn_to_page(pfn);
195 
196 	if (!PageTransCompoundMap(page))
197 		return false;
198 
199 	return is_transparent_hugepage(compound_head(page));
200 }
201 
202 /*
203  * Switches to specified vcpu, until a matching vcpu_put()
204  */
205 void vcpu_load(struct kvm_vcpu *vcpu)
206 {
207 	int cpu = get_cpu();
208 
209 	__this_cpu_write(kvm_running_vcpu, vcpu);
210 	preempt_notifier_register(&vcpu->preempt_notifier);
211 	kvm_arch_vcpu_load(vcpu, cpu);
212 	put_cpu();
213 }
214 EXPORT_SYMBOL_GPL(vcpu_load);
215 
216 void vcpu_put(struct kvm_vcpu *vcpu)
217 {
218 	preempt_disable();
219 	kvm_arch_vcpu_put(vcpu);
220 	preempt_notifier_unregister(&vcpu->preempt_notifier);
221 	__this_cpu_write(kvm_running_vcpu, NULL);
222 	preempt_enable();
223 }
224 EXPORT_SYMBOL_GPL(vcpu_put);
225 
226 /* TODO: merge with kvm_arch_vcpu_should_kick */
227 static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
228 {
229 	int mode = kvm_vcpu_exiting_guest_mode(vcpu);
230 
231 	/*
232 	 * We need to wait for the VCPU to reenable interrupts and get out of
233 	 * READING_SHADOW_PAGE_TABLES mode.
234 	 */
235 	if (req & KVM_REQUEST_WAIT)
236 		return mode != OUTSIDE_GUEST_MODE;
237 
238 	/*
239 	 * Need to kick a running VCPU, but otherwise there is nothing to do.
240 	 */
241 	return mode == IN_GUEST_MODE;
242 }
243 
244 static void ack_flush(void *_completed)
245 {
246 }
247 
248 static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
249 {
250 	if (unlikely(!cpus))
251 		cpus = cpu_online_mask;
252 
253 	if (cpumask_empty(cpus))
254 		return false;
255 
256 	smp_call_function_many(cpus, ack_flush, NULL, wait);
257 	return true;
258 }
259 
260 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
261 				 struct kvm_vcpu *except,
262 				 unsigned long *vcpu_bitmap, cpumask_var_t tmp)
263 {
264 	int i, cpu, me;
265 	struct kvm_vcpu *vcpu;
266 	bool called;
267 
268 	me = get_cpu();
269 
270 	kvm_for_each_vcpu(i, vcpu, kvm) {
271 		if ((vcpu_bitmap && !test_bit(i, vcpu_bitmap)) ||
272 		    vcpu == except)
273 			continue;
274 
275 		kvm_make_request(req, vcpu);
276 		cpu = vcpu->cpu;
277 
278 		if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
279 			continue;
280 
281 		if (tmp != NULL && cpu != -1 && cpu != me &&
282 		    kvm_request_needs_ipi(vcpu, req))
283 			__cpumask_set_cpu(cpu, tmp);
284 	}
285 
286 	called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
287 	put_cpu();
288 
289 	return called;
290 }
291 
292 bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
293 				      struct kvm_vcpu *except)
294 {
295 	cpumask_var_t cpus;
296 	bool called;
297 
298 	zalloc_cpumask_var(&cpus, GFP_ATOMIC);
299 
300 	called = kvm_make_vcpus_request_mask(kvm, req, except, NULL, cpus);
301 
302 	free_cpumask_var(cpus);
303 	return called;
304 }
305 
306 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
307 {
308 	return kvm_make_all_cpus_request_except(kvm, req, NULL);
309 }
310 
311 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
312 void kvm_flush_remote_tlbs(struct kvm *kvm)
313 {
314 	/*
315 	 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
316 	 * kvm_make_all_cpus_request.
317 	 */
318 	long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);
319 
320 	/*
321 	 * We want to publish modifications to the page tables before reading
322 	 * mode. Pairs with a memory barrier in arch-specific code.
323 	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
324 	 * and smp_mb in walk_shadow_page_lockless_begin/end.
325 	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
326 	 *
327 	 * There is already an smp_mb__after_atomic() before
328 	 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
329 	 * barrier here.
330 	 */
331 	if (!kvm_arch_flush_remote_tlb(kvm)
332 	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
333 		++kvm->stat.remote_tlb_flush;
334 	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
335 }
336 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
337 #endif
338 
339 void kvm_reload_remote_mmus(struct kvm *kvm)
340 {
341 	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
342 }
343 
344 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
345 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
346 					       gfp_t gfp_flags)
347 {
348 	gfp_flags |= mc->gfp_zero;
349 
350 	if (mc->kmem_cache)
351 		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
352 	else
353 		return (void *)__get_free_page(gfp_flags);
354 }
355 
356 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
357 {
358 	void *obj;
359 
360 	if (mc->nobjs >= min)
361 		return 0;
362 	while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
363 		obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
364 		if (!obj)
365 			return mc->nobjs >= min ? 0 : -ENOMEM;
366 		mc->objects[mc->nobjs++] = obj;
367 	}
368 	return 0;
369 }
370 
371 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
372 {
373 	return mc->nobjs;
374 }
375 
376 void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
377 {
378 	while (mc->nobjs) {
379 		if (mc->kmem_cache)
380 			kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
381 		else
382 			free_page((unsigned long)mc->objects[--mc->nobjs]);
383 	}
384 }
385 
386 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
387 {
388 	void *p;
389 
390 	if (WARN_ON(!mc->nobjs))
391 		p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
392 	else
393 		p = mc->objects[--mc->nobjs];
394 	BUG_ON(!p);
395 	return p;
396 }
397 #endif
398 
399 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
400 {
401 	mutex_init(&vcpu->mutex);
402 	vcpu->cpu = -1;
403 	vcpu->kvm = kvm;
404 	vcpu->vcpu_id = id;
405 	vcpu->pid = NULL;
406 	rcuwait_init(&vcpu->wait);
407 	kvm_async_pf_vcpu_init(vcpu);
408 
409 	vcpu->pre_pcpu = -1;
410 	INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);
411 
412 	kvm_vcpu_set_in_spin_loop(vcpu, false);
413 	kvm_vcpu_set_dy_eligible(vcpu, false);
414 	vcpu->preempted = false;
415 	vcpu->ready = false;
416 	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
417 }
418 
419 void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
420 {
421 	kvm_dirty_ring_free(&vcpu->dirty_ring);
422 	kvm_arch_vcpu_destroy(vcpu);
423 
424 	/*
425 	 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
426 	 * the vcpu->pid pointer, and at destruction time all file descriptors
427 	 * are already gone.
428 	 */
429 	put_pid(rcu_dereference_protected(vcpu->pid, 1));
430 
431 	free_page((unsigned long)vcpu->run);
432 	kmem_cache_free(kvm_vcpu_cache, vcpu);
433 }
434 EXPORT_SYMBOL_GPL(kvm_vcpu_destroy);
435 
436 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
437 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
438 {
439 	return container_of(mn, struct kvm, mmu_notifier);
440 }
441 
442 static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
443 					      struct mm_struct *mm,
444 					      unsigned long start, unsigned long end)
445 {
446 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
447 	int idx;
448 
449 	idx = srcu_read_lock(&kvm->srcu);
450 	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
451 	srcu_read_unlock(&kvm->srcu, idx);
452 }
453 
454 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
455 					struct mm_struct *mm,
456 					unsigned long address,
457 					pte_t pte)
458 {
459 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
460 	int idx;
461 
462 	idx = srcu_read_lock(&kvm->srcu);
463 
464 	KVM_MMU_LOCK(kvm);
465 
466 	kvm->mmu_notifier_seq++;
467 
468 	if (kvm_set_spte_hva(kvm, address, pte))
469 		kvm_flush_remote_tlbs(kvm);
470 
471 	KVM_MMU_UNLOCK(kvm);
472 	srcu_read_unlock(&kvm->srcu, idx);
473 }
474 
475 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
476 					const struct mmu_notifier_range *range)
477 {
478 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
479 	int need_tlb_flush = 0, idx;
480 
481 	idx = srcu_read_lock(&kvm->srcu);
482 	KVM_MMU_LOCK(kvm);
483 	/*
484 	 * The count increase must become visible at unlock time as no
485 	 * spte can be established without taking the mmu_lock and
486 	 * count is also read inside the mmu_lock critical section.
487 	 */
488 	kvm->mmu_notifier_count++;
489 	if (likely(kvm->mmu_notifier_count == 1)) {
490 		kvm->mmu_notifier_range_start = range->start;
491 		kvm->mmu_notifier_range_end = range->end;
492 	} else {
493 		/*
494 		 * Fully tracking multiple concurrent ranges has dimishing
495 		 * returns. Keep things simple and just find the minimal range
496 		 * which includes the current and new ranges. As there won't be
497 		 * enough information to subtract a range after its invalidate
498 		 * completes, any ranges invalidated concurrently will
499 		 * accumulate and persist until all outstanding invalidates
500 		 * complete.
501 		 */
502 		kvm->mmu_notifier_range_start =
503 			min(kvm->mmu_notifier_range_start, range->start);
504 		kvm->mmu_notifier_range_end =
505 			max(kvm->mmu_notifier_range_end, range->end);
506 	}
507 	need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
508 					     range->flags);
509 	/* we've to flush the tlb before the pages can be freed */
510 	if (need_tlb_flush || kvm->tlbs_dirty)
511 		kvm_flush_remote_tlbs(kvm);
512 
513 	KVM_MMU_UNLOCK(kvm);
514 	srcu_read_unlock(&kvm->srcu, idx);
515 
516 	return 0;
517 }
518 
519 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
520 					const struct mmu_notifier_range *range)
521 {
522 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
523 
524 	KVM_MMU_LOCK(kvm);
525 	/*
526 	 * This sequence increase will notify the kvm page fault that
527 	 * the page that is going to be mapped in the spte could have
528 	 * been freed.
529 	 */
530 	kvm->mmu_notifier_seq++;
531 	smp_wmb();
532 	/*
533 	 * The above sequence increase must be visible before the
534 	 * below count decrease, which is ensured by the smp_wmb above
535 	 * in conjunction with the smp_rmb in mmu_notifier_retry().
536 	 */
537 	kvm->mmu_notifier_count--;
538 	KVM_MMU_UNLOCK(kvm);
539 
540 	BUG_ON(kvm->mmu_notifier_count < 0);
541 }
542 
543 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
544 					      struct mm_struct *mm,
545 					      unsigned long start,
546 					      unsigned long end)
547 {
548 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
549 	int young, idx;
550 
551 	idx = srcu_read_lock(&kvm->srcu);
552 	KVM_MMU_LOCK(kvm);
553 
554 	young = kvm_age_hva(kvm, start, end);
555 	if (young)
556 		kvm_flush_remote_tlbs(kvm);
557 
558 	KVM_MMU_UNLOCK(kvm);
559 	srcu_read_unlock(&kvm->srcu, idx);
560 
561 	return young;
562 }
563 
564 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
565 					struct mm_struct *mm,
566 					unsigned long start,
567 					unsigned long end)
568 {
569 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
570 	int young, idx;
571 
572 	idx = srcu_read_lock(&kvm->srcu);
573 	KVM_MMU_LOCK(kvm);
574 	/*
575 	 * Even though we do not flush TLB, this will still adversely
576 	 * affect performance on pre-Haswell Intel EPT, where there is
577 	 * no EPT Access Bit to clear so that we have to tear down EPT
578 	 * tables instead. If we find this unacceptable, we can always
579 	 * add a parameter to kvm_age_hva so that it effectively doesn't
580 	 * do anything on clear_young.
581 	 *
582 	 * Also note that currently we never issue secondary TLB flushes
583 	 * from clear_young, leaving this job up to the regular system
584 	 * cadence. If we find this inaccurate, we might come up with a
585 	 * more sophisticated heuristic later.
586 	 */
587 	young = kvm_age_hva(kvm, start, end);
588 	KVM_MMU_UNLOCK(kvm);
589 	srcu_read_unlock(&kvm->srcu, idx);
590 
591 	return young;
592 }
593 
594 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
595 				       struct mm_struct *mm,
596 				       unsigned long address)
597 {
598 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
599 	int young, idx;
600 
601 	idx = srcu_read_lock(&kvm->srcu);
602 	KVM_MMU_LOCK(kvm);
603 	young = kvm_test_age_hva(kvm, address);
604 	KVM_MMU_UNLOCK(kvm);
605 	srcu_read_unlock(&kvm->srcu, idx);
606 
607 	return young;
608 }
609 
610 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
611 				     struct mm_struct *mm)
612 {
613 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
614 	int idx;
615 
616 	idx = srcu_read_lock(&kvm->srcu);
617 	kvm_arch_flush_shadow_all(kvm);
618 	srcu_read_unlock(&kvm->srcu, idx);
619 }
620 
621 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
622 	.invalidate_range	= kvm_mmu_notifier_invalidate_range,
623 	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
624 	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
625 	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
626 	.clear_young		= kvm_mmu_notifier_clear_young,
627 	.test_young		= kvm_mmu_notifier_test_young,
628 	.change_pte		= kvm_mmu_notifier_change_pte,
629 	.release		= kvm_mmu_notifier_release,
630 };
631 
632 static int kvm_init_mmu_notifier(struct kvm *kvm)
633 {
634 	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
635 	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
636 }
637 
638 #else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
639 
640 static int kvm_init_mmu_notifier(struct kvm *kvm)
641 {
642 	return 0;
643 }
644 
645 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
646 
647 static struct kvm_memslots *kvm_alloc_memslots(void)
648 {
649 	int i;
650 	struct kvm_memslots *slots;
651 
652 	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
653 	if (!slots)
654 		return NULL;
655 
656 	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
657 		slots->id_to_index[i] = -1;
658 
659 	return slots;
660 }
661 
662 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
663 {
664 	if (!memslot->dirty_bitmap)
665 		return;
666 
667 	kvfree(memslot->dirty_bitmap);
668 	memslot->dirty_bitmap = NULL;
669 }
670 
671 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
672 {
673 	kvm_destroy_dirty_bitmap(slot);
674 
675 	kvm_arch_free_memslot(kvm, slot);
676 
677 	slot->flags = 0;
678 	slot->npages = 0;
679 }
680 
681 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
682 {
683 	struct kvm_memory_slot *memslot;
684 
685 	if (!slots)
686 		return;
687 
688 	kvm_for_each_memslot(memslot, slots)
689 		kvm_free_memslot(kvm, memslot);
690 
691 	kvfree(slots);
692 }
693 
694 static void kvm_destroy_vm_debugfs(struct kvm *kvm)
695 {
696 	int i;
697 
698 	if (!kvm->debugfs_dentry)
699 		return;
700 
701 	debugfs_remove_recursive(kvm->debugfs_dentry);
702 
703 	if (kvm->debugfs_stat_data) {
704 		for (i = 0; i < kvm_debugfs_num_entries; i++)
705 			kfree(kvm->debugfs_stat_data[i]);
706 		kfree(kvm->debugfs_stat_data);
707 	}
708 }
709 
710 static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
711 {
712 	char dir_name[ITOA_MAX_LEN * 2];
713 	struct kvm_stat_data *stat_data;
714 	struct kvm_stats_debugfs_item *p;
715 
716 	if (!debugfs_initialized())
717 		return 0;
718 
719 	snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
720 	kvm->debugfs_dentry = debugfs_create_dir(dir_name, kvm_debugfs_dir);
721 
722 	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
723 					 sizeof(*kvm->debugfs_stat_data),
724 					 GFP_KERNEL_ACCOUNT);
725 	if (!kvm->debugfs_stat_data)
726 		return -ENOMEM;
727 
728 	for (p = debugfs_entries; p->name; p++) {
729 		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
730 		if (!stat_data)
731 			return -ENOMEM;
732 
733 		stat_data->kvm = kvm;
734 		stat_data->dbgfs_item = p;
735 		kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
736 		debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p),
737 				    kvm->debugfs_dentry, stat_data,
738 				    &stat_fops_per_vm);
739 	}
740 	return 0;
741 }
742 
743 /*
744  * Called after the VM is otherwise initialized, but just before adding it to
745  * the vm_list.
746  */
747 int __weak kvm_arch_post_init_vm(struct kvm *kvm)
748 {
749 	return 0;
750 }
751 
752 /*
753  * Called just after removing the VM from the vm_list, but before doing any
754  * other destruction.
755  */
756 void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
757 {
758 }
759 
760 static struct kvm *kvm_create_vm(unsigned long type)
761 {
762 	struct kvm *kvm = kvm_arch_alloc_vm();
763 	int r = -ENOMEM;
764 	int i;
765 
766 	if (!kvm)
767 		return ERR_PTR(-ENOMEM);
768 
769 	KVM_MMU_LOCK_INIT(kvm);
770 	mmgrab(current->mm);
771 	kvm->mm = current->mm;
772 	kvm_eventfd_init(kvm);
773 	mutex_init(&kvm->lock);
774 	mutex_init(&kvm->irq_lock);
775 	mutex_init(&kvm->slots_lock);
776 	INIT_LIST_HEAD(&kvm->devices);
777 
778 	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
779 
780 	if (init_srcu_struct(&kvm->srcu))
781 		goto out_err_no_srcu;
782 	if (init_srcu_struct(&kvm->irq_srcu))
783 		goto out_err_no_irq_srcu;
784 
785 	refcount_set(&kvm->users_count, 1);
786 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
787 		struct kvm_memslots *slots = kvm_alloc_memslots();
788 
789 		if (!slots)
790 			goto out_err_no_arch_destroy_vm;
791 		/* Generations must be different for each address space. */
792 		slots->generation = i;
793 		rcu_assign_pointer(kvm->memslots[i], slots);
794 	}
795 
796 	for (i = 0; i < KVM_NR_BUSES; i++) {
797 		rcu_assign_pointer(kvm->buses[i],
798 			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
799 		if (!kvm->buses[i])
800 			goto out_err_no_arch_destroy_vm;
801 	}
802 
803 	kvm->max_halt_poll_ns = halt_poll_ns;
804 
805 	r = kvm_arch_init_vm(kvm, type);
806 	if (r)
807 		goto out_err_no_arch_destroy_vm;
808 
809 	r = hardware_enable_all();
810 	if (r)
811 		goto out_err_no_disable;
812 
813 #ifdef CONFIG_HAVE_KVM_IRQFD
814 	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
815 #endif
816 
817 	r = kvm_init_mmu_notifier(kvm);
818 	if (r)
819 		goto out_err_no_mmu_notifier;
820 
821 	r = kvm_arch_post_init_vm(kvm);
822 	if (r)
823 		goto out_err;
824 
825 	mutex_lock(&kvm_lock);
826 	list_add(&kvm->vm_list, &vm_list);
827 	mutex_unlock(&kvm_lock);
828 
829 	preempt_notifier_inc();
830 
831 	return kvm;
832 
833 out_err:
834 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
835 	if (kvm->mmu_notifier.ops)
836 		mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
837 #endif
838 out_err_no_mmu_notifier:
839 	hardware_disable_all();
840 out_err_no_disable:
841 	kvm_arch_destroy_vm(kvm);
842 out_err_no_arch_destroy_vm:
843 	WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
844 	for (i = 0; i < KVM_NR_BUSES; i++)
845 		kfree(kvm_get_bus(kvm, i));
846 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
847 		kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
848 	cleanup_srcu_struct(&kvm->irq_srcu);
849 out_err_no_irq_srcu:
850 	cleanup_srcu_struct(&kvm->srcu);
851 out_err_no_srcu:
852 	kvm_arch_free_vm(kvm);
853 	mmdrop(current->mm);
854 	return ERR_PTR(r);
855 }
856 
857 static void kvm_destroy_devices(struct kvm *kvm)
858 {
859 	struct kvm_device *dev, *tmp;
860 
861 	/*
862 	 * We do not need to take the kvm->lock here, because nobody else
863 	 * has a reference to the struct kvm at this point and therefore
864 	 * cannot access the devices list anyhow.
865 	 */
866 	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
867 		list_del(&dev->vm_node);
868 		dev->ops->destroy(dev);
869 	}
870 }
871 
872 static void kvm_destroy_vm(struct kvm *kvm)
873 {
874 	int i;
875 	struct mm_struct *mm = kvm->mm;
876 
877 	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
878 	kvm_destroy_vm_debugfs(kvm);
879 	kvm_arch_sync_events(kvm);
880 	mutex_lock(&kvm_lock);
881 	list_del(&kvm->vm_list);
882 	mutex_unlock(&kvm_lock);
883 	kvm_arch_pre_destroy_vm(kvm);
884 
885 	kvm_free_irq_routing(kvm);
886 	for (i = 0; i < KVM_NR_BUSES; i++) {
887 		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
888 
889 		if (bus)
890 			kvm_io_bus_destroy(bus);
891 		kvm->buses[i] = NULL;
892 	}
893 	kvm_coalesced_mmio_free(kvm);
894 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
895 	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
896 #else
897 	kvm_arch_flush_shadow_all(kvm);
898 #endif
899 	kvm_arch_destroy_vm(kvm);
900 	kvm_destroy_devices(kvm);
901 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
902 		kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
903 	cleanup_srcu_struct(&kvm->irq_srcu);
904 	cleanup_srcu_struct(&kvm->srcu);
905 	kvm_arch_free_vm(kvm);
906 	preempt_notifier_dec();
907 	hardware_disable_all();
908 	mmdrop(mm);
909 }
910 
911 void kvm_get_kvm(struct kvm *kvm)
912 {
913 	refcount_inc(&kvm->users_count);
914 }
915 EXPORT_SYMBOL_GPL(kvm_get_kvm);
916 
917 void kvm_put_kvm(struct kvm *kvm)
918 {
919 	if (refcount_dec_and_test(&kvm->users_count))
920 		kvm_destroy_vm(kvm);
921 }
922 EXPORT_SYMBOL_GPL(kvm_put_kvm);
923 
924 /*
925  * Used to put a reference that was taken on behalf of an object associated
926  * with a user-visible file descriptor, e.g. a vcpu or device, if installation
927  * of the new file descriptor fails and the reference cannot be transferred to
928  * its final owner.  In such cases, the caller is still actively using @kvm and
929  * will fail miserably if the refcount unexpectedly hits zero.
930  */
931 void kvm_put_kvm_no_destroy(struct kvm *kvm)
932 {
933 	WARN_ON(refcount_dec_and_test(&kvm->users_count));
934 }
935 EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);
936 
937 static int kvm_vm_release(struct inode *inode, struct file *filp)
938 {
939 	struct kvm *kvm = filp->private_data;
940 
941 	kvm_irqfd_release(kvm);
942 
943 	kvm_put_kvm(kvm);
944 	return 0;
945 }
946 
947 /*
948  * Allocation size is twice as large as the actual dirty bitmap size.
949  * See kvm_vm_ioctl_get_dirty_log() why this is needed.
950  */
951 static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
952 {
953 	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
954 
955 	memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT);
956 	if (!memslot->dirty_bitmap)
957 		return -ENOMEM;
958 
959 	return 0;
960 }
961 
962 /*
963  * Delete a memslot by decrementing the number of used slots and shifting all
964  * other entries in the array forward one spot.
965  */
966 static inline void kvm_memslot_delete(struct kvm_memslots *slots,
967 				      struct kvm_memory_slot *memslot)
968 {
969 	struct kvm_memory_slot *mslots = slots->memslots;
970 	int i;
971 
972 	if (WARN_ON(slots->id_to_index[memslot->id] == -1))
973 		return;
974 
975 	slots->used_slots--;
976 
977 	if (atomic_read(&slots->lru_slot) >= slots->used_slots)
978 		atomic_set(&slots->lru_slot, 0);
979 
980 	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) {
981 		mslots[i] = mslots[i + 1];
982 		slots->id_to_index[mslots[i].id] = i;
983 	}
984 	mslots[i] = *memslot;
985 	slots->id_to_index[memslot->id] = -1;
986 }
987 
988 /*
989  * "Insert" a new memslot by incrementing the number of used slots.  Returns
990  * the new slot's initial index into the memslots array.
991  */
992 static inline int kvm_memslot_insert_back(struct kvm_memslots *slots)
993 {
994 	return slots->used_slots++;
995 }
996 
997 /*
998  * Move a changed memslot backwards in the array by shifting existing slots
999  * with a higher GFN toward the front of the array.  Note, the changed memslot
1000  * itself is not preserved in the array, i.e. not swapped at this time, only
1001  * its new index into the array is tracked.  Returns the changed memslot's
1002  * current index into the memslots array.
1003  */
1004 static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
1005 					    struct kvm_memory_slot *memslot)
1006 {
1007 	struct kvm_memory_slot *mslots = slots->memslots;
1008 	int i;
1009 
1010 	if (WARN_ON_ONCE(slots->id_to_index[memslot->id] == -1) ||
1011 	    WARN_ON_ONCE(!slots->used_slots))
1012 		return -1;
1013 
1014 	/*
1015 	 * Move the target memslot backward in the array by shifting existing
1016 	 * memslots with a higher GFN (than the target memslot) towards the
1017 	 * front of the array.
1018 	 */
1019 	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) {
1020 		if (memslot->base_gfn > mslots[i + 1].base_gfn)
1021 			break;
1022 
1023 		WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn);
1024 
1025 		/* Shift the next memslot forward one and update its index. */
1026 		mslots[i] = mslots[i + 1];
1027 		slots->id_to_index[mslots[i].id] = i;
1028 	}
1029 	return i;
1030 }
1031 
1032 /*
1033  * Move a changed memslot forwards in the array by shifting existing slots with
1034  * a lower GFN toward the back of the array.  Note, the changed memslot itself
1035  * is not preserved in the array, i.e. not swapped at this time, only its new
1036  * index into the array is tracked.  Returns the changed memslot's final index
1037  * into the memslots array.
1038  */
1039 static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
1040 					   struct kvm_memory_slot *memslot,
1041 					   int start)
1042 {
1043 	struct kvm_memory_slot *mslots = slots->memslots;
1044 	int i;
1045 
1046 	for (i = start; i > 0; i--) {
1047 		if (memslot->base_gfn < mslots[i - 1].base_gfn)
1048 			break;
1049 
1050 		WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn);
1051 
1052 		/* Shift the next memslot back one and update its index. */
1053 		mslots[i] = mslots[i - 1];
1054 		slots->id_to_index[mslots[i].id] = i;
1055 	}
1056 	return i;
1057 }
1058 
1059 /*
1060  * Re-sort memslots based on their GFN to account for an added, deleted, or
1061  * moved memslot.  Sorting memslots by GFN allows using a binary search during
1062  * memslot lookup.
1063  *
1064  * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!  I.e. the entry
1065  * at memslots[0] has the highest GFN.
1066  *
1067  * The sorting algorithm takes advantage of having initially sorted memslots
1068  * and knowing the position of the changed memslot.  Sorting is also optimized
1069  * by not swapping the updated memslot and instead only shifting other memslots
1070  * and tracking the new index for the update memslot.  Only once its final
1071  * index is known is the updated memslot copied into its position in the array.
1072  *
1073  *  - When deleting a memslot, the deleted memslot simply needs to be moved to
1074  *    the end of the array.
1075  *
1076  *  - When creating a memslot, the algorithm "inserts" the new memslot at the
1077  *    end of the array and then it forward to its correct location.
1078  *
1079  *  - When moving a memslot, the algorithm first moves the updated memslot
1080  *    backward to handle the scenario where the memslot's GFN was changed to a
1081  *    lower value.  update_memslots() then falls through and runs the same flow
1082  *    as creating a memslot to move the memslot forward to handle the scenario
1083  *    where its GFN was changed to a higher value.
1084  *
1085  * Note, slots are sorted from highest->lowest instead of lowest->highest for
1086  * historical reasons.  Originally, invalid memslots where denoted by having
1087  * GFN=0, thus sorting from highest->lowest naturally sorted invalid memslots
1088  * to the end of the array.  The current algorithm uses dedicated logic to
1089  * delete a memslot and thus does not rely on invalid memslots having GFN=0.
1090  *
1091  * The other historical motiviation for highest->lowest was to improve the
1092  * performance of memslot lookup.  KVM originally used a linear search starting
1093  * at memslots[0].  On x86, the largest memslot usually has one of the highest,
1094  * if not *the* highest, GFN, as the bulk of the guest's RAM is located in a
1095  * single memslot above the 4gb boundary.  As the largest memslot is also the
1096  * most likely to be referenced, sorting it to the front of the array was
1097  * advantageous.  The current binary search starts from the middle of the array
1098  * and uses an LRU pointer to improve performance for all memslots and GFNs.
1099  */
1100 static void update_memslots(struct kvm_memslots *slots,
1101 			    struct kvm_memory_slot *memslot,
1102 			    enum kvm_mr_change change)
1103 {
1104 	int i;
1105 
1106 	if (change == KVM_MR_DELETE) {
1107 		kvm_memslot_delete(slots, memslot);
1108 	} else {
1109 		if (change == KVM_MR_CREATE)
1110 			i = kvm_memslot_insert_back(slots);
1111 		else
1112 			i = kvm_memslot_move_backward(slots, memslot);
1113 		i = kvm_memslot_move_forward(slots, memslot, i);
1114 
1115 		/*
1116 		 * Copy the memslot to its new position in memslots and update
1117 		 * its index accordingly.
1118 		 */
1119 		slots->memslots[i] = *memslot;
1120 		slots->id_to_index[memslot->id] = i;
1121 	}
1122 }
1123 
1124 static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
1125 {
1126 	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
1127 
1128 #ifdef __KVM_HAVE_READONLY_MEM
1129 	valid_flags |= KVM_MEM_READONLY;
1130 #endif
1131 
1132 	if (mem->flags & ~valid_flags)
1133 		return -EINVAL;
1134 
1135 	return 0;
1136 }
1137 
1138 static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
1139 		int as_id, struct kvm_memslots *slots)
1140 {
1141 	struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
1142 	u64 gen = old_memslots->generation;
1143 
1144 	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
1145 	slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1146 
1147 	rcu_assign_pointer(kvm->memslots[as_id], slots);
1148 	synchronize_srcu_expedited(&kvm->srcu);
1149 
1150 	/*
1151 	 * Increment the new memslot generation a second time, dropping the
1152 	 * update in-progress flag and incrementing the generation based on
1153 	 * the number of address spaces.  This provides a unique and easily
1154 	 * identifiable generation number while the memslots are in flux.
1155 	 */
1156 	gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1157 
1158 	/*
1159 	 * Generations must be unique even across address spaces.  We do not need
1160 	 * a global counter for that, instead the generation space is evenly split
1161 	 * across address spaces.  For example, with two address spaces, address
1162 	 * space 0 will use generations 0, 2, 4, ... while address space 1 will
1163 	 * use generations 1, 3, 5, ...
1164 	 */
1165 	gen += KVM_ADDRESS_SPACE_NUM;
1166 
1167 	kvm_arch_memslots_updated(kvm, gen);
1168 
1169 	slots->generation = gen;
1170 
1171 	return old_memslots;
1172 }
1173 
1174 /*
1175  * Note, at a minimum, the current number of used slots must be allocated, even
1176  * when deleting a memslot, as we need a complete duplicate of the memslots for
1177  * use when invalidating a memslot prior to deleting/moving the memslot.
1178  */
1179 static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
1180 					     enum kvm_mr_change change)
1181 {
1182 	struct kvm_memslots *slots;
1183 	size_t old_size, new_size;
1184 
1185 	old_size = sizeof(struct kvm_memslots) +
1186 		   (sizeof(struct kvm_memory_slot) * old->used_slots);
1187 
1188 	if (change == KVM_MR_CREATE)
1189 		new_size = old_size + sizeof(struct kvm_memory_slot);
1190 	else
1191 		new_size = old_size;
1192 
1193 	slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT);
1194 	if (likely(slots))
1195 		memcpy(slots, old, old_size);
1196 
1197 	return slots;
1198 }
1199 
1200 static int kvm_set_memslot(struct kvm *kvm,
1201 			   const struct kvm_userspace_memory_region *mem,
1202 			   struct kvm_memory_slot *old,
1203 			   struct kvm_memory_slot *new, int as_id,
1204 			   enum kvm_mr_change change)
1205 {
1206 	struct kvm_memory_slot *slot;
1207 	struct kvm_memslots *slots;
1208 	int r;
1209 
1210 	slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change);
1211 	if (!slots)
1212 		return -ENOMEM;
1213 
1214 	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1215 		/*
1216 		 * Note, the INVALID flag needs to be in the appropriate entry
1217 		 * in the freshly allocated memslots, not in @old or @new.
1218 		 */
1219 		slot = id_to_memslot(slots, old->id);
1220 		slot->flags |= KVM_MEMSLOT_INVALID;
1221 
1222 		/*
1223 		 * We can re-use the old memslots, the only difference from the
1224 		 * newly installed memslots is the invalid flag, which will get
1225 		 * dropped by update_memslots anyway.  We'll also revert to the
1226 		 * old memslots if preparing the new memory region fails.
1227 		 */
1228 		slots = install_new_memslots(kvm, as_id, slots);
1229 
1230 		/* From this point no new shadow pages pointing to a deleted,
1231 		 * or moved, memslot will be created.
1232 		 *
1233 		 * validation of sp->gfn happens in:
1234 		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
1235 		 *	- kvm_is_visible_gfn (mmu_check_root)
1236 		 */
1237 		kvm_arch_flush_shadow_memslot(kvm, slot);
1238 	}
1239 
1240 	r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
1241 	if (r)
1242 		goto out_slots;
1243 
1244 	update_memslots(slots, new, change);
1245 	slots = install_new_memslots(kvm, as_id, slots);
1246 
1247 	kvm_arch_commit_memory_region(kvm, mem, old, new, change);
1248 
1249 	kvfree(slots);
1250 	return 0;
1251 
1252 out_slots:
1253 	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
1254 		slots = install_new_memslots(kvm, as_id, slots);
1255 	kvfree(slots);
1256 	return r;
1257 }
1258 
1259 static int kvm_delete_memslot(struct kvm *kvm,
1260 			      const struct kvm_userspace_memory_region *mem,
1261 			      struct kvm_memory_slot *old, int as_id)
1262 {
1263 	struct kvm_memory_slot new;
1264 	int r;
1265 
1266 	if (!old->npages)
1267 		return -EINVAL;
1268 
1269 	memset(&new, 0, sizeof(new));
1270 	new.id = old->id;
1271 	/*
1272 	 * This is only for debugging purpose; it should never be referenced
1273 	 * for a removed memslot.
1274 	 */
1275 	new.as_id = as_id;
1276 
1277 	r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE);
1278 	if (r)
1279 		return r;
1280 
1281 	kvm_free_memslot(kvm, old);
1282 	return 0;
1283 }
1284 
1285 /*
1286  * Allocate some memory and give it an address in the guest physical address
1287  * space.
1288  *
1289  * Discontiguous memory is allowed, mostly for framebuffers.
1290  *
1291  * Must be called holding kvm->slots_lock for write.
1292  */
1293 int __kvm_set_memory_region(struct kvm *kvm,
1294 			    const struct kvm_userspace_memory_region *mem)
1295 {
1296 	struct kvm_memory_slot old, new;
1297 	struct kvm_memory_slot *tmp;
1298 	enum kvm_mr_change change;
1299 	int as_id, id;
1300 	int r;
1301 
1302 	r = check_memory_region_flags(mem);
1303 	if (r)
1304 		return r;
1305 
1306 	as_id = mem->slot >> 16;
1307 	id = (u16)mem->slot;
1308 
1309 	/* General sanity checks */
1310 	if (mem->memory_size & (PAGE_SIZE - 1))
1311 		return -EINVAL;
1312 	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
1313 		return -EINVAL;
1314 	/* We can read the guest memory with __xxx_user() later on. */
1315 	if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
1316 	    (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
1317 	     !access_ok((void __user *)(unsigned long)mem->userspace_addr,
1318 			mem->memory_size))
1319 		return -EINVAL;
1320 	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
1321 		return -EINVAL;
1322 	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
1323 		return -EINVAL;
1324 
1325 	/*
1326 	 * Make a full copy of the old memslot, the pointer will become stale
1327 	 * when the memslots are re-sorted by update_memslots(), and the old
1328 	 * memslot needs to be referenced after calling update_memslots(), e.g.
1329 	 * to free its resources and for arch specific behavior.
1330 	 */
1331 	tmp = id_to_memslot(__kvm_memslots(kvm, as_id), id);
1332 	if (tmp) {
1333 		old = *tmp;
1334 		tmp = NULL;
1335 	} else {
1336 		memset(&old, 0, sizeof(old));
1337 		old.id = id;
1338 	}
1339 
1340 	if (!mem->memory_size)
1341 		return kvm_delete_memslot(kvm, mem, &old, as_id);
1342 
1343 	new.as_id = as_id;
1344 	new.id = id;
1345 	new.base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
1346 	new.npages = mem->memory_size >> PAGE_SHIFT;
1347 	new.flags = mem->flags;
1348 	new.userspace_addr = mem->userspace_addr;
1349 
1350 	if (new.npages > KVM_MEM_MAX_NR_PAGES)
1351 		return -EINVAL;
1352 
1353 	if (!old.npages) {
1354 		change = KVM_MR_CREATE;
1355 		new.dirty_bitmap = NULL;
1356 		memset(&new.arch, 0, sizeof(new.arch));
1357 	} else { /* Modify an existing slot. */
1358 		if ((new.userspace_addr != old.userspace_addr) ||
1359 		    (new.npages != old.npages) ||
1360 		    ((new.flags ^ old.flags) & KVM_MEM_READONLY))
1361 			return -EINVAL;
1362 
1363 		if (new.base_gfn != old.base_gfn)
1364 			change = KVM_MR_MOVE;
1365 		else if (new.flags != old.flags)
1366 			change = KVM_MR_FLAGS_ONLY;
1367 		else /* Nothing to change. */
1368 			return 0;
1369 
1370 		/* Copy dirty_bitmap and arch from the current memslot. */
1371 		new.dirty_bitmap = old.dirty_bitmap;
1372 		memcpy(&new.arch, &old.arch, sizeof(new.arch));
1373 	}
1374 
1375 	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
1376 		/* Check for overlaps */
1377 		kvm_for_each_memslot(tmp, __kvm_memslots(kvm, as_id)) {
1378 			if (tmp->id == id)
1379 				continue;
1380 			if (!((new.base_gfn + new.npages <= tmp->base_gfn) ||
1381 			      (new.base_gfn >= tmp->base_gfn + tmp->npages)))
1382 				return -EEXIST;
1383 		}
1384 	}
1385 
1386 	/* Allocate/free page dirty bitmap as needed */
1387 	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
1388 		new.dirty_bitmap = NULL;
1389 	else if (!new.dirty_bitmap && !kvm->dirty_ring_size) {
1390 		r = kvm_alloc_dirty_bitmap(&new);
1391 		if (r)
1392 			return r;
1393 
1394 		if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1395 			bitmap_set(new.dirty_bitmap, 0, new.npages);
1396 	}
1397 
1398 	r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);
1399 	if (r)
1400 		goto out_bitmap;
1401 
1402 	if (old.dirty_bitmap && !new.dirty_bitmap)
1403 		kvm_destroy_dirty_bitmap(&old);
1404 	return 0;
1405 
1406 out_bitmap:
1407 	if (new.dirty_bitmap && !old.dirty_bitmap)
1408 		kvm_destroy_dirty_bitmap(&new);
1409 	return r;
1410 }
1411 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
1412 
1413 int kvm_set_memory_region(struct kvm *kvm,
1414 			  const struct kvm_userspace_memory_region *mem)
1415 {
1416 	int r;
1417 
1418 	mutex_lock(&kvm->slots_lock);
1419 	r = __kvm_set_memory_region(kvm, mem);
1420 	mutex_unlock(&kvm->slots_lock);
1421 	return r;
1422 }
1423 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
1424 
1425 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
1426 					  struct kvm_userspace_memory_region *mem)
1427 {
1428 	if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
1429 		return -EINVAL;
1430 
1431 	return kvm_set_memory_region(kvm, mem);
1432 }
1433 
1434 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
1435 /**
1436  * kvm_get_dirty_log - get a snapshot of dirty pages
1437  * @kvm:	pointer to kvm instance
1438  * @log:	slot id and address to which we copy the log
1439  * @is_dirty:	set to '1' if any dirty pages were found
1440  * @memslot:	set to the associated memslot, always valid on success
1441  */
1442 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
1443 		      int *is_dirty, struct kvm_memory_slot **memslot)
1444 {
1445 	struct kvm_memslots *slots;
1446 	int i, as_id, id;
1447 	unsigned long n;
1448 	unsigned long any = 0;
1449 
1450 	/* Dirty ring tracking is exclusive to dirty log tracking */
1451 	if (kvm->dirty_ring_size)
1452 		return -ENXIO;
1453 
1454 	*memslot = NULL;
1455 	*is_dirty = 0;
1456 
1457 	as_id = log->slot >> 16;
1458 	id = (u16)log->slot;
1459 	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
1460 		return -EINVAL;
1461 
1462 	slots = __kvm_memslots(kvm, as_id);
1463 	*memslot = id_to_memslot(slots, id);
1464 	if (!(*memslot) || !(*memslot)->dirty_bitmap)
1465 		return -ENOENT;
1466 
1467 	kvm_arch_sync_dirty_log(kvm, *memslot);
1468 
1469 	n = kvm_dirty_bitmap_bytes(*memslot);
1470 
1471 	for (i = 0; !any && i < n/sizeof(long); ++i)
1472 		any = (*memslot)->dirty_bitmap[i];
1473 
1474 	if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
1475 		return -EFAULT;
1476 
1477 	if (any)
1478 		*is_dirty = 1;
1479 	return 0;
1480 }
1481 EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
1482 
1483 #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
1484 /**
1485  * kvm_get_dirty_log_protect - get a snapshot of dirty pages
1486  *	and reenable dirty page tracking for the corresponding pages.
1487  * @kvm:	pointer to kvm instance
1488  * @log:	slot id and address to which we copy the log
1489  *
1490  * We need to keep it in mind that VCPU threads can write to the bitmap
1491  * concurrently. So, to avoid losing track of dirty pages we keep the
1492  * following order:
1493  *
1494  *    1. Take a snapshot of the bit and clear it if needed.
1495  *    2. Write protect the corresponding page.
1496  *    3. Copy the snapshot to the userspace.
1497  *    4. Upon return caller flushes TLB's if needed.
1498  *
1499  * Between 2 and 4, the guest may write to the page using the remaining TLB
1500  * entry.  This is not a problem because the page is reported dirty using
1501  * the snapshot taken before and step 4 ensures that writes done after
1502  * exiting to userspace will be logged for the next call.
1503  *
1504  */
1505 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
1506 {
1507 	struct kvm_memslots *slots;
1508 	struct kvm_memory_slot *memslot;
1509 	int i, as_id, id;
1510 	unsigned long n;
1511 	unsigned long *dirty_bitmap;
1512 	unsigned long *dirty_bitmap_buffer;
1513 	bool flush;
1514 
1515 	/* Dirty ring tracking is exclusive to dirty log tracking */
1516 	if (kvm->dirty_ring_size)
1517 		return -ENXIO;
1518 
1519 	as_id = log->slot >> 16;
1520 	id = (u16)log->slot;
1521 	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
1522 		return -EINVAL;
1523 
1524 	slots = __kvm_memslots(kvm, as_id);
1525 	memslot = id_to_memslot(slots, id);
1526 	if (!memslot || !memslot->dirty_bitmap)
1527 		return -ENOENT;
1528 
1529 	dirty_bitmap = memslot->dirty_bitmap;
1530 
1531 	kvm_arch_sync_dirty_log(kvm, memslot);
1532 
1533 	n = kvm_dirty_bitmap_bytes(memslot);
1534 	flush = false;
1535 	if (kvm->manual_dirty_log_protect) {
1536 		/*
1537 		 * Unlike kvm_get_dirty_log, we always return false in *flush,
1538 		 * because no flush is needed until KVM_CLEAR_DIRTY_LOG.  There
1539 		 * is some code duplication between this function and
1540 		 * kvm_get_dirty_log, but hopefully all architecture
1541 		 * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log
1542 		 * can be eliminated.
1543 		 */
1544 		dirty_bitmap_buffer = dirty_bitmap;
1545 	} else {
1546 		dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
1547 		memset(dirty_bitmap_buffer, 0, n);
1548 
1549 		KVM_MMU_LOCK(kvm);
1550 		for (i = 0; i < n / sizeof(long); i++) {
1551 			unsigned long mask;
1552 			gfn_t offset;
1553 
1554 			if (!dirty_bitmap[i])
1555 				continue;
1556 
1557 			flush = true;
1558 			mask = xchg(&dirty_bitmap[i], 0);
1559 			dirty_bitmap_buffer[i] = mask;
1560 
1561 			offset = i * BITS_PER_LONG;
1562 			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
1563 								offset, mask);
1564 		}
1565 		KVM_MMU_UNLOCK(kvm);
1566 	}
1567 
1568 	if (flush)
1569 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
1570 
1571 	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
1572 		return -EFAULT;
1573 	return 0;
1574 }
1575 
1576 
1577 /**
1578  * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
1579  * @kvm: kvm instance
1580  * @log: slot id and address to which we copy the log
1581  *
1582  * Steps 1-4 below provide general overview of dirty page logging. See
1583  * kvm_get_dirty_log_protect() function description for additional details.
1584  *
1585  * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
1586  * always flush the TLB (step 4) even if previous step failed  and the dirty
1587  * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
1588  * does not preclude user space subsequent dirty log read. Flushing TLB ensures
1589  * writes will be marked dirty for next log read.
1590  *
1591  *   1. Take a snapshot of the bit and clear it if needed.
1592  *   2. Write protect the corresponding page.
1593  *   3. Copy the snapshot to the userspace.
1594  *   4. Flush TLB's if needed.
1595  */
1596 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1597 				      struct kvm_dirty_log *log)
1598 {
1599 	int r;
1600 
1601 	mutex_lock(&kvm->slots_lock);
1602 
1603 	r = kvm_get_dirty_log_protect(kvm, log);
1604 
1605 	mutex_unlock(&kvm->slots_lock);
1606 	return r;
1607 }
1608 
1609 /**
1610  * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
1611  *	and reenable dirty page tracking for the corresponding pages.
1612  * @kvm:	pointer to kvm instance
1613  * @log:	slot id and address from which to fetch the bitmap of dirty pages
1614  */
1615 static int kvm_clear_dirty_log_protect(struct kvm *kvm,
1616 				       struct kvm_clear_dirty_log *log)
1617 {
1618 	struct kvm_memslots *slots;
1619 	struct kvm_memory_slot *memslot;
1620 	int as_id, id;
1621 	gfn_t offset;
1622 	unsigned long i, n;
1623 	unsigned long *dirty_bitmap;
1624 	unsigned long *dirty_bitmap_buffer;
1625 	bool flush;
1626 
1627 	/* Dirty ring tracking is exclusive to dirty log tracking */
1628 	if (kvm->dirty_ring_size)
1629 		return -ENXIO;
1630 
1631 	as_id = log->slot >> 16;
1632 	id = (u16)log->slot;
1633 	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
1634 		return -EINVAL;
1635 
1636 	if (log->first_page & 63)
1637 		return -EINVAL;
1638 
1639 	slots = __kvm_memslots(kvm, as_id);
1640 	memslot = id_to_memslot(slots, id);
1641 	if (!memslot || !memslot->dirty_bitmap)
1642 		return -ENOENT;
1643 
1644 	dirty_bitmap = memslot->dirty_bitmap;
1645 
1646 	n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
1647 
1648 	if (log->first_page > memslot->npages ||
1649 	    log->num_pages > memslot->npages - log->first_page ||
1650 	    (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
1651 	    return -EINVAL;
1652 
1653 	kvm_arch_sync_dirty_log(kvm, memslot);
1654 
1655 	flush = false;
1656 	dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
1657 	if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
1658 		return -EFAULT;
1659 
1660 	KVM_MMU_LOCK(kvm);
1661 	for (offset = log->first_page, i = offset / BITS_PER_LONG,
1662 		 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
1663 	     i++, offset += BITS_PER_LONG) {
1664 		unsigned long mask = *dirty_bitmap_buffer++;
1665 		atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
1666 		if (!mask)
1667 			continue;
1668 
1669 		mask &= atomic_long_fetch_andnot(mask, p);
1670 
1671 		/*
1672 		 * mask contains the bits that really have been cleared.  This
1673 		 * never includes any bits beyond the length of the memslot (if
1674 		 * the length is not aligned to 64 pages), therefore it is not
1675 		 * a problem if userspace sets them in log->dirty_bitmap.
1676 		*/
1677 		if (mask) {
1678 			flush = true;
1679 			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
1680 								offset, mask);
1681 		}
1682 	}
1683 	KVM_MMU_UNLOCK(kvm);
1684 
1685 	if (flush)
1686 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
1687 
1688 	return 0;
1689 }
1690 
1691 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
1692 					struct kvm_clear_dirty_log *log)
1693 {
1694 	int r;
1695 
1696 	mutex_lock(&kvm->slots_lock);
1697 
1698 	r = kvm_clear_dirty_log_protect(kvm, log);
1699 
1700 	mutex_unlock(&kvm->slots_lock);
1701 	return r;
1702 }
1703 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
1704 
1705 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
1706 {
1707 	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
1708 }
1709 EXPORT_SYMBOL_GPL(gfn_to_memslot);
1710 
1711 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
1712 {
1713 	return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
1714 }
1715 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_memslot);
1716 
1717 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
1718 {
1719 	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
1720 
1721 	return kvm_is_visible_memslot(memslot);
1722 }
1723 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
1724 
1725 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
1726 {
1727 	struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1728 
1729 	return kvm_is_visible_memslot(memslot);
1730 }
1731 EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn);
1732 
1733 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
1734 {
1735 	struct vm_area_struct *vma;
1736 	unsigned long addr, size;
1737 
1738 	size = PAGE_SIZE;
1739 
1740 	addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
1741 	if (kvm_is_error_hva(addr))
1742 		return PAGE_SIZE;
1743 
1744 	mmap_read_lock(current->mm);
1745 	vma = find_vma(current->mm, addr);
1746 	if (!vma)
1747 		goto out;
1748 
1749 	size = vma_kernel_pagesize(vma);
1750 
1751 out:
1752 	mmap_read_unlock(current->mm);
1753 
1754 	return size;
1755 }
1756 
1757 static bool memslot_is_readonly(struct kvm_memory_slot *slot)
1758 {
1759 	return slot->flags & KVM_MEM_READONLY;
1760 }
1761 
1762 static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
1763 				       gfn_t *nr_pages, bool write)
1764 {
1765 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
1766 		return KVM_HVA_ERR_BAD;
1767 
1768 	if (memslot_is_readonly(slot) && write)
1769 		return KVM_HVA_ERR_RO_BAD;
1770 
1771 	if (nr_pages)
1772 		*nr_pages = slot->npages - (gfn - slot->base_gfn);
1773 
1774 	return __gfn_to_hva_memslot(slot, gfn);
1775 }
1776 
1777 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
1778 				     gfn_t *nr_pages)
1779 {
1780 	return __gfn_to_hva_many(slot, gfn, nr_pages, true);
1781 }
1782 
1783 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
1784 					gfn_t gfn)
1785 {
1786 	return gfn_to_hva_many(slot, gfn, NULL);
1787 }
1788 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
1789 
1790 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
1791 {
1792 	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
1793 }
1794 EXPORT_SYMBOL_GPL(gfn_to_hva);
1795 
1796 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
1797 {
1798 	return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
1799 }
1800 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
1801 
1802 /*
1803  * Return the hva of a @gfn and the R/W attribute if possible.
1804  *
1805  * @slot: the kvm_memory_slot which contains @gfn
1806  * @gfn: the gfn to be translated
1807  * @writable: used to return the read/write attribute of the @slot if the hva
1808  * is valid and @writable is not NULL
1809  */
1810 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
1811 				      gfn_t gfn, bool *writable)
1812 {
1813 	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
1814 
1815 	if (!kvm_is_error_hva(hva) && writable)
1816 		*writable = !memslot_is_readonly(slot);
1817 
1818 	return hva;
1819 }
1820 
1821 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
1822 {
1823 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
1824 
1825 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
1826 }
1827 
1828 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
1829 {
1830 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1831 
1832 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
1833 }
1834 
1835 static inline int check_user_page_hwpoison(unsigned long addr)
1836 {
1837 	int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
1838 
1839 	rc = get_user_pages(addr, 1, flags, NULL, NULL);
1840 	return rc == -EHWPOISON;
1841 }
1842 
1843 /*
1844  * The fast path to get the writable pfn which will be stored in @pfn,
1845  * true indicates success, otherwise false is returned.  It's also the
1846  * only part that runs if we can in atomic context.
1847  */
1848 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
1849 			    bool *writable, kvm_pfn_t *pfn)
1850 {
1851 	struct page *page[1];
1852 
1853 	/*
1854 	 * Fast pin a writable pfn only if it is a write fault request
1855 	 * or the caller allows to map a writable pfn for a read fault
1856 	 * request.
1857 	 */
1858 	if (!(write_fault || writable))
1859 		return false;
1860 
1861 	if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
1862 		*pfn = page_to_pfn(page[0]);
1863 
1864 		if (writable)
1865 			*writable = true;
1866 		return true;
1867 	}
1868 
1869 	return false;
1870 }
1871 
1872 /*
1873  * The slow path to get the pfn of the specified host virtual address,
1874  * 1 indicates success, -errno is returned if error is detected.
1875  */
1876 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
1877 			   bool *writable, kvm_pfn_t *pfn)
1878 {
1879 	unsigned int flags = FOLL_HWPOISON;
1880 	struct page *page;
1881 	int npages = 0;
1882 
1883 	might_sleep();
1884 
1885 	if (writable)
1886 		*writable = write_fault;
1887 
1888 	if (write_fault)
1889 		flags |= FOLL_WRITE;
1890 	if (async)
1891 		flags |= FOLL_NOWAIT;
1892 
1893 	npages = get_user_pages_unlocked(addr, 1, &page, flags);
1894 	if (npages != 1)
1895 		return npages;
1896 
1897 	/* map read fault as writable if possible */
1898 	if (unlikely(!write_fault) && writable) {
1899 		struct page *wpage;
1900 
1901 		if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
1902 			*writable = true;
1903 			put_page(page);
1904 			page = wpage;
1905 		}
1906 	}
1907 	*pfn = page_to_pfn(page);
1908 	return npages;
1909 }
1910 
1911 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
1912 {
1913 	if (unlikely(!(vma->vm_flags & VM_READ)))
1914 		return false;
1915 
1916 	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
1917 		return false;
1918 
1919 	return true;
1920 }
1921 
1922 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
1923 			       unsigned long addr, bool *async,
1924 			       bool write_fault, bool *writable,
1925 			       kvm_pfn_t *p_pfn)
1926 {
1927 	kvm_pfn_t pfn;
1928 	pte_t *ptep;
1929 	spinlock_t *ptl;
1930 	int r;
1931 
1932 	r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
1933 	if (r) {
1934 		/*
1935 		 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
1936 		 * not call the fault handler, so do it here.
1937 		 */
1938 		bool unlocked = false;
1939 		r = fixup_user_fault(current->mm, addr,
1940 				     (write_fault ? FAULT_FLAG_WRITE : 0),
1941 				     &unlocked);
1942 		if (unlocked)
1943 			return -EAGAIN;
1944 		if (r)
1945 			return r;
1946 
1947 		r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
1948 		if (r)
1949 			return r;
1950 	}
1951 
1952 	if (write_fault && !pte_write(*ptep)) {
1953 		pfn = KVM_PFN_ERR_RO_FAULT;
1954 		goto out;
1955 	}
1956 
1957 	if (writable)
1958 		*writable = pte_write(*ptep);
1959 	pfn = pte_pfn(*ptep);
1960 
1961 	/*
1962 	 * Get a reference here because callers of *hva_to_pfn* and
1963 	 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
1964 	 * returned pfn.  This is only needed if the VMA has VM_MIXEDMAP
1965 	 * set, but the kvm_get_pfn/kvm_release_pfn_clean pair will
1966 	 * simply do nothing for reserved pfns.
1967 	 *
1968 	 * Whoever called remap_pfn_range is also going to call e.g.
1969 	 * unmap_mapping_range before the underlying pages are freed,
1970 	 * causing a call to our MMU notifier.
1971 	 */
1972 	kvm_get_pfn(pfn);
1973 
1974 out:
1975 	pte_unmap_unlock(ptep, ptl);
1976 	*p_pfn = pfn;
1977 	return 0;
1978 }
1979 
1980 /*
1981  * Pin guest page in memory and return its pfn.
1982  * @addr: host virtual address which maps memory to the guest
1983  * @atomic: whether this function can sleep
1984  * @async: whether this function need to wait IO complete if the
1985  *         host page is not in the memory
1986  * @write_fault: whether we should get a writable host page
1987  * @writable: whether it allows to map a writable host page for !@write_fault
1988  *
1989  * The function will map a writable host page for these two cases:
1990  * 1): @write_fault = true
1991  * 2): @write_fault = false && @writable, @writable will tell the caller
1992  *     whether the mapping is writable.
1993  */
1994 static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
1995 			bool write_fault, bool *writable)
1996 {
1997 	struct vm_area_struct *vma;
1998 	kvm_pfn_t pfn = 0;
1999 	int npages, r;
2000 
2001 	/* we can do it either atomically or asynchronously, not both */
2002 	BUG_ON(atomic && async);
2003 
2004 	if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
2005 		return pfn;
2006 
2007 	if (atomic)
2008 		return KVM_PFN_ERR_FAULT;
2009 
2010 	npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
2011 	if (npages == 1)
2012 		return pfn;
2013 
2014 	mmap_read_lock(current->mm);
2015 	if (npages == -EHWPOISON ||
2016 	      (!async && check_user_page_hwpoison(addr))) {
2017 		pfn = KVM_PFN_ERR_HWPOISON;
2018 		goto exit;
2019 	}
2020 
2021 retry:
2022 	vma = find_vma_intersection(current->mm, addr, addr + 1);
2023 
2024 	if (vma == NULL)
2025 		pfn = KVM_PFN_ERR_FAULT;
2026 	else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
2027 		r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn);
2028 		if (r == -EAGAIN)
2029 			goto retry;
2030 		if (r < 0)
2031 			pfn = KVM_PFN_ERR_FAULT;
2032 	} else {
2033 		if (async && vma_is_valid(vma, write_fault))
2034 			*async = true;
2035 		pfn = KVM_PFN_ERR_FAULT;
2036 	}
2037 exit:
2038 	mmap_read_unlock(current->mm);
2039 	return pfn;
2040 }
2041 
2042 kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
2043 			       bool atomic, bool *async, bool write_fault,
2044 			       bool *writable, hva_t *hva)
2045 {
2046 	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
2047 
2048 	if (hva)
2049 		*hva = addr;
2050 
2051 	if (addr == KVM_HVA_ERR_RO_BAD) {
2052 		if (writable)
2053 			*writable = false;
2054 		return KVM_PFN_ERR_RO_FAULT;
2055 	}
2056 
2057 	if (kvm_is_error_hva(addr)) {
2058 		if (writable)
2059 			*writable = false;
2060 		return KVM_PFN_NOSLOT;
2061 	}
2062 
2063 	/* Do not map writable pfn in the readonly memslot. */
2064 	if (writable && memslot_is_readonly(slot)) {
2065 		*writable = false;
2066 		writable = NULL;
2067 	}
2068 
2069 	return hva_to_pfn(addr, atomic, async, write_fault,
2070 			  writable);
2071 }
2072 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
2073 
2074 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
2075 		      bool *writable)
2076 {
2077 	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
2078 				    write_fault, writable, NULL);
2079 }
2080 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
2081 
2082 kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
2083 {
2084 	return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL);
2085 }
2086 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
2087 
2088 kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
2089 {
2090 	return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL);
2091 }
2092 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
2093 
2094 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
2095 {
2096 	return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
2097 }
2098 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
2099 
2100 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
2101 {
2102 	return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
2103 }
2104 EXPORT_SYMBOL_GPL(gfn_to_pfn);
2105 
2106 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2107 {
2108 	return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
2109 }
2110 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
2111 
2112 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
2113 			    struct page **pages, int nr_pages)
2114 {
2115 	unsigned long addr;
2116 	gfn_t entry = 0;
2117 
2118 	addr = gfn_to_hva_many(slot, gfn, &entry);
2119 	if (kvm_is_error_hva(addr))
2120 		return -1;
2121 
2122 	if (entry < nr_pages)
2123 		return 0;
2124 
2125 	return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
2126 }
2127 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
2128 
2129 static struct page *kvm_pfn_to_page(kvm_pfn_t pfn)
2130 {
2131 	if (is_error_noslot_pfn(pfn))
2132 		return KVM_ERR_PTR_BAD_PAGE;
2133 
2134 	if (kvm_is_reserved_pfn(pfn)) {
2135 		WARN_ON(1);
2136 		return KVM_ERR_PTR_BAD_PAGE;
2137 	}
2138 
2139 	return pfn_to_page(pfn);
2140 }
2141 
2142 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
2143 {
2144 	kvm_pfn_t pfn;
2145 
2146 	pfn = gfn_to_pfn(kvm, gfn);
2147 
2148 	return kvm_pfn_to_page(pfn);
2149 }
2150 EXPORT_SYMBOL_GPL(gfn_to_page);
2151 
2152 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
2153 {
2154 	if (pfn == 0)
2155 		return;
2156 
2157 	if (cache)
2158 		cache->pfn = cache->gfn = 0;
2159 
2160 	if (dirty)
2161 		kvm_release_pfn_dirty(pfn);
2162 	else
2163 		kvm_release_pfn_clean(pfn);
2164 }
2165 
2166 static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
2167 				 struct gfn_to_pfn_cache *cache, u64 gen)
2168 {
2169 	kvm_release_pfn(cache->pfn, cache->dirty, cache);
2170 
2171 	cache->pfn = gfn_to_pfn_memslot(slot, gfn);
2172 	cache->gfn = gfn;
2173 	cache->dirty = false;
2174 	cache->generation = gen;
2175 }
2176 
2177 static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
2178 			 struct kvm_host_map *map,
2179 			 struct gfn_to_pfn_cache *cache,
2180 			 bool atomic)
2181 {
2182 	kvm_pfn_t pfn;
2183 	void *hva = NULL;
2184 	struct page *page = KVM_UNMAPPED_PAGE;
2185 	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
2186 	u64 gen = slots->generation;
2187 
2188 	if (!map)
2189 		return -EINVAL;
2190 
2191 	if (cache) {
2192 		if (!cache->pfn || cache->gfn != gfn ||
2193 			cache->generation != gen) {
2194 			if (atomic)
2195 				return -EAGAIN;
2196 			kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
2197 		}
2198 		pfn = cache->pfn;
2199 	} else {
2200 		if (atomic)
2201 			return -EAGAIN;
2202 		pfn = gfn_to_pfn_memslot(slot, gfn);
2203 	}
2204 	if (is_error_noslot_pfn(pfn))
2205 		return -EINVAL;
2206 
2207 	if (pfn_valid(pfn)) {
2208 		page = pfn_to_page(pfn);
2209 		if (atomic)
2210 			hva = kmap_atomic(page);
2211 		else
2212 			hva = kmap(page);
2213 #ifdef CONFIG_HAS_IOMEM
2214 	} else if (!atomic) {
2215 		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
2216 	} else {
2217 		return -EINVAL;
2218 #endif
2219 	}
2220 
2221 	if (!hva)
2222 		return -EFAULT;
2223 
2224 	map->page = page;
2225 	map->hva = hva;
2226 	map->pfn = pfn;
2227 	map->gfn = gfn;
2228 
2229 	return 0;
2230 }
2231 
2232 int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
2233 		struct gfn_to_pfn_cache *cache, bool atomic)
2234 {
2235 	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
2236 			cache, atomic);
2237 }
2238 EXPORT_SYMBOL_GPL(kvm_map_gfn);
2239 
2240 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
2241 {
2242 	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
2243 		NULL, false);
2244 }
2245 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
2246 
2247 static void __kvm_unmap_gfn(struct kvm *kvm,
2248 			struct kvm_memory_slot *memslot,
2249 			struct kvm_host_map *map,
2250 			struct gfn_to_pfn_cache *cache,
2251 			bool dirty, bool atomic)
2252 {
2253 	if (!map)
2254 		return;
2255 
2256 	if (!map->hva)
2257 		return;
2258 
2259 	if (map->page != KVM_UNMAPPED_PAGE) {
2260 		if (atomic)
2261 			kunmap_atomic(map->hva);
2262 		else
2263 			kunmap(map->page);
2264 	}
2265 #ifdef CONFIG_HAS_IOMEM
2266 	else if (!atomic)
2267 		memunmap(map->hva);
2268 	else
2269 		WARN_ONCE(1, "Unexpected unmapping in atomic context");
2270 #endif
2271 
2272 	if (dirty)
2273 		mark_page_dirty_in_slot(kvm, memslot, map->gfn);
2274 
2275 	if (cache)
2276 		cache->dirty |= dirty;
2277 	else
2278 		kvm_release_pfn(map->pfn, dirty, NULL);
2279 
2280 	map->hva = NULL;
2281 	map->page = NULL;
2282 }
2283 
2284 int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
2285 		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
2286 {
2287 	__kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map,
2288 			cache, dirty, atomic);
2289 	return 0;
2290 }
2291 EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
2292 
2293 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
2294 {
2295 	__kvm_unmap_gfn(vcpu->kvm, kvm_vcpu_gfn_to_memslot(vcpu, map->gfn),
2296 			map, NULL, dirty, false);
2297 }
2298 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
2299 
2300 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
2301 {
2302 	kvm_pfn_t pfn;
2303 
2304 	pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
2305 
2306 	return kvm_pfn_to_page(pfn);
2307 }
2308 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page);
2309 
2310 void kvm_release_page_clean(struct page *page)
2311 {
2312 	WARN_ON(is_error_page(page));
2313 
2314 	kvm_release_pfn_clean(page_to_pfn(page));
2315 }
2316 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
2317 
2318 void kvm_release_pfn_clean(kvm_pfn_t pfn)
2319 {
2320 	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
2321 		put_page(pfn_to_page(pfn));
2322 }
2323 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
2324 
2325 void kvm_release_page_dirty(struct page *page)
2326 {
2327 	WARN_ON(is_error_page(page));
2328 
2329 	kvm_release_pfn_dirty(page_to_pfn(page));
2330 }
2331 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
2332 
2333 void kvm_release_pfn_dirty(kvm_pfn_t pfn)
2334 {
2335 	kvm_set_pfn_dirty(pfn);
2336 	kvm_release_pfn_clean(pfn);
2337 }
2338 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
2339 
2340 void kvm_set_pfn_dirty(kvm_pfn_t pfn)
2341 {
2342 	if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
2343 		SetPageDirty(pfn_to_page(pfn));
2344 }
2345 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
2346 
2347 void kvm_set_pfn_accessed(kvm_pfn_t pfn)
2348 {
2349 	if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
2350 		mark_page_accessed(pfn_to_page(pfn));
2351 }
2352 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
2353 
2354 void kvm_get_pfn(kvm_pfn_t pfn)
2355 {
2356 	if (!kvm_is_reserved_pfn(pfn))
2357 		get_page(pfn_to_page(pfn));
2358 }
2359 EXPORT_SYMBOL_GPL(kvm_get_pfn);
2360 
2361 static int next_segment(unsigned long len, int offset)
2362 {
2363 	if (len > PAGE_SIZE - offset)
2364 		return PAGE_SIZE - offset;
2365 	else
2366 		return len;
2367 }
2368 
2369 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
2370 				 void *data, int offset, int len)
2371 {
2372 	int r;
2373 	unsigned long addr;
2374 
2375 	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
2376 	if (kvm_is_error_hva(addr))
2377 		return -EFAULT;
2378 	r = __copy_from_user(data, (void __user *)addr + offset, len);
2379 	if (r)
2380 		return -EFAULT;
2381 	return 0;
2382 }
2383 
2384 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
2385 			int len)
2386 {
2387 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2388 
2389 	return __kvm_read_guest_page(slot, gfn, data, offset, len);
2390 }
2391 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
2392 
2393 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
2394 			     int offset, int len)
2395 {
2396 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2397 
2398 	return __kvm_read_guest_page(slot, gfn, data, offset, len);
2399 }
2400 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
2401 
2402 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
2403 {
2404 	gfn_t gfn = gpa >> PAGE_SHIFT;
2405 	int seg;
2406 	int offset = offset_in_page(gpa);
2407 	int ret;
2408 
2409 	while ((seg = next_segment(len, offset)) != 0) {
2410 		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
2411 		if (ret < 0)
2412 			return ret;
2413 		offset = 0;
2414 		len -= seg;
2415 		data += seg;
2416 		++gfn;
2417 	}
2418 	return 0;
2419 }
2420 EXPORT_SYMBOL_GPL(kvm_read_guest);
2421 
2422 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
2423 {
2424 	gfn_t gfn = gpa >> PAGE_SHIFT;
2425 	int seg;
2426 	int offset = offset_in_page(gpa);
2427 	int ret;
2428 
2429 	while ((seg = next_segment(len, offset)) != 0) {
2430 		ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
2431 		if (ret < 0)
2432 			return ret;
2433 		offset = 0;
2434 		len -= seg;
2435 		data += seg;
2436 		++gfn;
2437 	}
2438 	return 0;
2439 }
2440 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
2441 
2442 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
2443 			           void *data, int offset, unsigned long len)
2444 {
2445 	int r;
2446 	unsigned long addr;
2447 
2448 	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
2449 	if (kvm_is_error_hva(addr))
2450 		return -EFAULT;
2451 	pagefault_disable();
2452 	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
2453 	pagefault_enable();
2454 	if (r)
2455 		return -EFAULT;
2456 	return 0;
2457 }
2458 
2459 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
2460 			       void *data, unsigned long len)
2461 {
2462 	gfn_t gfn = gpa >> PAGE_SHIFT;
2463 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2464 	int offset = offset_in_page(gpa);
2465 
2466 	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
2467 }
2468 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
2469 
2470 static int __kvm_write_guest_page(struct kvm *kvm,
2471 				  struct kvm_memory_slot *memslot, gfn_t gfn,
2472 			          const void *data, int offset, int len)
2473 {
2474 	int r;
2475 	unsigned long addr;
2476 
2477 	addr = gfn_to_hva_memslot(memslot, gfn);
2478 	if (kvm_is_error_hva(addr))
2479 		return -EFAULT;
2480 	r = __copy_to_user((void __user *)addr + offset, data, len);
2481 	if (r)
2482 		return -EFAULT;
2483 	mark_page_dirty_in_slot(kvm, memslot, gfn);
2484 	return 0;
2485 }
2486 
2487 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
2488 			 const void *data, int offset, int len)
2489 {
2490 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2491 
2492 	return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
2493 }
2494 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
2495 
2496 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
2497 			      const void *data, int offset, int len)
2498 {
2499 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2500 
2501 	return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
2502 }
2503 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
2504 
2505 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
2506 		    unsigned long len)
2507 {
2508 	gfn_t gfn = gpa >> PAGE_SHIFT;
2509 	int seg;
2510 	int offset = offset_in_page(gpa);
2511 	int ret;
2512 
2513 	while ((seg = next_segment(len, offset)) != 0) {
2514 		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
2515 		if (ret < 0)
2516 			return ret;
2517 		offset = 0;
2518 		len -= seg;
2519 		data += seg;
2520 		++gfn;
2521 	}
2522 	return 0;
2523 }
2524 EXPORT_SYMBOL_GPL(kvm_write_guest);
2525 
2526 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
2527 		         unsigned long len)
2528 {
2529 	gfn_t gfn = gpa >> PAGE_SHIFT;
2530 	int seg;
2531 	int offset = offset_in_page(gpa);
2532 	int ret;
2533 
2534 	while ((seg = next_segment(len, offset)) != 0) {
2535 		ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
2536 		if (ret < 0)
2537 			return ret;
2538 		offset = 0;
2539 		len -= seg;
2540 		data += seg;
2541 		++gfn;
2542 	}
2543 	return 0;
2544 }
2545 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
2546 
2547 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
2548 				       struct gfn_to_hva_cache *ghc,
2549 				       gpa_t gpa, unsigned long len)
2550 {
2551 	int offset = offset_in_page(gpa);
2552 	gfn_t start_gfn = gpa >> PAGE_SHIFT;
2553 	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
2554 	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
2555 	gfn_t nr_pages_avail;
2556 
2557 	/* Update ghc->generation before performing any error checks. */
2558 	ghc->generation = slots->generation;
2559 
2560 	if (start_gfn > end_gfn) {
2561 		ghc->hva = KVM_HVA_ERR_BAD;
2562 		return -EINVAL;
2563 	}
2564 
2565 	/*
2566 	 * If the requested region crosses two memslots, we still
2567 	 * verify that the entire region is valid here.
2568 	 */
2569 	for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
2570 		ghc->memslot = __gfn_to_memslot(slots, start_gfn);
2571 		ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
2572 					   &nr_pages_avail);
2573 		if (kvm_is_error_hva(ghc->hva))
2574 			return -EFAULT;
2575 	}
2576 
2577 	/* Use the slow path for cross page reads and writes. */
2578 	if (nr_pages_needed == 1)
2579 		ghc->hva += offset;
2580 	else
2581 		ghc->memslot = NULL;
2582 
2583 	ghc->gpa = gpa;
2584 	ghc->len = len;
2585 	return 0;
2586 }
2587 
2588 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2589 			      gpa_t gpa, unsigned long len)
2590 {
2591 	struct kvm_memslots *slots = kvm_memslots(kvm);
2592 	return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
2593 }
2594 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
2595 
2596 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2597 				  void *data, unsigned int offset,
2598 				  unsigned long len)
2599 {
2600 	struct kvm_memslots *slots = kvm_memslots(kvm);
2601 	int r;
2602 	gpa_t gpa = ghc->gpa + offset;
2603 
2604 	BUG_ON(len + offset > ghc->len);
2605 
2606 	if (slots->generation != ghc->generation) {
2607 		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
2608 			return -EFAULT;
2609 	}
2610 
2611 	if (kvm_is_error_hva(ghc->hva))
2612 		return -EFAULT;
2613 
2614 	if (unlikely(!ghc->memslot))
2615 		return kvm_write_guest(kvm, gpa, data, len);
2616 
2617 	r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
2618 	if (r)
2619 		return -EFAULT;
2620 	mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
2621 
2622 	return 0;
2623 }
2624 EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
2625 
2626 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2627 			   void *data, unsigned long len)
2628 {
2629 	return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
2630 }
2631 EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
2632 
2633 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2634 				 void *data, unsigned int offset,
2635 				 unsigned long len)
2636 {
2637 	struct kvm_memslots *slots = kvm_memslots(kvm);
2638 	int r;
2639 	gpa_t gpa = ghc->gpa + offset;
2640 
2641 	BUG_ON(len + offset > ghc->len);
2642 
2643 	if (slots->generation != ghc->generation) {
2644 		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
2645 			return -EFAULT;
2646 	}
2647 
2648 	if (kvm_is_error_hva(ghc->hva))
2649 		return -EFAULT;
2650 
2651 	if (unlikely(!ghc->memslot))
2652 		return kvm_read_guest(kvm, gpa, data, len);
2653 
2654 	r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
2655 	if (r)
2656 		return -EFAULT;
2657 
2658 	return 0;
2659 }
2660 EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);
2661 
2662 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2663 			  void *data, unsigned long len)
2664 {
2665 	return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
2666 }
2667 EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
2668 
2669 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
2670 {
2671 	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
2672 	gfn_t gfn = gpa >> PAGE_SHIFT;
2673 	int seg;
2674 	int offset = offset_in_page(gpa);
2675 	int ret;
2676 
2677 	while ((seg = next_segment(len, offset)) != 0) {
2678 		ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
2679 		if (ret < 0)
2680 			return ret;
2681 		offset = 0;
2682 		len -= seg;
2683 		++gfn;
2684 	}
2685 	return 0;
2686 }
2687 EXPORT_SYMBOL_GPL(kvm_clear_guest);
2688 
2689 void mark_page_dirty_in_slot(struct kvm *kvm,
2690 			     struct kvm_memory_slot *memslot,
2691 		 	     gfn_t gfn)
2692 {
2693 	if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
2694 		unsigned long rel_gfn = gfn - memslot->base_gfn;
2695 		u32 slot = (memslot->as_id << 16) | memslot->id;
2696 
2697 		if (kvm->dirty_ring_size)
2698 			kvm_dirty_ring_push(kvm_dirty_ring_get(kvm),
2699 					    slot, rel_gfn);
2700 		else
2701 			set_bit_le(rel_gfn, memslot->dirty_bitmap);
2702 	}
2703 }
2704 EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
2705 
2706 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
2707 {
2708 	struct kvm_memory_slot *memslot;
2709 
2710 	memslot = gfn_to_memslot(kvm, gfn);
2711 	mark_page_dirty_in_slot(kvm, memslot, gfn);
2712 }
2713 EXPORT_SYMBOL_GPL(mark_page_dirty);
2714 
2715 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
2716 {
2717 	struct kvm_memory_slot *memslot;
2718 
2719 	memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2720 	mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
2721 }
2722 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
2723 
2724 void kvm_sigset_activate(struct kvm_vcpu *vcpu)
2725 {
2726 	if (!vcpu->sigset_active)
2727 		return;
2728 
2729 	/*
2730 	 * This does a lockless modification of ->real_blocked, which is fine
2731 	 * because, only current can change ->real_blocked and all readers of
2732 	 * ->real_blocked don't care as long ->real_blocked is always a subset
2733 	 * of ->blocked.
2734 	 */
2735 	sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
2736 }
2737 
2738 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
2739 {
2740 	if (!vcpu->sigset_active)
2741 		return;
2742 
2743 	sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
2744 	sigemptyset(&current->real_blocked);
2745 }
2746 
2747 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
2748 {
2749 	unsigned int old, val, grow, grow_start;
2750 
2751 	old = val = vcpu->halt_poll_ns;
2752 	grow_start = READ_ONCE(halt_poll_ns_grow_start);
2753 	grow = READ_ONCE(halt_poll_ns_grow);
2754 	if (!grow)
2755 		goto out;
2756 
2757 	val *= grow;
2758 	if (val < grow_start)
2759 		val = grow_start;
2760 
2761 	if (val > halt_poll_ns)
2762 		val = halt_poll_ns;
2763 
2764 	vcpu->halt_poll_ns = val;
2765 out:
2766 	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
2767 }
2768 
2769 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
2770 {
2771 	unsigned int old, val, shrink;
2772 
2773 	old = val = vcpu->halt_poll_ns;
2774 	shrink = READ_ONCE(halt_poll_ns_shrink);
2775 	if (shrink == 0)
2776 		val = 0;
2777 	else
2778 		val /= shrink;
2779 
2780 	vcpu->halt_poll_ns = val;
2781 	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
2782 }
2783 
2784 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
2785 {
2786 	int ret = -EINTR;
2787 	int idx = srcu_read_lock(&vcpu->kvm->srcu);
2788 
2789 	if (kvm_arch_vcpu_runnable(vcpu)) {
2790 		kvm_make_request(KVM_REQ_UNHALT, vcpu);
2791 		goto out;
2792 	}
2793 	if (kvm_cpu_has_pending_timer(vcpu))
2794 		goto out;
2795 	if (signal_pending(current))
2796 		goto out;
2797 
2798 	ret = 0;
2799 out:
2800 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
2801 	return ret;
2802 }
2803 
2804 static inline void
2805 update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited)
2806 {
2807 	if (waited)
2808 		vcpu->stat.halt_poll_fail_ns += poll_ns;
2809 	else
2810 		vcpu->stat.halt_poll_success_ns += poll_ns;
2811 }
2812 
2813 /*
2814  * The vCPU has executed a HLT instruction with in-kernel mode enabled.
2815  */
2816 void kvm_vcpu_block(struct kvm_vcpu *vcpu)
2817 {
2818 	ktime_t start, cur, poll_end;
2819 	bool waited = false;
2820 	u64 block_ns;
2821 
2822 	kvm_arch_vcpu_blocking(vcpu);
2823 
2824 	start = cur = poll_end = ktime_get();
2825 	if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
2826 		ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
2827 
2828 		++vcpu->stat.halt_attempted_poll;
2829 		do {
2830 			/*
2831 			 * This sets KVM_REQ_UNHALT if an interrupt
2832 			 * arrives.
2833 			 */
2834 			if (kvm_vcpu_check_block(vcpu) < 0) {
2835 				++vcpu->stat.halt_successful_poll;
2836 				if (!vcpu_valid_wakeup(vcpu))
2837 					++vcpu->stat.halt_poll_invalid;
2838 				goto out;
2839 			}
2840 			poll_end = cur = ktime_get();
2841 		} while (single_task_running() && ktime_before(cur, stop));
2842 	}
2843 
2844 	prepare_to_rcuwait(&vcpu->wait);
2845 	for (;;) {
2846 		set_current_state(TASK_INTERRUPTIBLE);
2847 
2848 		if (kvm_vcpu_check_block(vcpu) < 0)
2849 			break;
2850 
2851 		waited = true;
2852 		schedule();
2853 	}
2854 	finish_rcuwait(&vcpu->wait);
2855 	cur = ktime_get();
2856 out:
2857 	kvm_arch_vcpu_unblocking(vcpu);
2858 	block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
2859 
2860 	update_halt_poll_stats(
2861 		vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited);
2862 
2863 	if (!kvm_arch_no_poll(vcpu)) {
2864 		if (!vcpu_valid_wakeup(vcpu)) {
2865 			shrink_halt_poll_ns(vcpu);
2866 		} else if (vcpu->kvm->max_halt_poll_ns) {
2867 			if (block_ns <= vcpu->halt_poll_ns)
2868 				;
2869 			/* we had a long block, shrink polling */
2870 			else if (vcpu->halt_poll_ns &&
2871 					block_ns > vcpu->kvm->max_halt_poll_ns)
2872 				shrink_halt_poll_ns(vcpu);
2873 			/* we had a short halt and our poll time is too small */
2874 			else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns &&
2875 					block_ns < vcpu->kvm->max_halt_poll_ns)
2876 				grow_halt_poll_ns(vcpu);
2877 		} else {
2878 			vcpu->halt_poll_ns = 0;
2879 		}
2880 	}
2881 
2882 	trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
2883 	kvm_arch_vcpu_block_finish(vcpu);
2884 }
2885 EXPORT_SYMBOL_GPL(kvm_vcpu_block);
2886 
2887 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
2888 {
2889 	struct rcuwait *waitp;
2890 
2891 	waitp = kvm_arch_vcpu_get_wait(vcpu);
2892 	if (rcuwait_wake_up(waitp)) {
2893 		WRITE_ONCE(vcpu->ready, true);
2894 		++vcpu->stat.halt_wakeup;
2895 		return true;
2896 	}
2897 
2898 	return false;
2899 }
2900 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
2901 
2902 #ifndef CONFIG_S390
2903 /*
2904  * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
2905  */
2906 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
2907 {
2908 	int me;
2909 	int cpu = vcpu->cpu;
2910 
2911 	if (kvm_vcpu_wake_up(vcpu))
2912 		return;
2913 
2914 	me = get_cpu();
2915 	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
2916 		if (kvm_arch_vcpu_should_kick(vcpu))
2917 			smp_send_reschedule(cpu);
2918 	put_cpu();
2919 }
2920 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
2921 #endif /* !CONFIG_S390 */
2922 
2923 int kvm_vcpu_yield_to(struct kvm_vcpu *target)
2924 {
2925 	struct pid *pid;
2926 	struct task_struct *task = NULL;
2927 	int ret = 0;
2928 
2929 	rcu_read_lock();
2930 	pid = rcu_dereference(target->pid);
2931 	if (pid)
2932 		task = get_pid_task(pid, PIDTYPE_PID);
2933 	rcu_read_unlock();
2934 	if (!task)
2935 		return ret;
2936 	ret = yield_to(task, 1);
2937 	put_task_struct(task);
2938 
2939 	return ret;
2940 }
2941 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
2942 
2943 /*
2944  * Helper that checks whether a VCPU is eligible for directed yield.
2945  * Most eligible candidate to yield is decided by following heuristics:
2946  *
2947  *  (a) VCPU which has not done pl-exit or cpu relax intercepted recently
2948  *  (preempted lock holder), indicated by @in_spin_loop.
2949  *  Set at the beginning and cleared at the end of interception/PLE handler.
2950  *
2951  *  (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get
2952  *  chance last time (mostly it has become eligible now since we have probably
2953  *  yielded to lockholder in last iteration. This is done by toggling
2954  *  @dy_eligible each time a VCPU checked for eligibility.)
2955  *
2956  *  Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
2957  *  to preempted lock-holder could result in wrong VCPU selection and CPU
2958  *  burning. Giving priority for a potential lock-holder increases lock
2959  *  progress.
2960  *
2961  *  Since algorithm is based on heuristics, accessing another VCPU data without
2962  *  locking does not harm. It may result in trying to yield to  same VCPU, fail
2963  *  and continue with next VCPU and so on.
2964  */
2965 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
2966 {
2967 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
2968 	bool eligible;
2969 
2970 	eligible = !vcpu->spin_loop.in_spin_loop ||
2971 		    vcpu->spin_loop.dy_eligible;
2972 
2973 	if (vcpu->spin_loop.in_spin_loop)
2974 		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
2975 
2976 	return eligible;
2977 #else
2978 	return true;
2979 #endif
2980 }
2981 
2982 /*
2983  * Unlike kvm_arch_vcpu_runnable, this function is called outside
2984  * a vcpu_load/vcpu_put pair.  However, for most architectures
2985  * kvm_arch_vcpu_runnable does not require vcpu_load.
2986  */
2987 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
2988 {
2989 	return kvm_arch_vcpu_runnable(vcpu);
2990 }
2991 
2992 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
2993 {
2994 	if (kvm_arch_dy_runnable(vcpu))
2995 		return true;
2996 
2997 #ifdef CONFIG_KVM_ASYNC_PF
2998 	if (!list_empty_careful(&vcpu->async_pf.done))
2999 		return true;
3000 #endif
3001 
3002 	return false;
3003 }
3004 
3005 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
3006 {
3007 	struct kvm *kvm = me->kvm;
3008 	struct kvm_vcpu *vcpu;
3009 	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
3010 	int yielded = 0;
3011 	int try = 3;
3012 	int pass;
3013 	int i;
3014 
3015 	kvm_vcpu_set_in_spin_loop(me, true);
3016 	/*
3017 	 * We boost the priority of a VCPU that is runnable but not
3018 	 * currently running, because it got preempted by something
3019 	 * else and called schedule in __vcpu_run.  Hopefully that
3020 	 * VCPU is holding the lock that we need and will release it.
3021 	 * We approximate round-robin by starting at the last boosted VCPU.
3022 	 */
3023 	for (pass = 0; pass < 2 && !yielded && try; pass++) {
3024 		kvm_for_each_vcpu(i, vcpu, kvm) {
3025 			if (!pass && i <= last_boosted_vcpu) {
3026 				i = last_boosted_vcpu;
3027 				continue;
3028 			} else if (pass && i > last_boosted_vcpu)
3029 				break;
3030 			if (!READ_ONCE(vcpu->ready))
3031 				continue;
3032 			if (vcpu == me)
3033 				continue;
3034 			if (rcuwait_active(&vcpu->wait) &&
3035 			    !vcpu_dy_runnable(vcpu))
3036 				continue;
3037 			if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
3038 				!kvm_arch_vcpu_in_kernel(vcpu))
3039 				continue;
3040 			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
3041 				continue;
3042 
3043 			yielded = kvm_vcpu_yield_to(vcpu);
3044 			if (yielded > 0) {
3045 				kvm->last_boosted_vcpu = i;
3046 				break;
3047 			} else if (yielded < 0) {
3048 				try--;
3049 				if (!try)
3050 					break;
3051 			}
3052 		}
3053 	}
3054 	kvm_vcpu_set_in_spin_loop(me, false);
3055 
3056 	/* Ensure vcpu is not eligible during next spinloop */
3057 	kvm_vcpu_set_dy_eligible(me, false);
3058 }
3059 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
3060 
3061 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
3062 {
3063 #if KVM_DIRTY_LOG_PAGE_OFFSET > 0
3064 	return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
3065 	    (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
3066 	     kvm->dirty_ring_size / PAGE_SIZE);
3067 #else
3068 	return false;
3069 #endif
3070 }
3071 
3072 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
3073 {
3074 	struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
3075 	struct page *page;
3076 
3077 	if (vmf->pgoff == 0)
3078 		page = virt_to_page(vcpu->run);
3079 #ifdef CONFIG_X86
3080 	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
3081 		page = virt_to_page(vcpu->arch.pio_data);
3082 #endif
3083 #ifdef CONFIG_KVM_MMIO
3084 	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
3085 		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
3086 #endif
3087 	else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
3088 		page = kvm_dirty_ring_get_page(
3089 		    &vcpu->dirty_ring,
3090 		    vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
3091 	else
3092 		return kvm_arch_vcpu_fault(vcpu, vmf);
3093 	get_page(page);
3094 	vmf->page = page;
3095 	return 0;
3096 }
3097 
3098 static const struct vm_operations_struct kvm_vcpu_vm_ops = {
3099 	.fault = kvm_vcpu_fault,
3100 };
3101 
3102 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
3103 {
3104 	struct kvm_vcpu *vcpu = file->private_data;
3105 	unsigned long pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
3106 
3107 	if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) ||
3108 	     kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) &&
3109 	    ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED)))
3110 		return -EINVAL;
3111 
3112 	vma->vm_ops = &kvm_vcpu_vm_ops;
3113 	return 0;
3114 }
3115 
3116 static int kvm_vcpu_release(struct inode *inode, struct file *filp)
3117 {
3118 	struct kvm_vcpu *vcpu = filp->private_data;
3119 
3120 	kvm_put_kvm(vcpu->kvm);
3121 	return 0;
3122 }
3123 
3124 static struct file_operations kvm_vcpu_fops = {
3125 	.release        = kvm_vcpu_release,
3126 	.unlocked_ioctl = kvm_vcpu_ioctl,
3127 	.mmap           = kvm_vcpu_mmap,
3128 	.llseek		= noop_llseek,
3129 	KVM_COMPAT(kvm_vcpu_compat_ioctl),
3130 };
3131 
3132 /*
3133  * Allocates an inode for the vcpu.
3134  */
3135 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
3136 {
3137 	char name[8 + 1 + ITOA_MAX_LEN + 1];
3138 
3139 	snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
3140 	return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
3141 }
3142 
3143 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
3144 {
3145 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
3146 	struct dentry *debugfs_dentry;
3147 	char dir_name[ITOA_MAX_LEN * 2];
3148 
3149 	if (!debugfs_initialized())
3150 		return;
3151 
3152 	snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
3153 	debugfs_dentry = debugfs_create_dir(dir_name,
3154 					    vcpu->kvm->debugfs_dentry);
3155 
3156 	kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
3157 #endif
3158 }
3159 
3160 /*
3161  * Creates some virtual cpus.  Good luck creating more than one.
3162  */
3163 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
3164 {
3165 	int r;
3166 	struct kvm_vcpu *vcpu;
3167 	struct page *page;
3168 
3169 	if (id >= KVM_MAX_VCPU_ID)
3170 		return -EINVAL;
3171 
3172 	mutex_lock(&kvm->lock);
3173 	if (kvm->created_vcpus == KVM_MAX_VCPUS) {
3174 		mutex_unlock(&kvm->lock);
3175 		return -EINVAL;
3176 	}
3177 
3178 	kvm->created_vcpus++;
3179 	mutex_unlock(&kvm->lock);
3180 
3181 	r = kvm_arch_vcpu_precreate(kvm, id);
3182 	if (r)
3183 		goto vcpu_decrement;
3184 
3185 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
3186 	if (!vcpu) {
3187 		r = -ENOMEM;
3188 		goto vcpu_decrement;
3189 	}
3190 
3191 	BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
3192 	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
3193 	if (!page) {
3194 		r = -ENOMEM;
3195 		goto vcpu_free;
3196 	}
3197 	vcpu->run = page_address(page);
3198 
3199 	kvm_vcpu_init(vcpu, kvm, id);
3200 
3201 	r = kvm_arch_vcpu_create(vcpu);
3202 	if (r)
3203 		goto vcpu_free_run_page;
3204 
3205 	if (kvm->dirty_ring_size) {
3206 		r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
3207 					 id, kvm->dirty_ring_size);
3208 		if (r)
3209 			goto arch_vcpu_destroy;
3210 	}
3211 
3212 	mutex_lock(&kvm->lock);
3213 	if (kvm_get_vcpu_by_id(kvm, id)) {
3214 		r = -EEXIST;
3215 		goto unlock_vcpu_destroy;
3216 	}
3217 
3218 	vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
3219 	BUG_ON(kvm->vcpus[vcpu->vcpu_idx]);
3220 
3221 	/* Now it's all set up, let userspace reach it */
3222 	kvm_get_kvm(kvm);
3223 	r = create_vcpu_fd(vcpu);
3224 	if (r < 0) {
3225 		kvm_put_kvm_no_destroy(kvm);
3226 		goto unlock_vcpu_destroy;
3227 	}
3228 
3229 	kvm->vcpus[vcpu->vcpu_idx] = vcpu;
3230 
3231 	/*
3232 	 * Pairs with smp_rmb() in kvm_get_vcpu.  Write kvm->vcpus
3233 	 * before kvm->online_vcpu's incremented value.
3234 	 */
3235 	smp_wmb();
3236 	atomic_inc(&kvm->online_vcpus);
3237 
3238 	mutex_unlock(&kvm->lock);
3239 	kvm_arch_vcpu_postcreate(vcpu);
3240 	kvm_create_vcpu_debugfs(vcpu);
3241 	return r;
3242 
3243 unlock_vcpu_destroy:
3244 	mutex_unlock(&kvm->lock);
3245 	kvm_dirty_ring_free(&vcpu->dirty_ring);
3246 arch_vcpu_destroy:
3247 	kvm_arch_vcpu_destroy(vcpu);
3248 vcpu_free_run_page:
3249 	free_page((unsigned long)vcpu->run);
3250 vcpu_free:
3251 	kmem_cache_free(kvm_vcpu_cache, vcpu);
3252 vcpu_decrement:
3253 	mutex_lock(&kvm->lock);
3254 	kvm->created_vcpus--;
3255 	mutex_unlock(&kvm->lock);
3256 	return r;
3257 }
3258 
3259 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
3260 {
3261 	if (sigset) {
3262 		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
3263 		vcpu->sigset_active = 1;
3264 		vcpu->sigset = *sigset;
3265 	} else
3266 		vcpu->sigset_active = 0;
3267 	return 0;
3268 }
3269 
3270 static long kvm_vcpu_ioctl(struct file *filp,
3271 			   unsigned int ioctl, unsigned long arg)
3272 {
3273 	struct kvm_vcpu *vcpu = filp->private_data;
3274 	void __user *argp = (void __user *)arg;
3275 	int r;
3276 	struct kvm_fpu *fpu = NULL;
3277 	struct kvm_sregs *kvm_sregs = NULL;
3278 
3279 	if (vcpu->kvm->mm != current->mm)
3280 		return -EIO;
3281 
3282 	if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
3283 		return -EINVAL;
3284 
3285 	/*
3286 	 * Some architectures have vcpu ioctls that are asynchronous to vcpu
3287 	 * execution; mutex_lock() would break them.
3288 	 */
3289 	r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
3290 	if (r != -ENOIOCTLCMD)
3291 		return r;
3292 
3293 	if (mutex_lock_killable(&vcpu->mutex))
3294 		return -EINTR;
3295 	switch (ioctl) {
3296 	case KVM_RUN: {
3297 		struct pid *oldpid;
3298 		r = -EINVAL;
3299 		if (arg)
3300 			goto out;
3301 		oldpid = rcu_access_pointer(vcpu->pid);
3302 		if (unlikely(oldpid != task_pid(current))) {
3303 			/* The thread running this VCPU changed. */
3304 			struct pid *newpid;
3305 
3306 			r = kvm_arch_vcpu_run_pid_change(vcpu);
3307 			if (r)
3308 				break;
3309 
3310 			newpid = get_task_pid(current, PIDTYPE_PID);
3311 			rcu_assign_pointer(vcpu->pid, newpid);
3312 			if (oldpid)
3313 				synchronize_rcu();
3314 			put_pid(oldpid);
3315 		}
3316 		r = kvm_arch_vcpu_ioctl_run(vcpu);
3317 		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
3318 		break;
3319 	}
3320 	case KVM_GET_REGS: {
3321 		struct kvm_regs *kvm_regs;
3322 
3323 		r = -ENOMEM;
3324 		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
3325 		if (!kvm_regs)
3326 			goto out;
3327 		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
3328 		if (r)
3329 			goto out_free1;
3330 		r = -EFAULT;
3331 		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
3332 			goto out_free1;
3333 		r = 0;
3334 out_free1:
3335 		kfree(kvm_regs);
3336 		break;
3337 	}
3338 	case KVM_SET_REGS: {
3339 		struct kvm_regs *kvm_regs;
3340 
3341 		kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
3342 		if (IS_ERR(kvm_regs)) {
3343 			r = PTR_ERR(kvm_regs);
3344 			goto out;
3345 		}
3346 		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
3347 		kfree(kvm_regs);
3348 		break;
3349 	}
3350 	case KVM_GET_SREGS: {
3351 		kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
3352 				    GFP_KERNEL_ACCOUNT);
3353 		r = -ENOMEM;
3354 		if (!kvm_sregs)
3355 			goto out;
3356 		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
3357 		if (r)
3358 			goto out;
3359 		r = -EFAULT;
3360 		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
3361 			goto out;
3362 		r = 0;
3363 		break;
3364 	}
3365 	case KVM_SET_SREGS: {
3366 		kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
3367 		if (IS_ERR(kvm_sregs)) {
3368 			r = PTR_ERR(kvm_sregs);
3369 			kvm_sregs = NULL;
3370 			goto out;
3371 		}
3372 		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
3373 		break;
3374 	}
3375 	case KVM_GET_MP_STATE: {
3376 		struct kvm_mp_state mp_state;
3377 
3378 		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
3379 		if (r)
3380 			goto out;
3381 		r = -EFAULT;
3382 		if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
3383 			goto out;
3384 		r = 0;
3385 		break;
3386 	}
3387 	case KVM_SET_MP_STATE: {
3388 		struct kvm_mp_state mp_state;
3389 
3390 		r = -EFAULT;
3391 		if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
3392 			goto out;
3393 		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
3394 		break;
3395 	}
3396 	case KVM_TRANSLATE: {
3397 		struct kvm_translation tr;
3398 
3399 		r = -EFAULT;
3400 		if (copy_from_user(&tr, argp, sizeof(tr)))
3401 			goto out;
3402 		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
3403 		if (r)
3404 			goto out;
3405 		r = -EFAULT;
3406 		if (copy_to_user(argp, &tr, sizeof(tr)))
3407 			goto out;
3408 		r = 0;
3409 		break;
3410 	}
3411 	case KVM_SET_GUEST_DEBUG: {
3412 		struct kvm_guest_debug dbg;
3413 
3414 		r = -EFAULT;
3415 		if (copy_from_user(&dbg, argp, sizeof(dbg)))
3416 			goto out;
3417 		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
3418 		break;
3419 	}
3420 	case KVM_SET_SIGNAL_MASK: {
3421 		struct kvm_signal_mask __user *sigmask_arg = argp;
3422 		struct kvm_signal_mask kvm_sigmask;
3423 		sigset_t sigset, *p;
3424 
3425 		p = NULL;
3426 		if (argp) {
3427 			r = -EFAULT;
3428 			if (copy_from_user(&kvm_sigmask, argp,
3429 					   sizeof(kvm_sigmask)))
3430 				goto out;
3431 			r = -EINVAL;
3432 			if (kvm_sigmask.len != sizeof(sigset))
3433 				goto out;
3434 			r = -EFAULT;
3435 			if (copy_from_user(&sigset, sigmask_arg->sigset,
3436 					   sizeof(sigset)))
3437 				goto out;
3438 			p = &sigset;
3439 		}
3440 		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
3441 		break;
3442 	}
3443 	case KVM_GET_FPU: {
3444 		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
3445 		r = -ENOMEM;
3446 		if (!fpu)
3447 			goto out;
3448 		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
3449 		if (r)
3450 			goto out;
3451 		r = -EFAULT;
3452 		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
3453 			goto out;
3454 		r = 0;
3455 		break;
3456 	}
3457 	case KVM_SET_FPU: {
3458 		fpu = memdup_user(argp, sizeof(*fpu));
3459 		if (IS_ERR(fpu)) {
3460 			r = PTR_ERR(fpu);
3461 			fpu = NULL;
3462 			goto out;
3463 		}
3464 		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
3465 		break;
3466 	}
3467 	default:
3468 		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
3469 	}
3470 out:
3471 	mutex_unlock(&vcpu->mutex);
3472 	kfree(fpu);
3473 	kfree(kvm_sregs);
3474 	return r;
3475 }
3476 
3477 #ifdef CONFIG_KVM_COMPAT
3478 static long kvm_vcpu_compat_ioctl(struct file *filp,
3479 				  unsigned int ioctl, unsigned long arg)
3480 {
3481 	struct kvm_vcpu *vcpu = filp->private_data;
3482 	void __user *argp = compat_ptr(arg);
3483 	int r;
3484 
3485 	if (vcpu->kvm->mm != current->mm)
3486 		return -EIO;
3487 
3488 	switch (ioctl) {
3489 	case KVM_SET_SIGNAL_MASK: {
3490 		struct kvm_signal_mask __user *sigmask_arg = argp;
3491 		struct kvm_signal_mask kvm_sigmask;
3492 		sigset_t sigset;
3493 
3494 		if (argp) {
3495 			r = -EFAULT;
3496 			if (copy_from_user(&kvm_sigmask, argp,
3497 					   sizeof(kvm_sigmask)))
3498 				goto out;
3499 			r = -EINVAL;
3500 			if (kvm_sigmask.len != sizeof(compat_sigset_t))
3501 				goto out;
3502 			r = -EFAULT;
3503 			if (get_compat_sigset(&sigset,
3504 					      (compat_sigset_t __user *)sigmask_arg->sigset))
3505 				goto out;
3506 			r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
3507 		} else
3508 			r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
3509 		break;
3510 	}
3511 	default:
3512 		r = kvm_vcpu_ioctl(filp, ioctl, arg);
3513 	}
3514 
3515 out:
3516 	return r;
3517 }
3518 #endif
3519 
3520 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
3521 {
3522 	struct kvm_device *dev = filp->private_data;
3523 
3524 	if (dev->ops->mmap)
3525 		return dev->ops->mmap(dev, vma);
3526 
3527 	return -ENODEV;
3528 }
3529 
3530 static int kvm_device_ioctl_attr(struct kvm_device *dev,
3531 				 int (*accessor)(struct kvm_device *dev,
3532 						 struct kvm_device_attr *attr),
3533 				 unsigned long arg)
3534 {
3535 	struct kvm_device_attr attr;
3536 
3537 	if (!accessor)
3538 		return -EPERM;
3539 
3540 	if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
3541 		return -EFAULT;
3542 
3543 	return accessor(dev, &attr);
3544 }
3545 
3546 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
3547 			     unsigned long arg)
3548 {
3549 	struct kvm_device *dev = filp->private_data;
3550 
3551 	if (dev->kvm->mm != current->mm)
3552 		return -EIO;
3553 
3554 	switch (ioctl) {
3555 	case KVM_SET_DEVICE_ATTR:
3556 		return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
3557 	case KVM_GET_DEVICE_ATTR:
3558 		return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
3559 	case KVM_HAS_DEVICE_ATTR:
3560 		return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
3561 	default:
3562 		if (dev->ops->ioctl)
3563 			return dev->ops->ioctl(dev, ioctl, arg);
3564 
3565 		return -ENOTTY;
3566 	}
3567 }
3568 
3569 static int kvm_device_release(struct inode *inode, struct file *filp)
3570 {
3571 	struct kvm_device *dev = filp->private_data;
3572 	struct kvm *kvm = dev->kvm;
3573 
3574 	if (dev->ops->release) {
3575 		mutex_lock(&kvm->lock);
3576 		list_del(&dev->vm_node);
3577 		dev->ops->release(dev);
3578 		mutex_unlock(&kvm->lock);
3579 	}
3580 
3581 	kvm_put_kvm(kvm);
3582 	return 0;
3583 }
3584 
3585 static const struct file_operations kvm_device_fops = {
3586 	.unlocked_ioctl = kvm_device_ioctl,
3587 	.release = kvm_device_release,
3588 	KVM_COMPAT(kvm_device_ioctl),
3589 	.mmap = kvm_device_mmap,
3590 };
3591 
3592 struct kvm_device *kvm_device_from_filp(struct file *filp)
3593 {
3594 	if (filp->f_op != &kvm_device_fops)
3595 		return NULL;
3596 
3597 	return filp->private_data;
3598 }
3599 
3600 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
3601 #ifdef CONFIG_KVM_MPIC
3602 	[KVM_DEV_TYPE_FSL_MPIC_20]	= &kvm_mpic_ops,
3603 	[KVM_DEV_TYPE_FSL_MPIC_42]	= &kvm_mpic_ops,
3604 #endif
3605 };
3606 
3607 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
3608 {
3609 	if (type >= ARRAY_SIZE(kvm_device_ops_table))
3610 		return -ENOSPC;
3611 
3612 	if (kvm_device_ops_table[type] != NULL)
3613 		return -EEXIST;
3614 
3615 	kvm_device_ops_table[type] = ops;
3616 	return 0;
3617 }
3618 
3619 void kvm_unregister_device_ops(u32 type)
3620 {
3621 	if (kvm_device_ops_table[type] != NULL)
3622 		kvm_device_ops_table[type] = NULL;
3623 }
3624 
3625 static int kvm_ioctl_create_device(struct kvm *kvm,
3626 				   struct kvm_create_device *cd)
3627 {
3628 	const struct kvm_device_ops *ops = NULL;
3629 	struct kvm_device *dev;
3630 	bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
3631 	int type;
3632 	int ret;
3633 
3634 	if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
3635 		return -ENODEV;
3636 
3637 	type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
3638 	ops = kvm_device_ops_table[type];
3639 	if (ops == NULL)
3640 		return -ENODEV;
3641 
3642 	if (test)
3643 		return 0;
3644 
3645 	dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
3646 	if (!dev)
3647 		return -ENOMEM;
3648 
3649 	dev->ops = ops;
3650 	dev->kvm = kvm;
3651 
3652 	mutex_lock(&kvm->lock);
3653 	ret = ops->create(dev, type);
3654 	if (ret < 0) {
3655 		mutex_unlock(&kvm->lock);
3656 		kfree(dev);
3657 		return ret;
3658 	}
3659 	list_add(&dev->vm_node, &kvm->devices);
3660 	mutex_unlock(&kvm->lock);
3661 
3662 	if (ops->init)
3663 		ops->init(dev);
3664 
3665 	kvm_get_kvm(kvm);
3666 	ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
3667 	if (ret < 0) {
3668 		kvm_put_kvm_no_destroy(kvm);
3669 		mutex_lock(&kvm->lock);
3670 		list_del(&dev->vm_node);
3671 		mutex_unlock(&kvm->lock);
3672 		ops->destroy(dev);
3673 		return ret;
3674 	}
3675 
3676 	cd->fd = ret;
3677 	return 0;
3678 }
3679 
3680 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
3681 {
3682 	switch (arg) {
3683 	case KVM_CAP_USER_MEMORY:
3684 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
3685 	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
3686 	case KVM_CAP_INTERNAL_ERROR_DATA:
3687 #ifdef CONFIG_HAVE_KVM_MSI
3688 	case KVM_CAP_SIGNAL_MSI:
3689 #endif
3690 #ifdef CONFIG_HAVE_KVM_IRQFD
3691 	case KVM_CAP_IRQFD:
3692 	case KVM_CAP_IRQFD_RESAMPLE:
3693 #endif
3694 	case KVM_CAP_IOEVENTFD_ANY_LENGTH:
3695 	case KVM_CAP_CHECK_EXTENSION_VM:
3696 	case KVM_CAP_ENABLE_CAP_VM:
3697 	case KVM_CAP_HALT_POLL:
3698 		return 1;
3699 #ifdef CONFIG_KVM_MMIO
3700 	case KVM_CAP_COALESCED_MMIO:
3701 		return KVM_COALESCED_MMIO_PAGE_OFFSET;
3702 	case KVM_CAP_COALESCED_PIO:
3703 		return 1;
3704 #endif
3705 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
3706 	case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
3707 		return KVM_DIRTY_LOG_MANUAL_CAPS;
3708 #endif
3709 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
3710 	case KVM_CAP_IRQ_ROUTING:
3711 		return KVM_MAX_IRQ_ROUTES;
3712 #endif
3713 #if KVM_ADDRESS_SPACE_NUM > 1
3714 	case KVM_CAP_MULTI_ADDRESS_SPACE:
3715 		return KVM_ADDRESS_SPACE_NUM;
3716 #endif
3717 	case KVM_CAP_NR_MEMSLOTS:
3718 		return KVM_USER_MEM_SLOTS;
3719 	case KVM_CAP_DIRTY_LOG_RING:
3720 #if KVM_DIRTY_LOG_PAGE_OFFSET > 0
3721 		return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
3722 #else
3723 		return 0;
3724 #endif
3725 	default:
3726 		break;
3727 	}
3728 	return kvm_vm_ioctl_check_extension(kvm, arg);
3729 }
3730 
3731 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
3732 {
3733 	int r;
3734 
3735 	if (!KVM_DIRTY_LOG_PAGE_OFFSET)
3736 		return -EINVAL;
3737 
3738 	/* the size should be power of 2 */
3739 	if (!size || (size & (size - 1)))
3740 		return -EINVAL;
3741 
3742 	/* Should be bigger to keep the reserved entries, or a page */
3743 	if (size < kvm_dirty_ring_get_rsvd_entries() *
3744 	    sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
3745 		return -EINVAL;
3746 
3747 	if (size > KVM_DIRTY_RING_MAX_ENTRIES *
3748 	    sizeof(struct kvm_dirty_gfn))
3749 		return -E2BIG;
3750 
3751 	/* We only allow it to set once */
3752 	if (kvm->dirty_ring_size)
3753 		return -EINVAL;
3754 
3755 	mutex_lock(&kvm->lock);
3756 
3757 	if (kvm->created_vcpus) {
3758 		/* We don't allow to change this value after vcpu created */
3759 		r = -EINVAL;
3760 	} else {
3761 		kvm->dirty_ring_size = size;
3762 		r = 0;
3763 	}
3764 
3765 	mutex_unlock(&kvm->lock);
3766 	return r;
3767 }
3768 
3769 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
3770 {
3771 	int i;
3772 	struct kvm_vcpu *vcpu;
3773 	int cleared = 0;
3774 
3775 	if (!kvm->dirty_ring_size)
3776 		return -EINVAL;
3777 
3778 	mutex_lock(&kvm->slots_lock);
3779 
3780 	kvm_for_each_vcpu(i, vcpu, kvm)
3781 		cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
3782 
3783 	mutex_unlock(&kvm->slots_lock);
3784 
3785 	if (cleared)
3786 		kvm_flush_remote_tlbs(kvm);
3787 
3788 	return cleared;
3789 }
3790 
3791 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
3792 						  struct kvm_enable_cap *cap)
3793 {
3794 	return -EINVAL;
3795 }
3796 
3797 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
3798 					   struct kvm_enable_cap *cap)
3799 {
3800 	switch (cap->cap) {
3801 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
3802 	case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
3803 		u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
3804 
3805 		if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
3806 			allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
3807 
3808 		if (cap->flags || (cap->args[0] & ~allowed_options))
3809 			return -EINVAL;
3810 		kvm->manual_dirty_log_protect = cap->args[0];
3811 		return 0;
3812 	}
3813 #endif
3814 	case KVM_CAP_HALT_POLL: {
3815 		if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
3816 			return -EINVAL;
3817 
3818 		kvm->max_halt_poll_ns = cap->args[0];
3819 		return 0;
3820 	}
3821 	case KVM_CAP_DIRTY_LOG_RING:
3822 		return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
3823 	default:
3824 		return kvm_vm_ioctl_enable_cap(kvm, cap);
3825 	}
3826 }
3827 
3828 static long kvm_vm_ioctl(struct file *filp,
3829 			   unsigned int ioctl, unsigned long arg)
3830 {
3831 	struct kvm *kvm = filp->private_data;
3832 	void __user *argp = (void __user *)arg;
3833 	int r;
3834 
3835 	if (kvm->mm != current->mm)
3836 		return -EIO;
3837 	switch (ioctl) {
3838 	case KVM_CREATE_VCPU:
3839 		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
3840 		break;
3841 	case KVM_ENABLE_CAP: {
3842 		struct kvm_enable_cap cap;
3843 
3844 		r = -EFAULT;
3845 		if (copy_from_user(&cap, argp, sizeof(cap)))
3846 			goto out;
3847 		r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
3848 		break;
3849 	}
3850 	case KVM_SET_USER_MEMORY_REGION: {
3851 		struct kvm_userspace_memory_region kvm_userspace_mem;
3852 
3853 		r = -EFAULT;
3854 		if (copy_from_user(&kvm_userspace_mem, argp,
3855 						sizeof(kvm_userspace_mem)))
3856 			goto out;
3857 
3858 		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
3859 		break;
3860 	}
3861 	case KVM_GET_DIRTY_LOG: {
3862 		struct kvm_dirty_log log;
3863 
3864 		r = -EFAULT;
3865 		if (copy_from_user(&log, argp, sizeof(log)))
3866 			goto out;
3867 		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
3868 		break;
3869 	}
3870 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
3871 	case KVM_CLEAR_DIRTY_LOG: {
3872 		struct kvm_clear_dirty_log log;
3873 
3874 		r = -EFAULT;
3875 		if (copy_from_user(&log, argp, sizeof(log)))
3876 			goto out;
3877 		r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
3878 		break;
3879 	}
3880 #endif
3881 #ifdef CONFIG_KVM_MMIO
3882 	case KVM_REGISTER_COALESCED_MMIO: {
3883 		struct kvm_coalesced_mmio_zone zone;
3884 
3885 		r = -EFAULT;
3886 		if (copy_from_user(&zone, argp, sizeof(zone)))
3887 			goto out;
3888 		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
3889 		break;
3890 	}
3891 	case KVM_UNREGISTER_COALESCED_MMIO: {
3892 		struct kvm_coalesced_mmio_zone zone;
3893 
3894 		r = -EFAULT;
3895 		if (copy_from_user(&zone, argp, sizeof(zone)))
3896 			goto out;
3897 		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
3898 		break;
3899 	}
3900 #endif
3901 	case KVM_IRQFD: {
3902 		struct kvm_irqfd data;
3903 
3904 		r = -EFAULT;
3905 		if (copy_from_user(&data, argp, sizeof(data)))
3906 			goto out;
3907 		r = kvm_irqfd(kvm, &data);
3908 		break;
3909 	}
3910 	case KVM_IOEVENTFD: {
3911 		struct kvm_ioeventfd data;
3912 
3913 		r = -EFAULT;
3914 		if (copy_from_user(&data, argp, sizeof(data)))
3915 			goto out;
3916 		r = kvm_ioeventfd(kvm, &data);
3917 		break;
3918 	}
3919 #ifdef CONFIG_HAVE_KVM_MSI
3920 	case KVM_SIGNAL_MSI: {
3921 		struct kvm_msi msi;
3922 
3923 		r = -EFAULT;
3924 		if (copy_from_user(&msi, argp, sizeof(msi)))
3925 			goto out;
3926 		r = kvm_send_userspace_msi(kvm, &msi);
3927 		break;
3928 	}
3929 #endif
3930 #ifdef __KVM_HAVE_IRQ_LINE
3931 	case KVM_IRQ_LINE_STATUS:
3932 	case KVM_IRQ_LINE: {
3933 		struct kvm_irq_level irq_event;
3934 
3935 		r = -EFAULT;
3936 		if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
3937 			goto out;
3938 
3939 		r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
3940 					ioctl == KVM_IRQ_LINE_STATUS);
3941 		if (r)
3942 			goto out;
3943 
3944 		r = -EFAULT;
3945 		if (ioctl == KVM_IRQ_LINE_STATUS) {
3946 			if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
3947 				goto out;
3948 		}
3949 
3950 		r = 0;
3951 		break;
3952 	}
3953 #endif
3954 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
3955 	case KVM_SET_GSI_ROUTING: {
3956 		struct kvm_irq_routing routing;
3957 		struct kvm_irq_routing __user *urouting;
3958 		struct kvm_irq_routing_entry *entries = NULL;
3959 
3960 		r = -EFAULT;
3961 		if (copy_from_user(&routing, argp, sizeof(routing)))
3962 			goto out;
3963 		r = -EINVAL;
3964 		if (!kvm_arch_can_set_irq_routing(kvm))
3965 			goto out;
3966 		if (routing.nr > KVM_MAX_IRQ_ROUTES)
3967 			goto out;
3968 		if (routing.flags)
3969 			goto out;
3970 		if (routing.nr) {
3971 			urouting = argp;
3972 			entries = vmemdup_user(urouting->entries,
3973 					       array_size(sizeof(*entries),
3974 							  routing.nr));
3975 			if (IS_ERR(entries)) {
3976 				r = PTR_ERR(entries);
3977 				goto out;
3978 			}
3979 		}
3980 		r = kvm_set_irq_routing(kvm, entries, routing.nr,
3981 					routing.flags);
3982 		kvfree(entries);
3983 		break;
3984 	}
3985 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
3986 	case KVM_CREATE_DEVICE: {
3987 		struct kvm_create_device cd;
3988 
3989 		r = -EFAULT;
3990 		if (copy_from_user(&cd, argp, sizeof(cd)))
3991 			goto out;
3992 
3993 		r = kvm_ioctl_create_device(kvm, &cd);
3994 		if (r)
3995 			goto out;
3996 
3997 		r = -EFAULT;
3998 		if (copy_to_user(argp, &cd, sizeof(cd)))
3999 			goto out;
4000 
4001 		r = 0;
4002 		break;
4003 	}
4004 	case KVM_CHECK_EXTENSION:
4005 		r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
4006 		break;
4007 	case KVM_RESET_DIRTY_RINGS:
4008 		r = kvm_vm_ioctl_reset_dirty_pages(kvm);
4009 		break;
4010 	default:
4011 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
4012 	}
4013 out:
4014 	return r;
4015 }
4016 
4017 #ifdef CONFIG_KVM_COMPAT
4018 struct compat_kvm_dirty_log {
4019 	__u32 slot;
4020 	__u32 padding1;
4021 	union {
4022 		compat_uptr_t dirty_bitmap; /* one bit per page */
4023 		__u64 padding2;
4024 	};
4025 };
4026 
4027 static long kvm_vm_compat_ioctl(struct file *filp,
4028 			   unsigned int ioctl, unsigned long arg)
4029 {
4030 	struct kvm *kvm = filp->private_data;
4031 	int r;
4032 
4033 	if (kvm->mm != current->mm)
4034 		return -EIO;
4035 	switch (ioctl) {
4036 	case KVM_GET_DIRTY_LOG: {
4037 		struct compat_kvm_dirty_log compat_log;
4038 		struct kvm_dirty_log log;
4039 
4040 		if (copy_from_user(&compat_log, (void __user *)arg,
4041 				   sizeof(compat_log)))
4042 			return -EFAULT;
4043 		log.slot	 = compat_log.slot;
4044 		log.padding1	 = compat_log.padding1;
4045 		log.padding2	 = compat_log.padding2;
4046 		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
4047 
4048 		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
4049 		break;
4050 	}
4051 	default:
4052 		r = kvm_vm_ioctl(filp, ioctl, arg);
4053 	}
4054 	return r;
4055 }
4056 #endif
4057 
4058 static struct file_operations kvm_vm_fops = {
4059 	.release        = kvm_vm_release,
4060 	.unlocked_ioctl = kvm_vm_ioctl,
4061 	.llseek		= noop_llseek,
4062 	KVM_COMPAT(kvm_vm_compat_ioctl),
4063 };
4064 
4065 static int kvm_dev_ioctl_create_vm(unsigned long type)
4066 {
4067 	int r;
4068 	struct kvm *kvm;
4069 	struct file *file;
4070 
4071 	kvm = kvm_create_vm(type);
4072 	if (IS_ERR(kvm))
4073 		return PTR_ERR(kvm);
4074 #ifdef CONFIG_KVM_MMIO
4075 	r = kvm_coalesced_mmio_init(kvm);
4076 	if (r < 0)
4077 		goto put_kvm;
4078 #endif
4079 	r = get_unused_fd_flags(O_CLOEXEC);
4080 	if (r < 0)
4081 		goto put_kvm;
4082 
4083 	file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
4084 	if (IS_ERR(file)) {
4085 		put_unused_fd(r);
4086 		r = PTR_ERR(file);
4087 		goto put_kvm;
4088 	}
4089 
4090 	/*
4091 	 * Don't call kvm_put_kvm anymore at this point; file->f_op is
4092 	 * already set, with ->release() being kvm_vm_release().  In error
4093 	 * cases it will be called by the final fput(file) and will take
4094 	 * care of doing kvm_put_kvm(kvm).
4095 	 */
4096 	if (kvm_create_vm_debugfs(kvm, r) < 0) {
4097 		put_unused_fd(r);
4098 		fput(file);
4099 		return -ENOMEM;
4100 	}
4101 	kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
4102 
4103 	fd_install(r, file);
4104 	return r;
4105 
4106 put_kvm:
4107 	kvm_put_kvm(kvm);
4108 	return r;
4109 }
4110 
4111 static long kvm_dev_ioctl(struct file *filp,
4112 			  unsigned int ioctl, unsigned long arg)
4113 {
4114 	long r = -EINVAL;
4115 
4116 	switch (ioctl) {
4117 	case KVM_GET_API_VERSION:
4118 		if (arg)
4119 			goto out;
4120 		r = KVM_API_VERSION;
4121 		break;
4122 	case KVM_CREATE_VM:
4123 		r = kvm_dev_ioctl_create_vm(arg);
4124 		break;
4125 	case KVM_CHECK_EXTENSION:
4126 		r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
4127 		break;
4128 	case KVM_GET_VCPU_MMAP_SIZE:
4129 		if (arg)
4130 			goto out;
4131 		r = PAGE_SIZE;     /* struct kvm_run */
4132 #ifdef CONFIG_X86
4133 		r += PAGE_SIZE;    /* pio data page */
4134 #endif
4135 #ifdef CONFIG_KVM_MMIO
4136 		r += PAGE_SIZE;    /* coalesced mmio ring page */
4137 #endif
4138 		break;
4139 	case KVM_TRACE_ENABLE:
4140 	case KVM_TRACE_PAUSE:
4141 	case KVM_TRACE_DISABLE:
4142 		r = -EOPNOTSUPP;
4143 		break;
4144 	default:
4145 		return kvm_arch_dev_ioctl(filp, ioctl, arg);
4146 	}
4147 out:
4148 	return r;
4149 }
4150 
4151 static struct file_operations kvm_chardev_ops = {
4152 	.unlocked_ioctl = kvm_dev_ioctl,
4153 	.llseek		= noop_llseek,
4154 	KVM_COMPAT(kvm_dev_ioctl),
4155 };
4156 
4157 static struct miscdevice kvm_dev = {
4158 	KVM_MINOR,
4159 	"kvm",
4160 	&kvm_chardev_ops,
4161 };
4162 
4163 static void hardware_enable_nolock(void *junk)
4164 {
4165 	int cpu = raw_smp_processor_id();
4166 	int r;
4167 
4168 	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
4169 		return;
4170 
4171 	cpumask_set_cpu(cpu, cpus_hardware_enabled);
4172 
4173 	r = kvm_arch_hardware_enable();
4174 
4175 	if (r) {
4176 		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
4177 		atomic_inc(&hardware_enable_failed);
4178 		pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
4179 	}
4180 }
4181 
4182 static int kvm_starting_cpu(unsigned int cpu)
4183 {
4184 	raw_spin_lock(&kvm_count_lock);
4185 	if (kvm_usage_count)
4186 		hardware_enable_nolock(NULL);
4187 	raw_spin_unlock(&kvm_count_lock);
4188 	return 0;
4189 }
4190 
4191 static void hardware_disable_nolock(void *junk)
4192 {
4193 	int cpu = raw_smp_processor_id();
4194 
4195 	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
4196 		return;
4197 	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
4198 	kvm_arch_hardware_disable();
4199 }
4200 
4201 static int kvm_dying_cpu(unsigned int cpu)
4202 {
4203 	raw_spin_lock(&kvm_count_lock);
4204 	if (kvm_usage_count)
4205 		hardware_disable_nolock(NULL);
4206 	raw_spin_unlock(&kvm_count_lock);
4207 	return 0;
4208 }
4209 
4210 static void hardware_disable_all_nolock(void)
4211 {
4212 	BUG_ON(!kvm_usage_count);
4213 
4214 	kvm_usage_count--;
4215 	if (!kvm_usage_count)
4216 		on_each_cpu(hardware_disable_nolock, NULL, 1);
4217 }
4218 
4219 static void hardware_disable_all(void)
4220 {
4221 	raw_spin_lock(&kvm_count_lock);
4222 	hardware_disable_all_nolock();
4223 	raw_spin_unlock(&kvm_count_lock);
4224 }
4225 
4226 static int hardware_enable_all(void)
4227 {
4228 	int r = 0;
4229 
4230 	raw_spin_lock(&kvm_count_lock);
4231 
4232 	kvm_usage_count++;
4233 	if (kvm_usage_count == 1) {
4234 		atomic_set(&hardware_enable_failed, 0);
4235 		on_each_cpu(hardware_enable_nolock, NULL, 1);
4236 
4237 		if (atomic_read(&hardware_enable_failed)) {
4238 			hardware_disable_all_nolock();
4239 			r = -EBUSY;
4240 		}
4241 	}
4242 
4243 	raw_spin_unlock(&kvm_count_lock);
4244 
4245 	return r;
4246 }
4247 
4248 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
4249 		      void *v)
4250 {
4251 	/*
4252 	 * Some (well, at least mine) BIOSes hang on reboot if
4253 	 * in vmx root mode.
4254 	 *
4255 	 * And Intel TXT required VMX off for all cpu when system shutdown.
4256 	 */
4257 	pr_info("kvm: exiting hardware virtualization\n");
4258 	kvm_rebooting = true;
4259 	on_each_cpu(hardware_disable_nolock, NULL, 1);
4260 	return NOTIFY_OK;
4261 }
4262 
4263 static struct notifier_block kvm_reboot_notifier = {
4264 	.notifier_call = kvm_reboot,
4265 	.priority = 0,
4266 };
4267 
4268 static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
4269 {
4270 	int i;
4271 
4272 	for (i = 0; i < bus->dev_count; i++) {
4273 		struct kvm_io_device *pos = bus->range[i].dev;
4274 
4275 		kvm_iodevice_destructor(pos);
4276 	}
4277 	kfree(bus);
4278 }
4279 
4280 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
4281 				 const struct kvm_io_range *r2)
4282 {
4283 	gpa_t addr1 = r1->addr;
4284 	gpa_t addr2 = r2->addr;
4285 
4286 	if (addr1 < addr2)
4287 		return -1;
4288 
4289 	/* If r2->len == 0, match the exact address.  If r2->len != 0,
4290 	 * accept any overlapping write.  Any order is acceptable for
4291 	 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
4292 	 * we process all of them.
4293 	 */
4294 	if (r2->len) {
4295 		addr1 += r1->len;
4296 		addr2 += r2->len;
4297 	}
4298 
4299 	if (addr1 > addr2)
4300 		return 1;
4301 
4302 	return 0;
4303 }
4304 
4305 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
4306 {
4307 	return kvm_io_bus_cmp(p1, p2);
4308 }
4309 
4310 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
4311 			     gpa_t addr, int len)
4312 {
4313 	struct kvm_io_range *range, key;
4314 	int off;
4315 
4316 	key = (struct kvm_io_range) {
4317 		.addr = addr,
4318 		.len = len,
4319 	};
4320 
4321 	range = bsearch(&key, bus->range, bus->dev_count,
4322 			sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
4323 	if (range == NULL)
4324 		return -ENOENT;
4325 
4326 	off = range - bus->range;
4327 
4328 	while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
4329 		off--;
4330 
4331 	return off;
4332 }
4333 
4334 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
4335 			      struct kvm_io_range *range, const void *val)
4336 {
4337 	int idx;
4338 
4339 	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
4340 	if (idx < 0)
4341 		return -EOPNOTSUPP;
4342 
4343 	while (idx < bus->dev_count &&
4344 		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
4345 		if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
4346 					range->len, val))
4347 			return idx;
4348 		idx++;
4349 	}
4350 
4351 	return -EOPNOTSUPP;
4352 }
4353 
4354 /* kvm_io_bus_write - called under kvm->slots_lock */
4355 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
4356 		     int len, const void *val)
4357 {
4358 	struct kvm_io_bus *bus;
4359 	struct kvm_io_range range;
4360 	int r;
4361 
4362 	range = (struct kvm_io_range) {
4363 		.addr = addr,
4364 		.len = len,
4365 	};
4366 
4367 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
4368 	if (!bus)
4369 		return -ENOMEM;
4370 	r = __kvm_io_bus_write(vcpu, bus, &range, val);
4371 	return r < 0 ? r : 0;
4372 }
4373 EXPORT_SYMBOL_GPL(kvm_io_bus_write);
4374 
4375 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */
4376 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
4377 			    gpa_t addr, int len, const void *val, long cookie)
4378 {
4379 	struct kvm_io_bus *bus;
4380 	struct kvm_io_range range;
4381 
4382 	range = (struct kvm_io_range) {
4383 		.addr = addr,
4384 		.len = len,
4385 	};
4386 
4387 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
4388 	if (!bus)
4389 		return -ENOMEM;
4390 
4391 	/* First try the device referenced by cookie. */
4392 	if ((cookie >= 0) && (cookie < bus->dev_count) &&
4393 	    (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
4394 		if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
4395 					val))
4396 			return cookie;
4397 
4398 	/*
4399 	 * cookie contained garbage; fall back to search and return the
4400 	 * correct cookie value.
4401 	 */
4402 	return __kvm_io_bus_write(vcpu, bus, &range, val);
4403 }
4404 
4405 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
4406 			     struct kvm_io_range *range, void *val)
4407 {
4408 	int idx;
4409 
4410 	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
4411 	if (idx < 0)
4412 		return -EOPNOTSUPP;
4413 
4414 	while (idx < bus->dev_count &&
4415 		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
4416 		if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
4417 				       range->len, val))
4418 			return idx;
4419 		idx++;
4420 	}
4421 
4422 	return -EOPNOTSUPP;
4423 }
4424 
4425 /* kvm_io_bus_read - called under kvm->slots_lock */
4426 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
4427 		    int len, void *val)
4428 {
4429 	struct kvm_io_bus *bus;
4430 	struct kvm_io_range range;
4431 	int r;
4432 
4433 	range = (struct kvm_io_range) {
4434 		.addr = addr,
4435 		.len = len,
4436 	};
4437 
4438 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
4439 	if (!bus)
4440 		return -ENOMEM;
4441 	r = __kvm_io_bus_read(vcpu, bus, &range, val);
4442 	return r < 0 ? r : 0;
4443 }
4444 
4445 /* Caller must hold slots_lock. */
4446 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
4447 			    int len, struct kvm_io_device *dev)
4448 {
4449 	int i;
4450 	struct kvm_io_bus *new_bus, *bus;
4451 	struct kvm_io_range range;
4452 
4453 	bus = kvm_get_bus(kvm, bus_idx);
4454 	if (!bus)
4455 		return -ENOMEM;
4456 
4457 	/* exclude ioeventfd which is limited by maximum fd */
4458 	if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
4459 		return -ENOSPC;
4460 
4461 	new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
4462 			  GFP_KERNEL_ACCOUNT);
4463 	if (!new_bus)
4464 		return -ENOMEM;
4465 
4466 	range = (struct kvm_io_range) {
4467 		.addr = addr,
4468 		.len = len,
4469 		.dev = dev,
4470 	};
4471 
4472 	for (i = 0; i < bus->dev_count; i++)
4473 		if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
4474 			break;
4475 
4476 	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
4477 	new_bus->dev_count++;
4478 	new_bus->range[i] = range;
4479 	memcpy(new_bus->range + i + 1, bus->range + i,
4480 		(bus->dev_count - i) * sizeof(struct kvm_io_range));
4481 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
4482 	synchronize_srcu_expedited(&kvm->srcu);
4483 	kfree(bus);
4484 
4485 	return 0;
4486 }
4487 
4488 /* Caller must hold slots_lock. */
4489 void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
4490 			       struct kvm_io_device *dev)
4491 {
4492 	int i, j;
4493 	struct kvm_io_bus *new_bus, *bus;
4494 
4495 	bus = kvm_get_bus(kvm, bus_idx);
4496 	if (!bus)
4497 		return;
4498 
4499 	for (i = 0; i < bus->dev_count; i++)
4500 		if (bus->range[i].dev == dev) {
4501 			break;
4502 		}
4503 
4504 	if (i == bus->dev_count)
4505 		return;
4506 
4507 	new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
4508 			  GFP_KERNEL_ACCOUNT);
4509 	if (new_bus) {
4510 		memcpy(new_bus, bus, struct_size(bus, range, i));
4511 		new_bus->dev_count--;
4512 		memcpy(new_bus->range + i, bus->range + i + 1,
4513 				flex_array_size(new_bus, range, new_bus->dev_count - i));
4514 	} else {
4515 		pr_err("kvm: failed to shrink bus, removing it completely\n");
4516 		for (j = 0; j < bus->dev_count; j++) {
4517 			if (j == i)
4518 				continue;
4519 			kvm_iodevice_destructor(bus->range[j].dev);
4520 		}
4521 	}
4522 
4523 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
4524 	synchronize_srcu_expedited(&kvm->srcu);
4525 	kfree(bus);
4526 	return;
4527 }
4528 
4529 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
4530 					 gpa_t addr)
4531 {
4532 	struct kvm_io_bus *bus;
4533 	int dev_idx, srcu_idx;
4534 	struct kvm_io_device *iodev = NULL;
4535 
4536 	srcu_idx = srcu_read_lock(&kvm->srcu);
4537 
4538 	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
4539 	if (!bus)
4540 		goto out_unlock;
4541 
4542 	dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
4543 	if (dev_idx < 0)
4544 		goto out_unlock;
4545 
4546 	iodev = bus->range[dev_idx].dev;
4547 
4548 out_unlock:
4549 	srcu_read_unlock(&kvm->srcu, srcu_idx);
4550 
4551 	return iodev;
4552 }
4553 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
4554 
4555 static int kvm_debugfs_open(struct inode *inode, struct file *file,
4556 			   int (*get)(void *, u64 *), int (*set)(void *, u64),
4557 			   const char *fmt)
4558 {
4559 	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
4560 					  inode->i_private;
4561 
4562 	/* The debugfs files are a reference to the kvm struct which
4563 	 * is still valid when kvm_destroy_vm is called.
4564 	 * To avoid the race between open and the removal of the debugfs
4565 	 * directory we test against the users count.
4566 	 */
4567 	if (!refcount_inc_not_zero(&stat_data->kvm->users_count))
4568 		return -ENOENT;
4569 
4570 	if (simple_attr_open(inode, file, get,
4571 		    KVM_DBGFS_GET_MODE(stat_data->dbgfs_item) & 0222
4572 		    ? set : NULL,
4573 		    fmt)) {
4574 		kvm_put_kvm(stat_data->kvm);
4575 		return -ENOMEM;
4576 	}
4577 
4578 	return 0;
4579 }
4580 
4581 static int kvm_debugfs_release(struct inode *inode, struct file *file)
4582 {
4583 	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
4584 					  inode->i_private;
4585 
4586 	simple_attr_release(inode, file);
4587 	kvm_put_kvm(stat_data->kvm);
4588 
4589 	return 0;
4590 }
4591 
4592 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
4593 {
4594 	*val = *(ulong *)((void *)kvm + offset);
4595 
4596 	return 0;
4597 }
4598 
4599 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
4600 {
4601 	*(ulong *)((void *)kvm + offset) = 0;
4602 
4603 	return 0;
4604 }
4605 
4606 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
4607 {
4608 	int i;
4609 	struct kvm_vcpu *vcpu;
4610 
4611 	*val = 0;
4612 
4613 	kvm_for_each_vcpu(i, vcpu, kvm)
4614 		*val += *(u64 *)((void *)vcpu + offset);
4615 
4616 	return 0;
4617 }
4618 
4619 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
4620 {
4621 	int i;
4622 	struct kvm_vcpu *vcpu;
4623 
4624 	kvm_for_each_vcpu(i, vcpu, kvm)
4625 		*(u64 *)((void *)vcpu + offset) = 0;
4626 
4627 	return 0;
4628 }
4629 
4630 static int kvm_stat_data_get(void *data, u64 *val)
4631 {
4632 	int r = -EFAULT;
4633 	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
4634 
4635 	switch (stat_data->dbgfs_item->kind) {
4636 	case KVM_STAT_VM:
4637 		r = kvm_get_stat_per_vm(stat_data->kvm,
4638 					stat_data->dbgfs_item->offset, val);
4639 		break;
4640 	case KVM_STAT_VCPU:
4641 		r = kvm_get_stat_per_vcpu(stat_data->kvm,
4642 					  stat_data->dbgfs_item->offset, val);
4643 		break;
4644 	}
4645 
4646 	return r;
4647 }
4648 
4649 static int kvm_stat_data_clear(void *data, u64 val)
4650 {
4651 	int r = -EFAULT;
4652 	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
4653 
4654 	if (val)
4655 		return -EINVAL;
4656 
4657 	switch (stat_data->dbgfs_item->kind) {
4658 	case KVM_STAT_VM:
4659 		r = kvm_clear_stat_per_vm(stat_data->kvm,
4660 					  stat_data->dbgfs_item->offset);
4661 		break;
4662 	case KVM_STAT_VCPU:
4663 		r = kvm_clear_stat_per_vcpu(stat_data->kvm,
4664 					    stat_data->dbgfs_item->offset);
4665 		break;
4666 	}
4667 
4668 	return r;
4669 }
4670 
4671 static int kvm_stat_data_open(struct inode *inode, struct file *file)
4672 {
4673 	__simple_attr_check_format("%llu\n", 0ull);
4674 	return kvm_debugfs_open(inode, file, kvm_stat_data_get,
4675 				kvm_stat_data_clear, "%llu\n");
4676 }
4677 
4678 static const struct file_operations stat_fops_per_vm = {
4679 	.owner = THIS_MODULE,
4680 	.open = kvm_stat_data_open,
4681 	.release = kvm_debugfs_release,
4682 	.read = simple_attr_read,
4683 	.write = simple_attr_write,
4684 	.llseek = no_llseek,
4685 };
4686 
4687 static int vm_stat_get(void *_offset, u64 *val)
4688 {
4689 	unsigned offset = (long)_offset;
4690 	struct kvm *kvm;
4691 	u64 tmp_val;
4692 
4693 	*val = 0;
4694 	mutex_lock(&kvm_lock);
4695 	list_for_each_entry(kvm, &vm_list, vm_list) {
4696 		kvm_get_stat_per_vm(kvm, offset, &tmp_val);
4697 		*val += tmp_val;
4698 	}
4699 	mutex_unlock(&kvm_lock);
4700 	return 0;
4701 }
4702 
4703 static int vm_stat_clear(void *_offset, u64 val)
4704 {
4705 	unsigned offset = (long)_offset;
4706 	struct kvm *kvm;
4707 
4708 	if (val)
4709 		return -EINVAL;
4710 
4711 	mutex_lock(&kvm_lock);
4712 	list_for_each_entry(kvm, &vm_list, vm_list) {
4713 		kvm_clear_stat_per_vm(kvm, offset);
4714 	}
4715 	mutex_unlock(&kvm_lock);
4716 
4717 	return 0;
4718 }
4719 
4720 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
4721 
4722 static int vcpu_stat_get(void *_offset, u64 *val)
4723 {
4724 	unsigned offset = (long)_offset;
4725 	struct kvm *kvm;
4726 	u64 tmp_val;
4727 
4728 	*val = 0;
4729 	mutex_lock(&kvm_lock);
4730 	list_for_each_entry(kvm, &vm_list, vm_list) {
4731 		kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
4732 		*val += tmp_val;
4733 	}
4734 	mutex_unlock(&kvm_lock);
4735 	return 0;
4736 }
4737 
4738 static int vcpu_stat_clear(void *_offset, u64 val)
4739 {
4740 	unsigned offset = (long)_offset;
4741 	struct kvm *kvm;
4742 
4743 	if (val)
4744 		return -EINVAL;
4745 
4746 	mutex_lock(&kvm_lock);
4747 	list_for_each_entry(kvm, &vm_list, vm_list) {
4748 		kvm_clear_stat_per_vcpu(kvm, offset);
4749 	}
4750 	mutex_unlock(&kvm_lock);
4751 
4752 	return 0;
4753 }
4754 
4755 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
4756 			"%llu\n");
4757 
4758 static const struct file_operations *stat_fops[] = {
4759 	[KVM_STAT_VCPU] = &vcpu_stat_fops,
4760 	[KVM_STAT_VM]   = &vm_stat_fops,
4761 };
4762 
4763 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
4764 {
4765 	struct kobj_uevent_env *env;
4766 	unsigned long long created, active;
4767 
4768 	if (!kvm_dev.this_device || !kvm)
4769 		return;
4770 
4771 	mutex_lock(&kvm_lock);
4772 	if (type == KVM_EVENT_CREATE_VM) {
4773 		kvm_createvm_count++;
4774 		kvm_active_vms++;
4775 	} else if (type == KVM_EVENT_DESTROY_VM) {
4776 		kvm_active_vms--;
4777 	}
4778 	created = kvm_createvm_count;
4779 	active = kvm_active_vms;
4780 	mutex_unlock(&kvm_lock);
4781 
4782 	env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
4783 	if (!env)
4784 		return;
4785 
4786 	add_uevent_var(env, "CREATED=%llu", created);
4787 	add_uevent_var(env, "COUNT=%llu", active);
4788 
4789 	if (type == KVM_EVENT_CREATE_VM) {
4790 		add_uevent_var(env, "EVENT=create");
4791 		kvm->userspace_pid = task_pid_nr(current);
4792 	} else if (type == KVM_EVENT_DESTROY_VM) {
4793 		add_uevent_var(env, "EVENT=destroy");
4794 	}
4795 	add_uevent_var(env, "PID=%d", kvm->userspace_pid);
4796 
4797 	if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) {
4798 		char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
4799 
4800 		if (p) {
4801 			tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
4802 			if (!IS_ERR(tmp))
4803 				add_uevent_var(env, "STATS_PATH=%s", tmp);
4804 			kfree(p);
4805 		}
4806 	}
4807 	/* no need for checks, since we are adding at most only 5 keys */
4808 	env->envp[env->envp_idx++] = NULL;
4809 	kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
4810 	kfree(env);
4811 }
4812 
4813 static void kvm_init_debug(void)
4814 {
4815 	struct kvm_stats_debugfs_item *p;
4816 
4817 	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
4818 
4819 	kvm_debugfs_num_entries = 0;
4820 	for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
4821 		debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p),
4822 				    kvm_debugfs_dir, (void *)(long)p->offset,
4823 				    stat_fops[p->kind]);
4824 	}
4825 }
4826 
4827 static int kvm_suspend(void)
4828 {
4829 	if (kvm_usage_count)
4830 		hardware_disable_nolock(NULL);
4831 	return 0;
4832 }
4833 
4834 static void kvm_resume(void)
4835 {
4836 	if (kvm_usage_count) {
4837 #ifdef CONFIG_LOCKDEP
4838 		WARN_ON(lockdep_is_held(&kvm_count_lock));
4839 #endif
4840 		hardware_enable_nolock(NULL);
4841 	}
4842 }
4843 
4844 static struct syscore_ops kvm_syscore_ops = {
4845 	.suspend = kvm_suspend,
4846 	.resume = kvm_resume,
4847 };
4848 
4849 static inline
4850 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
4851 {
4852 	return container_of(pn, struct kvm_vcpu, preempt_notifier);
4853 }
4854 
4855 static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
4856 {
4857 	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
4858 
4859 	WRITE_ONCE(vcpu->preempted, false);
4860 	WRITE_ONCE(vcpu->ready, false);
4861 
4862 	__this_cpu_write(kvm_running_vcpu, vcpu);
4863 	kvm_arch_sched_in(vcpu, cpu);
4864 	kvm_arch_vcpu_load(vcpu, cpu);
4865 }
4866 
4867 static void kvm_sched_out(struct preempt_notifier *pn,
4868 			  struct task_struct *next)
4869 {
4870 	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
4871 
4872 	if (current->state == TASK_RUNNING) {
4873 		WRITE_ONCE(vcpu->preempted, true);
4874 		WRITE_ONCE(vcpu->ready, true);
4875 	}
4876 	kvm_arch_vcpu_put(vcpu);
4877 	__this_cpu_write(kvm_running_vcpu, NULL);
4878 }
4879 
4880 /**
4881  * kvm_get_running_vcpu - get the vcpu running on the current CPU.
4882  *
4883  * We can disable preemption locally around accessing the per-CPU variable,
4884  * and use the resolved vcpu pointer after enabling preemption again,
4885  * because even if the current thread is migrated to another CPU, reading
4886  * the per-CPU value later will give us the same value as we update the
4887  * per-CPU variable in the preempt notifier handlers.
4888  */
4889 struct kvm_vcpu *kvm_get_running_vcpu(void)
4890 {
4891 	struct kvm_vcpu *vcpu;
4892 
4893 	preempt_disable();
4894 	vcpu = __this_cpu_read(kvm_running_vcpu);
4895 	preempt_enable();
4896 
4897 	return vcpu;
4898 }
4899 EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);
4900 
4901 /**
4902  * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
4903  */
4904 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
4905 {
4906         return &kvm_running_vcpu;
4907 }
4908 
4909 struct kvm_cpu_compat_check {
4910 	void *opaque;
4911 	int *ret;
4912 };
4913 
4914 static void check_processor_compat(void *data)
4915 {
4916 	struct kvm_cpu_compat_check *c = data;
4917 
4918 	*c->ret = kvm_arch_check_processor_compat(c->opaque);
4919 }
4920 
4921 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
4922 		  struct module *module)
4923 {
4924 	struct kvm_cpu_compat_check c;
4925 	int r;
4926 	int cpu;
4927 
4928 	r = kvm_arch_init(opaque);
4929 	if (r)
4930 		goto out_fail;
4931 
4932 	/*
4933 	 * kvm_arch_init makes sure there's at most one caller
4934 	 * for architectures that support multiple implementations,
4935 	 * like intel and amd on x86.
4936 	 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
4937 	 * conflicts in case kvm is already setup for another implementation.
4938 	 */
4939 	r = kvm_irqfd_init();
4940 	if (r)
4941 		goto out_irqfd;
4942 
4943 	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
4944 		r = -ENOMEM;
4945 		goto out_free_0;
4946 	}
4947 
4948 	r = kvm_arch_hardware_setup(opaque);
4949 	if (r < 0)
4950 		goto out_free_1;
4951 
4952 	c.ret = &r;
4953 	c.opaque = opaque;
4954 	for_each_online_cpu(cpu) {
4955 		smp_call_function_single(cpu, check_processor_compat, &c, 1);
4956 		if (r < 0)
4957 			goto out_free_2;
4958 	}
4959 
4960 	r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting",
4961 				      kvm_starting_cpu, kvm_dying_cpu);
4962 	if (r)
4963 		goto out_free_2;
4964 	register_reboot_notifier(&kvm_reboot_notifier);
4965 
4966 	/* A kmem cache lets us meet the alignment requirements of fx_save. */
4967 	if (!vcpu_align)
4968 		vcpu_align = __alignof__(struct kvm_vcpu);
4969 	kvm_vcpu_cache =
4970 		kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
4971 					   SLAB_ACCOUNT,
4972 					   offsetof(struct kvm_vcpu, arch),
4973 					   sizeof_field(struct kvm_vcpu, arch),
4974 					   NULL);
4975 	if (!kvm_vcpu_cache) {
4976 		r = -ENOMEM;
4977 		goto out_free_3;
4978 	}
4979 
4980 	r = kvm_async_pf_init();
4981 	if (r)
4982 		goto out_free;
4983 
4984 	kvm_chardev_ops.owner = module;
4985 	kvm_vm_fops.owner = module;
4986 	kvm_vcpu_fops.owner = module;
4987 
4988 	r = misc_register(&kvm_dev);
4989 	if (r) {
4990 		pr_err("kvm: misc device register failed\n");
4991 		goto out_unreg;
4992 	}
4993 
4994 	register_syscore_ops(&kvm_syscore_ops);
4995 
4996 	kvm_preempt_ops.sched_in = kvm_sched_in;
4997 	kvm_preempt_ops.sched_out = kvm_sched_out;
4998 
4999 	kvm_init_debug();
5000 
5001 	r = kvm_vfio_ops_init();
5002 	WARN_ON(r);
5003 
5004 	return 0;
5005 
5006 out_unreg:
5007 	kvm_async_pf_deinit();
5008 out_free:
5009 	kmem_cache_destroy(kvm_vcpu_cache);
5010 out_free_3:
5011 	unregister_reboot_notifier(&kvm_reboot_notifier);
5012 	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
5013 out_free_2:
5014 	kvm_arch_hardware_unsetup();
5015 out_free_1:
5016 	free_cpumask_var(cpus_hardware_enabled);
5017 out_free_0:
5018 	kvm_irqfd_exit();
5019 out_irqfd:
5020 	kvm_arch_exit();
5021 out_fail:
5022 	return r;
5023 }
5024 EXPORT_SYMBOL_GPL(kvm_init);
5025 
5026 void kvm_exit(void)
5027 {
5028 	debugfs_remove_recursive(kvm_debugfs_dir);
5029 	misc_deregister(&kvm_dev);
5030 	kmem_cache_destroy(kvm_vcpu_cache);
5031 	kvm_async_pf_deinit();
5032 	unregister_syscore_ops(&kvm_syscore_ops);
5033 	unregister_reboot_notifier(&kvm_reboot_notifier);
5034 	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
5035 	on_each_cpu(hardware_disable_nolock, NULL, 1);
5036 	kvm_arch_hardware_unsetup();
5037 	kvm_arch_exit();
5038 	kvm_irqfd_exit();
5039 	free_cpumask_var(cpus_hardware_enabled);
5040 	kvm_vfio_ops_exit();
5041 }
5042 EXPORT_SYMBOL_GPL(kvm_exit);
5043 
5044 struct kvm_vm_worker_thread_context {
5045 	struct kvm *kvm;
5046 	struct task_struct *parent;
5047 	struct completion init_done;
5048 	kvm_vm_thread_fn_t thread_fn;
5049 	uintptr_t data;
5050 	int err;
5051 };
5052 
5053 static int kvm_vm_worker_thread(void *context)
5054 {
5055 	/*
5056 	 * The init_context is allocated on the stack of the parent thread, so
5057 	 * we have to locally copy anything that is needed beyond initialization
5058 	 */
5059 	struct kvm_vm_worker_thread_context *init_context = context;
5060 	struct kvm *kvm = init_context->kvm;
5061 	kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
5062 	uintptr_t data = init_context->data;
5063 	int err;
5064 
5065 	err = kthread_park(current);
5066 	/* kthread_park(current) is never supposed to return an error */
5067 	WARN_ON(err != 0);
5068 	if (err)
5069 		goto init_complete;
5070 
5071 	err = cgroup_attach_task_all(init_context->parent, current);
5072 	if (err) {
5073 		kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
5074 			__func__, err);
5075 		goto init_complete;
5076 	}
5077 
5078 	set_user_nice(current, task_nice(init_context->parent));
5079 
5080 init_complete:
5081 	init_context->err = err;
5082 	complete(&init_context->init_done);
5083 	init_context = NULL;
5084 
5085 	if (err)
5086 		return err;
5087 
5088 	/* Wait to be woken up by the spawner before proceeding. */
5089 	kthread_parkme();
5090 
5091 	if (!kthread_should_stop())
5092 		err = thread_fn(kvm, data);
5093 
5094 	return err;
5095 }
5096 
5097 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
5098 				uintptr_t data, const char *name,
5099 				struct task_struct **thread_ptr)
5100 {
5101 	struct kvm_vm_worker_thread_context init_context = {};
5102 	struct task_struct *thread;
5103 
5104 	*thread_ptr = NULL;
5105 	init_context.kvm = kvm;
5106 	init_context.parent = current;
5107 	init_context.thread_fn = thread_fn;
5108 	init_context.data = data;
5109 	init_completion(&init_context.init_done);
5110 
5111 	thread = kthread_run(kvm_vm_worker_thread, &init_context,
5112 			     "%s-%d", name, task_pid_nr(current));
5113 	if (IS_ERR(thread))
5114 		return PTR_ERR(thread);
5115 
5116 	/* kthread_run is never supposed to return NULL */
5117 	WARN_ON(thread == NULL);
5118 
5119 	wait_for_completion(&init_context.init_done);
5120 
5121 	if (!init_context.err)
5122 		*thread_ptr = thread;
5123 
5124 	return init_context.err;
5125 }
5126